From a5146c7ef576756661b85bfa9f56f43e09409d43 Mon Sep 17 00:00:00 2001
From: Shanshan
Date: Wed, 16 Jul 2025 10:54:26 +0800
Subject: [PATCH 1/2] chore: add translation script to translate blogs and docs

---
 README.md | 13 +-
 package.json | 6 +-
 scripts/python/glossary.json | 86 +
 scripts/python/requirements.txt | 8 +
 scripts/python/transalate_mdx.py | 1076 +++++++
 scripts/translate_with_exclude.sh | 94 +
 src/app/[locale]/ElevationScrollAppBar.tsx | 4 +-
 src/app/[locale]/blog/[name]/layout.tsx | 2 +-
 src/app/[locale]/blog/layout.tsx | 2 +-
 .../[category]/[[...paths]]/layout.tsx | 2 +-
 .../docs/[version]/[category]/version.tsx | 2 +-
 src/app/[locale]/layout.tsx | 2 +-
 src/app/[locale]/locale-switch.tsx | 3 +-
 src/components/I18nProvider.tsx | 2 +-
 src/components/SidebarMenu.tsx | 2 +-
 src/locales/client.ts | 1 +
 src/locales/server.ts | 1 +
 src/middleware.ts | 2 +-
 src/utils/markdown.tsx | 17 +-
 yarn.lock | 2576 +++++++++--------
 20 files changed, 2677 insertions(+), 1224 deletions(-)
 create mode 100644 scripts/python/glossary.json
 create mode 100644 scripts/python/requirements.txt
 create mode 100644 scripts/python/transalate_mdx.py
 create mode 100755 scripts/translate_with_exclude.sh

diff --git a/README.md b/README.md
index f4500e9c..bb37c4cc 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,9 @@ kubeblocks-docs
 │ └─en
 │ │ │ blog_1.mdx
 │ │ │ blog_2.mdx
-│ │ │ ...
+│ └─zh
+│ │ blog_1.mdx
+│ │ ...
 │
 └─docs
 │ └─ en
@@ -40,10 +42,15 @@
 │ │ │ └─
 │ │ │ │ doc_1.mdx
 │ │ │ │ doc_2.mdx
-│ │ │
+│ │ │ 
 │ │ └─ release-0.9
+│ │
+│ └── zh
+│ └─ preview
+│ └─ release-0.9
+│
 └─reports # kubeblocks reports
-│
+│
 └─publics
 │ └──img # markdown assets
 │
diff --git a/package.json b/package.json
index f9341705..a79d3b67 100644
--- a/package.json
+++ b/package.json
@@ -23,7 +23,7 @@
     "@mui/icons-material": "^6.4.4",
     "@mui/material": "^6.4.4",
     "@mui/material-nextjs": "^6.4.3",
-    "@next/mdx": "^15.1.7",
+    "@next/mdx": "^15.2.4",
     "@types/react-copy-to-clipboard": "^5.0.7",
     "@types/react-scroll": "^1.8.10",
     "@types/remark-heading-id": "^1.0.0",
@@ -36,7 +36,7 @@
     "minisearch": "^7.1.2",
     "moment": "^2.30.1",
     "mui-message": "^1.1.0",
-    "next": "15.1.7",
+    "next": "15.2.4",
     "next-international": "^1.3.1",
     "next-themes": "^0.4.4",
     "react": "^19.0.0",
@@ -71,7 +71,7 @@
     "@types/react-slick": "^0.23.13",
     "@types/react-stickynode": "^4.0.3",
     "eslint": "^9",
-    "eslint-config-next": "15.1.7",
+    "eslint-config-next": "15.2.4",
     "puppeteer": "^24.10.0",
     "typescript": "^5"
   }
diff --git a/scripts/python/glossary.json b/scripts/python/glossary.json
new file mode 100644
index 00000000..dcf41560
--- /dev/null
+++ b/scripts/python/glossary.json
@@ -0,0 +1,86 @@
+{
+  "technical_terms": {
+    "KubeBlocks": "KubeBlocks",
+    "Kubernetes": "Kubernetes",
+    "kubectl": "kubectl",
+    "YAML": "YAML",
+    "API": "API",
+    "CLI": "CLI",
+    "deployment": "部署",
+    "pod": "Pod",
+    "service": "服务",
+    "configmap": "ConfigMap",
+    "secret": "Secret",
+    "volume": "存储卷",
+    "persistent volume": "持久卷",
+    "storage class": "存储类",
+    "ingress": "Ingress",
+    "load balancer": "负载均衡器",
+    "node": "节点",
+    "container": "容器",
+    "image": "镜像",
+    "registry": "镜像仓库",
+    "helm": "Helm",
+    "chart": "Chart",
+    "operator": "Operator",
+    "CRD": "CRD",
+    "custom resource": "自定义资源"
+  },
+  "database_terms": {
+    "Milvus": "Milvus",
+    "PostgreSQL": "PostgreSQL",
+    "MySQL": "MySQL",
+    "Redis": "Redis",
+    "MongoDB": "MongoDB",
+    "Elasticsearch": "Elasticsearch",
+    "Kafka": "Kafka",
+    "RabbitMQ": "RabbitMQ",
+    "StarRocks": "StarRocks",
+    "Qdrant": "Qdrant",
+    "ETCD": "ETCD",
+    "MinIO": 
"MinIO", + "S3": "S3", + "database": "数据库", + "table": "表", + "query": "查询", + "transaction": "事务", + "replica": "副本", + "primary": "主节点", + "secondary": "从节点", + "backup": "备份", + "restore": "恢复", + "failover": "故障转移", + "high availability": "高可用性", + "disaster recovery": "灾难恢复", + "read-only": "只读", + "read-write": "读写", + "connection": "连接", + "timeout": "超时", + "standalone": "单机模式", + "cluster mode": "集群模式", + "vector": "向量", + "embedding": "嵌入", + "vector database": "向量数据库", + "vector search": "向量搜索", + "similarity search": "相似性搜索", + "minor version": "次版本", + "patch version": "补丁版本", + "major version": "主版本" + }, + "operations_terms": { + "metrics": "指标", + "alerting": "告警", + "dashboard": "仪表板", + "scaling": "扩缩容", + "horizontal scaling": "水平扩展", + "vertical scaling": "垂直扩展", + "auto-scaling": "自动扩缩容", + "rolling update": "滚动更新", + "blue-green deployment": "蓝绿部署", + "canary deployment": "金丝雀部署", + "health check": "健康检查", + "liveness probe": "存活探针", + "readiness probe": "就绪探针", + "startup probe": "启动探针" + } +} \ No newline at end of file diff --git a/scripts/python/requirements.txt b/scripts/python/requirements.txt new file mode 100644 index 00000000..ab3f9e09 --- /dev/null +++ b/scripts/python/requirements.txt @@ -0,0 +1,8 @@ +requests>=2.31.0 +PyYAML>=6.0 +openai>=1.0.0 +regex>=2023.5.5 +markdown>=3.4.0 +httpx[socks] +# or alternatively: +# socksio \ No newline at end of file diff --git a/scripts/python/transalate_mdx.py b/scripts/python/transalate_mdx.py new file mode 100644 index 00000000..485f3daa --- /dev/null +++ b/scripts/python/transalate_mdx.py @@ -0,0 +1,1076 @@ +#!/usr/bin/env python3 +""" +Automated translation script - Translate MDX files between different languages +Supports DeepSeek and GPT API with proprietary terminology dictionary functionality +""" + +import os +import re +import yaml +import json +import argparse +import logging +from typing import Dict, List, Tuple, Optional +from pathlib import Path +# import requests # Remove this line +from openai import OpenAI +import shutil + +# Configure logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +class GlossaryManager: + """Proprietary terminology dictionary manager""" + + def __init__(self, glossary_file: str = None): + self.glossary = {} + self.glossary_file = glossary_file or os.path.join(os.path.dirname(__file__), 'glossary.json') + self.load_glossary() + + def load_glossary(self): + """Load glossary file""" + try: + if os.path.exists(self.glossary_file): + with open(self.glossary_file, 'r', encoding='utf-8') as f: + glossary_data = json.load(f) + # Merge all categories of vocabulary + for category, terms in glossary_data.items(): + self.glossary.update(terms) + logger.info(f"Loaded {len(self.glossary)} proprietary terms") + else: + logger.warning(f"Glossary file does not exist: {self.glossary_file}") + except Exception as e: + logger.error(f"Failed to load glossary file: {e}") + + def get_glossary_prompt(self) -> str: + """Generate glossary prompt""" + if not self.glossary: + return "" + + glossary_text = "\n".join([f"- {en} -> {zh}" for en, zh in self.glossary.items()]) + return f""" + +Please pay special attention to the translation of the following proprietary terms: +{glossary_text} + +For proprietary terms, please strictly follow the translations in the glossary. 
If a proprietary term is not in the glossary, please keep the original English text.""" + + +class MDXTranslator: + def __init__(self, api_key: str, api_type: str = "deepseek", base_url: str = None, + glossary_file: str = None, override_existing: bool = False, + source_lang: str = "en", dest_lang: str = "zh"): + """ + Initialize translator + + Args: + api_key: API key + api_type: API type ("deepseek" or "gpt") + base_url: Custom API base URL + glossary_file: Glossary file path + override_existing: Whether to override existing translation files + source_lang: Source language code (e.g., "en", "zh", "ja", "fr") + dest_lang: Destination language code (e.g., "en", "zh", "ja", "fr") + """ + self.api_key = api_key + self.api_type = api_type.lower() + self.glossary_manager = GlossaryManager(glossary_file) + self.override_existing = override_existing + self.source_lang = source_lang + self.dest_lang = dest_lang + + # Language mappings for better prompts + self.lang_names = { + "en": "English", + "zh": "Chinese", + "ja": "Japanese", + "fr": "French", + "de": "German", + "es": "Spanish", + "ko": "Korean", + "ru": "Russian" + } + + if self.api_type == "deepseek": + self.base_url = base_url or "https://api.deepseek.com/v1" + self.model = "deepseek-chat" + elif self.api_type == "gpt": + self.base_url = base_url or "https://api.openai.com/v1" + self.model = "gpt-3.5-turbo" + else: + raise ValueError("api_type must be 'deepseek' or 'gpt'") + + self.client = OpenAI( + api_key=api_key, + base_url=self.base_url, + http_client=None # This can help bypass proxy issues + ) + + # File types that don't need translation + self.skip_files = ['references/api-reference', 'cli/'] + + def parse_frontmatter(self, content: str) -> Tuple[Dict, str]: + """ + Parse frontmatter + + Returns: + (frontmatter_dict, content_without_frontmatter) + """ + if not content.startswith('---'): + return {}, content + + # Find the second --- + end_match = re.search(r'\n---\n', content[3:]) + if not end_match: + return {}, content + + frontmatter_content = content[3:end_match.start() + 3] + remaining_content = content[end_match.end() + 3:] + + try: + frontmatter = yaml.safe_load(frontmatter_content) + return frontmatter or {}, remaining_content + except yaml.YAMLError: + logger.warning("Failed to parse frontmatter") + return {}, content + + + def translate_frontmatter(self, frontmatter: Dict) -> Dict: + """ + Translate frontmatter fields that contain translatable content + + Args: + frontmatter: Dictionary containing frontmatter data + + Returns: + Dictionary with translated frontmatter + """ + if not frontmatter: + return frontmatter + + # Fields that should be translated + translatable_fields = { + 'title', 'description', 'sidebar_label', 'sidebar_position_name', + 'summary', 'abstract', 'excerpt', 'meta_description' + } + + # Fields that should NOT be translated + non_translatable_fields = { + 'id', 'slug', 'sidebar_position', 'hide_title', 'hide_table_of_contents', + 'draft', 'unlisted', 'date', 'authors', 'tags', 'keywords', + 'image', 'custom_edit_url', 'pagination_prev', 'pagination_next' + } + + translated_frontmatter = {} + + for key, value in frontmatter.items(): + if key in non_translatable_fields: + # Keep non-translatable fields as-is + translated_frontmatter[key] = value + elif key in translatable_fields and isinstance(value, str) and value.strip(): + # Translate translatable string fields + try: + # logger.info(f"Translating frontmatter field '{key}': {value}") + translated_value = self.translate_frontmatter_field(value) + 
translated_frontmatter[key] = translated_value + except Exception as e: + logger.warning(f"Failed to translate frontmatter field '{key}': {e}") + translated_frontmatter[key] = value # Keep original on failure + elif isinstance(value, list): + # Handle arrays (like tags, authors) + if key == 'tags' or key == 'keywords': + # Don't translate tags/keywords, keep as-is + translated_frontmatter[key] = value + else: + # For other arrays, translate string elements + translated_array = [] + for item in value: + if isinstance(item, str) and item.strip(): + try: + translated_item = self.translate_frontmatter_field(item) + translated_array.append(translated_item) + except Exception as e: + logger.warning(f"Failed to translate array item in '{key}': {e}") + translated_array.append(item) + else: + translated_array.append(item) + translated_frontmatter[key] = translated_array + elif isinstance(value, dict): + # Handle nested objects (recursively translate) + translated_frontmatter[key] = self.translate_frontmatter_dict(value) + else: + # Keep other types as-is (numbers, booleans, etc.) + translated_frontmatter[key] = value + + return translated_frontmatter + + def calculate_max_tokens(self, input_text: str, is_frontmatter: bool = False) -> int: + """ + Calculate appropriate max_tokens based on input length and model type + + Args: + input_text: The text to be translated + is_frontmatter: Whether this is frontmatter (shorter content) + + Returns: + Appropriate max_tokens value + """ + # Rough estimation: 1 token ≈ 4 characters for English, 1.5 for Chinese + input_tokens = len(input_text) // 4 + + # System prompt tokens (approximately) + system_prompt_tokens = 200 + + # Translation expansion factor (Chinese is often more compact, but technical terms can be longer) + expansion_factor = 1.2 + + # Calculate expected output tokens + expected_output_tokens = int(input_tokens * expansion_factor) + + # Model-specific limits + if self.api_type == "deepseek": + max_context = 32000 + recommended_max_output = min(8000, max_context - system_prompt_tokens - input_tokens) + elif self.api_type == "gpt": + if "gpt-4" in self.model: + max_context = 32000 if "32k" in self.model else 8000 + else: # gpt-3.5-turbo + max_context = 16000 + recommended_max_output = min(4000, max_context - system_prompt_tokens - input_tokens) + else: + recommended_max_output = 4000 # Conservative default + + # For frontmatter, use smaller limits + if is_frontmatter: + return min(500, recommended_max_output) + + # For regular content, use calculated value but with reasonable bounds + calculated_max = max(expected_output_tokens, 1000) # Minimum 1000 tokens + final_max = min(calculated_max, recommended_max_output) + + logger.debug(f"Input tokens: ~{input_tokens}, Expected output: ~{expected_output_tokens}, Max tokens: {final_max}") + return final_max + + def get_translation_prompt(self, is_frontmatter: bool = False) -> str: + """Generate translation system prompt based on source and destination languages""" + source_name = self.lang_names.get(self.source_lang, self.source_lang.upper()) + dest_name = self.lang_names.get(self.dest_lang, self.dest_lang.upper()) + + glossary_prompt = self.glossary_manager.get_glossary_prompt() + + if is_frontmatter: + return f"""You are a professional technical documentation translation assistant. Please translate the following {source_name} text to {dest_name} with these requirements: +1. This is frontmatter metadata, so keep it concise and clear +2. Maintain technical term accuracy +3. 
For technical terms, use standard {dest_name} translations if available, otherwise keep {source_name}
+4. The result should be natural and fluent {dest_name}
+5. Do not add any extra formatting or markdown{glossary_prompt}
+"""
+        else:
+            return f"""You are a professional technical documentation translation assistant. Please translate the following {source_name} technical documentation to {dest_name} with these requirements:
+1. Maintain technical term accuracy
+2. Keep Markdown formatting unchanged, including line breaks and spacing
+3. Do not translate content in import statements
+4. Do not modify the content of code blocks
+5. Do not translate links or images
+6. Preserve all formatting, spacing, and line breaks exactly as in the original
+7. Translation should be natural and fluent, reading idiomatically in {dest_name}
+8. For technical terms, use standard {dest_name} translations if available, otherwise keep {source_name} and add a {dest_name} explanation on first occurrence{glossary_prompt}
+
+IMPORTANT: Preserve exact spacing and line breaks between text and code blocks."""
+
+    def translate_frontmatter_field(self, text: str) -> str:
+        """
+        Translate a single frontmatter field
+
+        Args:
+            text: Text to translate
+
+        Returns:
+            Translated text
+        """
+        if not text.strip():
+            return text
+
+        try:
+            # Calculate appropriate max_tokens
+            max_tokens = self.calculate_max_tokens(text, is_frontmatter=True)
+
+            messages = [
+                {
+                    "role": "system",
+                    "content": self.get_translation_prompt(is_frontmatter=True)
+                },
+                {
+                    "role": "user",
+                    "content": f"Please translate: {text}"
+                }
+            ]
+
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=messages,
+                temperature=0.2,  # Lower temperature for more consistent translations
+                max_tokens=max_tokens
+            )
+
+            translated = response.choices[0].message.content.strip()
+            return translated
+
+        except Exception as e:
+            logger.error(f"Failed to translate frontmatter field: {e}")
+            return text
+
+    def translate_frontmatter_dict(self, obj: Dict) -> Dict:
+        """
+        Recursively translate nested dictionary objects in frontmatter
+
+        Args:
+            obj: Dictionary object to translate
+
+        Returns:
+            Translated dictionary
+        """
+        translated_obj = {}
+
+        for key, value in obj.items():
+            if isinstance(value, str) and value.strip():
+                try:
+                    translated_obj[key] = self.translate_frontmatter_field(value)
+                except Exception as e:
+                    logger.warning(f"Failed to translate nested field '{key}': {e}")
+                    translated_obj[key] = value
+            elif isinstance(value, dict):
+                translated_obj[key] = self.translate_frontmatter_dict(value)
+            elif isinstance(value, list):
+                translated_array = []
+                for item in value:
+                    if isinstance(item, str) and item.strip():
+                        try:
+                            translated_array.append(self.translate_frontmatter_field(item))
+                        except Exception as e:
+                            logger.warning(f"Failed to translate array item: {e}")
+                            translated_array.append(item)
+                    elif isinstance(item, dict):
+                        translated_array.append(self.translate_frontmatter_dict(item))
+                    else:
+                        translated_array.append(item)
+                translated_obj[key] = translated_array
+            else:
+                translated_obj[key] = value
+
+        return translated_obj
+
+    def split_content_by_h2(self, content: str) -> List[str]:
+        """
+        Split document content by H2 headings
+
+        Args:
+            content: Document content to split
+
+        Returns:
+            List of split content chunks
+        """
+        if not content.strip():
+            return [content]
+
+        # Use regex to match H2 headings
+        h2_pattern = r'^## .+$'
+        lines = 
content.split('\n') + chunks = [] + current_chunk = [] + + for line in lines: + if re.match(h2_pattern, line): + # If encountering H2 heading, save current chunk and start new chunk + if current_chunk: + chunks.append('\n'.join(current_chunk)) + current_chunk = [] + current_chunk.append(line) + else: + current_chunk.append(line) + + # Add the last chunk + if current_chunk: + chunks.append('\n'.join(current_chunk)) + + # If no H2 headings found, return original content + if not chunks: + return [content] + + # Filter out empty chunks + chunks = [chunk for chunk in chunks if chunk.strip()] + + logger.info(f"Document split into {len(chunks)} chunks") + return chunks + + def split_large_chunk(self, chunk: str, max_length: int) -> List[str]: + """ + Split a large chunk into smaller pieces by paragraphs or sentences + + Args: + chunk: The large chunk to split + max_length: Maximum length for each sub-chunk + + Returns: + List of smaller chunks + """ + if len(chunk) <= max_length: + return [chunk] + + # First try to split by double newlines (paragraphs) + paragraphs = chunk.split('\n\n') + + sub_chunks = [] + current_sub_chunk = "" + + for paragraph in paragraphs: + # If single paragraph is too long, split by sentences + if len(paragraph) > max_length: + if current_sub_chunk: + sub_chunks.append(current_sub_chunk.strip()) + current_sub_chunk = "" + + # Split long paragraph by sentences + sentences = re.split(r'(?<=[.!?])\s+', paragraph) + current_sentence_chunk = "" + + for sentence in sentences: + potential_length = len(current_sentence_chunk) + len(sentence) + (1 if current_sentence_chunk else 0) + + if potential_length <= max_length: + if current_sentence_chunk: + current_sentence_chunk += " " + sentence + else: + current_sentence_chunk = sentence + else: + if current_sentence_chunk: + sub_chunks.append(current_sentence_chunk.strip()) + current_sentence_chunk = sentence + + if current_sentence_chunk: + sub_chunks.append(current_sentence_chunk.strip()) + else: + # Normal paragraph processing + potential_length = len(current_sub_chunk) + len(paragraph) + (2 if current_sub_chunk else 0) + + if potential_length <= max_length: + if current_sub_chunk: + current_sub_chunk += "\n\n" + paragraph + else: + current_sub_chunk = paragraph + else: + if current_sub_chunk: + sub_chunks.append(current_sub_chunk.strip()) + current_sub_chunk = paragraph + + # Don't forget the last sub-chunk + if current_sub_chunk: + sub_chunks.append(current_sub_chunk.strip()) + + # Filter out empty chunks + sub_chunks = [chunk for chunk in sub_chunks if chunk.strip()] + + logger.info(f"Large chunk split into {len(sub_chunks)} sub-chunks") + return sub_chunks + + def translate_text_or_preserve_code(self, content: str) -> str: + """ + Translate text content but preserve code blocks unchanged + + Args: + content: Content that may contain text and code blocks + + Returns: + Translated content with code blocks preserved + """ + if not content.strip(): + return content + + # Check if the entire content is a code block + if content.strip().startswith('```') and content.strip().endswith('```'): + logger.info("Skipping translation for code block") + return content # Don't translate code blocks + + # Split content into code blocks and text + code_block_pattern = r'(```[\s\S]*?```)' + parts = re.split(code_block_pattern, content) + + translated_parts = [] + + for part in parts: + if not part: + continue + + # Check if this part is a code block + if part.startswith('```') and part.endswith('```'): + # This is a code block, keep it 
unchanged + logger.info("Preserving code block without translation") + translated_parts.append(part) + else: + # This is regular text, translate it + if part.strip(): + logger.info("Translating text content") + translated_part = self.translate_text_chunk(part) + translated_parts.append(translated_part) + else: + translated_parts.append(part) # Keep whitespace/empty parts + + return ''.join(translated_parts) + + def split_chunk_by_sentences_and_code(self, chunk: str, max_length: int) -> List[str]: + """ + Split chunk by parsing sentences and code blocks first + Code blocks are kept as separate elements and won't be translated + + Args: + chunk: The chunk to split + max_length: Maximum length for each sub-chunk + + Returns: + List of sub-chunks + """ + if len(chunk) <= max_length: + return [chunk] + + # Parse chunk into sentences and code blocks + elements = self.parse_sentences_and_code_blocks(chunk) + + # Group elements into sub-chunks + sub_chunks = [] + current_sub_chunk_elements = [] + current_length = 0 + + for element in elements: + element_length = len(element) + + # If it's a code block or a single element exceeds max_length, keep it as separate chunk + if (element.strip().startswith('```') and element.strip().endswith('```')) or element_length > max_length: + # Save current sub-chunk if not empty + if current_sub_chunk_elements: + sub_chunks.append('\n'.join(current_sub_chunk_elements)) + current_sub_chunk_elements = [] + current_length = 0 + + # Add the code block or large element as its own chunk + sub_chunks.append(element) + continue + + # Check if we can add this element to current sub-chunk + potential_length = current_length + element_length + (1 if current_sub_chunk_elements else 0) # +1 for newline + + if potential_length <= max_length: + current_sub_chunk_elements.append(element) + current_length = potential_length + else: + # Current sub-chunk is full, start new one + if current_sub_chunk_elements: + sub_chunks.append('\n'.join(current_sub_chunk_elements)) + + current_sub_chunk_elements = [element] + current_length = element_length + + # Add the last sub-chunk + if current_sub_chunk_elements: + sub_chunks.append('\n'.join(current_sub_chunk_elements)) + + # Filter out empty chunks + sub_chunks = [chunk for chunk in sub_chunks if chunk.strip()] + + logger.info(f"Parsed into {len(elements)} elements, grouped into {len(sub_chunks)} sub-chunks") + return sub_chunks + + def parse_sentences_and_code_blocks(self, text: str) -> List[str]: + """ + Parse text into sentences and code blocks, keeping code blocks intact + + Args: + text: Text to parse + + Returns: + List of sentences and code blocks + """ + elements = [] + + # Split by code blocks first (```...```) + code_block_pattern = r'(```[\s\S]*?```)' + parts = re.split(code_block_pattern, text) + + for part in parts: + if not part.strip(): + continue + + # Check if this part is a code block + if part.startswith('```') and part.endswith('```'): + # This is a code block, keep it intact + elements.append(part) + else: + # This is regular text, split into sentences + sentences = self.split_text_into_sentences(part) + elements.extend(sentences) + + return elements + + def split_text_into_sentences(self, text: str) -> List[str]: + """ + Split text into sentences, preserving markdown structure + + Args: + text: Text to split into sentences + + Returns: + List of sentences + """ + if not text.strip(): + return [] + + sentences = [] + + # Split by lines first to preserve markdown structure (headers, lists, etc.) 
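+        # Illustrative example (not executed): a chunk such as
+        #   "## Usage\nRun the tool. It works.\n- step one"
+        # comes out as ["## Usage", "Run the tool.", "It works.", "- step one"]:
+        # headers and list items become standalone elements, while plain prose
+        # is split on sentence-ending punctuation.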
+ lines = text.split('\n') + current_sentence = "" + + for line in lines: + line = line.strip() + + # Empty line - end current sentence and add line break + if not line: + if current_sentence: + sentences.append(current_sentence.strip()) + current_sentence = "" + sentences.append("") # Preserve empty line + continue + + # Markdown headers, lists, or other block elements - treat as separate sentences + if (line.startswith('#') or line.startswith('-') or line.startswith('*') or + line.startswith('1.') or line.startswith('>') or line.startswith('|')): + if current_sentence: + sentences.append(current_sentence.strip()) + current_sentence = "" + sentences.append(line) + continue + + # Regular text - split by sentence endings + # Add current line to sentence + if current_sentence: + current_sentence += " " + line + else: + current_sentence = line + + # Check for sentence endings + sentence_endings = re.finditer(r'[.!?]+(?:\s|$)', current_sentence) + last_end = 0 + + for match in sentence_endings: + end_pos = match.end() + sentence_part = current_sentence[last_end:end_pos].strip() + if sentence_part: + sentences.append(sentence_part) + last_end = end_pos + + # Keep remaining text for next iteration + current_sentence = current_sentence[last_end:].strip() + + # Add any remaining sentence + if current_sentence: + sentences.append(current_sentence.strip()) + + # Filter out empty sentences but preserve intentional empty lines + return [s for s in sentences if s != "" or s == ""] + + + def translate_text_chunk(self, text: str) -> str: + """ + Translate a single text chunk while preserving formatting + """ + if not text.strip(): + return text + + try: + # Calculate appropriate max_tokens + max_tokens = self.calculate_max_tokens(text, is_frontmatter=False) + + messages = [ + { + "role": "system", + "content": self.get_translation_prompt(is_frontmatter=False) + }, + { + "role": "user", + "content": f"Please translate the following content while preserving all formatting, spacing, and line breaks:\n\n{text}" + } + ] + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=0.3, + max_tokens=max_tokens + ) + + translated = response.choices[0].message.content.strip() + + # remove notes like (注:.*?) if dest lang is zh + if self.dest_lang == "zh": + # re.DOTALL flag makes the dot (.) 
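+    # Quick sanity check for the splitter above (illustrative; pure string
+    # logic, no API access involved):
+    #   self.split_text_into_sentences("A. B? C")  ->  ["A.", "B?", "C"]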
+    def translate_text_chunk(self, text: str) -> str:
+        """
+        Translate a single text chunk while preserving formatting
+        """
+        if not text.strip():
+            return text
+
+        try:
+            # Calculate appropriate max_tokens
+            max_tokens = self.calculate_max_tokens(text, is_frontmatter=False)
+
+            messages = [
+                {
+                    "role": "system",
+                    "content": self.get_translation_prompt(is_frontmatter=False)
+                },
+                {
+                    "role": "user",
+                    "content": f"Please translate the following content while preserving all formatting, spacing, and line breaks:\n\n{text}"
+                }
+            ]
+
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=messages,
+                temperature=0.3,
+                max_tokens=max_tokens
+            )
+
+            translated = response.choices[0].message.content.strip()
+
+            # remove notes like (注:.*?) if dest lang is zh
+            if self.dest_lang == "zh":
+                # re.DOTALL makes the dot (.) match any character, including newlines,
+                # so translator notes that span multiple lines are removed as well
+                translated = re.sub(r'(注:.*?)', '', translated, flags=re.DOTALL)
+
+            return translated
+
+        except Exception as e:
+            logger.error(f"Translation failed: {e}")
+            return text
+
+    def extract_text_and_code_blocks(self, content: str) -> List[Tuple[str, bool]]:
+        """
+        Extract text and code blocks from content, marking each part as text or code
+        Preserves the original spacing and formatting
+
+        Args:
+            content: Content to extract from
+
+        Returns:
+            List of tuples (content_part, is_code_block)
+        """
+        if not content.strip():
+            return [(content, False)]
+
+        # Use capturing groups to preserve delimiters and spacing
+        # This pattern captures code blocks and preserves the text around them
+        code_block_pattern = r'(```[\s\S]*?```)'
+        parts = re.split(code_block_pattern, content)
+
+        extracted_parts = []
+        for part in parts:
+            if not part:
+                continue
+
+            # Check if this part is a code block
+            is_code_block = part.startswith('```') and part.endswith('```')
+            extracted_parts.append((part, is_code_block))
+
+        return extracted_parts
+
+    def translate_chunk_with_code_preservation(self, chunk: str) -> str:
+        """
+        Translate a chunk while preserving code blocks and original formatting
+
+        Args:
+            chunk: Content chunk to translate
+
+        Returns:
+            Translated chunk with code blocks preserved
+        """
+        if not chunk.strip():
+            return chunk
+
+        # Extract text and code blocks
+        parts = self.extract_text_and_code_blocks(chunk)
+
+        translated_parts = []
+        for part_content, is_code_block in parts:
+            if is_code_block:
+                # Keep code blocks unchanged
+                logger.debug("Preserving code block without translation")
+                translated_parts.append(part_content)
+            else:
+                # Translate text content
+                if part_content.strip():
+                    logger.debug("Translating text content")
+                    # For large text parts, split further if needed
+                    if len(part_content) > 4000:
+                        sub_chunks = self.split_large_text_chunk(part_content, 4000)
+                        translated_sub_parts = []
+                        for sub_chunk in sub_chunks:
+                            translated_sub_part = self.translate_text_chunk(sub_chunk)
+                            translated_sub_parts.append(translated_sub_part)
+                        # Rejoin sub-chunks with paragraph breaks (the splitter consumed them)
+                        translated_part = '\n\n'.join(translated_sub_parts)
+                    else:
+                        translated_part = self.translate_text_chunk(part_content)
+                    translated_parts.append(translated_part)
+                else:
+                    # Keep whitespace/empty parts unchanged to preserve formatting
+                    translated_parts.append(part_content)
+
+        # Join without adding separators: the parts returned by
+        # extract_text_and_code_blocks already carry their original surrounding
+        # whitespace, so '\n\n'.join would inject blank lines not in the source
+        return ''.join(translated_parts)
+
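+    # Illustrative example (not executed): for a chunk like
+    #   "Install it:\n```bash\nhelm install kb\n```\nDone."
+    # the method above sends only "Install it:\n" and "\nDone." to the API;
+    # the fenced block passes through byte-for-byte.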
+    def split_large_text_chunk(self, text: str, max_length: int) -> List[str]:
+        """
+        Split large text chunk into smaller pieces by paragraphs or sentences
+        Only handles text content, no code blocks
+
+        Args:
+            text: Text content to split
+            max_length: Maximum length for each sub-chunk
+
+        Returns:
+            List of smaller text chunks
+        """
+        if len(text) <= max_length:
+            return [text]
+
+        # Split by paragraphs first
+        paragraphs = text.split('\n\n')
+
+        sub_chunks = []
+        current_sub_chunk = ""
+
+        for paragraph in paragraphs:
+            # If single paragraph is too long, split by sentences
+            if len(paragraph) > max_length:
+                if current_sub_chunk:
+                    sub_chunks.append(current_sub_chunk.strip())
+                    current_sub_chunk = ""
+
+                # Split long paragraph by sentences
+                sentences = re.split(r'(?<=[.!?])\s+', paragraph)
+                current_sentence_chunk = ""
+
+                for sentence in sentences:
+                    potential_length = len(current_sentence_chunk) + len(sentence) + (1 if current_sentence_chunk else 0)
+
+                    if potential_length <= max_length:
+                        if current_sentence_chunk:
+                            current_sentence_chunk += " " + sentence
+                        else:
+                            current_sentence_chunk = sentence
+                    else:
+                        if current_sentence_chunk:
+                            sub_chunks.append(current_sentence_chunk.strip())
+                        current_sentence_chunk = sentence
+
+                if current_sentence_chunk:
+                    sub_chunks.append(current_sentence_chunk.strip())
+            else:
+                # Normal paragraph processing
+                potential_length = len(current_sub_chunk) + len(paragraph) + (2 if current_sub_chunk else 0)
+
+                if potential_length <= max_length:
+                    if current_sub_chunk:
+                        current_sub_chunk += "\n\n" + paragraph
+                    else:
+                        current_sub_chunk = paragraph
+                else:
+                    if current_sub_chunk:
+                        sub_chunks.append(current_sub_chunk.strip())
+                    current_sub_chunk = paragraph
+
+        # Add the last sub-chunk
+        if current_sub_chunk:
+            sub_chunks.append(current_sub_chunk.strip())
+
+        # Filter out empty chunks
+        sub_chunks = [chunk for chunk in sub_chunks if chunk.strip()]
+
+        return sub_chunks
+
+    def translate_text(self, text: str) -> str:
+        """
+        Translate text, processing by chunks with clear text/code separation
+        """
+        if not text.strip():
+            return text
+
+        # Threshold (in characters) below which the document is translated in a single call
+        direct_translation_threshold = 4000
+
+        # If content is short, translate directly
+        if len(text) < direct_translation_threshold:
+            logger.info("Document content is short, translating directly")
+            return self.translate_chunk_with_code_preservation(text)
+
+        # Split content by H2 headings for better organization
+        chunks = self.split_content_by_h2(text)
+        logger.info(f"Document split into {len(chunks)} chunks by H2 headings")
+
+        translated_chunks = []
+        for i, chunk in enumerate(chunks):
+            logger.info(f"Processing chunk {i+1}/{len(chunks)}...")
+
+            # Translate chunk with code preservation
+            translated_chunk = self.translate_chunk_with_code_preservation(chunk)
+            translated_chunks.append(translated_chunk)
+
+        # Join all translated chunks
+        result = '\n\n'.join(translated_chunks)
+        logger.info(f"Translation completed for all {len(chunks)} chunks")
+
+        return result
+
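+    # End-to-end flow for a single file (summary): parse frontmatter ->
+    # translate its translatable fields -> split the body by H2 headings ->
+    # translate each chunk with code blocks preserved -> reassemble as
+    # "---\n<frontmatter yaml>---\n<translated body>".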
+    def translate_mdx_file(self, input_path: str, output_path: str) -> None:
+        """
+        Translate MDX file
+        """
+        logger.info(f"Starting to translate file: {input_path}")
+
+        # Check if file already exists and whether to override
+        if os.path.exists(output_path) and not self.override_existing:
+            logger.info(f"File already translated, skipping: {input_path}")
+            return
+        elif os.path.exists(output_path) and self.override_existing:
+            logger.info(f"File already exists, but will override: {input_path}")
+
+        # Read file
+        with open(input_path, 'r', encoding='utf-8') as f:
+            content = f.read()
+
+        # Parse frontmatter
+        frontmatter, main_content = self.parse_frontmatter(content)
+
+        # Translate frontmatter
+        translated_frontmatter = self.translate_frontmatter(frontmatter)
+
+        # Translate main content
+        translated_content = self.translate_text(main_content)
+
+        # Reassemble file
+        if translated_frontmatter:
+            # write frontmatter before translated content
+            frontmatter_yaml = yaml.dump(translated_frontmatter, default_flow_style=False, allow_unicode=True)
+            # add splitter '---' before and after frontmatter
+            final_content = f"---\n{frontmatter_yaml}---\n{translated_content}"
+        else:
+            final_content = translated_content
+
+        # Ensure output directory exists
+        os.makedirs(os.path.dirname(output_path), exist_ok=True)
+
+        # Write translated file
+        with open(output_path, 'w', encoding='utf-8') as f:
+            f.write(final_content)
+
+        logger.info(f"Translation completed, output file: {output_path}")
+
+    def translate_directory(self, input_dir: str, output_dir: str) -> None:
+        """
+        Translate entire directory
+        """
+        input_path = Path(input_dir)
+        output_path = Path(output_dir)
+
+        if not input_path.exists():
+            logger.error(f"Input directory does not exist: {input_dir}")
+            return
+
+        if not output_path.exists():
+            os.makedirs(output_path, exist_ok=True)
+
+        mdx_files = list(input_path.glob("**/*.mdx"))
+        # Find all yml files
+        yml_files = list(input_path.glob("**/_category_.yml"))
+        yaml_files = list(input_path.glob("**/_category_.yaml"))
+        yml_files.extend(yaml_files)
+
+        # Process YAML files first
+        for yml_file in yml_files:
+            # get relative path from input_path to yml_file
+            relative_path = yml_file.relative_to(input_path)
+            target_file = output_path / relative_path
+
+            # Ensure the target directory exists
+            target_file.parent.mkdir(parents=True, exist_ok=True)
+            # if target_file exists, skip
+            if target_file.exists() and not self.override_existing:
+                logger.info(f"Skipping existing YAML file: {yml_file}")
+                continue
+
+            try:
+                # Read YAML file with proper loader
+                with open(yml_file, 'r', encoding='utf-8') as f:
+                    yaml_data = yaml.safe_load(f)
+
+                # translate label if it exists
+                if yaml_data and 'label' in yaml_data:
+                    original_label = yaml_data['label']
+                    translated_label = self.translate_frontmatter_field(original_label)
+                    yaml_data['label'] = translated_label
+                    logger.info(f"Translated YAML label: '{original_label}' -> '{translated_label}'")
+
+                # write back to _category_.yml
+                with open(target_file, 'w', encoding='utf-8') as f:
+                    yaml.dump(yaml_data, f, default_flow_style=False, allow_unicode=True)
+
+                logger.info(f"Processed YAML file: {yml_file} -> {target_file}")
+
+            except Exception as e:
+                logger.error(f"Failed to process YAML file {yml_file}: {e}")
+                # Copy original file if translation fails
+                shutil.copy(yml_file, target_file)
+
+        # Process MDX files
+        if not mdx_files:
+            logger.warning(f"No MDX files found in directory {input_dir}")
+            return
+
+        logger.info(f"Found {len(mdx_files)} MDX files")
+
+        for idx, mdx_file in enumerate(mdx_files):
+            # Calculate relative path
+            relative_path = mdx_file.relative_to(input_path)
+            # ignore files if in skip_files
+            if any(skip_file in str(relative_path) for skip_file in self.skip_files):
+                logger.info(f"Skipping file: {mdx_file.name}, path: {relative_path}")
+                # Ensure target directory exists before copying
+                target_file = output_path / relative_path
+                target_file.parent.mkdir(parents=True, exist_ok=True)
+                # copy file to output_path
+                shutil.copy(mdx_file, target_file)
+                continue
+            # Log progress as idx/total; enumerate avoids the O(n) list.index() lookup
+            logger.info(f"Translating file {mdx_file.name}... {idx+1}/{len(mdx_files)}")
+            output_file = output_path / relative_path
+            # Translate file
+            self.translate_mdx_file(str(mdx_file), str(output_file))
+
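+# Example invocation (illustrative; assumes a DeepSeek key in $DEEPSEEK_API_KEY):
+#   python3 scripts/python/transalate_mdx.py \
+#     --input docs/en/preview --output docs/zh/preview \
+#     --api-key "$DEEPSEEK_API_KEY" --api-type deepseek \
+#     --glossary scripts/python/glossary.json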
+def main():
+    parser = argparse.ArgumentParser(description="MDX file automatic translation tool")
+    parser.add_argument("--input", "-i", required=True, help="Input directory or file path")
+    parser.add_argument("--output", "-o", required=True, help="Output directory or file path")
+    parser.add_argument("--api-key", "-k", required=True, help="API key")
+    parser.add_argument("--api-type", "-t", choices=["deepseek", "gpt"], default="deepseek", help="API type")
+    parser.add_argument("--base-url", "-u", help="Custom API base URL")
+    parser.add_argument("--glossary", "-g", help="Glossary file path")
+    parser.add_argument("--override", action="store_true", help="Override existing translation files")
+    parser.add_argument("--source-lang", "-s", default="en", help="Source language code (e.g., en, zh, ja, fr)")
+    parser.add_argument("--dest-lang", "-d", default="zh", help="Destination language code (e.g., en, zh, ja, fr)")
+
+    args = parser.parse_args()
+
+    # Validate language codes
+    supported_langs = ["en", "zh", "ja", "fr", "de", "es", "ko", "ru"]
+    if args.source_lang not in supported_langs:
+        logger.warning(f"Source language '{args.source_lang}' may not be fully supported. Supported: {supported_langs}")
+    if args.dest_lang not in supported_langs:
+        logger.warning(f"Destination language '{args.dest_lang}' may not be fully supported. Supported: {supported_langs}")
+
+    if args.source_lang == args.dest_lang:
+        logger.error("Source and destination languages cannot be the same")
+        return
+
+    # Create translator
+    translator = MDXTranslator(
+        args.api_key, args.api_type, args.base_url, args.glossary, args.override,
+        args.source_lang, args.dest_lang
+    )
+
+    # Check if input is file or directory
+    if os.path.isfile(args.input):
+        translator.translate_mdx_file(args.input, args.output)
+    elif os.path.isdir(args.input):
+        translator.translate_directory(args.input, args.output)
+    else:
+        logger.error(f"Input path does not exist: {args.input}")
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/translate_with_exclude.sh b/scripts/translate_with_exclude.sh
new file mode 100755
index 00000000..73e54f67
--- /dev/null
+++ b/scripts/translate_with_exclude.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+# Translate folders under a given directory, with support for excluding specific subdirectories
+# Usage: ./scripts/translate_with_exclude.sh INPUT_DIR OUTPUT_DIR API_KEY [API_TYPE] [EXCLUDE_DIRS...]
+
+if [ $# -lt 3 ]; then
+    echo "Usage: $0 INPUT_DIR OUTPUT_DIR API_KEY [API_TYPE] [EXCLUDE_DIRS...]"
+    echo "Example: $0 docs/en/preview docs/zh/preview YOUR_API_KEY deepseek cli release_notes"
+    exit 1
+fi
+
+INPUT_DIR="$1"
+OUTPUT_DIR="$2"
+API_KEY="$3"
+API_TYPE="${4:-deepseek}"
+
+# Everything from the 5th argument onward is a directory to exclude.
+# Guard the shift: with exactly 3 arguments, a bare `shift 4` would fail and
+# leave INPUT_DIR/OUTPUT_DIR/API_KEY in "$@", wrongly treating them as exclusions.
+shift $(( $# < 4 ? $# : 4 ))
+EXCLUDE_DIRS=("$@")
+
+echo "Input directory: $INPUT_DIR"
+echo "Output directory: $OUTPUT_DIR"
+echo "API type: $API_TYPE"
+echo "Excluded directories: ${EXCLUDE_DIRS[*]}"
+
+# Check if input directory exists
+if [ ! -d "$INPUT_DIR" ]; then
+    echo "Error: Input directory does not exist: $INPUT_DIR"
+    exit 1
+fi
+
+# Install dependencies
+echo "Installing Python dependencies..."
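+# The venv keeps the translation dependencies isolated from the system Python;
+# `python3 -m venv` is safe to re-run - an existing environment is left in place.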
+python3 -m venv myenv +source myenv/bin/activate +pip install -r scripts/python/requirements.txt --upgrade + +# Get all subdirectories +echo "Scanning directory: $INPUT_DIR" +DIRS_TO_TRANSLATE=() + +for dir in "$INPUT_DIR"/*; do + if [ -d "$dir" ]; then + dir_name=$(basename "$dir") + + # Check if in exclusion list + exclude=false + for exclude_dir in "${EXCLUDE_DIRS[@]}"; do + if [ "$dir_name" = "$exclude_dir" ]; then + exclude=true + break + fi + done + + if [ "$exclude" = false ]; then + DIRS_TO_TRANSLATE+=("$dir_name") + else + echo "Skipping directory: $dir_name" + fi + fi +done + +echo "Found ${#DIRS_TO_TRANSLATE[@]} directories to translate:" +printf '%s\n' "${DIRS_TO_TRANSLATE[@]}" + +# Translate each directory +for dir_name in "${DIRS_TO_TRANSLATE[@]}"; do + input_dir="$INPUT_DIR/$dir_name" + output_dir="$OUTPUT_DIR/$dir_name" + + echo "" + echo "================================" + echo "Translating directory: $dir_name" + echo "Input: $input_dir" + echo "Output: $output_dir" + echo "================================" + + python3 scripts/python/transalate_mdx.py \ + --input "$input_dir" \ + --output "$output_dir" \ + --api-key "$API_KEY" \ + --api-type "$API_TYPE" \ + --glossary "scripts/python/glossary.json" + + if [ $? -eq 0 ]; then + echo "✅ $dir_name translation completed" + else + echo "❌ $dir_name translation failed" + fi +done + +echo "" +echo "🎉 All directories translation completed!" +echo "Translated files saved to: $OUTPUT_DIR" \ No newline at end of file diff --git a/src/app/[locale]/ElevationScrollAppBar.tsx b/src/app/[locale]/ElevationScrollAppBar.tsx index f0285511..136c71bb 100644 --- a/src/app/[locale]/ElevationScrollAppBar.tsx +++ b/src/app/[locale]/ElevationScrollAppBar.tsx @@ -25,7 +25,7 @@ import Link from "next/link"; import SearchModal from "@/components/SearchModal"; import { SlackIconNoColor } from "@/components/icons"; -// import LocaleSwitcher from "./locale-switch"; +import LocaleSwitcher from "./locale-switch"; export const ElevationScrollAppBar = (props: AppBarProps) => { const pathname = usePathname(); @@ -162,7 +162,7 @@ export const ElevationScrollAppBar = (props: AppBarProps) => { - {/* */} + diff --git a/src/app/[locale]/blog/[name]/layout.tsx b/src/app/[locale]/blog/[name]/layout.tsx index 04b3763d..3af3c4a1 100644 --- a/src/app/[locale]/blog/[name]/layout.tsx +++ b/src/app/[locale]/blog/[name]/layout.tsx @@ -9,7 +9,7 @@ export default async function BlogsDetail({ params, }: { children: React.ReactNode; - params: Promise<{ locale: "en" }>; + params: Promise<{ locale: "zh" | "en" }>; }) { const { locale } = await params; setStaticParamsLocale(locale); diff --git a/src/app/[locale]/blog/layout.tsx b/src/app/[locale]/blog/layout.tsx index a2802dc7..5855b9ff 100644 --- a/src/app/[locale]/blog/layout.tsx +++ b/src/app/[locale]/blog/layout.tsx @@ -2,7 +2,7 @@ import Footer from "@/components/Footer"; import { Box } from "@mui/material"; import { setStaticParamsLocale } from "next-international/server"; -export default async function BlogsLayout({ children, params }: { children: React.ReactNode, params: Promise<{ locale: "en" }> }) { +export default async function BlogsLayout({ children, params }: { children: React.ReactNode, params: Promise<{ locale: "zh" | "en" }> }) { const { locale } = await params; setStaticParamsLocale(locale); diff --git a/src/app/[locale]/docs/[version]/[category]/[[...paths]]/layout.tsx b/src/app/[locale]/docs/[version]/[category]/[[...paths]]/layout.tsx index 0348a46f..206a9095 100644 --- 
a/src/app/[locale]/docs/[version]/[category]/[[...paths]]/layout.tsx +++ b/src/app/[locale]/docs/[version]/[category]/[[...paths]]/layout.tsx @@ -5,7 +5,7 @@ export default async function DocsDetail({ params, }: { children: React.ReactNode; - params: Promise<{ locale: "en" }>; + params: Promise<{ locale: "zh" | "en" }>; }) { const { locale } = await params; setStaticParamsLocale(locale); diff --git a/src/app/[locale]/docs/[version]/[category]/version.tsx b/src/app/[locale]/docs/[version]/[category]/version.tsx index 6c8fea17..c55bec59 100644 --- a/src/app/[locale]/docs/[version]/[category]/version.tsx +++ b/src/app/[locale]/docs/[version]/[category]/version.tsx @@ -28,7 +28,7 @@ export default function VersionList({ version, versions }: Props) { const theme = useTheme(); const pathnames = usePathname() .split("/") - .filter((item) => !_.includes(["en"], item)); + .filter((item) => !_.includes(["zh", "en"], item)); return ( ; + params: Promise<{ locale: "zh" | "en" }>; }) { const { locale } = await params; setStaticParamsLocale(locale); diff --git a/src/app/[locale]/locale-switch.tsx b/src/app/[locale]/locale-switch.tsx index 6af52839..074e7e77 100644 --- a/src/app/[locale]/locale-switch.tsx +++ b/src/app/[locale]/locale-switch.tsx @@ -6,12 +6,13 @@ import { useChangeLocale, useCurrentLocale } from "@/locales/client"; import { DropDown } from "@/components/DropDown"; -type LangType = "en"; +type LangType = "en" | "zh"; const lang: { [key in LangType]: string; } = { en: "English", + zh: "简体中文", } as const; export default function LocaleSwitcher() { diff --git a/src/components/I18nProvider.tsx b/src/components/I18nProvider.tsx index e773c31b..5ed2c68c 100644 --- a/src/components/I18nProvider.tsx +++ b/src/components/I18nProvider.tsx @@ -3,7 +3,7 @@ import { I18nProviderClient } from "@/locales/client"; interface Props { - locale: "en"; + locale: "zh" | "en"; children?: React.ReactNode; } export default function I18nProvider({ locale, children }: Props) { diff --git a/src/components/SidebarMenu.tsx b/src/components/SidebarMenu.tsx index fd9adf9e..39f80ff8 100644 --- a/src/components/SidebarMenu.tsx +++ b/src/components/SidebarMenu.tsx @@ -25,7 +25,7 @@ type SidebatMenuItemProps = { }; const isActive = (pathname: string, href?: string) => { - return href && href === pathname.replace(/^\/(en)/, ""); + return href && href === pathname.replace(/^\/(en|zh)/, ""); }; const checkOpen = (pathname: string, item: SidebarMenuItem): boolean => { diff --git a/src/locales/client.ts b/src/locales/client.ts index 6e9454a4..e2d54516 100644 --- a/src/locales/client.ts +++ b/src/locales/client.ts @@ -10,4 +10,5 @@ export const { useChangeLocale, } = createI18nClient({ en: () => import("./en"), + zh: () => import("./zh"), }); diff --git a/src/locales/server.ts b/src/locales/server.ts index 33388426..728dc9a1 100644 --- a/src/locales/server.ts +++ b/src/locales/server.ts @@ -4,4 +4,5 @@ import { createI18nServer } from "next-international/server"; export const { getI18n, getScopedI18n, getStaticParams, getCurrentLocale } = createI18nServer({ en: () => import("./en"), + zh: () => import("./zh"), }); diff --git a/src/middleware.ts b/src/middleware.ts index 55502457..00a1e35b 100644 --- a/src/middleware.ts +++ b/src/middleware.ts @@ -4,7 +4,7 @@ import { createI18nMiddleware } from "next-international/middleware"; import { NextRequest } from "next/server"; const I18nMiddleware = createI18nMiddleware({ - locales: ["en"], + locales: ["en", "zh"], defaultLocale: "en", urlMappingStrategy: "rewriteDefault", // redirect | 
rewrite | rewriteDefault }); diff --git a/src/utils/markdown.tsx b/src/utils/markdown.tsx index a881b94c..bf148ed2 100644 --- a/src/utils/markdown.tsx +++ b/src/utils/markdown.tsx @@ -51,7 +51,7 @@ export const getMarkDownSideBar = async ( Object.assign(item, { position: metadata.sidebar_position || metadata.position || 0, label: metadata.sidebar_label || metadata.title, - href: urlPath.replace(/^\/(en)/, "/docs"), + href: urlPath.replace(/^\/(en|zh)/, "/docs"), description: metadata.description, hidden: Boolean(metadata.hidden), }); @@ -83,11 +83,16 @@ export const getMarkDownSideBar = async ( }; export const getMarkDownMetaData = async (filepath: string) => { - const isExists = fs.existsSync(filepath); - if (isExists) { - const { data } = grayMatter(fs.readFileSync(filepath, "utf8")); - return data; - } else { + try { + const isExists = fs.existsSync(filepath); + if (isExists) { + const { data } = grayMatter(fs.readFileSync(filepath, "utf8")); + return data; + } else { + return {}; + } + } catch (error) { + console.error(`Error reading markdown metadata from ${filepath}:`, error); return {}; } }; diff --git a/yarn.lock b/yarn.lock index ea478bd2..638dfbad 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2,98 +2,116 @@ # yarn lockfile v1 -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.26.2": - version "7.26.2" - resolved "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" - integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.27.1": + version "7.27.1" + resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz#200f715e66d52a23b221a9435534a91cc13ad5be" + integrity sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg== dependencies: - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-validator-identifier" "^7.27.1" js-tokens "^4.0.0" - picocolors "^1.0.0" + picocolors "^1.1.1" -"@babel/generator@^7.26.9": - version "7.26.9" - resolved "https://registry.npmmirror.com/@babel/generator/-/generator-7.26.9.tgz#75a9482ad3d0cc7188a537aa4910bc59db67cbca" - integrity sha512-kEWdzjOAUMW4hAyrzJ0ZaTOu9OmpyDIQicIh0zg0EEcEkYXZb2TjtBhnHi2ViX7PKwZqF4xwqfAm299/QMP3lg== +"@babel/generator@^7.28.0": + version "7.28.0" + resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.28.0.tgz#9cc2f7bd6eb054d77dc66c2664148a0c5118acd2" + integrity sha512-lJjzvrbEeWrhB4P3QBsH7tey117PjLZnDbLiQEKjQ/fNJTjuq4HSqgFA+UNSwZT8D7dxxbnuSBMsa1lrWzKlQg== dependencies: - "@babel/parser" "^7.26.9" - "@babel/types" "^7.26.9" - "@jridgewell/gen-mapping" "^0.3.5" - "@jridgewell/trace-mapping" "^0.3.25" + "@babel/parser" "^7.28.0" + "@babel/types" "^7.28.0" + "@jridgewell/gen-mapping" "^0.3.12" + "@jridgewell/trace-mapping" "^0.3.28" jsesc "^3.0.2" +"@babel/helper-globals@^7.28.0": + version "7.28.0" + resolved "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz#b9430df2aa4e17bc28665eadeae8aa1d985e6674" + integrity sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw== + "@babel/helper-module-imports@^7.16.7": - version "7.25.9" - resolved "https://registry.npmmirror.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715" - integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw== + version "7.27.1" + resolved 
"https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz#7ef769a323e2655e126673bb6d2d6913bbead204" + integrity sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w== dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" -"@babel/helper-string-parser@^7.25.9": - version "7.25.9" - resolved "https://registry.npmmirror.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c" - integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA== +"@babel/helper-string-parser@^7.27.1": + version "7.27.1" + resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687" + integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA== -"@babel/helper-validator-identifier@^7.25.9": - version "7.25.9" - resolved "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7" - integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== +"@babel/helper-validator-identifier@^7.27.1": + version "7.27.1" + resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz#a7054dcc145a967dd4dc8fee845a57c1316c9df8" + integrity sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow== -"@babel/parser@^7.26.9": - version "7.26.9" - resolved "https://registry.npmmirror.com/@babel/parser/-/parser-7.26.9.tgz#d9e78bee6dc80f9efd8f2349dcfbbcdace280fd5" - integrity sha512-81NWa1njQblgZbQHxWHpxxCzNsa3ZwvFqpUg7P+NNUU6f3UU2jBEg4OlF/J6rl8+PQGh1q6/zWScd001YwcA5A== +"@babel/parser@^7.27.2", "@babel/parser@^7.28.0": + version "7.28.0" + resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.28.0.tgz#979829fbab51a29e13901e5a80713dbcb840825e" + integrity sha512-jVZGvOxOuNSsuQuLRTh13nU0AogFlw32w/MT+LV6D3sP5WdbW61E77RnkbaO2dUvmPAYrBDJXGn5gGS6tH4j8g== dependencies: - "@babel/types" "^7.26.9" + "@babel/types" "^7.28.0" "@babel/runtime@^7.12.5", "@babel/runtime@^7.18.3", "@babel/runtime@^7.26.0", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.7": - version "7.26.9" - resolved "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.26.9.tgz#aa4c6facc65b9cb3f87d75125ffd47781b475433" - integrity sha512-aA63XwOkcl4xxQa3HjPMqOP6LiK0ZDv3mUPYEFXkpHbaFjtGggE1A61FjFzJnB+p7/oy2gA8E+rcBNl/zC1tMg== - dependencies: - regenerator-runtime "^0.14.0" - -"@babel/template@^7.26.9": - version "7.26.9" - resolved "https://registry.npmmirror.com/@babel/template/-/template-7.26.9.tgz#4577ad3ddf43d194528cff4e1fa6b232fa609bb2" - integrity sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA== - dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/parser" "^7.26.9" - "@babel/types" "^7.26.9" - -"@babel/traverse@^7.25.9": - version "7.26.9" - resolved "https://registry.npmmirror.com/@babel/traverse/-/traverse-7.26.9.tgz#4398f2394ba66d05d988b2ad13c219a2c857461a" - integrity sha512-ZYW7L+pL8ahU5fXmNbPF+iZFHCv5scFak7MZ9bwaRPLUhHh7QQEMjZUg0HevihoqCM5iSYHN61EyCoZvqC+bxg== - dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.26.9" - "@babel/parser" "^7.26.9" - "@babel/template" "^7.26.9" - "@babel/types" "^7.26.9" + version 
"7.27.6" + resolved "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz#ec4070a04d76bae8ddbb10770ba55714a417b7c6" + integrity sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q== + +"@babel/template@^7.27.2": + version "7.27.2" + resolved "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz#fa78ceed3c4e7b63ebf6cb39e5852fca45f6809d" + integrity sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/parser" "^7.27.2" + "@babel/types" "^7.27.1" + +"@babel/traverse@^7.27.1": + version "7.28.0" + resolved "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.0.tgz#518aa113359b062042379e333db18380b537e34b" + integrity sha512-mGe7UK5wWyh0bKRfupsUchrQGqvDbZDbKJw+kcRGSmdHVYrv+ltd0pnpDTVpiTqnaBru9iEvA8pz8W46v0Amwg== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.28.0" + "@babel/helper-globals" "^7.28.0" + "@babel/parser" "^7.28.0" + "@babel/template" "^7.27.2" + "@babel/types" "^7.28.0" debug "^4.3.1" - globals "^11.1.0" -"@babel/types@^7.25.9", "@babel/types@^7.26.9": - version "7.26.9" - resolved "https://registry.npmmirror.com/@babel/types/-/types-7.26.9.tgz#08b43dec79ee8e682c2ac631c010bdcac54a21ce" - integrity sha512-Y3IR1cRnOxOCDvMmNiym7XpXQ93iGDDPHx+Zj+NM+rg0fBaShfQLkg+hKPaZCEvg5N/LeCo4+Rj/i3FuJsIQaw== +"@babel/types@^7.27.1", "@babel/types@^7.28.0": + version "7.28.1" + resolved "https://registry.npmjs.org/@babel/types/-/types-7.28.1.tgz#2aaf3c10b31ba03a77ac84f52b3912a0edef4cf9" + integrity sha512-x0LvFTekgSX+83TI28Y9wYPUfzrnl2aT5+5QLnO6v7mSJYtEEevuDRN0F0uSHRk1G1IWZC43o00Y0xDDrpBGPQ== dependencies: - "@babel/helper-string-parser" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-string-parser" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" -"@emnapi/runtime@^1.2.0": - version "1.3.1" - resolved "https://registry.npmmirror.com/@emnapi/runtime/-/runtime-1.3.1.tgz#0fcaa575afc31f455fd33534c19381cfce6c6f60" - integrity sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw== +"@emnapi/core@^1.4.3": + version "1.4.4" + resolved "https://registry.npmjs.org/@emnapi/core/-/core-1.4.4.tgz#76620673f3033626c6d79b1420d69f06a6bb153c" + integrity sha512-A9CnAbC6ARNMKcIcrQwq6HeHCjpcBZ5wSx4U01WXCqEKlrzB9F9315WDNHkrs2xbx7YjjSxbUYxuN6EQzpcY2g== + dependencies: + "@emnapi/wasi-threads" "1.0.3" + tslib "^2.4.0" + +"@emnapi/runtime@^1.2.0", "@emnapi/runtime@^1.4.3": + version "1.4.4" + resolved "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.4.4.tgz#19a8f00719c51124e2d0fbf4aaad3fa7b0c92524" + integrity sha512-hHyapA4A3gPaDCNfiqyZUStTMqIkKRshqPIuDOXv1hcBnD4U3l8cP0T1HMCfGRxQ6V64TGCcoswChANyOAwbQg== + dependencies: + tslib "^2.4.0" + +"@emnapi/wasi-threads@1.0.3": + version "1.0.3" + resolved "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.0.3.tgz#83fa228bde0e71668aad6db1af4937473d1d3ab1" + integrity sha512-8K5IFFsQqF9wQNJptGbS6FNKgUTsSRYnTqNCG1vPP8jFdjSv18n2mQfJpkt2Oibo9iBEzcDnDxNwKTzC7svlJw== dependencies: tslib "^2.4.0" "@emotion/babel-plugin@^11.13.5": version "11.13.5" - resolved "https://registry.npmmirror.com/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz#eab8d65dbded74e0ecfd28dc218e75607c4e7bc0" + resolved "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz#eab8d65dbded74e0ecfd28dc218e75607c4e7bc0" integrity 
sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ== dependencies: "@babel/helper-module-imports" "^7.16.7" @@ -110,7 +128,7 @@ "@emotion/cache@^11.13.5", "@emotion/cache@^11.14.0": version "11.14.0" - resolved "https://registry.npmmirror.com/@emotion/cache/-/cache-11.14.0.tgz#ee44b26986eeb93c8be82bb92f1f7a9b21b2ed76" + resolved "https://registry.npmjs.org/@emotion/cache/-/cache-11.14.0.tgz#ee44b26986eeb93c8be82bb92f1f7a9b21b2ed76" integrity sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA== dependencies: "@emotion/memoize" "^0.9.0" @@ -121,24 +139,24 @@ "@emotion/hash@^0.9.2": version "0.9.2" - resolved "https://registry.npmmirror.com/@emotion/hash/-/hash-0.9.2.tgz#ff9221b9f58b4dfe61e619a7788734bd63f6898b" + resolved "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz#ff9221b9f58b4dfe61e619a7788734bd63f6898b" integrity sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g== "@emotion/is-prop-valid@^1.3.0": version "1.3.1" - resolved "https://registry.npmmirror.com/@emotion/is-prop-valid/-/is-prop-valid-1.3.1.tgz#8d5cf1132f836d7adbe42cf0b49df7816fc88240" + resolved "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.3.1.tgz#8d5cf1132f836d7adbe42cf0b49df7816fc88240" integrity sha512-/ACwoqx7XQi9knQs/G0qKvv5teDMhD7bXYns9N/wM8ah8iNb8jZ2uNO0YOgiq2o2poIvVtJS2YALasQuMSQ7Kw== dependencies: "@emotion/memoize" "^0.9.0" "@emotion/memoize@^0.9.0": version "0.9.0" - resolved "https://registry.npmmirror.com/@emotion/memoize/-/memoize-0.9.0.tgz#745969d649977776b43fc7648c556aaa462b4102" + resolved "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz#745969d649977776b43fc7648c556aaa462b4102" integrity sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ== "@emotion/react@^11.14.0": version "11.14.0" - resolved "https://registry.npmmirror.com/@emotion/react/-/react-11.14.0.tgz#cfaae35ebc67dd9ef4ea2e9acc6cd29e157dd05d" + resolved "https://registry.npmjs.org/@emotion/react/-/react-11.14.0.tgz#cfaae35ebc67dd9ef4ea2e9acc6cd29e157dd05d" integrity sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA== dependencies: "@babel/runtime" "^7.18.3" @@ -152,7 +170,7 @@ "@emotion/serialize@^1.3.3": version "1.3.3" - resolved "https://registry.npmmirror.com/@emotion/serialize/-/serialize-1.3.3.tgz#d291531005f17d704d0463a032fe679f376509e8" + resolved "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.3.3.tgz#d291531005f17d704d0463a032fe679f376509e8" integrity sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA== dependencies: "@emotion/hash" "^0.9.2" @@ -163,13 +181,13 @@ "@emotion/sheet@^1.4.0": version "1.4.0" - resolved "https://registry.npmmirror.com/@emotion/sheet/-/sheet-1.4.0.tgz#c9299c34d248bc26e82563735f78953d2efca83c" + resolved "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.4.0.tgz#c9299c34d248bc26e82563735f78953d2efca83c" integrity sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg== "@emotion/styled@^11.14.0": - version "11.14.0" - resolved "https://registry.npmmirror.com/@emotion/styled/-/styled-11.14.0.tgz#f47ca7219b1a295186d7661583376fcea95f0ff3" - integrity sha512-XxfOnXFffatap2IyCeJyNov3kiDQWoR08gPUQxvbL7fxKryGBKUZUkG6Hz48DZwVrJSVh9sJboyV1Ds4OW6SgA== + version "11.14.1" + resolved 
"https://registry.npmjs.org/@emotion/styled/-/styled-11.14.1.tgz#8c34bed2948e83e1980370305614c20955aacd1c" + integrity sha512-qEEJt42DuToa3gurlH4Qqc1kVpNq8wO8cJtDzU46TjlzWjDlsVyevtYCRijVq3SrHsROS+gVQ8Fnea108GnKzw== dependencies: "@babel/runtime" "^7.18.3" "@emotion/babel-plugin" "^11.13.5" @@ -180,63 +198,61 @@ "@emotion/unitless@^0.10.0": version "0.10.0" - resolved "https://registry.npmmirror.com/@emotion/unitless/-/unitless-0.10.0.tgz#2af2f7c7e5150f497bdabd848ce7b218a27cf745" + resolved "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz#2af2f7c7e5150f497bdabd848ce7b218a27cf745" integrity sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg== "@emotion/use-insertion-effect-with-fallbacks@^1.2.0": version "1.2.0" - resolved "https://registry.npmmirror.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz#8a8cb77b590e09affb960f4ff1e9a89e532738bf" + resolved "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz#8a8cb77b590e09affb960f4ff1e9a89e532738bf" integrity sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg== "@emotion/utils@^1.4.2": version "1.4.2" - resolved "https://registry.npmmirror.com/@emotion/utils/-/utils-1.4.2.tgz#6df6c45881fcb1c412d6688a311a98b7f59c1b52" + resolved "https://registry.npmjs.org/@emotion/utils/-/utils-1.4.2.tgz#6df6c45881fcb1c412d6688a311a98b7f59c1b52" integrity sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA== "@emotion/weak-memoize@^0.4.0": version "0.4.0" - resolved "https://registry.npmmirror.com/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz#5e13fac887f08c44f76b0ccaf3370eb00fec9bb6" + resolved "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz#5e13fac887f08c44f76b0ccaf3370eb00fec9bb6" integrity sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg== -"@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": - version "4.4.1" - resolved "https://registry.npmmirror.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.1.tgz#d1145bf2c20132d6400495d6df4bf59362fd9d56" - integrity sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA== +"@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.7.0": + version "4.7.0" + resolved "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.7.0.tgz#607084630c6c033992a082de6e6fbc1a8b52175a" + integrity sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw== dependencies: eslint-visitor-keys "^3.4.3" "@eslint-community/regexpp@^4.10.0", "@eslint-community/regexpp@^4.12.1": version "4.12.1" - resolved "https://registry.npmmirror.com/@eslint-community/regexpp/-/regexpp-4.12.1.tgz#cfc6cffe39df390a3841cde2abccf92eaa7ae0e0" + resolved "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz#cfc6cffe39df390a3841cde2abccf92eaa7ae0e0" integrity sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ== -"@eslint/config-array@^0.19.0": - version "0.19.2" - resolved "https://registry.npmmirror.com/@eslint/config-array/-/config-array-0.19.2.tgz#3060b809e111abfc97adb0bb1172778b90cb46aa" - integrity sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w== +"@eslint/config-array@^0.21.0": 
+ version "0.21.0" + resolved "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz#abdbcbd16b124c638081766392a4d6b509f72636" + integrity sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ== dependencies: "@eslint/object-schema" "^2.1.6" debug "^4.3.1" minimatch "^3.1.2" -"@eslint/core@^0.10.0": - version "0.10.0" - resolved "https://registry.npmmirror.com/@eslint/core/-/core-0.10.0.tgz#23727063c21b335f752dbb3a16450f6f9cbc9091" - integrity sha512-gFHJ+xBOo4G3WRlR1e/3G8A6/KZAH6zcE/hkLRCZTi/B9avAG365QhFA8uOGzTMqgTghpn7/fSnscW++dpMSAw== - dependencies: - "@types/json-schema" "^7.0.15" +"@eslint/config-helpers@^0.3.0": + version "0.3.0" + resolved "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.0.tgz#3e09a90dfb87e0005c7694791e58e97077271286" + integrity sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw== -"@eslint/core@^0.11.0": - version "0.11.0" - resolved "https://registry.npmmirror.com/@eslint/core/-/core-0.11.0.tgz#7a9226e850922e42cbd2ba71361eacbe74352a12" - integrity sha512-DWUB2pksgNEb6Bz2fggIy1wh6fGgZP4Xyy/Mt0QZPiloKKXerbqq9D3SBQTlCRYOrcRPu4vuz+CGjwdfqxnoWA== +"@eslint/core@^0.15.0", "@eslint/core@^0.15.1": + version "0.15.1" + resolved "https://registry.npmjs.org/@eslint/core/-/core-0.15.1.tgz#d530d44209cbfe2f82ef86d6ba08760196dd3b60" + integrity sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA== dependencies: "@types/json-schema" "^7.0.15" -"@eslint/eslintrc@^3", "@eslint/eslintrc@^3.2.0": - version "3.2.0" - resolved "https://registry.npmmirror.com/@eslint/eslintrc/-/eslintrc-3.2.0.tgz#57470ac4e2e283a6bf76044d63281196e370542c" - integrity sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w== +"@eslint/eslintrc@^3", "@eslint/eslintrc@^3.3.1": + version "3.3.1" + resolved "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz#e55f7f1dd400600dd066dbba349c4c0bac916964" + integrity sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ== dependencies: ajv "^6.12.4" debug "^4.3.2" @@ -248,32 +264,32 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@eslint/js@9.20.0": - version "9.20.0" - resolved "https://registry.npmmirror.com/@eslint/js/-/js-9.20.0.tgz#7421bcbe74889fcd65d1be59f00130c289856eb4" - integrity sha512-iZA07H9io9Wn836aVTytRaNqh00Sad+EamwOVJT12GTLw1VGMFV/4JaME+JjLtr9fiGaoWgYnS54wrfWsSs4oQ== +"@eslint/js@9.31.0": + version "9.31.0" + resolved "https://registry.npmjs.org/@eslint/js/-/js-9.31.0.tgz#adb1f39953d8c475c4384b67b67541b0d7206ed8" + integrity sha512-LOm5OVt7D4qiKCqoiPbA7LWmI+tbw1VbTUowBcUMgQSuM6poJufkFkYDcQpo5KfgD39TnNySV26QjOh7VFpSyw== "@eslint/object-schema@^2.1.6": version "2.1.6" - resolved "https://registry.npmmirror.com/@eslint/object-schema/-/object-schema-2.1.6.tgz#58369ab5b5b3ca117880c0f6c0b0f32f6950f24f" + resolved "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz#58369ab5b5b3ca117880c0f6c0b0f32f6950f24f" integrity sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA== -"@eslint/plugin-kit@^0.2.5": - version "0.2.5" - resolved "https://registry.npmmirror.com/@eslint/plugin-kit/-/plugin-kit-0.2.5.tgz#ee07372035539e7847ef834e3f5e7b79f09e3a81" - integrity sha512-lB05FkqEdUg2AA0xEbUz0SnkXT1LcCTa438W4IWTUh4hdOnVbQyOJ81OrDXsJk/LSiJHubgGEFoR5EHq1NsH1A== +"@eslint/plugin-kit@^0.3.1": + version "0.3.3" + resolved 
"https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.3.tgz#32926b59bd407d58d817941e48b2a7049359b1fd" + integrity sha512-1+WqvgNMhmlAambTvT3KPtCl/Ibr68VldY2XY40SL1CE0ZXiakFR/cbTspaF5HsnpDMvcYYoJHfl4980NBjGag== dependencies: - "@eslint/core" "^0.10.0" + "@eslint/core" "^0.15.1" levn "^0.4.1" "@humanfs/core@^0.19.1": version "0.19.1" - resolved "https://registry.npmmirror.com/@humanfs/core/-/core-0.19.1.tgz#17c55ca7d426733fe3c561906b8173c336b40a77" + resolved "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz#17c55ca7d426733fe3c561906b8173c336b40a77" integrity sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA== "@humanfs/node@^0.16.6": version "0.16.6" - resolved "https://registry.npmmirror.com/@humanfs/node/-/node-0.16.6.tgz#ee2a10eaabd1131987bf0488fd9b820174cd765e" + resolved "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz#ee2a10eaabd1131987bf0488fd9b820174cd765e" integrity sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw== dependencies: "@humanfs/core" "^0.19.1" @@ -281,172 +297,166 @@ "@humanwhocodes/module-importer@^1.0.1": version "1.0.1" - resolved "https://registry.npmmirror.com/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c" + resolved "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c" integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== "@humanwhocodes/retry@^0.3.0": version "0.3.1" - resolved "https://registry.npmmirror.com/@humanwhocodes/retry/-/retry-0.3.1.tgz#c72a5c76a9fbaf3488e231b13dc52c0da7bab42a" + resolved "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz#c72a5c76a9fbaf3488e231b13dc52c0da7bab42a" integrity sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA== -"@humanwhocodes/retry@^0.4.1": - version "0.4.1" - resolved "https://registry.npmmirror.com/@humanwhocodes/retry/-/retry-0.4.1.tgz#9a96ce501bc62df46c4031fbd970e3cc6b10f07b" - integrity sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA== +"@humanwhocodes/retry@^0.4.2": + version "0.4.3" + resolved "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz#c2b9d2e374ee62c586d3adbea87199b1d7a7a6ba" + integrity sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ== "@img/sharp-darwin-arm64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz#ef5b5a07862805f1e8145a377c8ba6e98813ca08" + resolved "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz#ef5b5a07862805f1e8145a377c8ba6e98813ca08" integrity sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ== optionalDependencies: "@img/sharp-libvips-darwin-arm64" "1.0.4" "@img/sharp-darwin-x64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz#e03d3451cd9e664faa72948cc70a403ea4063d61" + resolved "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz#e03d3451cd9e664faa72948cc70a403ea4063d61" integrity sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q== optionalDependencies: "@img/sharp-libvips-darwin-x64" "1.0.4" "@img/sharp-libvips-darwin-arm64@1.0.4": 
version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz#447c5026700c01a993c7804eb8af5f6e9868c07f" + resolved "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz#447c5026700c01a993c7804eb8af5f6e9868c07f" integrity sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg== "@img/sharp-libvips-darwin-x64@1.0.4": version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz#e0456f8f7c623f9dbfbdc77383caa72281d86062" + resolved "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz#e0456f8f7c623f9dbfbdc77383caa72281d86062" integrity sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ== "@img/sharp-libvips-linux-arm64@1.0.4": version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz#979b1c66c9a91f7ff2893556ef267f90ebe51704" + resolved "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz#979b1c66c9a91f7ff2893556ef267f90ebe51704" integrity sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA== "@img/sharp-libvips-linux-arm@1.0.5": version "1.0.5" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz#99f922d4e15216ec205dcb6891b721bfd2884197" + resolved "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz#99f922d4e15216ec205dcb6891b721bfd2884197" integrity sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g== "@img/sharp-libvips-linux-s390x@1.0.4": version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz#f8a5eb1f374a082f72b3f45e2fb25b8118a8a5ce" + resolved "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz#f8a5eb1f374a082f72b3f45e2fb25b8118a8a5ce" integrity sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA== "@img/sharp-libvips-linux-x64@1.0.4": version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz#d4c4619cdd157774906e15770ee119931c7ef5e0" + resolved "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz#d4c4619cdd157774906e15770ee119931c7ef5e0" integrity sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw== "@img/sharp-libvips-linuxmusl-arm64@1.0.4": version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz#166778da0f48dd2bded1fa3033cee6b588f0d5d5" + resolved "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz#166778da0f48dd2bded1fa3033cee6b588f0d5d5" integrity sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA== "@img/sharp-libvips-linuxmusl-x64@1.0.4": version "1.0.4" - resolved "https://registry.npmmirror.com/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz#93794e4d7720b077fcad3e02982f2f1c246751ff" + resolved 
"https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz#93794e4d7720b077fcad3e02982f2f1c246751ff" integrity sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw== "@img/sharp-linux-arm64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz#edb0697e7a8279c9fc829a60fc35644c4839bb22" + resolved "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz#edb0697e7a8279c9fc829a60fc35644c4839bb22" integrity sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA== optionalDependencies: "@img/sharp-libvips-linux-arm64" "1.0.4" "@img/sharp-linux-arm@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz#422c1a352e7b5832842577dc51602bcd5b6f5eff" + resolved "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz#422c1a352e7b5832842577dc51602bcd5b6f5eff" integrity sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ== optionalDependencies: "@img/sharp-libvips-linux-arm" "1.0.5" "@img/sharp-linux-s390x@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz#f5c077926b48e97e4a04d004dfaf175972059667" + resolved "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz#f5c077926b48e97e4a04d004dfaf175972059667" integrity sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q== optionalDependencies: "@img/sharp-libvips-linux-s390x" "1.0.4" "@img/sharp-linux-x64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz#d806e0afd71ae6775cc87f0da8f2d03a7c2209cb" + resolved "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz#d806e0afd71ae6775cc87f0da8f2d03a7c2209cb" integrity sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA== optionalDependencies: "@img/sharp-libvips-linux-x64" "1.0.4" "@img/sharp-linuxmusl-arm64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz#252975b915894fb315af5deea174651e208d3d6b" + resolved "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz#252975b915894fb315af5deea174651e208d3d6b" integrity sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g== optionalDependencies: "@img/sharp-libvips-linuxmusl-arm64" "1.0.4" "@img/sharp-linuxmusl-x64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz#3f4609ac5d8ef8ec7dadee80b560961a60fd4f48" + resolved "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz#3f4609ac5d8ef8ec7dadee80b560961a60fd4f48" integrity sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw== optionalDependencies: "@img/sharp-libvips-linuxmusl-x64" "1.0.4" "@img/sharp-wasm32@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz#6f44f3283069d935bb5ca5813153572f3e6f61a1" + resolved "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz#6f44f3283069d935bb5ca5813153572f3e6f61a1" integrity 
sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg== dependencies: "@emnapi/runtime" "^1.2.0" "@img/sharp-win32-ia32@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz#1a0c839a40c5351e9885628c85f2e5dfd02b52a9" + resolved "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz#1a0c839a40c5351e9885628c85f2e5dfd02b52a9" integrity sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ== "@img/sharp-win32-x64@0.33.5": version "0.33.5" - resolved "https://registry.npmmirror.com/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz#56f00962ff0c4e0eb93d34a047d29fa995e3e342" + resolved "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz#56f00962ff0c4e0eb93d34a047d29fa995e3e342" integrity sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg== -"@jridgewell/gen-mapping@^0.3.5": - version "0.3.8" - resolved "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz#4f0e06362e01362f823d348f1872b08f666d8142" - integrity sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA== +"@jridgewell/gen-mapping@^0.3.12": + version "0.3.12" + resolved "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.12.tgz#2234ce26c62889f03db3d7fea43c1932ab3e927b" + integrity sha512-OuLGC46TjB5BbN1dH8JULVVZY4WTdkF7tV9Ys6wLL1rubZnCMstOhNHueU5bLCrnRuDhKPDM4g6sw4Bel5Gzqg== dependencies: - "@jridgewell/set-array" "^1.2.1" - "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/sourcemap-codec" "^1.5.0" "@jridgewell/trace-mapping" "^0.3.24" "@jridgewell/resolve-uri@^3.1.0": version "3.1.2" - resolved "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + resolved "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== -"@jridgewell/set-array@^1.2.1": - version "1.2.1" - resolved "https://registry.npmmirror.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" - integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== - -"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": - version "1.5.0" - resolved "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" - integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== +"@jridgewell/sourcemap-codec@^1.4.14", "@jridgewell/sourcemap-codec@^1.5.0": + version "1.5.4" + resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.4.tgz#7358043433b2e5da569aa02cbc4c121da3af27d7" + integrity sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw== -"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": - version "0.3.25" - resolved "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" - integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== +"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.28": + 
version "0.3.29" + resolved "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.29.tgz#a58d31eaadaf92c6695680b2e1d464a9b8fbf7fc" + integrity sha512-uw6guiW/gcAGPDhLmd77/6lW8QLeiV5RUTsAX46Db6oLhGaVj4lhnPwb184s1bkc8kdVg/+h988dro8GRDpmYQ== dependencies: "@jridgewell/resolve-uri" "^3.1.0" "@jridgewell/sourcemap-codec" "^1.4.14" "@jsdevtools/rehype-toc@^3.0.2": version "3.0.2" - resolved "https://registry.npmmirror.com/@jsdevtools/rehype-toc/-/rehype-toc-3.0.2.tgz#29c32e6b40cd4b5dafd96cb90d5057ac5dab4a51" + resolved "https://registry.npmjs.org/@jsdevtools/rehype-toc/-/rehype-toc-3.0.2.tgz#29c32e6b40cd4b5dafd96cb90d5057ac5dab4a51" integrity sha512-n5JEf16Wr4mdkRMZ8wMP/wN9/sHmTjRPbouXjJH371mZ2LEGDl72t8tEsMRNFerQN/QJtivOxqK1frdGa4QK5Q== "@mdx-js/loader@^3.1.0": version "3.1.0" - resolved "https://registry.npmmirror.com/@mdx-js/loader/-/loader-3.1.0.tgz#715fdab11d0c9567e45049c16a7d9c83cec88214" + resolved "https://registry.npmjs.org/@mdx-js/loader/-/loader-3.1.0.tgz#715fdab11d0c9567e45049c16a7d9c83cec88214" integrity sha512-xU/lwKdOyfXtQGqn3VnJjlDrmKXEvMi1mgYxVmukEUtVycIz1nh7oQ40bKTd4cA7rLStqu0740pnhGYxGoqsCg== dependencies: "@mdx-js/mdx" "^3.0.0" @@ -454,7 +464,7 @@ "@mdx-js/mdx@^3.0.0": version "3.1.0" - resolved "https://registry.npmmirror.com/@mdx-js/mdx/-/mdx-3.1.0.tgz#10235cab8ad7d356c262e8c21c68df5850a97dc3" + resolved "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.0.tgz#10235cab8ad7d356c262e8c21c68df5850a97dc3" integrity sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw== dependencies: "@types/estree" "^1.0.0" @@ -484,40 +494,40 @@ "@mdx-js/react@^3.1.0": version "3.1.0" - resolved "https://registry.npmmirror.com/@mdx-js/react/-/react-3.1.0.tgz#c4522e335b3897b9a845db1dbdd2f966ae8fb0ed" + resolved "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.0.tgz#c4522e335b3897b9a845db1dbdd2f966ae8fb0ed" integrity sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ== dependencies: "@types/mdx" "^2.0.0" -"@mui/core-downloads-tracker@^6.4.4": - version "6.4.4" - resolved "https://registry.npmmirror.com/@mui/core-downloads-tracker/-/core-downloads-tracker-6.4.4.tgz#7ea43a1185e9cb1dbec77b9fc543b3d8a338d1f0" - integrity sha512-r+J0EditrekkTtO2CnCBCOGpNaDYwJqz8lH4rj6o/anDcskZFJodBlG8aCJkS8DL/CF/9EHS+Gz53EbmYEnQbw== +"@mui/core-downloads-tracker@^6.5.0": + version "6.5.0" + resolved "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-6.5.0.tgz#e9f7049d7e7bb1ee05839f7a0ce813755f137432" + integrity sha512-LGb8t8i6M2ZtS3Drn3GbTI1DVhDY6FJ9crEey2lZ0aN2EMZo8IZBZj9wRf4vqbZHaWjsYgtbOnJw5V8UWbmK2Q== "@mui/icons-material@^6.4.4": - version "6.4.4" - resolved "https://registry.npmmirror.com/@mui/icons-material/-/icons-material-6.4.4.tgz#cbc2c9e9c9bbb66f4062085dc69c0b886cd7f888" - integrity sha512-uF1chGaoFmYdRUomK6f8kgJfWosk9A3HXWiVD0vQm+2mE7f25eTQ1E8RRO11LXpnUBqu8Rbv/uGlpnjT/u1Ksg== + version "6.5.0" + resolved "https://registry.npmjs.org/@mui/icons-material/-/icons-material-6.5.0.tgz#26bfa7c8574cc4e57c2f2835bfd6b1efa7f310fa" + integrity sha512-VPuPqXqbBPlcVSA0BmnoE4knW4/xG6Thazo8vCLWkOKusko6DtwFV6B665MMWJ9j0KFohTIf3yx2zYtYacvG1g== dependencies: "@babel/runtime" "^7.26.0" "@mui/material-nextjs@^6.4.3": - version "6.4.3" - resolved "https://registry.npmmirror.com/@mui/material-nextjs/-/material-nextjs-6.4.3.tgz#516eec4b6ca7d79f196f5c8b1b7f91e69e5c95ab" - integrity sha512-4ZRLrcD1HeWpvY8c7MrKYKuaUSobtvqcLYeEfGh/x5ezzPgKizhl7C0jpVVEgf6g+C9OgOGbhLTVfks7Y2IBAQ== + version "6.5.0" + 
resolved "https://registry.npmjs.org/@mui/material-nextjs/-/material-nextjs-6.5.0.tgz#740993e8175c27e4dd7bd8cec2c1a17e49081d5e" + integrity sha512-VV+4BhmnY32pbPuIevs+rPl0R+bkVvqGCqRqZqNhkdBdX0pn1IHQ5mG/EfYJKoKUkF7q9FKSag5wduSTUxDqnQ== dependencies: "@babel/runtime" "^7.26.0" "@mui/material@^6.4.4": - version "6.4.4" - resolved "https://registry.npmmirror.com/@mui/material/-/material-6.4.4.tgz#fd2879cae5f3ed8a13b6257dbc051e0bbeefa8ee" - integrity sha512-ISVPrIsPQsxnwvS40C4u03AuNSPigFeS2+n1qpuEZ94hDsdMi19dQM2JcC9CHEhXecSIQjP1RTyY0mPiSpSrFQ== + version "6.5.0" + resolved "https://registry.npmjs.org/@mui/material/-/material-6.5.0.tgz#c7eccfe260030433c51b7aec17574bae4504cacc" + integrity sha512-yjvtXoFcrPLGtgKRxFaH6OQPtcLPhkloC0BML6rBG5UeldR0nPULR/2E2BfXdo5JNV7j7lOzrrLX2Qf/iSidow== dependencies: "@babel/runtime" "^7.26.0" - "@mui/core-downloads-tracker" "^6.4.4" - "@mui/system" "^6.4.3" - "@mui/types" "^7.2.21" - "@mui/utils" "^6.4.3" + "@mui/core-downloads-tracker" "^6.5.0" + "@mui/system" "^6.5.0" + "@mui/types" "~7.2.24" + "@mui/utils" "^6.4.9" "@popperjs/core" "^2.11.8" "@types/react-transition-group" "^4.4.12" clsx "^2.1.1" @@ -526,19 +536,19 @@ react-is "^19.0.0" react-transition-group "^4.4.5" -"@mui/private-theming@^6.4.3": - version "6.4.3" - resolved "https://registry.npmmirror.com/@mui/private-theming/-/private-theming-6.4.3.tgz#40d7d95316e9e52d465f0c96da23f9fb8f6a989f" - integrity sha512-7x9HaNwDCeoERc4BoEWLieuzKzXu5ZrhRnEM6AUcRXUScQLvF1NFkTlP59+IJfTbEMgcGg1wWHApyoqcksrBpQ== +"@mui/private-theming@^6.4.9": + version "6.4.9" + resolved "https://registry.npmjs.org/@mui/private-theming/-/private-theming-6.4.9.tgz#0c1d65a638a1740aad0eb715d79e76471abe8175" + integrity sha512-LktcVmI5X17/Q5SkwjCcdOLBzt1hXuc14jYa7NPShog0GBDCDvKtcnP0V7a2s6EiVRlv7BzbWEJzH6+l/zaCxw== dependencies: "@babel/runtime" "^7.26.0" - "@mui/utils" "^6.4.3" + "@mui/utils" "^6.4.9" prop-types "^15.8.1" -"@mui/styled-engine@^6.4.3": - version "6.4.3" - resolved "https://registry.npmmirror.com/@mui/styled-engine/-/styled-engine-6.4.3.tgz#fbd7a6b925dfaeaa84ffbf8ed9be78a0ff0b3d6e" - integrity sha512-OC402VfK+ra2+f12Gef8maY7Y9n7B6CZcoQ9u7mIkh/7PKwW/xH81xwX+yW+Ak1zBT3HYcVjh2X82k5cKMFGoQ== +"@mui/styled-engine@^6.5.0": + version "6.5.0" + resolved "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-6.5.0.tgz#cf9b3e706517f5f2989df92d2aea0d2917a77c8a" + integrity sha512-8woC2zAqF4qUDSPIBZ8v3sakj+WgweolpyM/FXf8jAx6FMls+IE4Y8VDZc+zS805J7PRz31vz73n2SovKGaYgw== dependencies: "@babel/runtime" "^7.26.0" "@emotion/cache" "^11.13.5" @@ -547,99 +557,108 @@ csstype "^3.1.3" prop-types "^15.8.1" -"@mui/system@^6.4.3": - version "6.4.3" - resolved "https://registry.npmmirror.com/@mui/system/-/system-6.4.3.tgz#f1e093850c8cc23c6605297c8a4134bea6fe290b" - integrity sha512-Q0iDwnH3+xoxQ0pqVbt8hFdzhq1g2XzzR4Y5pVcICTNtoCLJmpJS3vI4y/OIM1FHFmpfmiEC2IRIq7YcZ8nsmg== +"@mui/system@^6.5.0": + version "6.5.0" + resolved "https://registry.npmjs.org/@mui/system/-/system-6.5.0.tgz#52751ac4e3a546f53bc34fd2ef2731c28a824b92" + integrity sha512-XcbBYxDS+h/lgsoGe78ExXFZXtuIlSBpn/KsZq8PtZcIkUNJInkuDqcLd2rVBQrDC1u+rvVovdaWPf2FHKJf3w== dependencies: "@babel/runtime" "^7.26.0" - "@mui/private-theming" "^6.4.3" - "@mui/styled-engine" "^6.4.3" - "@mui/types" "^7.2.21" - "@mui/utils" "^6.4.3" + "@mui/private-theming" "^6.4.9" + "@mui/styled-engine" "^6.5.0" + "@mui/types" "~7.2.24" + "@mui/utils" "^6.4.9" clsx "^2.1.1" csstype "^3.1.3" prop-types "^15.8.1" -"@mui/types@^7.2.21": - version "7.2.21" - resolved 
"https://registry.npmmirror.com/@mui/types/-/types-7.2.21.tgz#63f50874eda8e4a021a69aaa8ba9597369befda2" - integrity sha512-6HstngiUxNqLU+/DPqlUJDIPbzUBxIVHb1MmXP0eTWDIROiCR2viugXpEif0PPe2mLqqakPzzRClWAnK+8UJww== +"@mui/types@~7.2.24": + version "7.2.24" + resolved "https://registry.npmjs.org/@mui/types/-/types-7.2.24.tgz#5eff63129d9c29d80bbf2d2e561bd0690314dec2" + integrity sha512-3c8tRt/CbWZ+pEg7QpSwbdxOk36EfmhbKf6AGZsD1EcLDLTSZoxxJ86FVtcjxvjuhdyBiWKSTGZFaXCnidO2kw== -"@mui/utils@^6.4.3": - version "6.4.3" - resolved "https://registry.npmmirror.com/@mui/utils/-/utils-6.4.3.tgz#e08bc3a5ae1552a48dd13ddc7c65e3eebdb4cd58" - integrity sha512-jxHRHh3BqVXE9ABxDm+Tc3wlBooYz/4XPa0+4AI+iF38rV1/+btJmSUgG4shDtSWVs/I97aDn5jBCt6SF2Uq2A== +"@mui/utils@^6.4.9": + version "6.4.9" + resolved "https://registry.npmjs.org/@mui/utils/-/utils-6.4.9.tgz#b0df01daa254c7c32a1a30b30a5179e19ef071a7" + integrity sha512-Y12Q9hbK9g+ZY0T3Rxrx9m2m10gaphDuUMgWxyV5kNJevVxXYCLclYUCC9vXaIk1/NdNDTcW2Yfr2OGvNFNmHg== dependencies: "@babel/runtime" "^7.26.0" - "@mui/types" "^7.2.21" + "@mui/types" "~7.2.24" "@types/prop-types" "^15.7.14" clsx "^2.1.1" prop-types "^15.8.1" react-is "^19.0.0" -"@next/env@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/env/-/env-15.1.7.tgz#14e2678f893aec50ff2dcb7a6665092fb9e1263d" - integrity sha512-d9jnRrkuOH7Mhi+LHav2XW91HOgTAWHxjMPkXMGBc9B2b7614P7kjt8tAplRvJpbSt4nbO1lugcT/kAaWzjlLQ== +"@napi-rs/wasm-runtime@^0.2.11": + version "0.2.12" + resolved "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.12.tgz#3e78a8b96e6c33a6c517e1894efbd5385a7cb6f2" + integrity sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ== + dependencies: + "@emnapi/core" "^1.4.3" + "@emnapi/runtime" "^1.4.3" + "@tybys/wasm-util" "^0.10.0" + +"@next/env@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/env/-/env-15.2.4.tgz#060f8d8ddb02be5c825eab4ccd9ab619001efffb" + integrity sha512-+SFtMgoiYP3WoSswuNmxJOCwi06TdWE733D+WPjpXIe4LXGULwEaofiiAy6kbS0+XjM5xF5n3lKuBwN2SnqD9g== -"@next/eslint-plugin-next@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/eslint-plugin-next/-/eslint-plugin-next-15.1.7.tgz#f8593c714f51ac6d443fb298584954d5def4392b" - integrity sha512-kRP7RjSxfTO13NE317ek3mSGzoZlI33nc/i5hs1KaWpK+egs85xg0DJ4p32QEiHnR0mVjuUfhRIun7awqfL7pQ== +"@next/eslint-plugin-next@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-15.2.4.tgz#0be0628380bf18313a4e89954d546b01572023aa" + integrity sha512-O8ScvKtnxkp8kL9TpJTTKnMqlkZnS+QxwoQnJwPGBxjBbzd6OVVPEJ5/pMNrktSyXQD/chEfzfFzYLM6JANOOQ== dependencies: fast-glob "3.3.1" -"@next/mdx@^15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/mdx/-/mdx-15.1.7.tgz#1b808246b3725bb7876d8833fa61d4e16a9f8bab" - integrity sha512-olVOjKA1K8b7/cu0zqWecVkwyCUnB9xlKXxB/CeCRoZYlH0zluLHwhWBX0PR9yf3CG7eNLrK+PfuPBF+LdWODQ== +"@next/mdx@^15.2.4": + version "15.3.5" + resolved "https://registry.npmjs.org/@next/mdx/-/mdx-15.3.5.tgz#580fdec4a99b5891b4c2bc4a35a58757be3c6014" + integrity sha512-/2rRCgPKNp2ttQscU13auI+cYYACdPa80Okgi/1+NNJJeWn9yVxwGnqZc3SX30T889bZbLqcY4oUjqYGAygL4g== dependencies: source-map "^0.7.0" -"@next/swc-darwin-arm64@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.1.7.tgz#ecc6eacf174df36a6c73f7c319ed864ec6e08079" - integrity 
sha512-hPFwzPJDpA8FGj7IKV3Yf1web3oz2YsR8du4amKw8d+jAOHfYHYFpMkoF6vgSY4W6vB29RtZEklK9ayinGiCmQ== - -"@next/swc-darwin-x64@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-darwin-x64/-/swc-darwin-x64-15.1.7.tgz#d25b4c131d13439ea4b263dbcd0fd518a835f31c" - integrity sha512-2qoas+fO3OQKkU0PBUfwTiw/EYpN+kdAx62cePRyY1LqKtP09Vp5UcUntfZYajop5fDFTjSxCHfZVRxzi+9FYQ== - -"@next/swc-linux-arm64-gnu@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.1.7.tgz#b19abc7b56918042b5309f55f7010e7932ee4967" - integrity sha512-sKLLwDX709mPdzxMnRIXLIT9zaX2w0GUlkLYQnKGoXeWUhcvpCrK+yevcwCJPdTdxZEUA0mOXGLdPsGkudGdnA== - -"@next/swc-linux-arm64-musl@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.1.7.tgz#cb2ac35d3024e9d46ce0d4ff03bf491e0773519f" - integrity sha512-zblK1OQbQWdC8fxdX4fpsHDw+VSpBPGEUX4PhSE9hkaWPrWoeIJn+baX53vbsbDRaDKd7bBNcXRovY1hEhFd7w== - -"@next/swc-linux-x64-gnu@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.1.7.tgz#cf6e338a1fbb1c9b019c158a76a7ab4f143929ce" - integrity sha512-GOzXutxuLvLHFDAPsMP2zDBMl1vfUHHpdNpFGhxu90jEzH6nNIgmtw/s1MDwpTOiM+MT5V8+I1hmVFeAUhkbgQ== - -"@next/swc-linux-x64-musl@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.1.7.tgz#94c9117ece8e5851e7e6674f12d6b82b435e3a6f" - integrity sha512-WrZ7jBhR7ATW1z5iEQ0ZJfE2twCNSXbpCSaAunF3BKcVeHFADSI/AW1y5Xt3DzTqPF1FzQlwQTewqetAABhZRQ== - -"@next/swc-win32-arm64-msvc@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.1.7.tgz#4947f3b7f41c7347114985bf3c91e2eacddfe124" - integrity sha512-LDnj1f3OVbou1BqvvXVqouJZKcwq++mV2F+oFHptToZtScIEnhNRJAhJzqAtTE2dB31qDYL45xJwrc+bLeKM2Q== - -"@next/swc-win32-x64-msvc@15.1.7": - version "15.1.7" - resolved "https://registry.npmmirror.com/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.1.7.tgz#0adb399deb15291b61be94909a97e0d6ce1f61fa" - integrity sha512-dC01f1quuf97viOfW05/K8XYv2iuBgAxJZl7mbCKEjMgdQl5JjAKJ0D2qMKZCgPWDeFbFT0Q0nYWwytEW0DWTQ== +"@next/swc-darwin-arm64@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.4.tgz#3a54f67aa2e0096a9147bd24dff1492e151819ae" + integrity sha512-1AnMfs655ipJEDC/FHkSr0r3lXBgpqKo4K1kiwfUf3iE68rDFXZ1TtHdMvf7D0hMItgDZ7Vuq3JgNMbt/+3bYw== + +"@next/swc-darwin-x64@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.4.tgz#9b540f24afde1b7878623fdba9695344d26b7d67" + integrity sha512-3qK2zb5EwCwxnO2HeO+TRqCubeI/NgCe+kL5dTJlPldV/uwCnUgC7VbEzgmxbfrkbjehL4H9BPztWOEtsoMwew== + +"@next/swc-linux-arm64-gnu@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.4.tgz#417a234c9f4dc5495094a8979859ac528c0f1f58" + integrity sha512-HFN6GKUcrTWvem8AZN7tT95zPb0GUGv9v0d0iyuTb303vbXkkbHDp/DxufB04jNVD+IN9yHy7y/6Mqq0h0YVaQ== + +"@next/swc-linux-arm64-musl@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.4.tgz#9bca76375508a175956f2d51f8547d0d6f9ffa64" + integrity sha512-Oioa0SORWLwi35/kVB8aCk5Uq+5/ZIumMK1kJV+jSdazFm2NzPDztsefzdmzzpx5oGCJ6FkUC7vkaUseNTStNA== + +"@next/swc-linux-x64-gnu@15.2.4": + version "15.2.4" + resolved 
"https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.4.tgz#c3d5041d53a5b228bf521ed49649e0f2a7aff947" + integrity sha512-yb5WTRaHdkgOqFOZiu6rHV1fAEK0flVpaIN2HB6kxHVSy/dIajWbThS7qON3W9/SNOH2JWkVCyulgGYekMePuw== + +"@next/swc-linux-x64-musl@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.4.tgz#b2a51a108b1c412c69a504556cde0517631768c7" + integrity sha512-Dcdv/ix6srhkM25fgXiyOieFUkz+fOYkHlydWCtB0xMST6X9XYI3yPDKBZt1xuhOytONsIFJFB08xXYsxUwJLw== + +"@next/swc-win32-arm64-msvc@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.4.tgz#7d687b42512abd36f44c2c787d58a1590f174b69" + integrity sha512-dW0i7eukvDxtIhCYkMrZNQfNicPDExt2jPb9AZPpL7cfyUo7QSNl1DjsHjmmKp6qNAqUESyT8YFl/Aw91cNJJg== + +"@next/swc-win32-x64-msvc@15.2.4": + version "15.2.4" + resolved "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.4.tgz#779a0ea272fa4f509387f3b320e2d70803943a95" + integrity sha512-SbnWkJmkS7Xl3kre8SdMF6F/XDh1DTFEhp0jRTj/uB8iPKoU2bb2NDfcu+iifv1+mxQEd1g2vvSxcZbXSKyWiQ== "@nodelib/fs.scandir@2.1.5": version "2.1.5" - resolved "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + resolved "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== dependencies: "@nodelib/fs.stat" "2.0.5" @@ -647,12 +666,12 @@ "@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": version "2.0.5" - resolved "https://registry.npmmirror.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== "@nodelib/fs.walk@^1.2.3": version "1.2.8" - resolved "https://registry.npmmirror.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + resolved "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== dependencies: "@nodelib/fs.scandir" "2.1.5" @@ -660,12 +679,12 @@ "@nolyfill/is-core-module@1.0.39": version "1.0.39" - resolved "https://registry.npmmirror.com/@nolyfill/is-core-module/-/is-core-module-1.0.39.tgz#3dc35ba0f1e66b403c00b39344f870298ebb1c8e" + resolved "https://registry.npmjs.org/@nolyfill/is-core-module/-/is-core-module-1.0.39.tgz#3dc35ba0f1e66b403c00b39344f870298ebb1c8e" integrity sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA== "@popperjs/core@^2.11.8": version "2.11.8" - resolved "https://registry.npmmirror.com/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" + resolved "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" integrity sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A== "@puppeteer/browsers@2.10.5": @@ -683,22 +702,22 @@ "@rtsao/scc@^1.1.0": version "1.1.0" - resolved "https://registry.npmmirror.com/@rtsao/scc/-/scc-1.1.0.tgz#927dd2fae9bc3361403ac2c7a00c32ddce9ad7e8" + resolved 
"https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz#927dd2fae9bc3361403ac2c7a00c32ddce9ad7e8" integrity sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g== "@rushstack/eslint-patch@^1.10.3": - version "1.10.5" - resolved "https://registry.npmmirror.com/@rushstack/eslint-patch/-/eslint-patch-1.10.5.tgz#3a1c12c959010a55c17d46b395ed3047b545c246" - integrity sha512-kkKUDVlII2DQiKy7UstOR1ErJP8kUKAQ4oa+SQtM0K+lPdmmjj0YnnxBgtTVYH7mUKtbsxeFC9y0AmK7Yb78/A== + version "1.12.0" + resolved "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.12.0.tgz#326a7b46f6d4cfa54ae25bb888551697873069b4" + integrity sha512-5EwMtOqvJMMa3HbmxLlF74e+3/HhwBTMcvt3nqVJgGCozO6hzIPOBlwm8mGVNR9SN2IJpxSnlxczyDjcn7qIyw== "@swc/counter@0.1.3": version "0.1.3" - resolved "https://registry.npmmirror.com/@swc/counter/-/counter-0.1.3.tgz#cc7463bd02949611c6329596fccd2b0ec782b0e9" + resolved "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz#cc7463bd02949611c6329596fccd2b0ec782b0e9" integrity sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ== "@swc/helpers@0.5.15": version "0.5.15" - resolved "https://registry.npmmirror.com/@swc/helpers/-/helpers-0.5.15.tgz#79efab344c5819ecf83a43f3f9f811fc84b516d7" + resolved "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz#79efab344c5819ecf83a43f3f9f811fc84b516d7" integrity sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g== dependencies: tslib "^2.8.0" @@ -708,162 +727,162 @@ resolved "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz#db4ecfd499a9765ab24002c3b696d02e6d32a12c" integrity sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA== -"@types/acorn@^4.0.0": - version "4.0.6" - resolved "https://registry.npmmirror.com/@types/acorn/-/acorn-4.0.6.tgz#d61ca5480300ac41a7d973dd5b84d0a591154a22" - integrity sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ== +"@tybys/wasm-util@^0.10.0": + version "0.10.0" + resolved "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.0.tgz#2fd3cd754b94b378734ce17058d0507c45c88369" + integrity sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ== dependencies: - "@types/estree" "*" + tslib "^2.4.0" "@types/debug@^4.0.0": version "4.1.12" - resolved "https://registry.npmmirror.com/@types/debug/-/debug-4.1.12.tgz#a155f21690871953410df4b6b6f53187f0500917" + resolved "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz#a155f21690871953410df4b6b6f53187f0500917" integrity sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ== dependencies: "@types/ms" "*" "@types/estree-jsx@^1.0.0": version "1.0.5" - resolved "https://registry.npmmirror.com/@types/estree-jsx/-/estree-jsx-1.0.5.tgz#858a88ea20f34fe65111f005a689fa1ebf70dc18" + resolved "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz#858a88ea20f34fe65111f005a689fa1ebf70dc18" integrity sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg== dependencies: "@types/estree" "*" "@types/estree@*", "@types/estree@^1.0.0", "@types/estree@^1.0.6": - version "1.0.6" - resolved "https://registry.npmmirror.com/@types/estree/-/estree-1.0.6.tgz#628effeeae2064a1b4e79f78e81d87b7e5fc7b50" - integrity 
sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw== + version "1.0.8" + resolved "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz#958b91c991b1867ced318bedea0e215ee050726e" + integrity sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== "@types/hast@^2.0.0": version "2.3.10" - resolved "https://registry.npmmirror.com/@types/hast/-/hast-2.3.10.tgz#5c9d9e0b304bbb8879b857225c5ebab2d81d7643" + resolved "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz#5c9d9e0b304bbb8879b857225c5ebab2d81d7643" integrity sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw== dependencies: "@types/unist" "^2" "@types/hast@^3.0.0", "@types/hast@^3.0.4": version "3.0.4" - resolved "https://registry.npmmirror.com/@types/hast/-/hast-3.0.4.tgz#1d6b39993b82cea6ad783945b0508c25903e15aa" + resolved "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz#1d6b39993b82cea6ad783945b0508c25903e15aa" integrity sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ== dependencies: "@types/unist" "*" "@types/json-schema@^7.0.15": version "7.0.15" - resolved "https://registry.npmmirror.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + resolved "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== "@types/json5@^0.0.29": version "0.0.29" - resolved "https://registry.npmmirror.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" + resolved "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== "@types/lodash@^4.17.15": - version "4.17.15" - resolved "https://registry.npmmirror.com/@types/lodash/-/lodash-4.17.15.tgz#12d4af0ed17cc7600ce1f9980cec48fc17ad1e89" - integrity sha512-w/P33JFeySuhN6JLkysYUK2gEmy9kHHFN7E8ro0tkfmlDOgxBDzWEZ/J8cWA+fHqFevpswDTFZnDx+R9lbL6xw== + version "4.17.20" + resolved "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz#1ca77361d7363432d29f5e55950d9ec1e1c6ea93" + integrity sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA== "@types/mdast@^4.0.0": version "4.0.4" - resolved "https://registry.npmmirror.com/@types/mdast/-/mdast-4.0.4.tgz#7ccf72edd2f1aa7dd3437e180c64373585804dd6" + resolved "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz#7ccf72edd2f1aa7dd3437e180c64373585804dd6" integrity sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA== dependencies: "@types/unist" "*" "@types/mdx@^2.0.0", "@types/mdx@^2.0.13": version "2.0.13" - resolved "https://registry.npmmirror.com/@types/mdx/-/mdx-2.0.13.tgz#68f6877043d377092890ff5b298152b0a21671bd" + resolved "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz#68f6877043d377092890ff5b298152b0a21671bd" integrity sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw== "@types/ms@*": version "2.1.0" - resolved "https://registry.npmmirror.com/@types/ms/-/ms-2.1.0.tgz#052aa67a48eccc4309d7f0191b7e41434b90bb78" + resolved "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz#052aa67a48eccc4309d7f0191b7e41434b90bb78" integrity 
sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA== "@types/node@*": - version "22.15.30" - resolved "https://registry.npmjs.org/@types/node/-/node-22.15.30.tgz#3a20431783e28dd0b0326f84ab386a2ec81d921d" - integrity sha512-6Q7lr06bEHdlfplU6YRbgG1SFBdlsfNC4/lX+SkhiTs0cpJkOElmWls8PxDFv4yY/xKb8Y6SO0OmSX4wgqTZbA== + version "24.0.13" + resolved "https://registry.npmjs.org/@types/node/-/node-24.0.13.tgz#93ed8c05c7b188a59760be0ce2ee3fa7ad0f83f6" + integrity sha512-Qm9OYVOFHFYg3wJoTSrz80hoec5Lia/dPp84do3X7dZvLikQvM1YpmvTBEdIr/e+U8HTkFjLHLnl78K/qjf+jQ== dependencies: - undici-types "~6.21.0" + undici-types "~7.8.0" "@types/node@^20": - version "20.17.19" - resolved "https://registry.npmmirror.com/@types/node/-/node-20.17.19.tgz#0f2869555719bef266ca6e1827fcdca903c1a697" - integrity sha512-LEwC7o1ifqg/6r2gn9Dns0f1rhK+fPFDoMiceTJ6kWmVk6bgXBI/9IOWfVan4WiAavK9pIVWdX0/e3J+eEUh5A== + version "20.19.7" + resolved "https://registry.npmjs.org/@types/node/-/node-20.19.7.tgz#1d3656e25ec91731c1cce21889e4754ebc988b4e" + integrity sha512-1GM9z6BJOv86qkPvzh2i6VW5+VVrXxCLknfmTkWEqz+6DqosiY28XUWCTmBcJ0ACzKqx/iwdIREfo1fwExIlkA== dependencies: - undici-types "~6.19.2" + undici-types "~6.21.0" "@types/parse-json@^4.0.0": version "4.0.2" - resolved "https://registry.npmmirror.com/@types/parse-json/-/parse-json-4.0.2.tgz#5950e50960793055845e956c427fc2b0d70c5239" + resolved "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz#5950e50960793055845e956c427fc2b0d70c5239" integrity sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw== "@types/prop-types@^15.7.14": - version "15.7.14" - resolved "https://registry.npmmirror.com/@types/prop-types/-/prop-types-15.7.14.tgz#1433419d73b2a7ebfc6918dcefd2ec0d5cd698f2" - integrity sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ== + version "15.7.15" + resolved "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz#e6e5a86d602beaca71ce5163fadf5f95d70931c7" + integrity sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw== "@types/react-copy-to-clipboard@^5.0.7": version "5.0.7" - resolved "https://registry.npmmirror.com/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.7.tgz#0cb724d4228f1c2f8f5675671b3971c8801d5f45" + resolved "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.7.tgz#0cb724d4228f1c2f8f5675671b3971c8801d5f45" integrity sha512-Gft19D+as4M+9Whq1oglhmK49vqPhcLzk8WfvfLvaYMIPYanyfLy0+CwFucMJfdKoSFyySPmkkWn8/E6voQXjQ== dependencies: "@types/react" "*" "@types/react-dom@^19": - version "19.0.4" - resolved "https://registry.npmmirror.com/@types/react-dom/-/react-dom-19.0.4.tgz#bedba97f9346bd4c0fe5d39e689713804ec9ac89" - integrity sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg== + version "19.1.6" + resolved "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.6.tgz#4af629da0e9f9c0f506fc4d1caa610399c595d64" + integrity sha512-4hOiT/dwO8Ko0gV1m/TJZYk3y0KBnY9vzDh7W+DH17b2HFSOGgdj33dhihPeuy3l0q23+4e+hoXHV6hCC4dCXw== "@types/react-scroll@^1.8.10": version "1.8.10" - resolved "https://registry.npmmirror.com/@types/react-scroll/-/react-scroll-1.8.10.tgz#585a5c4bd0654434f3e55a08e94ed2e048bae7c7" + resolved "https://registry.npmjs.org/@types/react-scroll/-/react-scroll-1.8.10.tgz#585a5c4bd0654434f3e55a08e94ed2e048bae7c7" integrity 
sha512-RD4Z7grbdNGOKwKnUBKar6zNxqaW3n8m9QSrfvljW+gmkj1GArb8AFBomVr6xMOgHPD3v1uV3BrIf01py57daQ== dependencies: "@types/react" "*" "@types/react-slick@^0.23.13": version "0.23.13" - resolved "https://registry.npmmirror.com/@types/react-slick/-/react-slick-0.23.13.tgz#037434e73a58063047b121e08565f7185d811f36" + resolved "https://registry.npmjs.org/@types/react-slick/-/react-slick-0.23.13.tgz#037434e73a58063047b121e08565f7185d811f36" integrity sha512-bNZfDhe/L8t5OQzIyhrRhBr/61pfBcWaYJoq6UDqFtv5LMwfg4NsVDD2J8N01JqdAdxLjOt66OZEp6PX+dGs/A== dependencies: "@types/react" "*" "@types/react-stickynode@^4.0.3": version "4.0.3" - resolved "https://registry.npmmirror.com/@types/react-stickynode/-/react-stickynode-4.0.3.tgz#77e592cf84590319648678117424f921e5d1b4da" + resolved "https://registry.npmjs.org/@types/react-stickynode/-/react-stickynode-4.0.3.tgz#77e592cf84590319648678117424f921e5d1b4da" integrity sha512-K7YkwdhXQE4YVxIVweix4nkpdG4onm/dcnKK+qCj0vgUrNiKng+09zOfjF5AlOcC1HQkg5yxVLwp/0AzT84R0w== dependencies: "@types/react" "*" "@types/react-transition-group@^4.4.12": version "4.4.12" - resolved "https://registry.npmmirror.com/@types/react-transition-group/-/react-transition-group-4.4.12.tgz#b5d76568485b02a307238270bfe96cb51ee2a044" + resolved "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.12.tgz#b5d76568485b02a307238270bfe96cb51ee2a044" integrity sha512-8TV6R3h2j7a91c+1DXdJi3Syo69zzIZbz7Lg5tORM5LEJG7X/E6a1V3drRyBRZq7/utz7A+c4OgYLiLcYGHG6w== "@types/react@*", "@types/react@^19": - version "19.0.10" - resolved "https://registry.npmmirror.com/@types/react/-/react-19.0.10.tgz#d0c66dafd862474190fe95ce11a68de69ed2b0eb" - integrity sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g== + version "19.1.8" + resolved "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz#ff8395f2afb764597265ced15f8dddb0720ae1c3" + integrity sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g== dependencies: csstype "^3.0.2" "@types/remark-heading-id@^1.0.0": version "1.0.0" - resolved "https://registry.npmmirror.com/@types/remark-heading-id/-/remark-heading-id-1.0.0.tgz#2c594c26b4d8306bb857fc357027498242436385" + resolved "https://registry.npmjs.org/@types/remark-heading-id/-/remark-heading-id-1.0.0.tgz#2c594c26b4d8306bb857fc357027498242436385" integrity sha512-V6OgBN2Uv3kaYHOrBI2+j9xIo6N56bMpIFoKVkGltoJtzHr7Vo8pFxDZxNqUXC5NScV991Iq3BYD52BkCFMY+w== dependencies: unified "^11.0.0" "@types/unist@*", "@types/unist@^3.0.0": version "3.0.3" - resolved "https://registry.npmmirror.com/@types/unist/-/unist-3.0.3.tgz#acaab0f919ce69cce629c2d4ed2eb4adc1b6c20c" + resolved "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz#acaab0f919ce69cce629c2d4ed2eb4adc1b6c20c" integrity sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q== "@types/unist@^2", "@types/unist@^2.0.0": version "2.0.11" - resolved "https://registry.npmmirror.com/@types/unist/-/unist-2.0.11.tgz#11af57b127e32487774841f7a4e54eab166d03c4" + resolved "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz#11af57b127e32487774841f7a4e54eab166d03c4" integrity sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA== "@types/yauzl@^2.9.1": @@ -874,109 +893,222 @@ "@types/node" "*" "@typescript-eslint/eslint-plugin@^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0": - version "8.24.1" - resolved 
"https://registry.npmmirror.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.24.1.tgz#d104c2a6212304c649105b18af2c110b4a1dd4ae" - integrity sha512-ll1StnKtBigWIGqvYDVuDmXJHVH4zLVot1yQ4fJtLpL7qacwkxJc1T0bptqw+miBQ/QfUbhl1TcQ4accW5KUyA== + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.36.0.tgz#880ce277f8a30ccf539ec027acac157088f131ae" + integrity sha512-lZNihHUVB6ZZiPBNgOQGSxUASI7UJWhT8nHyUGCnaQ28XFCw98IfrMCG3rUl1uwUWoAvodJQby2KTs79UTcrAg== dependencies: "@eslint-community/regexpp" "^4.10.0" - "@typescript-eslint/scope-manager" "8.24.1" - "@typescript-eslint/type-utils" "8.24.1" - "@typescript-eslint/utils" "8.24.1" - "@typescript-eslint/visitor-keys" "8.24.1" + "@typescript-eslint/scope-manager" "8.36.0" + "@typescript-eslint/type-utils" "8.36.0" + "@typescript-eslint/utils" "8.36.0" + "@typescript-eslint/visitor-keys" "8.36.0" graphemer "^1.4.0" - ignore "^5.3.1" + ignore "^7.0.0" natural-compare "^1.4.0" - ts-api-utils "^2.0.1" + ts-api-utils "^2.1.0" "@typescript-eslint/parser@^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/parser/-/parser-8.24.1.tgz#67965c2d2ddd7eadb2f094c395695db8334ef9a2" - integrity sha512-Tqoa05bu+t5s8CTZFaGpCH2ub3QeT9YDkXbPd3uQ4SfsLoh1/vv2GEYAioPoxCWJJNsenXlC88tRjwoHNts1oQ== - dependencies: - "@typescript-eslint/scope-manager" "8.24.1" - "@typescript-eslint/types" "8.24.1" - "@typescript-eslint/typescript-estree" "8.24.1" - "@typescript-eslint/visitor-keys" "8.24.1" + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.36.0.tgz#003007fe2030013936b6634b9cf52c457d36ed42" + integrity sha512-FuYgkHwZLuPbZjQHzJXrtXreJdFMKl16BFYyRrLxDhWr6Qr7Kbcu2s1Yhu8tsiMXw1S0W1pjfFfYEt+R604s+Q== + dependencies: + "@typescript-eslint/scope-manager" "8.36.0" + "@typescript-eslint/types" "8.36.0" + "@typescript-eslint/typescript-estree" "8.36.0" + "@typescript-eslint/visitor-keys" "8.36.0" debug "^4.3.4" -"@typescript-eslint/scope-manager@8.24.1": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/scope-manager/-/scope-manager-8.24.1.tgz#1e1e76ec4560aa85077ab36deb9b2bead4ae124e" - integrity sha512-OdQr6BNBzwRjNEXMQyaGyZzgg7wzjYKfX2ZBV3E04hUCBDv3GQCHiz9RpqdUIiVrMgJGkXm3tcEh4vFSHreS2Q== +"@typescript-eslint/project-service@8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.36.0.tgz#0c4acdcbe56476a43cdabaac1f08819424a379fd" + integrity sha512-JAhQFIABkWccQYeLMrHadu/fhpzmSQ1F1KXkpzqiVxA/iYI6UnRt2trqXHt1sYEcw1mxLnB9rKMsOxXPxowN/g== dependencies: - "@typescript-eslint/types" "8.24.1" - "@typescript-eslint/visitor-keys" "8.24.1" + "@typescript-eslint/tsconfig-utils" "^8.36.0" + "@typescript-eslint/types" "^8.36.0" + debug "^4.3.4" -"@typescript-eslint/type-utils@8.24.1": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/type-utils/-/type-utils-8.24.1.tgz#99113e1df63d1571309d87eef68967344c78dd65" - integrity sha512-/Do9fmNgCsQ+K4rCz0STI7lYB4phTtEXqqCAs3gZW0pnK7lWNkvWd5iW545GSmApm4AzmQXmSqXPO565B4WVrw== +"@typescript-eslint/scope-manager@8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.36.0.tgz#23e4196ed07d7ea3737a584fbebc9a79c3835168" + integrity sha512-wCnapIKnDkN62fYtTGv2+RY8FlnBYA3tNm0fm91kc2BjPhV2vIjwwozJ7LToaLAyb1ca8BxrS7vT+Pvvf7RvqA== dependencies: - "@typescript-eslint/typescript-estree" "8.24.1" - 
"@typescript-eslint/utils" "8.24.1" - debug "^4.3.4" - ts-api-utils "^2.0.1" + "@typescript-eslint/types" "8.36.0" + "@typescript-eslint/visitor-keys" "8.36.0" -"@typescript-eslint/types@8.24.1": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/types/-/types-8.24.1.tgz#8777a024f3afc4ace5e48f9a804309c6dd38f95a" - integrity sha512-9kqJ+2DkUXiuhoiYIUvIYjGcwle8pcPpdlfkemGvTObzgmYfJ5d0Qm6jwb4NBXP9W1I5tss0VIAnWFumz3mC5A== +"@typescript-eslint/tsconfig-utils@8.36.0", "@typescript-eslint/tsconfig-utils@^8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.36.0.tgz#63ef8a20ae9b5754c6ceacbe87b2fe1aab12ba13" + integrity sha512-Nhh3TIEgN18mNbdXpd5Q8mSCBnrZQeY9V7Ca3dqYvNDStNIGRmJA6dmrIPMJ0kow3C7gcQbpsG2rPzy1Ks/AnA== -"@typescript-eslint/typescript-estree@8.24.1": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/typescript-estree/-/typescript-estree-8.24.1.tgz#3bb479401f8bd471b3c6dd3db89e7256977c54db" - integrity sha512-UPyy4MJ/0RE648DSKQe9g0VDSehPINiejjA6ElqnFaFIhI6ZEiZAkUI0D5MCk0bQcTf/LVqZStvQ6K4lPn/BRg== +"@typescript-eslint/type-utils@8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.36.0.tgz#16b092c2cbbb5549f6a4df1382a481586850502f" + integrity sha512-5aaGYG8cVDd6cxfk/ynpYzxBRZJk7w/ymto6uiyUFtdCozQIsQWh7M28/6r57Fwkbweng8qAzoMCPwSJfWlmsg== dependencies: - "@typescript-eslint/types" "8.24.1" - "@typescript-eslint/visitor-keys" "8.24.1" + "@typescript-eslint/typescript-estree" "8.36.0" + "@typescript-eslint/utils" "8.36.0" + debug "^4.3.4" + ts-api-utils "^2.1.0" + +"@typescript-eslint/types@8.36.0", "@typescript-eslint/types@^8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.36.0.tgz#d3d184adc2899e2912c13b17c1590486ef37c7ac" + integrity sha512-xGms6l5cTJKQPZOKM75Dl9yBfNdGeLRsIyufewnxT4vZTrjC0ImQT4fj8QmtJK84F58uSh5HVBSANwcfiXxABQ== + +"@typescript-eslint/typescript-estree@8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.36.0.tgz#344857fa79f71715369554a3cbb6b4ff8695a7bc" + integrity sha512-JaS8bDVrfVJX4av0jLpe4ye0BpAaUW7+tnS4Y4ETa3q7NoZgzYbN9zDQTJ8kPb5fQ4n0hliAt9tA4Pfs2zA2Hg== + dependencies: + "@typescript-eslint/project-service" "8.36.0" + "@typescript-eslint/tsconfig-utils" "8.36.0" + "@typescript-eslint/types" "8.36.0" + "@typescript-eslint/visitor-keys" "8.36.0" debug "^4.3.4" fast-glob "^3.3.2" is-glob "^4.0.3" minimatch "^9.0.4" semver "^7.6.0" - ts-api-utils "^2.0.1" + ts-api-utils "^2.1.0" -"@typescript-eslint/utils@8.24.1": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/utils/-/utils-8.24.1.tgz#08d14eac33cfb3456feeee5a275b8ad3349e52ed" - integrity sha512-OOcg3PMMQx9EXspId5iktsI3eMaXVwlhC8BvNnX6B5w9a4dVgpkQZuU8Hy67TolKcl+iFWq0XX+jbDGN4xWxjQ== +"@typescript-eslint/utils@8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.36.0.tgz#2c9af5292f14e0aa4b0e9c7ac0406afafb299acf" + integrity sha512-VOqmHu42aEMT+P2qYjylw6zP/3E/HvptRwdn/PZxyV27KhZg2IOszXod4NcXisWzPAGSS4trE/g4moNj6XmH2g== dependencies: - "@eslint-community/eslint-utils" "^4.4.0" - "@typescript-eslint/scope-manager" "8.24.1" - "@typescript-eslint/types" "8.24.1" - "@typescript-eslint/typescript-estree" "8.24.1" + "@eslint-community/eslint-utils" "^4.7.0" + "@typescript-eslint/scope-manager" "8.36.0" + "@typescript-eslint/types" "8.36.0" + 
"@typescript-eslint/typescript-estree" "8.36.0" -"@typescript-eslint/visitor-keys@8.24.1": - version "8.24.1" - resolved "https://registry.npmmirror.com/@typescript-eslint/visitor-keys/-/visitor-keys-8.24.1.tgz#8bdfe47a89195344b34eb21ef61251562148202b" - integrity sha512-EwVHlp5l+2vp8CoqJm9KikPZgi3gbdZAtabKT9KPShGeOcJhsv4Zdo3oc8T8I0uKEmYoU4ItyxbptjF08enaxg== +"@typescript-eslint/visitor-keys@8.36.0": + version "8.36.0" + resolved "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.36.0.tgz#7dc6ba4dd037979eb3a3bdd2093aa3604bb73674" + integrity sha512-vZrhV2lRPWDuGoxcmrzRZyxAggPL+qp3WzUrlZD+slFueDiYHxeBa34dUXPuC0RmGKzl4lS5kFJYvKCq9cnNDA== dependencies: - "@typescript-eslint/types" "8.24.1" - eslint-visitor-keys "^4.2.0" + "@typescript-eslint/types" "8.36.0" + eslint-visitor-keys "^4.2.1" "@ungap/structured-clone@^1.0.0": version "1.3.0" - resolved "https://registry.npmmirror.com/@ungap/structured-clone/-/structured-clone-1.3.0.tgz#d06bbb384ebcf6c505fde1c3d0ed4ddffe0aaff8" + resolved "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz#d06bbb384ebcf6c505fde1c3d0ed4ddffe0aaff8" integrity sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g== +"@unrs/resolver-binding-android-arm-eabi@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-android-arm-eabi/-/resolver-binding-android-arm-eabi-1.11.1.tgz#9f5b04503088e6a354295e8ea8fe3cb99e43af81" + integrity sha512-ppLRUgHVaGRWUx0R0Ut06Mjo9gBaBkg3v/8AxusGLhsIotbBLuRk51rAzqLC8gq6NyyAojEXglNjzf6R948DNw== + +"@unrs/resolver-binding-android-arm64@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-android-arm64/-/resolver-binding-android-arm64-1.11.1.tgz#7414885431bd7178b989aedc4d25cccb3865bc9f" + integrity sha512-lCxkVtb4wp1v+EoN+HjIG9cIIzPkX5OtM03pQYkG+U5O/wL53LC4QbIeazgiKqluGeVEeBlZahHalCaBvU1a2g== + +"@unrs/resolver-binding-darwin-arm64@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-darwin-arm64/-/resolver-binding-darwin-arm64-1.11.1.tgz#b4a8556f42171fb9c9f7bac8235045e82aa0cbdf" + integrity sha512-gPVA1UjRu1Y/IsB/dQEsp2V1pm44Of6+LWvbLc9SDk1c2KhhDRDBUkQCYVWe6f26uJb3fOK8saWMgtX8IrMk3g== + +"@unrs/resolver-binding-darwin-x64@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-darwin-x64/-/resolver-binding-darwin-x64-1.11.1.tgz#fd4d81257b13f4d1a083890a6a17c00de571f0dc" + integrity sha512-cFzP7rWKd3lZaCsDze07QX1SC24lO8mPty9vdP+YVa3MGdVgPmFc59317b2ioXtgCMKGiCLxJ4HQs62oz6GfRQ== + +"@unrs/resolver-binding-freebsd-x64@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-freebsd-x64/-/resolver-binding-freebsd-x64-1.11.1.tgz#d2513084d0f37c407757e22f32bd924a78cfd99b" + integrity sha512-fqtGgak3zX4DCB6PFpsH5+Kmt/8CIi4Bry4rb1ho6Av2QHTREM+47y282Uqiu3ZRF5IQioJQ5qWRV6jduA+iGw== + +"@unrs/resolver-binding-linux-arm-gnueabihf@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-gnueabihf/-/resolver-binding-linux-arm-gnueabihf-1.11.1.tgz#844d2605d057488d77fab09705f2866b86164e0a" + integrity sha512-u92mvlcYtp9MRKmP+ZvMmtPN34+/3lMHlyMj7wXJDeXxuM0Vgzz0+PPJNsro1m3IZPYChIkn944wW8TYgGKFHw== + +"@unrs/resolver-binding-linux-arm-musleabihf@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-musleabihf/-/resolver-binding-linux-arm-musleabihf-1.11.1.tgz#204892995cefb6bd1d017d52d097193bc61ddad3" + integrity 
sha512-cINaoY2z7LVCrfHkIcmvj7osTOtm6VVT16b5oQdS4beibX2SYBwgYLmqhBjA1t51CarSaBuX5YNsWLjsqfW5Cw== + +"@unrs/resolver-binding-linux-arm64-gnu@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-gnu/-/resolver-binding-linux-arm64-gnu-1.11.1.tgz#023eb0c3aac46066a10be7a3f362e7b34f3bdf9d" + integrity sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ== + +"@unrs/resolver-binding-linux-arm64-musl@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-musl/-/resolver-binding-linux-arm64-musl-1.11.1.tgz#9e6f9abb06424e3140a60ac996139786f5d99be0" + integrity sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w== + +"@unrs/resolver-binding-linux-ppc64-gnu@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-ppc64-gnu/-/resolver-binding-linux-ppc64-gnu-1.11.1.tgz#b111417f17c9d1b02efbec8e08398f0c5527bb44" + integrity sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA== + +"@unrs/resolver-binding-linux-riscv64-gnu@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-gnu/-/resolver-binding-linux-riscv64-gnu-1.11.1.tgz#92ffbf02748af3e99873945c9a8a5ead01d508a9" + integrity sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ== + +"@unrs/resolver-binding-linux-riscv64-musl@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-musl/-/resolver-binding-linux-riscv64-musl-1.11.1.tgz#0bec6f1258fc390e6b305e9ff44256cb207de165" + integrity sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew== + +"@unrs/resolver-binding-linux-s390x-gnu@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-s390x-gnu/-/resolver-binding-linux-s390x-gnu-1.11.1.tgz#577843a084c5952f5906770633ccfb89dac9bc94" + integrity sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg== + +"@unrs/resolver-binding-linux-x64-gnu@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-gnu/-/resolver-binding-linux-x64-gnu-1.11.1.tgz#36fb318eebdd690f6da32ac5e0499a76fa881935" + integrity sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w== + +"@unrs/resolver-binding-linux-x64-musl@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-musl/-/resolver-binding-linux-x64-musl-1.11.1.tgz#bfb9af75f783f98f6a22c4244214efe4df1853d6" + integrity sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA== + +"@unrs/resolver-binding-wasm32-wasi@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-wasm32-wasi/-/resolver-binding-wasm32-wasi-1.11.1.tgz#752c359dd875684b27429500d88226d7cc72f71d" + integrity sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ== + dependencies: + "@napi-rs/wasm-runtime" "^0.2.11" + +"@unrs/resolver-binding-win32-arm64-msvc@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-win32-arm64-msvc/-/resolver-binding-win32-arm64-msvc-1.11.1.tgz#ce5735e600e4c2fbb409cd051b3b7da4a399af35" + integrity 
sha512-nRcz5Il4ln0kMhfL8S3hLkxI85BXs3o8EYoattsJNdsX4YUU89iOkVn7g0VHSRxFuVMdM4Q1jEpIId1Ihim/Uw== + +"@unrs/resolver-binding-win32-ia32-msvc@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-win32-ia32-msvc/-/resolver-binding-win32-ia32-msvc-1.11.1.tgz#72fc57bc7c64ec5c3de0d64ee0d1810317bc60a6" + integrity sha512-DCEI6t5i1NmAZp6pFonpD5m7i6aFrpofcp4LA2i8IIq60Jyo28hamKBxNrZcyOwVOZkgsRp9O2sXWBWP8MnvIQ== + +"@unrs/resolver-binding-win32-x64-msvc@1.11.1": + version "1.11.1" + resolved "https://registry.npmjs.org/@unrs/resolver-binding-win32-x64-msvc/-/resolver-binding-win32-x64-msvc-1.11.1.tgz#538b1e103bf8d9864e7b85cc96fa8d6fb6c40777" + integrity sha512-lrW200hZdbfRtztbygyaq/6jP6AKE8qQN2KvPcJ+x7wiD038YtnYtZ82IMNJ69GJibV7bwL3y9FgK+5w/pYt6g== + acorn-jsx@^5.0.0, acorn-jsx@^5.3.2: version "5.3.2" - resolved "https://registry.npmmirror.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" + resolved "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== -acorn@^8.0.0, acorn@^8.14.0: - version "8.14.0" - resolved "https://registry.npmmirror.com/acorn/-/acorn-8.14.0.tgz#063e2c70cac5fb4f6467f0b11152e04c682795b0" - integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== +acorn@^8.0.0, acorn@^8.15.0: + version "8.15.0" + resolved "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz#a360898bc415edaac46c8241f6383975b930b816" + integrity sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== agent-base@^7.1.0, agent-base@^7.1.2: - version "7.1.3" - resolved "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz#29435eb821bc4194633a5b89e5bc4703bafc25a1" - integrity sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw== + version "7.1.4" + resolved "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz#e3cd76d4c548ee895d3c3fd8dc1f6c5b9032e7a8" + integrity sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ== ajv@^6.12.4: version "6.12.6" - resolved "https://registry.npmmirror.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== dependencies: fast-deep-equal "^3.1.1" @@ -991,51 +1123,53 @@ ansi-regex@^5.0.1: ansi-styles@^4.0.0, ansi-styles@^4.1.0: version "4.3.0" - resolved "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + resolved "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== dependencies: color-convert "^2.0.1" argparse@^1.0.7: version "1.0.10" - resolved "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + resolved "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== dependencies: sprintf-js "~1.0.2" argparse@^2.0.1: version "2.0.1" - resolved 
"https://registry.npmmirror.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + resolved "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== aria-query@^5.3.2: version "5.3.2" - resolved "https://registry.npmmirror.com/aria-query/-/aria-query-5.3.2.tgz#93f81a43480e33a338f19163a3d10a50c01dcd59" + resolved "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz#93f81a43480e33a338f19163a3d10a50c01dcd59" integrity sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw== array-buffer-byte-length@^1.0.1, array-buffer-byte-length@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz#384d12a37295aec3769ab022ad323a18a51ccf8b" + resolved "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz#384d12a37295aec3769ab022ad323a18a51ccf8b" integrity sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw== dependencies: call-bound "^1.0.3" is-array-buffer "^3.0.5" -array-includes@^3.1.6, array-includes@^3.1.8: - version "3.1.8" - resolved "https://registry.npmmirror.com/array-includes/-/array-includes-3.1.8.tgz#5e370cbe172fdd5dd6530c1d4aadda25281ba97d" - integrity sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ== +array-includes@^3.1.6, array-includes@^3.1.8, array-includes@^3.1.9: + version "3.1.9" + resolved "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz#1f0ccaa08e90cdbc3eb433210f903ad0f17c3f3a" + integrity sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ== dependencies: - call-bind "^1.0.7" + call-bind "^1.0.8" + call-bound "^1.0.4" define-properties "^1.2.1" - es-abstract "^1.23.2" - es-object-atoms "^1.0.0" - get-intrinsic "^1.2.4" - is-string "^1.0.7" + es-abstract "^1.24.0" + es-object-atoms "^1.1.1" + get-intrinsic "^1.3.0" + is-string "^1.1.1" + math-intrinsics "^1.1.0" array.prototype.findlast@^1.2.5: version "1.2.5" - resolved "https://registry.npmmirror.com/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz#3e4fbcb30a15a7f5bf64cf2faae22d139c2e4904" + resolved "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz#3e4fbcb30a15a7f5bf64cf2faae22d139c2e4904" integrity sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ== dependencies: call-bind "^1.0.7" @@ -1045,21 +1179,22 @@ array.prototype.findlast@^1.2.5: es-object-atoms "^1.0.0" es-shim-unscopables "^1.0.2" -array.prototype.findlastindex@^1.2.5: - version "1.2.5" - resolved "https://registry.npmmirror.com/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz#8c35a755c72908719453f87145ca011e39334d0d" - integrity sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ== +array.prototype.findlastindex@^1.2.6: + version "1.2.6" + resolved "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.6.tgz#cfa1065c81dcb64e34557c9b81d012f6a421c564" + integrity sha512-F/TKATkzseUExPlfvmwQKGITM3DGTK+vkAsCZoDc5daVygbJBnjEUCbgkAvVFsgfXfX4YIqZ/27G3k3tdXrTxQ== dependencies: - call-bind "^1.0.7" + call-bind "^1.0.8" + call-bound "^1.0.4" define-properties "^1.2.1" - es-abstract "^1.23.2" + es-abstract 
"^1.23.9" es-errors "^1.3.0" - es-object-atoms "^1.0.0" - es-shim-unscopables "^1.0.2" + es-object-atoms "^1.1.1" + es-shim-unscopables "^1.1.0" -array.prototype.flat@^1.3.1, array.prototype.flat@^1.3.2: +array.prototype.flat@^1.3.1, array.prototype.flat@^1.3.3: version "1.3.3" - resolved "https://registry.npmmirror.com/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz#534aaf9e6e8dd79fb6b9a9917f839ef1ec63afe5" + resolved "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.3.tgz#534aaf9e6e8dd79fb6b9a9917f839ef1ec63afe5" integrity sha512-rwG/ja1neyLqCuGZ5YYrznA62D4mZXg0i1cIskIUKSiqF3Cje9/wXAls9B9s1Wa2fomMsIv8czB8jZcPmxCXFg== dependencies: call-bind "^1.0.8" @@ -1069,7 +1204,7 @@ array.prototype.flat@^1.3.1, array.prototype.flat@^1.3.2: array.prototype.flatmap@^1.3.2, array.prototype.flatmap@^1.3.3: version "1.3.3" - resolved "https://registry.npmmirror.com/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz#712cc792ae70370ae40586264629e33aab5dd38b" + resolved "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz#712cc792ae70370ae40586264629e33aab5dd38b" integrity sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg== dependencies: call-bind "^1.0.8" @@ -1079,7 +1214,7 @@ array.prototype.flatmap@^1.3.2, array.prototype.flatmap@^1.3.3: array.prototype.tosorted@^1.1.4: version "1.1.4" - resolved "https://registry.npmmirror.com/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz#fe954678ff53034e717ea3352a03f0b0b86f7ffc" + resolved "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz#fe954678ff53034e717ea3352a03f0b0b86f7ffc" integrity sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA== dependencies: call-bind "^1.0.7" @@ -1090,7 +1225,7 @@ array.prototype.tosorted@^1.1.4: arraybuffer.prototype.slice@^1.0.4: version "1.0.4" - resolved "https://registry.npmmirror.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz#9d760d84dbdd06d0cbf92c8849615a1a7ab3183c" + resolved "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz#9d760d84dbdd06d0cbf92c8849615a1a7ab3183c" integrity sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ== dependencies: array-buffer-byte-length "^1.0.1" @@ -1103,7 +1238,7 @@ arraybuffer.prototype.slice@^1.0.4: ast-types-flow@^0.0.8: version "0.0.8" - resolved "https://registry.npmmirror.com/ast-types-flow/-/ast-types-flow-0.0.8.tgz#0a85e1c92695769ac13a428bb653e7538bea27d6" + resolved "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz#0a85e1c92695769ac13a428bb653e7538bea27d6" integrity sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ== ast-types@^0.13.4: @@ -1115,29 +1250,29 @@ ast-types@^0.13.4: astring@^1.8.0: version "1.9.0" - resolved "https://registry.npmmirror.com/astring/-/astring-1.9.0.tgz#cc73e6062a7eb03e7d19c22d8b0b3451fd9bfeef" + resolved "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz#cc73e6062a7eb03e7d19c22d8b0b3451fd9bfeef" integrity sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg== async-function@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/async-function/-/async-function-1.0.0.tgz#509c9fca60eaf85034c6829838188e4e4c8ffb2b" + resolved 
"https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz#509c9fca60eaf85034c6829838188e4e4c8ffb2b" integrity sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA== available-typed-arrays@^1.0.7: version "1.0.7" - resolved "https://registry.npmmirror.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" + resolved "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ== dependencies: possible-typed-array-names "^1.0.0" axe-core@^4.10.0: - version "4.10.2" - resolved "https://registry.npmmirror.com/axe-core/-/axe-core-4.10.2.tgz#85228e3e1d8b8532a27659b332e39b7fa0e022df" - integrity sha512-RE3mdQ7P3FRSe7eqCWoeQ/Z9QXrtniSjp1wUjt5nRC3WIpz5rSCve6o3fsZ2aCpJtrZjSZgjwXAoTO5k4tEI0w== + version "4.10.3" + resolved "https://registry.npmjs.org/axe-core/-/axe-core-4.10.3.tgz#04145965ac7894faddbac30861e5d8f11bfd14fc" + integrity sha512-Xm7bpRXnDSX2YE2YFfBk2FnF0ep6tmG7xPh8iHee8MIcrgq762Nkce856dYtJYLkuIoYZvGfTs/PbZhideTcEg== axobject-query@^4.1.0: version "4.1.0" - resolved "https://registry.npmmirror.com/axobject-query/-/axobject-query-4.1.0.tgz#28768c76d0e3cff21bc62a9e2d0b6ac30042a1ee" + resolved "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz#28768c76d0e3cff21bc62a9e2d0b6ac30042a1ee" integrity sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ== b4a@^1.6.4: @@ -1147,7 +1282,7 @@ b4a@^1.6.4: babel-plugin-macros@^3.1.0: version "3.1.0" - resolved "https://registry.npmmirror.com/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz#9ef6dc74deb934b4db344dc973ee851d148c50c1" + resolved "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz#9ef6dc74deb934b4db344dc973ee851d148c50c1" integrity sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg== dependencies: "@babel/runtime" "^7.12.5" @@ -1156,23 +1291,23 @@ babel-plugin-macros@^3.1.0: bail@^2.0.0: version "2.0.2" - resolved "https://registry.npmmirror.com/bail/-/bail-2.0.2.tgz#d26f5cd8fe5d6f832a31517b9f7c356040ba6d5d" + resolved "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz#d26f5cd8fe5d6f832a31517b9f7c356040ba6d5d" integrity sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw== balanced-match@^1.0.0: version "1.0.2" - resolved "https://registry.npmmirror.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== bare-events@^2.2.0, bare-events@^2.5.4: - version "2.5.4" - resolved "https://registry.npmjs.org/bare-events/-/bare-events-2.5.4.tgz#16143d435e1ed9eafd1ab85f12b89b3357a41745" - integrity sha512-+gFfDkR8pj4/TrWCGUGWmJIkBwuxPS5F+a5yWjOHQt2hHvNZd5YLzadjmDUtFmMM4y429bnKLa8bYBMHcYdnQA== + version "2.6.0" + resolved "https://registry.npmjs.org/bare-events/-/bare-events-2.6.0.tgz#11d9506da109e363a2f3af050fbb005ccdb3ee8f" + integrity sha512-EKZ5BTXYExaNqi3I3f9RtEsaI/xBSGjE0XZCZilPzFAV/goswFHuPd9jEZlPIZ/iNZJwDSao9qRiScySz7MbQg== bare-fs@^4.0.1: - version "4.1.5" - resolved 
"https://registry.npmjs.org/bare-fs/-/bare-fs-4.1.5.tgz#1d06c076e68cc8bf97010d29af9e3ac3808cdcf7" - integrity sha512-1zccWBMypln0jEE05LzZt+V/8y8AQsQQqxtklqaIyg5nu6OAYFhZxPXinJTSG+kU5qyNmeLgcn9AW7eHiCHVLA== + version "4.1.6" + resolved "https://registry.npmjs.org/bare-fs/-/bare-fs-4.1.6.tgz#0925521e7310f65cb1f154cab264f0b647a7cdef" + integrity sha512-25RsLF33BqooOEFNdMcEhMpJy8EoR88zSMrnOQOaM3USnOK2VmaJ1uaQEwPA6AQjrv1lXChScosN6CzbwbO9OQ== dependencies: bare-events "^2.5.4" bare-path "^3.0.0" @@ -1204,27 +1339,27 @@ basic-ftp@^5.0.2: boolbase@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + resolved "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" integrity sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww== brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + version "1.1.12" + resolved "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz#ab9b454466e5a8cc3a187beaad580412a9c5b843" + integrity sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== dependencies: balanced-match "^1.0.0" concat-map "0.0.1" brace-expansion@^2.0.1: - version "2.0.1" - resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" - integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== + version "2.0.2" + resolved "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz#54fc53237a613d854c7bd37463aad17df87214e7" + integrity sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ== dependencies: balanced-match "^1.0.0" braces@^3.0.3: version "3.0.3" - resolved "https://registry.npmmirror.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + resolved "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: fill-range "^7.1.1" @@ -1236,14 +1371,14 @@ buffer-crc32@~0.2.3: busboy@1.6.0: version "1.6.0" - resolved "https://registry.npmmirror.com/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893" + resolved "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893" integrity sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA== dependencies: streamsearch "^1.1.0" -call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1: +call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + resolved "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== dependencies: es-errors "^1.3.0" @@ -1251,7 +1386,7 @@ call-bind-apply-helpers@^1.0.0, 
call-bind-apply-helpers@^1.0.1: call-bind@^1.0.7, call-bind@^1.0.8: version "1.0.8" - resolved "https://registry.npmmirror.com/call-bind/-/call-bind-1.0.8.tgz#0736a9660f537e3388826f440d5ec45f744eaa4c" + resolved "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz#0736a9660f537e3388826f440d5ec45f744eaa4c" integrity sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww== dependencies: call-bind-apply-helpers "^1.0.0" @@ -1259,32 +1394,32 @@ call-bind@^1.0.7, call-bind@^1.0.8: get-intrinsic "^1.2.4" set-function-length "^1.2.2" -call-bound@^1.0.2, call-bound@^1.0.3: - version "1.0.3" - resolved "https://registry.npmmirror.com/call-bound/-/call-bound-1.0.3.tgz#41cfd032b593e39176a71533ab4f384aa04fd681" - integrity sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA== +call-bound@^1.0.2, call-bound@^1.0.3, call-bound@^1.0.4: + version "1.0.4" + resolved "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz#238de935d2a2a692928c538c7ccfa91067fd062a" + integrity sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg== dependencies: - call-bind-apply-helpers "^1.0.1" - get-intrinsic "^1.2.6" + call-bind-apply-helpers "^1.0.2" + get-intrinsic "^1.3.0" callsites@^3.0.0: version "3.1.0" - resolved "https://registry.npmmirror.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + resolved "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== caniuse-lite@^1.0.30001579: - version "1.0.30001700" - resolved "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001700.tgz#26cd429cf09b4fd4e745daf4916039c794d720f6" - integrity sha512-2S6XIXwaE7K7erT8dY+kLQcpa5ms63XlRkMkReXjle+kf6c5g38vyMl+Z5y8dSxOFDhcFe+nxnn261PLxBSQsQ== + version "1.0.30001727" + resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001727.tgz#22e9706422ad37aa50556af8c10e40e2d93a8b85" + integrity sha512-pB68nIHmbN6L/4C6MH1DokyR3bYqFwjaSs/sWDHGj4CTcFtQUQMuJftVwWkXq7mNWOybD3KhUv3oWHoGxgP14Q== ccount@^2.0.0: version "2.0.1" - resolved "https://registry.npmmirror.com/ccount/-/ccount-2.0.1.tgz#17a3bf82302e0870d6da43a01311a8bc02a3ecf5" + resolved "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz#17a3bf82302e0870d6da43a01311a8bc02a3ecf5" integrity sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg== chalk@^4.0.0: version "4.1.2" - resolved "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + resolved "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== dependencies: ansi-styles "^4.1.0" @@ -1292,27 +1427,27 @@ chalk@^4.0.0: character-entities-html4@^2.0.0: version "2.1.0" - resolved "https://registry.npmmirror.com/character-entities-html4/-/character-entities-html4-2.1.0.tgz#1f1adb940c971a4b22ba39ddca6b618dc6e56b2b" + resolved "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz#1f1adb940c971a4b22ba39ddca6b618dc6e56b2b" integrity sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA== character-entities-legacy@^3.0.0: version "3.0.0" - resolved 
"https://registry.npmmirror.com/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz#76bc83a90738901d7bc223a9e93759fdd560125b" + resolved "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz#76bc83a90738901d7bc223a9e93759fdd560125b" integrity sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ== character-entities@^2.0.0: version "2.0.2" - resolved "https://registry.npmmirror.com/character-entities/-/character-entities-2.0.2.tgz#2d09c2e72cd9523076ccb21157dff66ad43fcc22" + resolved "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz#2d09c2e72cd9523076ccb21157dff66ad43fcc22" integrity sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ== character-reference-invalid@^2.0.0: version "2.0.1" - resolved "https://registry.npmmirror.com/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz#85c66b041e43b47210faf401278abf808ac45cb9" + resolved "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz#85c66b041e43b47210faf401278abf808ac45cb9" integrity sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw== cheerio-select@^2.1.0: version "2.1.0" - resolved "https://registry.npmmirror.com/cheerio-select/-/cheerio-select-2.1.0.tgz#4d8673286b8126ca2a8e42740d5e3c4884ae21b4" + resolved "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz#4d8673286b8126ca2a8e42740d5e3c4884ae21b4" integrity sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g== dependencies: boolbase "^1.0.0" @@ -1323,20 +1458,20 @@ cheerio-select@^2.1.0: domutils "^3.0.1" cheerio@^1.0.0: - version "1.0.0" - resolved "https://registry.npmmirror.com/cheerio/-/cheerio-1.0.0.tgz#1ede4895a82f26e8af71009f961a9b8cb60d6a81" - integrity sha512-quS9HgjQpdaXOvsZz82Oz7uxtXiy6UIsIQcpBj7HRw2M63Skasm9qlDocAM7jNuaxdhpPU7c4kJN+gA5MCu4ww== + version "1.1.0" + resolved "https://registry.npmjs.org/cheerio/-/cheerio-1.1.0.tgz#87b9bec6dd3696e405ea79da7d2749d8308b0953" + integrity sha512-+0hMx9eYhJvWbgpKV9hN7jg0JcwydpopZE4hgi+KvQtByZXPp04NiCWU0LzcAbP63abZckIHkTQaXVF52mX3xQ== dependencies: cheerio-select "^2.1.0" dom-serializer "^2.0.0" domhandler "^5.0.3" - domutils "^3.1.0" + domutils "^3.2.2" encoding-sniffer "^0.2.0" - htmlparser2 "^9.1.0" - parse5 "^7.1.2" - parse5-htmlparser2-tree-adapter "^7.0.0" + htmlparser2 "^10.0.0" + parse5 "^7.3.0" + parse5-htmlparser2-tree-adapter "^7.1.0" parse5-parser-stream "^7.1.2" - undici "^6.19.5" + undici "^7.10.0" whatwg-mimetype "^4.0.0" chromium-bidi@5.1.0: @@ -1349,12 +1484,12 @@ chromium-bidi@5.1.0: classnames@^2.0.0, classnames@^2.2.5: version "2.5.1" - resolved "https://registry.npmmirror.com/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b" + resolved "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b" integrity sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow== client-only@0.0.1, client-only@^0.0.1: version "0.0.1" - resolved "https://registry.npmmirror.com/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" + resolved "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA== cliui@^8.0.1: @@ -1368,34 +1503,34 @@ 
cliui@^8.0.1: clsx@^1.1.0: version "1.2.1" - resolved "https://registry.npmmirror.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" + resolved "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== clsx@^2.1.1: version "2.1.1" - resolved "https://registry.npmmirror.com/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999" + resolved "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999" integrity sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA== collapse-white-space@^2.0.0: version "2.1.0" - resolved "https://registry.npmmirror.com/collapse-white-space/-/collapse-white-space-2.1.0.tgz#640257174f9f42c740b40f3b55ee752924feefca" + resolved "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz#640257174f9f42c740b40f3b55ee752924feefca" integrity sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw== color-convert@^2.0.1: version "2.0.1" - resolved "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + resolved "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== dependencies: color-name "~1.1.4" color-name@^1.0.0, color-name@~1.1.4: version "1.1.4" - resolved "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== color-string@^1.9.0: version "1.9.1" - resolved "https://registry.npmmirror.com/color-string/-/color-string-1.9.1.tgz#4467f9146f036f855b764dfb5bf8582bf342c7a4" + resolved "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz#4467f9146f036f855b764dfb5bf8582bf342c7a4" integrity sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg== dependencies: color-name "^1.0.0" @@ -1403,7 +1538,7 @@ color-string@^1.9.0: color@^4.2.3: version "4.2.3" - resolved "https://registry.npmmirror.com/color/-/color-4.2.3.tgz#d781ecb5e57224ee43ea9627560107c0e0c6463a" + resolved "https://registry.npmjs.org/color/-/color-4.2.3.tgz#d781ecb5e57224ee43ea9627560107c0e0c6463a" integrity sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A== dependencies: color-convert "^2.0.1" @@ -1411,29 +1546,29 @@ color@^4.2.3: comma-separated-tokens@^2.0.0: version "2.0.3" - resolved "https://registry.npmmirror.com/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz#4e89c9458acb61bc8fef19f4529973b2392839ee" + resolved "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz#4e89c9458acb61bc8fef19f4529973b2392839ee" integrity sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg== concat-map@0.0.1: version "0.0.1" - resolved "https://registry.npmmirror.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" integrity 
sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== convert-source-map@^1.5.0: version "1.9.0" - resolved "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" + resolved "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" integrity sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A== copy-to-clipboard@^3.3.1: version "3.3.3" - resolved "https://registry.npmmirror.com/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz#55ac43a1db8ae639a4bd99511c148cdd1b83a1b0" + resolved "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz#55ac43a1db8ae639a4bd99511c148cdd1b83a1b0" integrity sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA== dependencies: toggle-selection "^1.0.6" cosmiconfig@^7.0.0: version "7.1.0" - resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" + resolved "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" integrity sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA== dependencies: "@types/parse-json" "^4.0.0" @@ -1454,7 +1589,7 @@ cosmiconfig@^9.0.0: cross-spawn@^7.0.6: version "7.0.6" - resolved "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" @@ -1462,9 +1597,9 @@ cross-spawn@^7.0.6: which "^2.0.1" css-select@^5.1.0: - version "5.1.0" - resolved "https://registry.npmmirror.com/css-select/-/css-select-5.1.0.tgz#b8ebd6554c3637ccc76688804ad3f6a6fdaea8a6" - integrity sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg== + version "5.2.2" + resolved "https://registry.npmjs.org/css-select/-/css-select-5.2.2.tgz#01b6e8d163637bb2dd6c982ca4ed65863682786e" + integrity sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw== dependencies: boolbase "^1.0.0" css-what "^6.1.0" @@ -1473,18 +1608,18 @@ css-select@^5.1.0: nth-check "^2.0.1" css-what@^6.1.0: - version "6.1.0" - resolved "https://registry.npmmirror.com/css-what/-/css-what-6.1.0.tgz#fb5effcf76f1ddea2c81bdfaa4de44e79bac70f4" - integrity sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw== + version "6.2.2" + resolved "https://registry.npmjs.org/css-what/-/css-what-6.2.2.tgz#cdcc8f9b6977719fdfbd1de7aec24abf756b9dea" + integrity sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA== csstype@^3.0.2, csstype@^3.1.3: version "3.1.3" - resolved "https://registry.npmmirror.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + resolved "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== damerau-levenshtein@^1.0.8: version "1.0.8" - resolved 
"https://registry.npmmirror.com/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz#b43d286ccbd36bc5b2f7ed41caf2d0aba1f8a6e7" + resolved "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz#b43d286ccbd36bc5b2f7ed41caf2d0aba1f8a6e7" integrity sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA== data-uri-to-buffer@^6.0.2: @@ -1494,7 +1629,7 @@ data-uri-to-buffer@^6.0.2: data-view-buffer@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/data-view-buffer/-/data-view-buffer-1.0.2.tgz#211a03ba95ecaf7798a8c7198d79536211f88570" + resolved "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz#211a03ba95ecaf7798a8c7198d79536211f88570" integrity sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ== dependencies: call-bound "^1.0.3" @@ -1503,7 +1638,7 @@ data-view-buffer@^1.0.2: data-view-byte-length@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz#9e80f7ca52453ce3e93d25a35318767ea7704735" + resolved "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz#9e80f7ca52453ce3e93d25a35318767ea7704735" integrity sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ== dependencies: call-bound "^1.0.3" @@ -1512,14 +1647,14 @@ data-view-byte-length@^1.0.2: data-view-byte-offset@^1.0.1: version "1.0.1" - resolved "https://registry.npmmirror.com/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz#068307f9b71ab76dbbe10291389e020856606191" + resolved "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz#068307f9b71ab76dbbe10291389e020856606191" integrity sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ== dependencies: call-bound "^1.0.2" es-errors "^1.3.0" is-data-view "^1.0.1" -debug@4, debug@^4.1.1, debug@^4.4.1: +debug@4, debug@^4.0.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4, debug@^4.4.0, debug@^4.4.1: version "4.4.1" resolved "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz#e5a8bc6cbc4c6cd3e64308b0693a3d4fa550189b" integrity sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ== @@ -1528,33 +1663,26 @@ debug@4, debug@^4.1.1, debug@^4.4.1: debug@^3.2.7: version "3.2.7" - resolved "https://registry.npmmirror.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" + resolved "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== dependencies: ms "^2.1.1" -debug@^4.0.0, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4, debug@^4.3.7: - version "4.4.0" - resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" - integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== - dependencies: - ms "^2.1.3" - decode-named-character-reference@^1.0.0: - version "1.0.2" - resolved "https://registry.npmmirror.com/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz#daabac9690874c394c81e4162a0304b35d824f0e" - integrity sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg== + version "1.2.0" + resolved 
"https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz#25c32ae6dd5e21889549d40f676030e9514cc0ed" + integrity sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q== dependencies: character-entities "^2.0.0" deep-is@^0.1.3: version "0.1.4" - resolved "https://registry.npmmirror.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831" + resolved "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831" integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== define-data-property@^1.0.1, define-data-property@^1.1.4: version "1.1.4" - resolved "https://registry.npmmirror.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + resolved "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== dependencies: es-define-property "^1.0.0" @@ -1563,7 +1691,7 @@ define-data-property@^1.0.1, define-data-property@^1.1.4: define-properties@^1.1.3, define-properties@^1.2.1: version "1.2.1" - resolved "https://registry.npmmirror.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" + resolved "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg== dependencies: define-data-property "^1.0.1" @@ -1581,36 +1709,36 @@ degenerator@^5.0.0: dequal@^2.0.0: version "2.0.3" - resolved "https://registry.npmmirror.com/dequal/-/dequal-2.0.3.tgz#2644214f1997d39ed0ee0ece72335490a7ac67be" + resolved "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz#2644214f1997d39ed0ee0ece72335490a7ac67be" integrity sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA== detect-libc@^2.0.3: - version "2.0.3" - resolved "https://registry.npmmirror.com/detect-libc/-/detect-libc-2.0.3.tgz#f0cd503b40f9939b894697d19ad50895e30cf700" - integrity sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw== + version "2.0.4" + resolved "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz#f04715b8ba815e53b4d8109655b6508a6865a7e8" + integrity sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA== devlop@^1.0.0, devlop@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/devlop/-/devlop-1.1.0.tgz#4db7c2ca4dc6e0e834c30be70c94bbc976dc7018" + resolved "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz#4db7c2ca4dc6e0e834c30be70c94bbc976dc7018" integrity sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA== dependencies: dequal "^2.0.0" -devtools-protocol@0.0.1452169: - version "0.0.1452169" - resolved "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1452169.tgz#25c56a1e9ed0af99b03a00d605a22eae367d85db" - integrity sha512-FOFDVMGrAUNp0dDKsAU1TorWJUx2JOU1k9xdgBKKJF3IBh/Uhl2yswG5r3TEAOrCiGY2QRp1e6LVDQrCsTKO4g== +devtools-protocol@0.0.1464554: + version "0.0.1464554" + resolved "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1464554.tgz#54d88398c99ad93fc81c35569fba9473b6e223d0" + integrity 
sha512-CAoP3lYfwAGQTaAXYvA6JZR0fjGUb7qec1qf4mToyoH2TZgUFeIqYcjh6f9jNuhHfuZiEdH+PONHYrLhRQX6aw== doctrine@^2.1.0: version "2.1.0" - resolved "https://registry.npmmirror.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" + resolved "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw== dependencies: esutils "^2.0.2" dom-helpers@^5.0.1: version "5.2.1" - resolved "https://registry.npmmirror.com/dom-helpers/-/dom-helpers-5.2.1.tgz#d9400536b2bf8225ad98fe052e029451ac40e902" + resolved "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz#d9400536b2bf8225ad98fe052e029451ac40e902" integrity sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA== dependencies: "@babel/runtime" "^7.8.7" @@ -1618,7 +1746,7 @@ dom-helpers@^5.0.1: dom-serializer@^2.0.0: version "2.0.0" - resolved "https://registry.npmmirror.com/dom-serializer/-/dom-serializer-2.0.0.tgz#e41b802e1eedf9f6cae183ce5e622d789d7d8e53" + resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz#e41b802e1eedf9f6cae183ce5e622d789d7d8e53" integrity sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg== dependencies: domelementtype "^2.3.0" @@ -1627,19 +1755,19 @@ dom-serializer@^2.0.0: domelementtype@^2.3.0: version "2.3.0" - resolved "https://registry.npmmirror.com/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" + resolved "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw== domhandler@^5.0.2, domhandler@^5.0.3: version "5.0.3" - resolved "https://registry.npmmirror.com/domhandler/-/domhandler-5.0.3.tgz#cc385f7f751f1d1fc650c21374804254538c7d31" + resolved "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz#cc385f7f751f1d1fc650c21374804254538c7d31" integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w== dependencies: domelementtype "^2.3.0" -domutils@^3.0.1, domutils@^3.1.0: +domutils@^3.0.1, domutils@^3.2.1, domutils@^3.2.2: version "3.2.2" - resolved "https://registry.npmmirror.com/domutils/-/domutils-3.2.2.tgz#edbfe2b668b0c1d97c24baf0f1062b132221bc78" + resolved "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz#edbfe2b668b0c1d97c24baf0f1062b132221bc78" integrity sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw== dependencies: dom-serializer "^2.0.0" @@ -1648,7 +1776,7 @@ domutils@^3.0.1, domutils@^3.1.0: dunder-proto@^1.0.0, dunder-proto@^1.0.1: version "1.0.1" - resolved "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + resolved "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== dependencies: call-bind-apply-helpers "^1.0.1" @@ -1662,42 +1790,39 @@ emoji-regex@^8.0.0: emoji-regex@^9.2.2: version "9.2.2" - resolved "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" + resolved 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== encoding-sniffer@^0.2.0: - version "0.2.0" - resolved "https://registry.npmmirror.com/encoding-sniffer/-/encoding-sniffer-0.2.0.tgz#799569d66d443babe82af18c9f403498365ef1d5" - integrity sha512-ju7Wq1kg04I3HtiYIOrUrdfdDvkyO9s5XM8QAj/bN61Yo/Vb4vgJxy5vi4Yxk01gWHbrofpPtpxM8bKger9jhg== + version "0.2.1" + resolved "https://registry.npmjs.org/encoding-sniffer/-/encoding-sniffer-0.2.1.tgz#396ec97ac22ce5a037ba44af1992ac9d46a7b819" + integrity sha512-5gvq20T6vfpekVtqrYQsSCFZ1wEg5+wW0/QaZMWkFr6BqD3NfKs0rLCx4rrVlSWJeZb5NBJgVLswK/w2MWU+Gw== dependencies: iconv-lite "^0.6.3" whatwg-encoding "^3.1.1" end-of-stream@^1.1.0: - version "1.4.4" - resolved "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" - integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + version "1.4.5" + resolved "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz#7344d711dea40e0b74abc2ed49778743ccedb08c" + integrity sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg== dependencies: once "^1.4.0" -enhanced-resolve@^5.15.0: - version "5.18.1" - resolved "https://registry.npmmirror.com/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz#728ab082f8b7b6836de51f1637aab5d3b9568faf" - integrity sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg== - dependencies: - graceful-fs "^4.2.4" - tapable "^2.2.0" - enquire.js@^2.1.6: version "2.1.6" - resolved "https://registry.npmmirror.com/enquire.js/-/enquire.js-2.1.6.tgz#3e8780c9b8b835084c3f60e166dbc3c2a3c89814" + resolved "https://registry.npmjs.org/enquire.js/-/enquire.js-2.1.6.tgz#3e8780c9b8b835084c3f60e166dbc3c2a3c89814" integrity sha512-/KujNpO+PT63F7Hlpu4h3pE3TokKRHN26JYmQpPyjkRD/N57R7bPDNojMXdi7uveAKjYB7yQnartCxZnFWr0Xw== -entities@^4.2.0, entities@^4.5.0: +entities@^4.2.0: version "4.5.0" - resolved "https://registry.npmmirror.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" + resolved "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== +entities@^6.0.0: + version "6.0.1" + resolved "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz#c28c34a43379ca7f61d074130b2f5f7020a30694" + integrity sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g== + env-paths@^2.2.1: version "2.2.1" resolved "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz#420399d416ce1fbe9bc0a07c62fa68d67fd0f8f2" @@ -1705,32 +1830,32 @@ env-paths@^2.2.1: error-ex@^1.3.1: version "1.3.2" - resolved "https://registry.npmmirror.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + resolved "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== dependencies: is-arrayish "^0.2.1" -es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23.5, es-abstract@^1.23.6, es-abstract@^1.23.9: - version "1.23.9" - resolved 
"https://registry.npmmirror.com/es-abstract/-/es-abstract-1.23.9.tgz#5b45994b7de78dada5c1bebf1379646b32b9d606" - integrity sha512-py07lI0wjxAC/DcfK1S6G7iANonniZwTISvdPzk9hzeH0IZIshbuuFxLIU96OyF89Yb9hiqWn8M/bY83KY5vzA== +es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23.5, es-abstract@^1.23.6, es-abstract@^1.23.9, es-abstract@^1.24.0: + version "1.24.0" + resolved "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.0.tgz#c44732d2beb0acc1ed60df840869e3106e7af328" + integrity sha512-WSzPgsdLtTcQwm4CROfS5ju2Wa1QQcVeT37jFjYzdFz1r9ahadC8B8/a4qxJxM+09F18iumCdRmlr96ZYkQvEg== dependencies: array-buffer-byte-length "^1.0.2" arraybuffer.prototype.slice "^1.0.4" available-typed-arrays "^1.0.7" call-bind "^1.0.8" - call-bound "^1.0.3" + call-bound "^1.0.4" data-view-buffer "^1.0.2" data-view-byte-length "^1.0.2" data-view-byte-offset "^1.0.1" es-define-property "^1.0.1" es-errors "^1.3.0" - es-object-atoms "^1.0.0" + es-object-atoms "^1.1.1" es-set-tostringtag "^2.1.0" es-to-primitive "^1.3.0" function.prototype.name "^1.1.8" - get-intrinsic "^1.2.7" - get-proto "^1.0.0" + get-intrinsic "^1.3.0" + get-proto "^1.0.1" get-symbol-description "^1.1.0" globalthis "^1.0.4" gopd "^1.2.0" @@ -1742,21 +1867,24 @@ es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23 is-array-buffer "^3.0.5" is-callable "^1.2.7" is-data-view "^1.0.2" + is-negative-zero "^2.0.3" is-regex "^1.2.1" + is-set "^2.0.3" is-shared-array-buffer "^1.0.4" is-string "^1.1.1" is-typed-array "^1.1.15" - is-weakref "^1.1.0" + is-weakref "^1.1.1" math-intrinsics "^1.1.0" - object-inspect "^1.13.3" + object-inspect "^1.13.4" object-keys "^1.1.1" object.assign "^4.1.7" own-keys "^1.0.1" - regexp.prototype.flags "^1.5.3" + regexp.prototype.flags "^1.5.4" safe-array-concat "^1.1.3" safe-push-apply "^1.0.0" safe-regex-test "^1.1.0" set-proto "^1.0.0" + stop-iteration-iterator "^1.1.0" string.prototype.trim "^1.2.10" string.prototype.trimend "^1.0.9" string.prototype.trimstart "^1.0.8" @@ -1765,21 +1893,21 @@ es-abstract@^1.17.5, es-abstract@^1.23.2, es-abstract@^1.23.3, es-abstract@^1.23 typed-array-byte-offset "^1.0.4" typed-array-length "^1.0.7" unbox-primitive "^1.1.0" - which-typed-array "^1.1.18" + which-typed-array "^1.1.19" es-define-property@^1.0.0, es-define-property@^1.0.1: version "1.0.1" - resolved "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + resolved "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== es-errors@^1.3.0: version "1.3.0" - resolved "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + resolved "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== es-iterator-helpers@^1.2.1: version "1.2.1" - resolved "https://registry.npmmirror.com/es-iterator-helpers/-/es-iterator-helpers-1.2.1.tgz#d1dd0f58129054c0ad922e6a9a1e65eef435fe75" + resolved "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.1.tgz#d1dd0f58129054c0ad922e6a9a1e65eef435fe75" integrity sha512-uDn+FE1yrDzyC0pCo961B2IHbdM8y/ACZsKD4dG6WqrjV53BADjwa7D+1aom2rsNVfLyDgU/eigvlJGJ08OQ4w== dependencies: call-bind "^1.0.8" @@ -1799,16 
+1927,16 @@ es-iterator-helpers@^1.2.1: iterator.prototype "^1.1.4" safe-array-concat "^1.1.3" -es-object-atoms@^1.0.0: +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: version "1.1.1" - resolved "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + resolved "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== dependencies: es-errors "^1.3.0" es-set-tostringtag@^2.0.3, es-set-tostringtag@^2.1.0: version "2.1.0" - resolved "https://registry.npmmirror.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + resolved "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== dependencies: es-errors "^1.3.0" @@ -1816,16 +1944,16 @@ es-set-tostringtag@^2.0.3, es-set-tostringtag@^2.1.0: has-tostringtag "^1.0.2" hasown "^2.0.2" -es-shim-unscopables@^1.0.2: +es-shim-unscopables@^1.0.2, es-shim-unscopables@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz#438df35520dac5d105f3943d927549ea3b00f4b5" + resolved "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.1.0.tgz#438df35520dac5d105f3943d927549ea3b00f4b5" integrity sha512-d9T8ucsEhh8Bi1woXCf+TIKDIROLG5WCkxg8geBCbvk22kzwC5G2OnXVMO6FUsvQlgUUXQ2itephWDLqDzbeCw== dependencies: hasown "^2.0.2" es-to-primitive@^1.3.0: version "1.3.0" - resolved "https://registry.npmmirror.com/es-to-primitive/-/es-to-primitive-1.3.0.tgz#96c89c82cc49fd8794a24835ba3e1ff87f214e18" + resolved "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz#96c89c82cc49fd8794a24835ba3e1ff87f214e18" integrity sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g== dependencies: is-callable "^1.2.7" @@ -1834,7 +1962,7 @@ es-to-primitive@^1.3.0: esast-util-from-estree@^2.0.0: version "2.0.0" - resolved "https://registry.npmmirror.com/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz#8d1cfb51ad534d2f159dc250e604f3478a79f1ad" + resolved "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz#8d1cfb51ad534d2f159dc250e604f3478a79f1ad" integrity sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ== dependencies: "@types/estree-jsx" "^1.0.0" @@ -1844,7 +1972,7 @@ esast-util-from-estree@^2.0.0: esast-util-from-js@^2.0.0: version "2.0.1" - resolved "https://registry.npmmirror.com/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz#5147bec34cc9da44accf52f87f239a40ac3e8225" + resolved "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz#5147bec34cc9da44accf52f87f239a40ac3e8225" integrity sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw== dependencies: "@types/estree-jsx" "^1.0.0" @@ -1859,12 +1987,12 @@ escalade@^3.1.1: escape-string-regexp@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" integrity 
sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== escape-string-regexp@^5.0.0: version "5.0.0" - resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz#4683126b500b61762f2dbebace1806e8be31b1c8" + resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz#4683126b500b61762f2dbebace1806e8be31b1c8" integrity sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw== escodegen@^2.1.0: @@ -1878,12 +2006,12 @@ escodegen@^2.1.0: optionalDependencies: source-map "~0.6.1" -eslint-config-next@15.1.7: - version "15.1.7" - resolved "https://registry.npmmirror.com/eslint-config-next/-/eslint-config-next-15.1.7.tgz#80d287e6d1c0742304de8fe57cbc45000c455c11" - integrity sha512-zXoMnYUIy3XHaAoOhrcYkT9UQWvXqWju2K7NNsmb5wd/7XESDwof61eUdW4QhERr3eJ9Ko/vnXqIrj8kk/drYw== +eslint-config-next@15.2.4: + version "15.2.4" + resolved "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-15.2.4.tgz#549084ab268c3ced41ecdbded1892e4c8ecc442e" + integrity sha512-v4gYjd4eYIme8qzaJItpR5MMBXJ0/YV07u7eb50kEnlEmX7yhOjdUdzz70v4fiINYRjLf8X8TbogF0k7wlz6sA== dependencies: - "@next/eslint-plugin-next" "15.1.7" + "@next/eslint-plugin-next" "15.2.4" "@rushstack/eslint-patch" "^1.10.3" "@typescript-eslint/eslint-plugin" "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0" "@typescript-eslint/parser" "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0" @@ -1896,7 +2024,7 @@ eslint-config-next@15.1.7: eslint-import-resolver-node@^0.3.6, eslint-import-resolver-node@^0.3.9: version "0.3.9" - resolved "https://registry.npmmirror.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz#d4eaac52b8a2e7c3cd1903eb00f7e053356118ac" + resolved "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz#d4eaac52b8a2e7c3cd1903eb00f7e053356118ac" integrity sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g== dependencies: debug "^3.2.7" @@ -1904,53 +2032,53 @@ eslint-import-resolver-node@^0.3.6, eslint-import-resolver-node@^0.3.9: resolve "^1.22.4" eslint-import-resolver-typescript@^3.5.2: - version "3.8.1" - resolved "https://registry.npmmirror.com/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.8.1.tgz#43aab1e51080eb157d4a35fb3db9d737ea79e2f6" - integrity sha512-qw5TPA12HTmb9CkcuiNrFtwhM1ae2FWysLeRrTbQ+/JKS///gbL3fQ5LRhAZnzkcqkScOvkB5Y5o+xgyQz1VVg== + version "3.10.1" + resolved "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.10.1.tgz#23dac32efa86a88e2b8232eb244ac499ad636db2" + integrity sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ== dependencies: "@nolyfill/is-core-module" "1.0.39" - debug "^4.3.7" - enhanced-resolve "^5.15.0" + debug "^4.4.0" get-tsconfig "^4.10.0" - is-bun-module "^1.0.2" - stable-hash "^0.0.4" - tinyglobby "^0.2.10" + is-bun-module "^2.0.0" + stable-hash "^0.0.5" + tinyglobby "^0.2.13" + unrs-resolver "^1.6.2" -eslint-module-utils@^2.12.0: - version "2.12.0" - resolved "https://registry.npmmirror.com/eslint-module-utils/-/eslint-module-utils-2.12.0.tgz#fe4cfb948d61f49203d7b08871982b65b9af0b0b" - integrity sha512-wALZ0HFoytlyh/1+4wuZ9FJCD/leWHQzzrxJ8+rebyReSLk7LApMyd3WJaLVoN+D5+WIdJyDK1c6JnE65V4Zyg== +eslint-module-utils@^2.12.1: + version "2.12.1" + resolved 
"https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.1.tgz#f76d3220bfb83c057651359295ab5854eaad75ff" + integrity sha512-L8jSWTze7K2mTg0vos/RuLRS5soomksDPoJLXIslC7c8Wmut3bx7CPpJijDcBZtxQ5lrbUdM+s0OlNbz0DCDNw== dependencies: debug "^3.2.7" eslint-plugin-import@^2.31.0: - version "2.31.0" - resolved "https://registry.npmmirror.com/eslint-plugin-import/-/eslint-plugin-import-2.31.0.tgz#310ce7e720ca1d9c0bb3f69adfd1c6bdd7d9e0e7" - integrity sha512-ixmkI62Rbc2/w8Vfxyh1jQRTdRTF52VxwRVHl/ykPAmqG+Nb7/kNn+byLP0LxPgI7zWA16Jt82SybJInmMia3A== + version "2.32.0" + resolved "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.32.0.tgz#602b55faa6e4caeaa5e970c198b5c00a37708980" + integrity sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA== dependencies: "@rtsao/scc" "^1.1.0" - array-includes "^3.1.8" - array.prototype.findlastindex "^1.2.5" - array.prototype.flat "^1.3.2" - array.prototype.flatmap "^1.3.2" + array-includes "^3.1.9" + array.prototype.findlastindex "^1.2.6" + array.prototype.flat "^1.3.3" + array.prototype.flatmap "^1.3.3" debug "^3.2.7" doctrine "^2.1.0" eslint-import-resolver-node "^0.3.9" - eslint-module-utils "^2.12.0" + eslint-module-utils "^2.12.1" hasown "^2.0.2" - is-core-module "^2.15.1" + is-core-module "^2.16.1" is-glob "^4.0.3" minimatch "^3.1.2" object.fromentries "^2.0.8" object.groupby "^1.0.3" - object.values "^1.2.0" + object.values "^1.2.1" semver "^6.3.1" - string.prototype.trimend "^1.0.8" + string.prototype.trimend "^1.0.9" tsconfig-paths "^3.15.0" eslint-plugin-jsx-a11y@^6.10.0: version "6.10.2" - resolved "https://registry.npmmirror.com/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz#d2812bb23bf1ab4665f1718ea442e8372e638483" + resolved "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz#d2812bb23bf1ab4665f1718ea442e8372e638483" integrity sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q== dependencies: aria-query "^5.3.2" @@ -1970,14 +2098,14 @@ eslint-plugin-jsx-a11y@^6.10.0: string.prototype.includes "^2.0.1" eslint-plugin-react-hooks@^5.0.0: - version "5.1.0" - resolved "https://registry.npmmirror.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.1.0.tgz#3d34e37d5770866c34b87d5b499f5f0b53bf0854" - integrity sha512-mpJRtPgHN2tNAvZ35AMfqeB3Xqeo273QxrHJsbBEPWODRM4r0yB6jfoROqKEYrOn27UtRPpcpHc2UqyBSuUNTw== + version "5.2.0" + resolved "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.2.0.tgz#1be0080901e6ac31ce7971beed3d3ec0a423d9e3" + integrity sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg== eslint-plugin-react@^7.37.0: - version "7.37.4" - resolved "https://registry.npmmirror.com/eslint-plugin-react/-/eslint-plugin-react-7.37.4.tgz#1b6c80b6175b6ae4b26055ae4d55d04c414c7181" - integrity sha512-BGP0jRmfYyvOyvMoRX/uoUeW+GqNj9y16bPQzqAHf3AYII/tDs+jMN0dBVkl88/OZwNGwrVFxE7riHsXVfy/LQ== + version "7.37.5" + resolved "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.5.tgz#2975511472bdda1b272b34d779335c9b0e877065" + integrity sha512-Qteup0SqU15kdocexFNAJMvCJEfa2xUKNV4CC1xsVMrIIqEy3SQ/rqyxCWNzfrd3/ldy6HMlD2e0JDVpDg2qIA== dependencies: array-includes "^3.1.8" array.prototype.findlast "^1.2.5" @@ -1989,7 +2117,7 @@ eslint-plugin-react@^7.37.0: hasown "^2.0.2" jsx-ast-utils "^2.4.1 || ^3.0.0" minimatch "^3.1.2" - object.entries "^1.1.8" + object.entries "^1.1.9" object.fromentries "^2.0.8" 
object.values "^1.2.1" prop-types "^15.8.1" @@ -1998,39 +2126,40 @@ eslint-plugin-react@^7.37.0: string.prototype.matchall "^4.0.12" string.prototype.repeat "^1.0.0" -eslint-scope@^8.2.0: - version "8.2.0" - resolved "https://registry.npmmirror.com/eslint-scope/-/eslint-scope-8.2.0.tgz#377aa6f1cb5dc7592cfd0b7f892fd0cf352ce442" - integrity sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A== +eslint-scope@^8.4.0: + version "8.4.0" + resolved "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz#88e646a207fad61436ffa39eb505147200655c82" + integrity sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg== dependencies: esrecurse "^4.3.0" estraverse "^5.2.0" eslint-visitor-keys@^3.4.3: version "3.4.3" - resolved "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" + resolved "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== -eslint-visitor-keys@^4.2.0: - version "4.2.0" - resolved "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz#687bacb2af884fcdda8a6e7d65c606f46a14cd45" - integrity sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw== +eslint-visitor-keys@^4.2.1: + version "4.2.1" + resolved "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz#4cfea60fe7dd0ad8e816e1ed026c1d5251b512c1" + integrity sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ== eslint@^9: - version "9.20.1" - resolved "https://registry.npmmirror.com/eslint/-/eslint-9.20.1.tgz#923924c078f5226832449bac86662dd7e53c91d6" - integrity sha512-m1mM33o6dBUjxl2qb6wv6nGNwCAsns1eKtaQ4l/NPHeTvhiUPbtdfMyktxN4B3fgHIgsYh1VT3V9txblpQHq+g== + version "9.31.0" + resolved "https://registry.npmjs.org/eslint/-/eslint-9.31.0.tgz#9a488e6da75bbe05785cd62e43c5ea99356d21ba" + integrity sha512-QldCVh/ztyKJJZLr4jXNUByx3gR+TDYZCRXEktiZoUR3PGy4qCmSbkxcIle8GEwGpb5JBZazlaJ/CxLidXdEbQ== dependencies: "@eslint-community/eslint-utils" "^4.2.0" "@eslint-community/regexpp" "^4.12.1" - "@eslint/config-array" "^0.19.0" - "@eslint/core" "^0.11.0" - "@eslint/eslintrc" "^3.2.0" - "@eslint/js" "9.20.0" - "@eslint/plugin-kit" "^0.2.5" + "@eslint/config-array" "^0.21.0" + "@eslint/config-helpers" "^0.3.0" + "@eslint/core" "^0.15.0" + "@eslint/eslintrc" "^3.3.1" + "@eslint/js" "9.31.0" + "@eslint/plugin-kit" "^0.3.1" "@humanfs/node" "^0.16.6" "@humanwhocodes/module-importer" "^1.0.1" - "@humanwhocodes/retry" "^0.4.1" + "@humanwhocodes/retry" "^0.4.2" "@types/estree" "^1.0.6" "@types/json-schema" "^7.0.15" ajv "^6.12.4" @@ -2038,9 +2167,9 @@ eslint@^9: cross-spawn "^7.0.6" debug "^4.3.2" escape-string-regexp "^4.0.0" - eslint-scope "^8.2.0" - eslint-visitor-keys "^4.2.0" - espree "^10.3.0" + eslint-scope "^8.4.0" + eslint-visitor-keys "^4.2.1" + espree "^10.4.0" esquery "^1.5.0" esutils "^2.0.2" fast-deep-equal "^3.1.3" @@ -2056,49 +2185,49 @@ eslint@^9: natural-compare "^1.4.0" optionator "^0.9.3" -espree@^10.0.1, espree@^10.3.0: - version "10.3.0" - resolved "https://registry.npmmirror.com/espree/-/espree-10.3.0.tgz#29267cf5b0cb98735b65e64ba07e0ed49d1eed8a" - integrity sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg== +espree@^10.0.1, 
espree@^10.4.0: + version "10.4.0" + resolved "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz#d54f4949d4629005a1fa168d937c3ff1f7e2a837" + integrity sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ== dependencies: - acorn "^8.14.0" + acorn "^8.15.0" acorn-jsx "^5.3.2" - eslint-visitor-keys "^4.2.0" + eslint-visitor-keys "^4.2.1" esprima@^4.0.0, esprima@^4.0.1: version "4.0.1" - resolved "https://registry.npmmirror.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + resolved "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== esquery@^1.5.0: version "1.6.0" - resolved "https://registry.npmmirror.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7" + resolved "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7" integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg== dependencies: estraverse "^5.1.0" esrecurse@^4.3.0: version "4.3.0" - resolved "https://registry.npmmirror.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" + resolved "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== dependencies: estraverse "^5.2.0" estraverse@^5.1.0, estraverse@^5.2.0, estraverse@^5.3.0: version "5.3.0" - resolved "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123" + resolved "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123" integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== estree-util-attach-comments@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz#344bde6a64c8a31d15231e5ee9e297566a691c2d" + resolved "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz#344bde6a64c8a31d15231e5ee9e297566a691c2d" integrity sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw== dependencies: "@types/estree" "^1.0.0" estree-util-build-jsx@^3.0.0: version "3.0.1" - resolved "https://registry.npmmirror.com/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz#b6d0bced1dcc4f06f25cf0ceda2b2dcaf98168f1" + resolved "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz#b6d0bced1dcc4f06f25cf0ceda2b2dcaf98168f1" integrity sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ== dependencies: "@types/estree-jsx" "^1.0.0" @@ -2108,12 +2237,12 @@ estree-util-build-jsx@^3.0.0: estree-util-is-identifier-name@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz#0b5ef4c4ff13508b34dcd01ecfa945f61fce5dbd" + resolved "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz#0b5ef4c4ff13508b34dcd01ecfa945f61fce5dbd" integrity sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg== estree-util-scope@^1.0.0: version "1.0.0" - resolved 
"https://registry.npmmirror.com/estree-util-scope/-/estree-util-scope-1.0.0.tgz#9cbdfc77f5cb51e3d9ed4ad9c4adbff22d43e585" + resolved "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz#9cbdfc77f5cb51e3d9ed4ad9c4adbff22d43e585" integrity sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ== dependencies: "@types/estree" "^1.0.0" @@ -2121,7 +2250,7 @@ estree-util-scope@^1.0.0: estree-util-to-js@^2.0.0: version "2.0.0" - resolved "https://registry.npmmirror.com/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz#10a6fb924814e6abb62becf0d2bc4dea51d04f17" + resolved "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz#10a6fb924814e6abb62becf0d2bc4dea51d04f17" integrity sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg== dependencies: "@types/estree-jsx" "^1.0.0" @@ -2129,15 +2258,15 @@ estree-util-to-js@^2.0.0: source-map "^0.7.0" estree-util-value-to-estree@^3.0.0: - version "3.3.2" - resolved "https://registry.npmmirror.com/estree-util-value-to-estree/-/estree-util-value-to-estree-3.3.2.tgz#75bb2263850b6f5ac35edd343929c36b51a69806" - integrity sha512-hYH1aSvQI63Cvq3T3loaem6LW4u72F187zW4FHpTrReJSm6W66vYTFNO1vH/chmcOulp1HlAj1pxn8Ag0oXI5Q== + version "3.4.0" + resolved "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.4.0.tgz#827122e40c3a756d3c4cf5d5d296fa06026a1a4f" + integrity sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ== dependencies: "@types/estree" "^1.0.0" estree-util-visit@^2.0.0: version "2.0.0" - resolved "https://registry.npmmirror.com/estree-util-visit/-/estree-util-visit-2.0.0.tgz#13a9a9f40ff50ed0c022f831ddf4b58d05446feb" + resolved "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz#13a9a9f40ff50ed0c022f831ddf4b58d05446feb" integrity sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww== dependencies: "@types/estree-jsx" "^1.0.0" @@ -2145,31 +2274,31 @@ estree-util-visit@^2.0.0: estree-walker@^3.0.0: version "3.0.3" - resolved "https://registry.npmmirror.com/estree-walker/-/estree-walker-3.0.3.tgz#67c3e549ec402a487b4fc193d1953a524752340d" + resolved "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz#67c3e549ec402a487b4fc193d1953a524752340d" integrity sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g== dependencies: "@types/estree" "^1.0.0" esutils@^2.0.2: version "2.0.3" - resolved "https://registry.npmmirror.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + resolved "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== eventemitter3@^5.0.0: version "5.0.1" - resolved "https://registry.npmmirror.com/eventemitter3/-/eventemitter3-5.0.1.tgz#53f5ffd0a492ac800721bb42c66b841de96423c4" + resolved "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz#53f5ffd0a492ac800721bb42c66b841de96423c4" integrity sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA== extend-shallow@^2.0.1: version "2.0.1" - resolved "https://registry.npmmirror.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + resolved 
"https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" integrity sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug== dependencies: is-extendable "^0.1.0" extend@^3.0.0: version "3.0.2" - resolved "https://registry.npmmirror.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + resolved "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== extract-zip@^2.0.1: @@ -2185,7 +2314,7 @@ extract-zip@^2.0.1: fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: version "3.1.3" - resolved "https://registry.npmmirror.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + resolved "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== fast-fifo@^1.2.0, fast-fifo@^1.3.2: @@ -2195,7 +2324,7 @@ fast-fifo@^1.2.0, fast-fifo@^1.3.2: fast-glob@3.3.1: version "3.3.1" - resolved "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4" + resolved "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4" integrity sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg== dependencies: "@nodelib/fs.stat" "^2.0.2" @@ -2206,7 +2335,7 @@ fast-glob@3.3.1: fast-glob@^3.3.2, fast-glob@^3.3.3: version "3.3.3" - resolved "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" + resolved "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== dependencies: "@nodelib/fs.stat" "^2.0.2" @@ -2217,24 +2346,24 @@ fast-glob@^3.3.2, fast-glob@^3.3.3: fast-json-stable-stringify@^2.0.0: version "2.1.0" - resolved "https://registry.npmmirror.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + resolved "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== fast-levenshtein@^2.0.6: version "2.0.6" - resolved "https://registry.npmmirror.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + resolved "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== fastq@^1.6.0: - version "1.19.0" - resolved "https://registry.npmmirror.com/fastq/-/fastq-1.19.0.tgz#a82c6b7c2bb4e44766d865f07997785fecfdcb89" - integrity sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA== + version "1.19.1" + resolved "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz#d50eaba803c8846a883c16492821ebcd2cda55f5" + integrity sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ== dependencies: reusify "^1.0.4" fault@^2.0.0: version "2.0.1" - resolved 
"https://registry.npmmirror.com/fault/-/fault-2.0.1.tgz#d47ca9f37ca26e4bd38374a7c500b5a384755b6c" + resolved "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz#d47ca9f37ca26e4bd38374a7c500b5a384755b6c" integrity sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ== dependencies: format "^0.2.0" @@ -2246,33 +2375,33 @@ fd-slicer@~1.1.0: dependencies: pend "~1.2.0" -fdir@^6.4.3: - version "6.4.3" - resolved "https://registry.npmmirror.com/fdir/-/fdir-6.4.3.tgz#011cdacf837eca9b811c89dbb902df714273db72" - integrity sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw== +fdir@^6.4.4: + version "6.4.6" + resolved "https://registry.npmjs.org/fdir/-/fdir-6.4.6.tgz#2b268c0232697063111bbf3f64810a2a741ba281" + integrity sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w== file-entry-cache@^8.0.0: version "8.0.0" - resolved "https://registry.npmmirror.com/file-entry-cache/-/file-entry-cache-8.0.0.tgz#7787bddcf1131bffb92636c69457bbc0edd6d81f" + resolved "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz#7787bddcf1131bffb92636c69457bbc0edd6d81f" integrity sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== dependencies: flat-cache "^4.0.0" fill-range@^7.1.1: version "7.1.1" - resolved "https://registry.npmmirror.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + resolved "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" find-root@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4" + resolved "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4" integrity sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng== find-up@^5.0.0: version "5.0.0" - resolved "https://registry.npmmirror.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + resolved "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== dependencies: locate-path "^6.0.0" @@ -2280,37 +2409,37 @@ find-up@^5.0.0: flat-cache@^4.0.0: version "4.0.1" - resolved "https://registry.npmmirror.com/flat-cache/-/flat-cache-4.0.1.tgz#0ece39fcb14ee012f4b0410bd33dd9c1f011127c" + resolved "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz#0ece39fcb14ee012f4b0410bd33dd9c1f011127c" integrity sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== dependencies: flatted "^3.2.9" keyv "^4.5.4" flatted@^3.2.9: - version "3.3.2" - resolved "https://registry.npmmirror.com/flatted/-/flatted-3.3.2.tgz#adba1448a9841bec72b42c532ea23dbbedef1a27" - integrity sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA== + version "3.3.3" + resolved "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz#67c8fad95454a7c7abebf74bb78ee74a44023358" + integrity sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg== -for-each@^0.3.3: +for-each@^0.3.3, for-each@^0.3.5: version "0.3.5" - resolved 
"https://registry.npmmirror.com/for-each/-/for-each-0.3.5.tgz#d650688027826920feeb0af747ee7b9421a41d47" + resolved "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz#d650688027826920feeb0af747ee7b9421a41d47" integrity sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg== dependencies: is-callable "^1.2.7" format@^0.2.0: version "0.2.2" - resolved "https://registry.npmmirror.com/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b" + resolved "https://registry.npmjs.org/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b" integrity sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww== function-bind@^1.1.2: version "1.1.2" - resolved "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== function.prototype.name@^1.1.6, function.prototype.name@^1.1.8: version "1.1.8" - resolved "https://registry.npmmirror.com/function.prototype.name/-/function.prototype.name-1.1.8.tgz#e68e1df7b259a5c949eeef95cdbde53edffabb78" + resolved "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz#e68e1df7b259a5c949eeef95cdbde53edffabb78" integrity sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q== dependencies: call-bind "^1.0.8" @@ -2322,7 +2451,7 @@ function.prototype.name@^1.1.6, function.prototype.name@^1.1.8: functions-have-names@^1.2.3: version "1.2.3" - resolved "https://registry.npmmirror.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" + resolved "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== get-caller-file@^2.0.5: @@ -2330,17 +2459,17 @@ get-caller-file@^2.0.5: resolved "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== -get-intrinsic@^1.2.4, get-intrinsic@^1.2.5, get-intrinsic@^1.2.6, get-intrinsic@^1.2.7: - version "1.2.7" - resolved "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.2.7.tgz#dcfcb33d3272e15f445d15124bc0a216189b9044" - integrity sha512-VW6Pxhsrk0KAOqs3WEd0klDiF/+V7gQOpAvY1jVU/LHmaD/kQO4523aiJuikX/QAKYiW6x8Jh+RJej1almdtCA== +get-intrinsic@^1.2.4, get-intrinsic@^1.2.5, get-intrinsic@^1.2.6, get-intrinsic@^1.2.7, get-intrinsic@^1.3.0: + version "1.3.0" + resolved "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== dependencies: - call-bind-apply-helpers "^1.0.1" + call-bind-apply-helpers "^1.0.2" es-define-property "^1.0.1" es-errors "^1.3.0" - es-object-atoms "^1.0.0" + es-object-atoms "^1.1.1" function-bind "^1.1.2" - get-proto "^1.0.0" + get-proto "^1.0.1" gopd "^1.2.0" has-symbols "^1.1.0" hasown "^2.0.2" @@ -2348,7 +2477,7 @@ get-intrinsic@^1.2.4, get-intrinsic@^1.2.5, get-intrinsic@^1.2.6, get-intrinsic@ get-proto@^1.0.0, 
get-proto@^1.0.1: version "1.0.1" - resolved "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + resolved "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== dependencies: dunder-proto "^1.0.1" @@ -2363,7 +2492,7 @@ get-stream@^5.1.0: get-symbol-description@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/get-symbol-description/-/get-symbol-description-1.1.0.tgz#7bdd54e0befe8ffc9f3b4e203220d9f1e881b6ee" + resolved "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz#7bdd54e0befe8ffc9f3b4e203220d9f1e881b6ee" integrity sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg== dependencies: call-bound "^1.0.3" @@ -2371,16 +2500,16 @@ get-symbol-description@^1.1.0: get-intrinsic "^1.2.6" get-tsconfig@^4.10.0: - version "4.10.0" - resolved "https://registry.npmmirror.com/get-tsconfig/-/get-tsconfig-4.10.0.tgz#403a682b373a823612475a4c2928c7326fc0f6bb" - integrity sha512-kGzZ3LWWQcGIAmg6iWvXn0ei6WDtV26wzHRMwDSzmAbcXrTEXxHy6IehI6/4eT6VRKyMP1eF1VqwrVUmE/LR7A== + version "4.10.1" + resolved "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.1.tgz#d34c1c01f47d65a606c37aa7a177bc3e56ab4b2e" + integrity sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ== dependencies: resolve-pkg-maps "^1.0.0" get-uri@^6.0.1: - version "6.0.4" - resolved "https://registry.npmjs.org/get-uri/-/get-uri-6.0.4.tgz#6daaee9e12f9759e19e55ba313956883ef50e0a7" - integrity sha512-E1b1lFFLvLgak2whF2xDBcOy6NLVGZBqqjJjsIhvopKfWWEi64pLVTWWehV8KlLerZkfNTA95sTe2OdJKm1OzQ== + version "6.0.5" + resolved "https://registry.npmjs.org/get-uri/-/get-uri-6.0.5.tgz#714892aa4a871db671abc5395e5e9447bc306a16" + integrity sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg== dependencies: basic-ftp "^5.0.2" data-uri-to-buffer "^6.0.2" @@ -2388,31 +2517,26 @@ get-uri@^6.0.1: glob-parent@^5.1.2: version "5.1.2" - resolved "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== dependencies: is-glob "^4.0.1" glob-parent@^6.0.2: version "6.0.2" - resolved "https://registry.npmmirror.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3" + resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3" integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== dependencies: is-glob "^4.0.3" -globals@^11.1.0: - version "11.12.0" - resolved "https://registry.npmmirror.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" - integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== - globals@^14.0.0: version "14.0.0" - resolved "https://registry.npmmirror.com/globals/-/globals-14.0.0.tgz#898d7413c29babcf6bafe56fcadded858ada724e" + resolved "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz#898d7413c29babcf6bafe56fcadded858ada724e" integrity 
sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ== globalthis@^1.0.4: version "1.0.4" - resolved "https://registry.npmmirror.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" + resolved "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ== dependencies: define-properties "^1.2.1" @@ -2420,22 +2544,17 @@ globalthis@^1.0.4: gopd@^1.0.1, gopd@^1.2.0: version "1.2.0" - resolved "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + resolved "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== -graceful-fs@^4.2.4: - version "4.2.11" - resolved "https://registry.npmmirror.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" - integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== - graphemer@^1.4.0: version "1.4.0" - resolved "https://registry.npmmirror.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" + resolved "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag== gray-matter@^4.0.3: version "4.0.3" - resolved "https://registry.npmmirror.com/gray-matter/-/gray-matter-4.0.3.tgz#e893c064825de73ea1f5f7d88c7a9f7274288798" + resolved "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz#e893c064825de73ea1f5f7d88c7a9f7274288798" integrity sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q== dependencies: js-yaml "^3.13.1" @@ -2445,72 +2564,72 @@ gray-matter@^4.0.3: has-bigints@^1.0.2: version "1.1.0" - resolved "https://registry.npmmirror.com/has-bigints/-/has-bigints-1.1.0.tgz#28607e965ac967e03cd2a2c70a2636a1edad49fe" + resolved "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz#28607e965ac967e03cd2a2c70a2636a1edad49fe" integrity sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg== has-flag@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + resolved "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + resolved "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== dependencies: es-define-property "^1.0.0" has-proto@^1.2.0: version "1.2.0" - resolved "https://registry.npmmirror.com/has-proto/-/has-proto-1.2.0.tgz#5de5a6eabd95fdffd9818b43055e8065e39fe9d5" + resolved "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz#5de5a6eabd95fdffd9818b43055e8065e39fe9d5" integrity 
sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ== dependencies: dunder-proto "^1.0.0" has-symbols@^1.0.3, has-symbols@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + resolved "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== has-tostringtag@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + resolved "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== dependencies: has-symbols "^1.0.3" hasown@^2.0.2: version "2.0.2" - resolved "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + resolved "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== dependencies: function-bind "^1.1.2" hast-util-is-element@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz#6e31a6532c217e5b533848c7e52c9d9369ca0932" + resolved "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz#6e31a6532c217e5b533848c7e52c9d9369ca0932" integrity sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g== dependencies: "@types/hast" "^3.0.0" hast-util-parse-selector@^3.0.0: version "3.1.1" - resolved "https://registry.npmmirror.com/hast-util-parse-selector/-/hast-util-parse-selector-3.1.1.tgz#25ab00ae9e75cbc62cf7a901f68a247eade659e2" + resolved "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-3.1.1.tgz#25ab00ae9e75cbc62cf7a901f68a247eade659e2" integrity sha512-jdlwBjEexy1oGz0aJ2f4GKMaVKkA9jwjr4MjAAI22E5fM/TXVZHuS5OpONtdeIkRKqAaryQ2E9xNQxijoThSZA== dependencies: "@types/hast" "^2.0.0" hast-util-parse-selector@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz#352879fa86e25616036037dd8931fb5f34cb4a27" + resolved "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz#352879fa86e25616036037dd8931fb5f34cb4a27" integrity sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A== dependencies: "@types/hast" "^3.0.0" hast-util-to-estree@^3.0.0: - version "3.1.1" - resolved "https://registry.npmmirror.com/hast-util-to-estree/-/hast-util-to-estree-3.1.1.tgz#b7f0b247d9f62127bb5db34e3a86c93d17279071" - integrity sha512-IWtwwmPskfSmma9RpzCappDUitC8t5jhAynHhc1m2+5trOgsrp7txscUSavc5Ic8PATyAjfrCK1wgtxh2cICVQ== + version "3.1.3" + resolved "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz#e654c1c9374645135695cc0ab9f70b8fcaf733d7" + integrity sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w== dependencies: "@types/estree" "^1.0.0" "@types/estree-jsx" "^1.0.0" @@ -2523,16 +2642,16 @@ hast-util-to-estree@^3.0.0: mdast-util-mdx-expression "^2.0.0" mdast-util-mdx-jsx "^3.0.0" mdast-util-mdxjs-esm "^2.0.0" - 
property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" - style-to-object "^1.0.0" + style-to-js "^1.0.0" unist-util-position "^5.0.0" zwitch "^2.0.0" hast-util-to-jsx-runtime@^2.0.0: - version "2.3.2" - resolved "https://registry.npmmirror.com/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.2.tgz#6d11b027473e69adeaa00ca4cfb5bb68e3d282fa" - integrity sha512-1ngXYb+V9UT5h+PxNRa1O1FYguZK/XL+gkeqvp7EdHlB9oHUG0eYRo/vY5inBdcqo3RkPMC58/H94HvkbfGdyg== + version "2.3.6" + resolved "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz#ff31897aae59f62232e21594eac7ef6b63333e98" + integrity sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg== dependencies: "@types/estree" "^1.0.0" "@types/hast" "^3.0.0" @@ -2544,15 +2663,15 @@ hast-util-to-jsx-runtime@^2.0.0: mdast-util-mdx-expression "^2.0.0" mdast-util-mdx-jsx "^3.0.0" mdast-util-mdxjs-esm "^2.0.0" - property-information "^6.0.0" + property-information "^7.0.0" space-separated-tokens "^2.0.0" - style-to-object "^1.0.0" + style-to-js "^1.0.0" unist-util-position "^5.0.0" vfile-message "^4.0.0" hast-util-to-text@^4.0.0: version "4.0.2" - resolved "https://registry.npmmirror.com/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz#57b676931e71bf9cb852453678495b3080bfae3e" + resolved "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz#57b676931e71bf9cb852453678495b3080bfae3e" integrity sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A== dependencies: "@types/hast" "^3.0.0" @@ -2562,14 +2681,14 @@ hast-util-to-text@^4.0.0: hast-util-whitespace@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz#7778ed9d3c92dd9e8c5c8f648a49c21fc51cb621" + resolved "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz#7778ed9d3c92dd9e8c5c8f648a49c21fc51cb621" integrity sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw== dependencies: "@types/hast" "^3.0.0" hastscript@^7.0.2: version "7.2.0" - resolved "https://registry.npmmirror.com/hastscript/-/hastscript-7.2.0.tgz#0eafb7afb153d047077fa2a833dc9b7ec604d10b" + resolved "https://registry.npmjs.org/hastscript/-/hastscript-7.2.0.tgz#0eafb7afb153d047077fa2a833dc9b7ec604d10b" integrity sha512-TtYPq24IldU8iKoJQqvZOuhi5CyCQRAbvDOX0x1eW6rsHSxa/1i2CCiptNTotGHJ3VoHRGmqiv6/D3q113ikkw== dependencies: "@types/hast" "^2.0.0" @@ -2580,7 +2699,7 @@ hastscript@^7.0.2: hastscript@^9.0.1: version "9.0.1" - resolved "https://registry.npmmirror.com/hastscript/-/hastscript-9.0.1.tgz#dbc84bef6051d40084342c229c451cd9dc567dff" + resolved "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz#dbc84bef6051d40084342c229c451cd9dc567dff" integrity sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w== dependencies: "@types/hast" "^3.0.0" @@ -2591,25 +2710,25 @@ hastscript@^9.0.1: highlight.js@^11.11.1, highlight.js@~11.11.0: version "11.11.1" - resolved "https://registry.npmmirror.com/highlight.js/-/highlight.js-11.11.1.tgz#fca06fa0e5aeecf6c4d437239135fabc15213585" + resolved "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz#fca06fa0e5aeecf6c4d437239135fabc15213585" integrity sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w== hoist-non-react-statics@^3.3.0, hoist-non-react-statics@^3.3.1: version "3.3.2" - resolved 
"https://registry.npmmirror.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + resolved "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== dependencies: react-is "^16.7.0" -htmlparser2@^9.1.0: - version "9.1.0" - resolved "https://registry.npmmirror.com/htmlparser2/-/htmlparser2-9.1.0.tgz#cdb498d8a75a51f739b61d3f718136c369bc8c23" - integrity sha512-5zfg6mHUoaer/97TxnGpxmbR7zJtPwIYFMZ/H5ucTlPZhKvtum05yiPK3Mgai3a0DyVxv7qYqoweaEd2nrYQzQ== +htmlparser2@^10.0.0: + version "10.0.0" + resolved "https://registry.npmjs.org/htmlparser2/-/htmlparser2-10.0.0.tgz#77ad249037b66bf8cc99c6e286ef73b83aeb621d" + integrity sha512-TwAZM+zE5Tq3lrEHvOlvwgj1XLWQCtaaibSN11Q+gGBAS7Y1uZSWwXXRe4iF6OXnaq1riyQAPFOBtYc77Mxq0g== dependencies: domelementtype "^2.3.0" domhandler "^5.0.3" - domutils "^3.1.0" - entities "^4.5.0" + domutils "^3.2.1" + entities "^6.0.0" http-proxy-agent@^7.0.0, http-proxy-agent@^7.0.1: version "7.0.2" @@ -2629,19 +2748,24 @@ https-proxy-agent@^7.0.6: iconv-lite@0.6.3, iconv-lite@^0.6.3: version "0.6.3" - resolved "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + resolved "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== dependencies: safer-buffer ">= 2.1.2 < 3.0.0" -ignore@^5.2.0, ignore@^5.3.1: +ignore@^5.2.0: version "5.3.2" - resolved "https://registry.npmmirror.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" + resolved "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== +ignore@^7.0.0: + version "7.0.5" + resolved "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz#4cb5f6cd7d4c7ab0365738c7aea888baa6d7efd9" + integrity sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== + import-fresh@^3.2.1, import-fresh@^3.3.0: version "3.3.1" - resolved "https://registry.npmmirror.com/import-fresh/-/import-fresh-3.3.1.tgz#9cecb56503c0ada1f2741dbbd6546e4b13b57ccf" + resolved "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz#9cecb56503c0ada1f2741dbbd6546e4b13b57ccf" integrity sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== dependencies: parent-module "^1.0.0" @@ -2649,17 +2773,17 @@ import-fresh@^3.2.1, import-fresh@^3.3.0: imurmurhash@^0.1.4: version "0.1.4" - resolved "https://registry.npmmirror.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + resolved "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== inline-style-parser@0.2.4: version "0.2.4" - resolved "https://registry.npmmirror.com/inline-style-parser/-/inline-style-parser-0.2.4.tgz#f4af5fe72e612839fcd453d989a586566d695f22" + resolved "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz#f4af5fe72e612839fcd453d989a586566d695f22" integrity 
sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q== internal-slot@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/internal-slot/-/internal-slot-1.1.0.tgz#1eac91762947d2f7056bc838d93e13b2e9604961" + resolved "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz#1eac91762947d2f7056bc838d93e13b2e9604961" integrity sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw== dependencies: es-errors "^1.3.0" @@ -2668,7 +2792,7 @@ internal-slot@^1.1.0: international-types@^0.8.1: version "0.8.1" - resolved "https://registry.npmmirror.com/international-types/-/international-types-0.8.1.tgz#c0e593d9911c1a23f64bbd6eb1abb2941fe2353f" + resolved "https://registry.npmjs.org/international-types/-/international-types-0.8.1.tgz#c0e593d9911c1a23f64bbd6eb1abb2941fe2353f" integrity sha512-tajBCAHo4I0LIFlmQ9ZWfjMWVyRffzuvfbXCd6ssFt5u1Zw15DN0UBpVTItXdNa1ls+cpQt3Yw8+TxsfGF8JcA== ip-address@^9.0.5: @@ -2681,12 +2805,12 @@ ip-address@^9.0.5: is-alphabetical@^2.0.0: version "2.0.1" - resolved "https://registry.npmmirror.com/is-alphabetical/-/is-alphabetical-2.0.1.tgz#01072053ea7c1036df3c7d19a6daaec7f19e789b" + resolved "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz#01072053ea7c1036df3c7d19a6daaec7f19e789b" integrity sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ== is-alphanumerical@^2.0.0: version "2.0.1" - resolved "https://registry.npmmirror.com/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz#7c03fbe96e3e931113e57f964b0a368cc2dfd875" + resolved "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz#7c03fbe96e3e931113e57f964b0a368cc2dfd875" integrity sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw== dependencies: is-alphabetical "^2.0.0" @@ -2694,7 +2818,7 @@ is-alphanumerical@^2.0.0: is-array-buffer@^3.0.4, is-array-buffer@^3.0.5: version "3.0.5" - resolved "https://registry.npmmirror.com/is-array-buffer/-/is-array-buffer-3.0.5.tgz#65742e1e687bd2cc666253068fd8707fe4d44280" + resolved "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz#65742e1e687bd2cc666253068fd8707fe4d44280" integrity sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A== dependencies: call-bind "^1.0.8" @@ -2703,17 +2827,17 @@ is-array-buffer@^3.0.4, is-array-buffer@^3.0.5: is-arrayish@^0.2.1: version "0.2.1" - resolved "https://registry.npmmirror.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + resolved "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== is-arrayish@^0.3.1: version "0.3.2" - resolved "https://registry.npmmirror.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" + resolved "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== is-async-function@^2.0.0: version "2.1.1" - resolved "https://registry.npmmirror.com/is-async-function/-/is-async-function-2.1.1.tgz#3e69018c8e04e73b738793d020bfe884b9fd3523" + resolved "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz#3e69018c8e04e73b738793d020bfe884b9fd3523" integrity 
sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==
   dependencies:
     async-function "^1.0.0"

@@ -2724,41 +2848,41 @@ is-async-function@^2.0.0:
 is-bigint@^1.1.0:
   version "1.1.0"
-  resolved "https://registry.npmmirror.com/is-bigint/-/is-bigint-1.1.0.tgz#dda7a3445df57a42583db4228682eba7c4170672"
+  resolved "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz#dda7a3445df57a42583db4228682eba7c4170672"
   integrity sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==
   dependencies:
     has-bigints "^1.0.2"

 is-boolean-object@^1.2.1:
   version "1.2.2"
-  resolved "https://registry.npmmirror.com/is-boolean-object/-/is-boolean-object-1.2.2.tgz#7067f47709809a393c71ff5bb3e135d8a9215d9e"
+  resolved "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz#7067f47709809a393c71ff5bb3e135d8a9215d9e"
   integrity sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==
   dependencies:
     call-bound "^1.0.3"
     has-tostringtag "^1.0.2"

-is-bun-module@^1.0.2:
-  version "1.3.0"
-  resolved "https://registry.npmmirror.com/is-bun-module/-/is-bun-module-1.3.0.tgz#ea4d24fdebfcecc98e81bcbcb506827fee288760"
-  integrity sha512-DgXeu5UWI0IsMQundYb5UAOzm6G2eVnarJ0byP6Tm55iZNKceD59LNPA2L4VvsScTtHcw0yEkVwSf7PC+QoLSA==
+is-bun-module@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.npmjs.org/is-bun-module/-/is-bun-module-2.0.0.tgz#4d7859a87c0fcac950c95e666730e745eae8bddd"
+  integrity sha512-gNCGbnnnnFAUGKeZ9PdbyeGYJqewpmc2aKHUEMO5nQPWU9lOmv7jcmQIv+qHD8fXW6W7qfuCwX4rY9LNRjXrkQ==
   dependencies:
-    semver "^7.6.3"
+    semver "^7.7.1"

 is-callable@^1.2.7:
   version "1.2.7"
-  resolved "https://registry.npmmirror.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055"
+  resolved "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055"
   integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==

-is-core-module@^2.13.0, is-core-module@^2.15.1, is-core-module@^2.16.0:
+is-core-module@^2.13.0, is-core-module@^2.16.0, is-core-module@^2.16.1:
   version "2.16.1"
-  resolved "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4"
+  resolved "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4"
   integrity sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==
   dependencies:
     hasown "^2.0.2"

 is-data-view@^1.0.1, is-data-view@^1.0.2:
   version "1.0.2"
-  resolved "https://registry.npmmirror.com/is-data-view/-/is-data-view-1.0.2.tgz#bae0a41b9688986c2188dda6657e56b8f9e63b8e"
+  resolved "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz#bae0a41b9688986c2188dda6657e56b8f9e63b8e"
   integrity sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==
   dependencies:
     call-bound "^1.0.2"
@@ -2767,7 +2891,7 @@ is-data-view@^1.0.1, is-data-view@^1.0.2:

 is-date-object@^1.0.5, is-date-object@^1.1.0:
   version "1.1.0"
-  resolved "https://registry.npmmirror.com/is-date-object/-/is-date-object-1.1.0.tgz#ad85541996fc7aa8b2729701d27b7319f95d82f7"
+  resolved "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz#ad85541996fc7aa8b2729701d27b7319f95d82f7"
   integrity sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==
   dependencies:
     call-bound "^1.0.2"
@@ -2775,22 +2899,22 @@ is-date-object@^1.0.5, is-date-object@^1.1.0:
 is-decimal@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/is-decimal/-/is-decimal-2.0.1.tgz#9469d2dc190d0214fd87d78b78caecc0cc14eef7"
+  resolved "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz#9469d2dc190d0214fd87d78b78caecc0cc14eef7"
   integrity sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==

 is-extendable@^0.1.0:
   version "0.1.1"
-  resolved "https://registry.npmmirror.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+  resolved "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
   integrity sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==

 is-extglob@^2.1.1:
   version "2.1.1"
-  resolved "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
+  resolved "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
   integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==

 is-finalizationregistry@^1.1.0:
   version "1.1.1"
-  resolved "https://registry.npmmirror.com/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz#eefdcdc6c94ddd0674d9c85887bf93f944a97c90"
+  resolved "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz#eefdcdc6c94ddd0674d9c85887bf93f944a97c90"
   integrity sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==
   dependencies:
     call-bound "^1.0.3"
@@ -2802,7 +2926,7 @@ is-fullwidth-code-point@^3.0.0:

 is-generator-function@^1.0.10:
   version "1.1.0"
-  resolved "https://registry.npmmirror.com/is-generator-function/-/is-generator-function-1.1.0.tgz#bf3eeda931201394f57b5dba2800f91a238309ca"
+  resolved "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz#bf3eeda931201394f57b5dba2800f91a238309ca"
   integrity sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==
   dependencies:
     call-bound "^1.0.3"
@@ -2812,24 +2936,29 @@ is-generator-function@^1.0.10:

 is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3:
   version "4.0.3"
-  resolved "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
+  resolved "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
   integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
   dependencies:
     is-extglob "^2.1.1"

 is-hexadecimal@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz#86b5bf668fca307498d319dfc03289d781a90027"
+  resolved "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz#86b5bf668fca307498d319dfc03289d781a90027"
   integrity sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==

 is-map@^2.0.3:
   version "2.0.3"
-  resolved "https://registry.npmmirror.com/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e"
+  resolved "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e"
   integrity sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==

+is-negative-zero@^2.0.3:
+  version "2.0.3"
+  resolved "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747"
+  integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==
+
 is-number-object@^1.1.1:
   version "1.1.1"
-  resolved "https://registry.npmmirror.com/is-number-object/-/is-number-object-1.1.1.tgz#144b21e95a1bc148205dcc2814a9134ec41b2541"
+  resolved "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz#144b21e95a1bc148205dcc2814a9134ec41b2541"
   integrity sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==
   dependencies:
     call-bound "^1.0.3"
@@ -2837,17 +2966,17 @@ is-number-object@^1.1.1:

 is-number@^7.0.0:
   version "7.0.0"
-  resolved "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
+  resolved "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
   integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==

 is-plain-obj@^4.0.0:
   version "4.1.0"
-  resolved "https://registry.npmmirror.com/is-plain-obj/-/is-plain-obj-4.1.0.tgz#d65025edec3657ce032fd7db63c97883eaed71f0"
+  resolved "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz#d65025edec3657ce032fd7db63c97883eaed71f0"
   integrity sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==

 is-regex@^1.2.1:
   version "1.2.1"
-  resolved "https://registry.npmmirror.com/is-regex/-/is-regex-1.2.1.tgz#76d70a3ed10ef9be48eb577887d74205bf0cad22"
+  resolved "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz#76d70a3ed10ef9be48eb577887d74205bf0cad22"
   integrity sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==
   dependencies:
     call-bound "^1.0.2"
@@ -2857,19 +2986,19 @@ is-regex@^1.2.1:

 is-set@^2.0.3:
   version "2.0.3"
-  resolved "https://registry.npmmirror.com/is-set/-/is-set-2.0.3.tgz#8ab209ea424608141372ded6e0cb200ef1d9d01d"
+  resolved "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz#8ab209ea424608141372ded6e0cb200ef1d9d01d"
   integrity sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==

 is-shared-array-buffer@^1.0.4:
   version "1.0.4"
-  resolved "https://registry.npmmirror.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz#9b67844bd9b7f246ba0708c3a93e34269c774f6f"
+  resolved "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz#9b67844bd9b7f246ba0708c3a93e34269c774f6f"
   integrity sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==
   dependencies:
     call-bound "^1.0.3"

-is-string@^1.0.7, is-string@^1.1.1:
+is-string@^1.1.1:
   version "1.1.1"
-  resolved "https://registry.npmmirror.com/is-string/-/is-string-1.1.1.tgz#92ea3f3d5c5b6e039ca8677e5ac8d07ea773cbb9"
+  resolved "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz#92ea3f3d5c5b6e039ca8677e5ac8d07ea773cbb9"
   integrity sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==
   dependencies:
     call-bound "^1.0.3"
@@ -2877,7 +3006,7 @@ is-string@^1.0.7, is-string@^1.1.1:

 is-symbol@^1.0.4, is-symbol@^1.1.1:
   version "1.1.1"
-  resolved "https://registry.npmmirror.com/is-symbol/-/is-symbol-1.1.1.tgz#f47761279f532e2b05a7024a7506dbbedacd0634"
+  resolved "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz#f47761279f532e2b05a7024a7506dbbedacd0634"
   integrity sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==
   dependencies:
     call-bound "^1.0.2"
@@ -2886,26 +3015,26 @@ is-symbol@^1.0.4, is-symbol@^1.1.1:

 is-typed-array@^1.1.13, is-typed-array@^1.1.14, is-typed-array@^1.1.15:
   version "1.1.15"
-  resolved "https://registry.npmmirror.com/is-typed-array/-/is-typed-array-1.1.15.tgz#4bfb4a45b61cee83a5a46fba778e4e8d59c0ce0b"
+  resolved "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz#4bfb4a45b61cee83a5a46fba778e4e8d59c0ce0b"
   integrity sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==
   dependencies:
     which-typed-array "^1.1.16"

 is-weakmap@^2.0.2:
   version "2.0.2"
-  resolved "https://registry.npmmirror.com/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd"
+  resolved "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd"
   integrity sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==

-is-weakref@^1.0.2, is-weakref@^1.1.0:
+is-weakref@^1.0.2, is-weakref@^1.1.1:
   version "1.1.1"
-  resolved "https://registry.npmmirror.com/is-weakref/-/is-weakref-1.1.1.tgz#eea430182be8d64174bd96bffbc46f21bf3f9293"
+  resolved "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz#eea430182be8d64174bd96bffbc46f21bf3f9293"
   integrity sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==
   dependencies:
     call-bound "^1.0.3"

 is-weakset@^2.0.3:
   version "2.0.4"
-  resolved "https://registry.npmmirror.com/is-weakset/-/is-weakset-2.0.4.tgz#c9f5deb0bc1906c6d6f1027f284ddf459249daca"
+  resolved "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz#c9f5deb0bc1906c6d6f1027f284ddf459249daca"
   integrity sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==
   dependencies:
     call-bound "^1.0.3"
@@ -2913,17 +3042,17 @@ is-weakset@^2.0.3:

 isarray@^2.0.5:
   version "2.0.5"
-  resolved "https://registry.npmmirror.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723"
+  resolved "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723"
   integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==

 isexe@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+  resolved "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
   integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==

 iterator.prototype@^1.1.4:
   version "1.1.5"
-  resolved "https://registry.npmmirror.com/iterator.prototype/-/iterator.prototype-1.1.5.tgz#12c959a29de32de0aa3bbbb801f4d777066dae39"
+  resolved "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.5.tgz#12c959a29de32de0aa3bbbb801f4d777066dae39"
   integrity sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==
   dependencies:
     define-data-property "^1.1.4"
@@ -2935,12 +3064,12 @@ iterator.prototype@^1.1.4:

 "js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0:
   version "4.0.0"
-  resolved "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
+  resolved "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
   integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==

 js-yaml@^3.13.1:
   version "3.14.1"
-  resolved "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537"
+  resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537"
   integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
   dependencies:
     argparse "^1.0.7"
@@ -2948,7 +3077,7 @@ js-yaml@^3.13.1:

 js-yaml@^4.0.0, js-yaml@^4.1.0:
   version "4.1.0"
-  resolved "https://registry.npmmirror.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602"
+  resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602"
   integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==
   dependencies:
     argparse "^2.0.1"
@@ -2960,46 +3089,46 @@ jsbn@1.1.0:

 jsesc@^3.0.2:
   version "3.1.0"
-  resolved "https://registry.npmmirror.com/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d"
+  resolved "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d"
   integrity sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==

 json-buffer@3.0.1:
   version "3.0.1"
-  resolved "https://registry.npmmirror.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
+  resolved "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
   integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==

 json-parse-even-better-errors@^2.3.0:
   version "2.3.1"
-  resolved "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d"
+  resolved "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d"
   integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==

 json-schema-traverse@^0.4.1:
   version "0.4.1"
-  resolved "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660"
+  resolved "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660"
   integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==

 json-stable-stringify-without-jsonify@^1.0.1:
   version "1.0.1"
-  resolved "https://registry.npmmirror.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651"
+  resolved "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651"
   integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==

 json2mq@^0.2.0:
   version "0.2.0"
-  resolved "https://registry.npmmirror.com/json2mq/-/json2mq-0.2.0.tgz#b637bd3ba9eabe122c83e9720483aeb10d2c904a"
+  resolved "https://registry.npmjs.org/json2mq/-/json2mq-0.2.0.tgz#b637bd3ba9eabe122c83e9720483aeb10d2c904a"
   integrity sha512-SzoRg7ux5DWTII9J2qkrZrqV1gt+rTaoufMxEzXbS26Uid0NwaJd123HcoB80TgubEppxxIGdNxCx50fEoEWQA==
   dependencies:
     string-convert "^0.2.0"

 json5@^1.0.2:
   version "1.0.2"
-  resolved "https://registry.npmmirror.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593"
+  resolved "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593"
   integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==
   dependencies:
     minimist "^1.2.0"

 "jsx-ast-utils@^2.4.1 || ^3.0.0", jsx-ast-utils@^3.3.5:
   version "3.3.5"
-  resolved "https://registry.npmmirror.com/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz#4766bd05a8e2a11af222becd19e15575e52a853a"
+  resolved "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz#4766bd05a8e2a11af222becd19e15575e52a853a"
   integrity sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==
   dependencies:
     array-includes "^3.1.6"
@@ -3009,31 +3138,31 @@ json5@^1.0.2:

 keyv@^4.5.4:
   version "4.5.4"
-  resolved "https://registry.npmmirror.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93"
+  resolved "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93"
   integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==
   dependencies:
     json-buffer "3.0.1"

 kind-of@^6.0.0, kind-of@^6.0.2:
   version "6.0.3"
-  resolved "https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd"
+  resolved "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd"
   integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==

 language-subtag-registry@^0.3.20:
   version "0.3.23"
-  resolved "https://registry.npmmirror.com/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz#23529e04d9e3b74679d70142df3fd2eb6ec572e7"
+  resolved "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz#23529e04d9e3b74679d70142df3fd2eb6ec572e7"
   integrity sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==

 language-tags@^1.0.9:
   version "1.0.9"
-  resolved "https://registry.npmmirror.com/language-tags/-/language-tags-1.0.9.tgz#1ffdcd0ec0fafb4b1be7f8b11f306ad0f9c08777"
+  resolved "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz#1ffdcd0ec0fafb4b1be7f8b11f306ad0f9c08777"
   integrity sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==
   dependencies:
     language-subtag-registry "^0.3.20"

 levn@^0.4.1:
   version "0.4.1"
-  resolved "https://registry.npmmirror.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade"
+  resolved "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade"
   integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==
   dependencies:
     prelude-ls "^1.2.1"
@@ -3041,51 +3170,51 @@ levn@^0.4.1:

 lines-and-columns@^1.1.6:
   version "1.2.4"
-  resolved "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632"
+  resolved "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632"
   integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==

 locate-path@^6.0.0:
   version "6.0.0"
-  resolved "https://registry.npmmirror.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286"
+  resolved "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286"
   integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==
   dependencies:
     p-locate "^5.0.0"

 lodash.debounce@^4.0.8:
   version "4.0.8"
-  resolved "https://registry.npmmirror.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af"
+  resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af"
   integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==

 lodash.merge@^4.6.2:
   version "4.6.2"
-  resolved "https://registry.npmmirror.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a"
+  resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a"
   integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==

 lodash.throttle@^4.1.1:
   version "4.1.1"
-  resolved "https://registry.npmmirror.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4"
+  resolved "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4"
   integrity sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==

 lodash@^4.17.21:
   version "4.17.21"
-  resolved "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
+  resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
   integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==

 longest-streak@^3.0.0:
   version "3.1.0"
-  resolved "https://registry.npmmirror.com/longest-streak/-/longest-streak-3.1.0.tgz#62fa67cd958742a1574af9f39866364102d90cd4"
+  resolved "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz#62fa67cd958742a1574af9f39866364102d90cd4"
   integrity sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==

 loose-envify@^1.4.0:
   version "1.4.0"
-  resolved "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf"
+  resolved "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf"
   integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==
   dependencies:
     js-tokens "^3.0.0 || ^4.0.0"

 lowlight@^3.0.0:
   version "3.3.0"
-  resolved "https://registry.npmmirror.com/lowlight/-/lowlight-3.3.0.tgz#007b8a5bfcfd27cc65b96246d2de3e9dd4e23c6c"
+  resolved "https://registry.npmjs.org/lowlight/-/lowlight-3.3.0.tgz#007b8a5bfcfd27cc65b96246d2de3e9dd4e23c6c"
   integrity sha512-0JNhgFoPvP6U6lE/UdVsSq99tn6DhjjpAj5MxG49ewd2mOBVtwWYIT8ClyABhq198aXXODMU6Ox8DrGy/CpTZQ==
   dependencies:
     "@types/hast" "^3.0.0"
@@ -3099,22 +3228,22 @@ lru-cache@^7.14.1:

 markdown-extensions@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/markdown-extensions/-/markdown-extensions-2.0.0.tgz#34bebc83e9938cae16e0e017e4a9814a8330d3c4"
+  resolved "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz#34bebc83e9938cae16e0e017e4a9814a8330d3c4"
   integrity sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==

 markdown-table@^3.0.0:
   version "3.0.4"
-  resolved "https://registry.npmmirror.com/markdown-table/-/markdown-table-3.0.4.tgz#fe44d6d410ff9d6f2ea1797a3f60aa4d2b631c2a"
+  resolved "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz#fe44d6d410ff9d6f2ea1797a3f60aa4d2b631c2a"
   integrity sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==

 math-intrinsics@^1.1.0:
   version "1.1.0"
-  resolved "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
+  resolved "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
   integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==

 mdast-util-directive@^3.0.0:
   version "3.1.0"
-  resolved "https://registry.npmmirror.com/mdast-util-directive/-/mdast-util-directive-3.1.0.tgz#f3656f4aab6ae3767d3c72cfab5e8055572ccba1"
+  resolved "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.1.0.tgz#f3656f4aab6ae3767d3c72cfab5e8055572ccba1"
   integrity sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3129,7 +3258,7 @@ mdast-util-directive@^3.0.0:

 mdast-util-find-and-replace@^3.0.0:
   version "3.0.2"
-  resolved "https://registry.npmmirror.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz#70a3174c894e14df722abf43bc250cbae44b11df"
+  resolved "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz#70a3174c894e14df722abf43bc250cbae44b11df"
   integrity sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3139,7 +3268,7 @@ mdast-util-find-and-replace@^3.0.0:

 mdast-util-from-markdown@^2.0.0:
   version "2.0.2"
-  resolved "https://registry.npmmirror.com/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz#4850390ca7cf17413a9b9a0fbefcd1bc0eb4160a"
+  resolved "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz#4850390ca7cf17413a9b9a0fbefcd1bc0eb4160a"
   integrity sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3157,7 +3286,7 @@ mdast-util-from-markdown@^2.0.0:

 mdast-util-frontmatter@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz#f5f929eb1eb36c8a7737475c7eb438261f964ee8"
+  resolved "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz#f5f929eb1eb36c8a7737475c7eb438261f964ee8"
   integrity sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3169,7 +3298,7 @@ mdast-util-frontmatter@^2.0.0:

 mdast-util-gfm-autolink-literal@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz#abd557630337bd30a6d5a4bd8252e1c2dc0875d5"
+  resolved "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz#abd557630337bd30a6d5a4bd8252e1c2dc0875d5"
   integrity sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3180,7 +3309,7 @@ mdast-util-gfm-autolink-literal@^2.0.0:

 mdast-util-gfm-footnote@^2.0.0:
   version "2.1.0"
-  resolved "https://registry.npmmirror.com/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz#7778e9d9ca3df7238cc2bd3fa2b1bf6a65b19403"
+  resolved "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz#7778e9d9ca3df7238cc2bd3fa2b1bf6a65b19403"
   integrity sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3191,7 +3320,7 @@ mdast-util-gfm-footnote@^2.0.0:

 mdast-util-gfm-strikethrough@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz#d44ef9e8ed283ac8c1165ab0d0dfd058c2764c16"
+  resolved "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz#d44ef9e8ed283ac8c1165ab0d0dfd058c2764c16"
   integrity sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3200,7 +3329,7 @@ mdast-util-gfm-strikethrough@^2.0.0:

 mdast-util-gfm-table@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz#7a435fb6223a72b0862b33afbd712b6dae878d38"
+  resolved "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz#7a435fb6223a72b0862b33afbd712b6dae878d38"
   integrity sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3211,7 +3340,7 @@ mdast-util-gfm-table@^2.0.0:

 mdast-util-gfm-task-list-item@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz#e68095d2f8a4303ef24094ab642e1047b991a936"
+  resolved "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz#e68095d2f8a4303ef24094ab642e1047b991a936"
   integrity sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3221,7 +3350,7 @@ mdast-util-gfm-task-list-item@^2.0.0:

 mdast-util-gfm@^3.0.0:
   version "3.1.0"
-  resolved "https://registry.npmmirror.com/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz#2cdf63b92c2a331406b0fb0db4c077c1b0331751"
+  resolved "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz#2cdf63b92c2a331406b0fb0db4c077c1b0331751"
   integrity sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==
   dependencies:
     mdast-util-from-markdown "^2.0.0"
@@ -3234,7 +3363,7 @@ mdast-util-gfm@^3.0.0:

 mdast-util-mdx-expression@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz#43f0abac9adc756e2086f63822a38c8d3c3a5096"
+  resolved "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz#43f0abac9adc756e2086f63822a38c8d3c3a5096"
   integrity sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==
   dependencies:
     "@types/estree-jsx" "^1.0.0"
@@ -3246,7 +3375,7 @@ mdast-util-mdx-expression@^2.0.0:

 mdast-util-mdx-jsx@^3.0.0:
   version "3.2.0"
-  resolved "https://registry.npmmirror.com/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz#fd04c67a2a7499efb905a8a5c578dddc9fdada0d"
+  resolved "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz#fd04c67a2a7499efb905a8a5c578dddc9fdada0d"
   integrity sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==
   dependencies:
     "@types/estree-jsx" "^1.0.0"
@@ -3264,7 +3393,7 @@ mdast-util-mdx-jsx@^3.0.0:

 mdast-util-mdx@^3.0.0:
   version "3.0.0"
-  resolved "https://registry.npmmirror.com/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz#792f9cf0361b46bee1fdf1ef36beac424a099c41"
+  resolved "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz#792f9cf0361b46bee1fdf1ef36beac424a099c41"
   integrity sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==
   dependencies:
     mdast-util-from-markdown "^2.0.0"
@@ -3275,7 +3404,7 @@ mdast-util-mdx@^3.0.0:

 mdast-util-mdxjs-esm@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz#019cfbe757ad62dd557db35a695e7314bcc9fa97"
+  resolved "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz#019cfbe757ad62dd557db35a695e7314bcc9fa97"
   integrity sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==
   dependencies:
     "@types/estree-jsx" "^1.0.0"
@@ -3287,7 +3416,7 @@ mdast-util-mdxjs-esm@^2.0.0:

 mdast-util-phrasing@^4.0.0:
   version "4.1.0"
-  resolved "https://registry.npmmirror.com/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz#7cc0a8dec30eaf04b7b1a9661a92adb3382aa6e3"
+  resolved "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz#7cc0a8dec30eaf04b7b1a9661a92adb3382aa6e3"
   integrity sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3295,7 +3424,7 @@ mdast-util-phrasing@^4.0.0:

 mdast-util-to-hast@^13.0.0:
   version "13.2.0"
-  resolved "https://registry.npmmirror.com/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz#5ca58e5b921cc0a3ded1bc02eed79a4fe4fe41f4"
+  resolved "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz#5ca58e5b921cc0a3ded1bc02eed79a4fe4fe41f4"
   integrity sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==
   dependencies:
     "@types/hast" "^3.0.0"
@@ -3310,7 +3439,7 @@ mdast-util-to-hast@^13.0.0:

 mdast-util-to-markdown@^2.0.0:
   version "2.1.2"
-  resolved "https://registry.npmmirror.com/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz#f910ffe60897f04bb4b7e7ee434486f76288361b"
+  resolved "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz#f910ffe60897f04bb4b7e7ee434486f76288361b"
   integrity sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==
   dependencies:
     "@types/mdast" "^4.0.0"
@@ -3325,20 +3454,20 @@ mdast-util-to-markdown@^2.0.0:

 mdast-util-to-string@^4.0.0:
   version "4.0.0"
-  resolved "https://registry.npmmirror.com/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz#7a5121475556a04e7eddeb67b264aae79d312814"
+  resolved "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz#7a5121475556a04e7eddeb67b264aae79d312814"
   integrity sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==
   dependencies:
     "@types/mdast" "^4.0.0"

 merge2@^1.3.0:
   version "1.4.1"
-  resolved "https://registry.npmmirror.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
+  resolved "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
   integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==

 micromark-core-commonmark@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.npmmirror.com/micromark-core-commonmark/-/micromark-core-commonmark-2.0.2.tgz#6a45bbb139e126b3f8b361a10711ccc7c6e15e93"
-  integrity sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w==
+  version "2.0.3"
+  resolved "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz#c691630e485021a68cf28dbc2b2ca27ebf678cd4"
+  integrity sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==
   dependencies:
     decode-named-character-reference "^1.0.0"
     devlop "^1.0.0"
@@ -3359,7 +3488,7 @@ micromark-core-commonmark@^2.0.0:

 micromark-extension-directive@^3.0.0:
   version "3.0.2"
-  resolved "https://registry.npmmirror.com/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz#2eb61985d1995a7c1ff7621676a4f32af29409e8"
+  resolved "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz#2eb61985d1995a7c1ff7621676a4f32af29409e8"
   integrity sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==
   dependencies:
     devlop "^1.0.0"
@@ -3372,7 +3501,7 @@ micromark-extension-directive@^3.0.0:

 micromark-extension-frontmatter@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz#651c52ffa5d7a8eeed687c513cd869885882d67a"
+  resolved "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz#651c52ffa5d7a8eeed687c513cd869885882d67a"
   integrity sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==
   dependencies:
     fault "^2.0.0"
@@ -3382,7 +3511,7 @@ micromark-extension-frontmatter@^2.0.0:

 micromark-extension-gfm-autolink-literal@^2.0.0:
   version "2.1.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz#6286aee9686c4462c1e3552a9d505feddceeb935"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz#6286aee9686c4462c1e3552a9d505feddceeb935"
   integrity sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==
   dependencies:
     micromark-util-character "^2.0.0"
@@ -3392,7 +3521,7 @@ micromark-extension-gfm-autolink-literal@^2.0.0:

 micromark-extension-gfm-footnote@^2.0.0:
   version "2.1.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz#4dab56d4e398b9853f6fe4efac4fc9361f3e0750"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz#4dab56d4e398b9853f6fe4efac4fc9361f3e0750"
   integrity sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==
   dependencies:
     devlop "^1.0.0"
@@ -3406,7 +3535,7 @@ micromark-extension-gfm-footnote@^2.0.0:

 micromark-extension-gfm-strikethrough@^2.0.0:
   version "2.1.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz#86106df8b3a692b5f6a92280d3879be6be46d923"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz#86106df8b3a692b5f6a92280d3879be6be46d923"
   integrity sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==
   dependencies:
     devlop "^1.0.0"
@@ -3418,7 +3547,7 @@ micromark-extension-gfm-strikethrough@^2.0.0:

 micromark-extension-gfm-table@^2.0.0:
   version "2.1.1"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz#fac70bcbf51fe65f5f44033118d39be8a9b5940b"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz#fac70bcbf51fe65f5f44033118d39be8a9b5940b"
   integrity sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==
   dependencies:
     devlop "^1.0.0"
@@ -3429,14 +3558,14 @@ micromark-extension-gfm-table@^2.0.0:

 micromark-extension-gfm-tagfilter@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz#f26d8a7807b5985fba13cf61465b58ca5ff7dc57"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz#f26d8a7807b5985fba13cf61465b58ca5ff7dc57"
   integrity sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==
   dependencies:
     micromark-util-types "^2.0.0"

 micromark-extension-gfm-task-list-item@^2.0.0:
   version "2.1.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz#bcc34d805639829990ec175c3eea12bb5b781f2c"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz#bcc34d805639829990ec175c3eea12bb5b781f2c"
   integrity sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==
   dependencies:
     devlop "^1.0.0"
@@ -3447,7 +3576,7 @@ micromark-extension-gfm-task-list-item@^2.0.0:

 micromark-extension-gfm@^3.0.0:
   version "3.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz#3e13376ab95dd7a5cfd0e29560dfe999657b3c5b"
+  resolved "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz#3e13376ab95dd7a5cfd0e29560dfe999657b3c5b"
   integrity sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==
   dependencies:
     micromark-extension-gfm-autolink-literal "^2.0.0"
@@ -3460,9 +3589,9 @@ micromark-extension-gfm@^3.0.0:
     micromark-util-types "^2.0.0"

 micromark-extension-mdx-expression@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz#1407b9ce69916cf5e03a196ad9586889df25302a"
-  integrity sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==
+  version "3.0.1"
+  resolved "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz#43d058d999532fb3041195a3c3c05c46fa84543b"
+  integrity sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==
   dependencies:
     "@types/estree" "^1.0.0"
     devlop "^1.0.0"
@@ -3474,11 +3603,10 @@ micromark-extension-mdx-expression@^3.0.0:
     micromark-util-types "^2.0.0"

 micromark-extension-mdx-jsx@^3.0.0:
-  version "3.0.1"
-  resolved "https://registry.npmmirror.com/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz#5abb83da5ddc8e473a374453e6ea56fbd66b59ad"
-  integrity sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==
+  version "3.0.2"
+  resolved "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz#ffc98bdb649798902fa9fc5689f67f9c1c902044"
+  integrity sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==
   dependencies:
-    "@types/acorn" "^4.0.0"
     "@types/estree" "^1.0.0"
     devlop "^1.0.0"
     estree-util-is-identifier-name "^3.0.0"
@@ -3492,14 +3620,14 @@ micromark-extension-mdx-jsx@^3.0.0:

 micromark-extension-mdx-md@^2.0.0:
   version "2.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz#1d252881ea35d74698423ab44917e1f5b197b92d"
+  resolved "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz#1d252881ea35d74698423ab44917e1f5b197b92d"
   integrity sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==
   dependencies:
     micromark-util-types "^2.0.0"

 micromark-extension-mdxjs-esm@^3.0.0:
   version "3.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz#de21b2b045fd2059bd00d36746081de38390d54a"
+  resolved "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz#de21b2b045fd2059bd00d36746081de38390d54a"
   integrity sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==
   dependencies:
     "@types/estree" "^1.0.0"
@@ -3514,7 +3642,7 @@ micromark-extension-mdxjs-esm@^3.0.0:

 micromark-extension-mdxjs@^3.0.0:
   version "3.0.0"
-  resolved "https://registry.npmmirror.com/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz#b5a2e0ed449288f3f6f6c544358159557549de18"
+  resolved "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz#b5a2e0ed449288f3f6f6c544358159557549de18"
   integrity sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==
   dependencies:
     acorn "^8.0.0"
@@ -3528,7 +3656,7 @@ micromark-extension-mdxjs@^3.0.0:

 micromark-factory-destination@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz#8fef8e0f7081f0474fbdd92deb50c990a0264639"
+  resolved "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz#8fef8e0f7081f0474fbdd92deb50c990a0264639"
   integrity sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==
   dependencies:
     micromark-util-character "^2.0.0"
@@ -3537,7 +3665,7 @@ micromark-factory-destination@^2.0.0:

 micromark-factory-label@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz#5267efa97f1e5254efc7f20b459a38cb21058ba1"
+  resolved "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz#5267efa97f1e5254efc7f20b459a38cb21058ba1"
   integrity sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==
   dependencies:
     devlop "^1.0.0"
@@ -3546,9 +3674,9 @@ micromark-factory-label@^2.0.0:
     micromark-util-types "^2.0.0"

 micromark-factory-mdx-expression@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.npmmirror.com/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz#2afaa8ba6d5f63e0cead3e4dee643cad184ca260"
-  integrity sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==
+  version "2.0.3"
+  resolved "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz#bb09988610589c07d1c1e4425285895041b3dfa9"
+  integrity sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==
   dependencies:
     "@types/estree" "^1.0.0"
     devlop "^1.0.0"
@@ -3562,7 +3690,7 @@ micromark-factory-mdx-expression@^2.0.0:

 micromark-factory-space@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz#36d0212e962b2b3121f8525fc7a3c7c029f334fc"
+  resolved "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz#36d0212e962b2b3121f8525fc7a3c7c029f334fc"
   integrity sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==
   dependencies:
     micromark-util-character "^2.0.0"
@@ -3570,7 +3698,7 @@ micromark-factory-space@^2.0.0:

 micromark-factory-title@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz#237e4aa5d58a95863f01032d9ee9b090f1de6e94"
+  resolved "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz#237e4aa5d58a95863f01032d9ee9b090f1de6e94"
   integrity sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==
   dependencies:
     micromark-factory-space "^2.0.0"
@@ -3580,7 +3708,7 @@ micromark-factory-title@^2.0.0:

 micromark-factory-whitespace@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz#06b26b2983c4d27bfcc657b33e25134d4868b0b1"
+  resolved "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz#06b26b2983c4d27bfcc657b33e25134d4868b0b1"
   integrity sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==
   dependencies:
     micromark-factory-space "^2.0.0"
@@ -3590,7 +3718,7 @@ micromark-factory-whitespace@^2.0.0:

 micromark-util-character@^2.0.0:
   version "2.1.1"
-  resolved "https://registry.npmmirror.com/micromark-util-character/-/micromark-util-character-2.1.1.tgz#2f987831a40d4c510ac261e89852c4e9703ccda6"
+  resolved "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz#2f987831a40d4c510ac261e89852c4e9703ccda6"
   integrity sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==
   dependencies:
     micromark-util-symbol "^2.0.0"
@@ -3598,14 +3726,14 @@ micromark-util-character@^2.0.0:

 micromark-util-chunked@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz#47fbcd93471a3fccab86cff03847fc3552db1051"
+  resolved "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz#47fbcd93471a3fccab86cff03847fc3552db1051"
   integrity sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==
   dependencies:
     micromark-util-symbol "^2.0.0"

 micromark-util-classify-character@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz#d399faf9c45ca14c8b4be98b1ea481bced87b629"
+  resolved "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz#d399faf9c45ca14c8b4be98b1ea481bced87b629"
   integrity sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==
   dependencies:
     micromark-util-character "^2.0.0"
@@ -3614,7 +3742,7 @@ micromark-util-classify-character@^2.0.0:

 micromark-util-combine-extensions@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz#2a0f490ab08bff5cc2fd5eec6dd0ca04f89b30a9"
+  resolved "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz#2a0f490ab08bff5cc2fd5eec6dd0ca04f89b30a9"
   integrity sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==
   dependencies:
     micromark-util-chunked "^2.0.0"
@@ -3622,14 +3750,14 @@ micromark-util-combine-extensions@^2.0.0:

 micromark-util-decode-numeric-character-reference@^2.0.0:
   version "2.0.2"
-  resolved "https://registry.npmmirror.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz#fcf15b660979388e6f118cdb6bf7d79d73d26fe5"
+  resolved "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz#fcf15b660979388e6f118cdb6bf7d79d73d26fe5"
   integrity sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==
   dependencies:
     micromark-util-symbol "^2.0.0"

 micromark-util-decode-string@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz#6cb99582e5d271e84efca8e61a807994d7161eb2"
+  resolved "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz#6cb99582e5d271e84efca8e61a807994d7161eb2"
   integrity sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==
   dependencies:
     decode-named-character-reference "^1.0.0"
@@ -3639,15 +3767,14 @@ micromark-util-decode-string@^2.0.0:

 micromark-util-encode@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz#0d51d1c095551cfaac368326963cf55f15f540b8"
+  resolved "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz#0d51d1c095551cfaac368326963cf55f15f540b8"
   integrity sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==

 micromark-util-events-to-acorn@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.npmmirror.com/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz#4275834f5453c088bd29cd72dfbf80e3327cec07"
-  integrity sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==
+  version "2.0.3"
+  resolved "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz#e7a8a6b55a47e5a06c720d5a1c4abae8c37c98f3"
+  integrity sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==
   dependencies:
-    "@types/acorn" "^4.0.0"
     "@types/estree" "^1.0.0"
     "@types/unist" "^3.0.0"
     devlop "^1.0.0"
@@ -3658,26 +3785,26 @@ micromark-util-events-to-acorn@^2.0.0:

 micromark-util-html-tag-name@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz#e40403096481986b41c106627f98f72d4d10b825"
+  resolved "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz#e40403096481986b41c106627f98f72d4d10b825"
   integrity sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==

 micromark-util-normalize-identifier@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz#c30d77b2e832acf6526f8bf1aa47bc9c9438c16d"
+  resolved "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz#c30d77b2e832acf6526f8bf1aa47bc9c9438c16d"
   integrity sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==
   dependencies:
     micromark-util-symbol "^2.0.0"

 micromark-util-resolve-all@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz#e1a2d62cdd237230a2ae11839027b19381e31e8b"
+  resolved "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz#e1a2d62cdd237230a2ae11839027b19381e31e8b"
   integrity sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==
   dependencies:
     micromark-util-types "^2.0.0"

 micromark-util-sanitize-uri@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz#ab89789b818a58752b73d6b55238621b7faa8fd7"
+  resolved "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz#ab89789b818a58752b73d6b55238621b7faa8fd7"
   integrity sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==
   dependencies:
     micromark-util-character "^2.0.0"
@@ -3685,9 +3812,9 @@ micromark-util-sanitize-uri@^2.0.0:
     micromark-util-symbol "^2.0.0"

 micromark-util-subtokenize@^2.0.0:
-  version "2.0.4"
-  resolved "https://registry.npmmirror.com/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.4.tgz#50d8ca981373c717f497dc64a0dbfccce6c03ed2"
-  integrity sha512-N6hXjrin2GTJDe3MVjf5FuXpm12PGm80BrUAeub9XFXca8JZbP+oIwY4LJSVwFUCL1IPm/WwSVUN7goFHmSGGQ==
+  version "2.1.0"
+  resolved "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz#d8ade5ba0f3197a1cf6a2999fbbfe6357a1a19ee"
+  integrity sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==
   dependencies:
     devlop "^1.0.0"
     micromark-util-chunked "^2.0.0"
@@ -3696,18 +3823,18 @@ micromark-util-subtokenize@^2.0.0:

 micromark-util-symbol@^2.0.0:
   version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz#e5da494e8eb2b071a0d08fb34f6cefec6c0a19b8"
+  resolved "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz#e5da494e8eb2b071a0d08fb34f6cefec6c0a19b8"
   integrity sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==

 micromark-util-types@^2.0.0:
-  version "2.0.1"
-  resolved "https://registry.npmmirror.com/micromark-util-types/-/micromark-util-types-2.0.1.tgz#a3edfda3022c6c6b55bfb049ef5b75d70af50709"
-  integrity sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ==
+  version "2.0.2"
+  resolved "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz#f00225f5f5a0ebc3254f96c36b6605c4b393908e"
+  integrity sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==

 micromark@^4.0.0:
-  version "4.0.1"
-  resolved "https://registry.npmmirror.com/micromark/-/micromark-4.0.1.tgz#294c2f12364759e5f9e925a767ae3dfde72223ff"
-  integrity sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw==
+  version "4.0.2"
+  resolved "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz#91395a3e1884a198e62116e33c9c568e39936fdb"
+  integrity sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==
   dependencies:
     "@types/debug" "^4.0.0"
     debug "^4.0.0"
@@ -3729,7 +3856,7 @@ micromark@^4.0.0:

 micromatch@^4.0.4, micromatch@^4.0.8:
   version "4.0.8"
-  resolved "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
+  resolved "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
   integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
   dependencies:
     braces "^3.0.3"
@@ -3737,21 +3864,21 @@ micromatch@^4.0.4, micromatch@^4.0.8:

 minimatch@^3.1.2:
   version "3.1.2"
-  resolved "https://registry.npmmirror.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
+  resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
   integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
   dependencies:
     brace-expansion "^1.1.7"

 minimatch@^9.0.4:
   version "9.0.5"
-  resolved "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5"
+  resolved "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5"
   integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==
   dependencies:
     brace-expansion "^2.0.1"

 minimist@^1.2.0, minimist@^1.2.6:
   version "1.2.8"
-  resolved "https://registry.npmmirror.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
+  resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
   integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==

 minisearch@^7.1.2:
@@ -3766,29 +3893,34 @@ mitt@^3.0.1:

 moment@^2.30.1:
   version "2.30.1"
-  resolved "https://registry.npmmirror.com/moment/-/moment-2.30.1.tgz#f8c91c07b7a786e30c59926df530b4eac96974ae"
+  resolved "https://registry.npmjs.org/moment/-/moment-2.30.1.tgz#f8c91c07b7a786e30c59926df530b4eac96974ae"
   integrity sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==

 ms@^2.1.1, ms@^2.1.3:
   version "2.1.3"
-  resolved "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
+  resolved "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
   integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==

 mui-message@^1.1.0:
   version "1.1.0"
-  resolved "https://registry.npmmirror.com/mui-message/-/mui-message-1.1.0.tgz#cffdcad6ab18537a0053e357749fb35da9169c72"
+  resolved "https://registry.npmjs.org/mui-message/-/mui-message-1.1.0.tgz#cffdcad6ab18537a0053e357749fb35da9169c72"
   integrity sha512-VMpDyqM037mKnD89jr1lIiRNOa6GIGsHC3ucT2FCMiLwdvm+yyl8M2s0fkL27MFLBjR2iXGpqVF/1gsSQea5zQ==
   dependencies:
     notistack "^2.0.8"

 nanoid@^3.3.6:
-  version "3.3.8"
-  resolved "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf"
-  integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==
+  version "3.3.11"
+  resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b"
+  integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==
+
+napi-postinstall@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.npmjs.org/napi-postinstall/-/napi-postinstall-0.3.0.tgz#888e51d1fb500e86dcf6ace1baccdbb377e654ce"
+  integrity sha512-M7NqKyhODKV1gRLdkwE7pDsZP2/SC2a2vHkOYh9MCpKMbWVfyVfUw5MaH83Fv6XMjxr5jryUp3IDDL9rlxsTeA==

 natural-compare@^1.4.0:
   version "1.4.0"
-  resolved "https://registry.npmmirror.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7"
+  resolved "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7"
   integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==

 netmask@^2.0.2:
@@ -3798,7 +3930,7 @@

 next-international@^1.3.1:
   version "1.3.1"
-  resolved "https://registry.npmmirror.com/next-international/-/next-international-1.3.1.tgz#10db89c586286de0d1be673910d1f9809da4cbbe"
+  resolved "https://registry.npmjs.org/next-international/-/next-international-1.3.1.tgz#10db89c586286de0d1be673910d1f9809da4cbbe"
   integrity sha512-ydU9jQe+4MohMWltbZae/yuWeKhmp0QKQqJNNi8WCCMwrly03qfMAHw/tWbT2qgAlG++CxF5jMXmGQZgOHeVOw==
   dependencies:
     client-only "^0.0.1"
@@ -3806,16 +3938,16 @@
     server-only "^0.0.1"

 next-themes@^0.4.4:
-  version "0.4.4"
-  resolved "https://registry.npmmirror.com/next-themes/-/next-themes-0.4.4.tgz#ce6f68a4af543821bbc4755b59c0d3ced55c2d13"
-  integrity sha512-LDQ2qIOJF0VnuVrrMSMLrWGjRMkq+0mpgl6e0juCLqdJ+oo8Q84JRWT6Wh11VDQKkMMe+dVzDKLWs5n87T+PkQ==
+  version "0.4.6"
+  resolved "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz#8d7e92d03b8fea6582892a50a928c9b23502e8b6"
+  integrity sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==

-next@15.1.7:
-  version "15.1.7"
-  resolved "https://registry.npmmirror.com/next/-/next-15.1.7.tgz#e814845e7cdb0294aee88ceab0bb962de83e8e6f"
-  integrity sha512-GNeINPGS9c6OZKCvKypbL8GTsT5GhWPp4DM0fzkXJuXMilOO2EeFxuAY6JZbtk6XIl6Ws10ag3xRINDjSO5+wg==
+next@15.2.4:
+  version "15.2.4"
+  resolved "https://registry.npmjs.org/next/-/next-15.2.4.tgz#e05225e9511df98e3b2edc713e17f4c970bff961"
+  integrity sha512-VwL+LAaPSxEkd3lU2xWbgEOtrM8oedmyhBqaVNmgKB+GvZlCy9rgaEc+y2on0wv+l0oSFqLtYD6dcC1eAedUaQ==
   dependencies:
-    "@next/env" "15.1.7"
+    "@next/env" "15.2.4"
     "@swc/counter" "0.1.3"
     "@swc/helpers" "0.5.15"
     busboy "1.6.0"
@@ -3823,19 +3955,19 @@
     postcss "8.4.31"
     styled-jsx "5.1.6"
   optionalDependencies:
-    "@next/swc-darwin-arm64" "15.1.7"
-    "@next/swc-darwin-x64" "15.1.7"
-    "@next/swc-linux-arm64-gnu" "15.1.7"
-    "@next/swc-linux-arm64-musl" "15.1.7"
-    "@next/swc-linux-x64-gnu" "15.1.7"
-    "@next/swc-linux-x64-musl" "15.1.7"
-    "@next/swc-win32-arm64-msvc" "15.1.7"
-    "@next/swc-win32-x64-msvc" "15.1.7"
+    "@next/swc-darwin-arm64" "15.2.4"
+    "@next/swc-darwin-x64" "15.2.4"
+    "@next/swc-linux-arm64-gnu" "15.2.4"
+    "@next/swc-linux-arm64-musl" "15.2.4"
+    "@next/swc-linux-x64-gnu" "15.2.4"
+    "@next/swc-linux-x64-musl" "15.2.4"
+    "@next/swc-win32-arm64-msvc" "15.2.4"
+    "@next/swc-win32-x64-msvc" "15.2.4"
     sharp "^0.33.5"

 notistack@^2.0.8:
   version "2.0.8"
-  resolved "https://registry.npmmirror.com/notistack/-/notistack-2.0.8.tgz#78cdf34c64e311bf1d1d71c2123396bcdea5e95b"
+  resolved "https://registry.npmjs.org/notistack/-/notistack-2.0.8.tgz#78cdf34c64e311bf1d1d71c2123396bcdea5e95b"
   integrity sha512-/IY14wkFp5qjPgKNvAdfL5Jp6q90+MjgKTPh4c81r/lW70KeuX6b9pE/4f8L4FG31cNudbN9siiFS5ql1aSLRw==
   dependencies:
     clsx "^1.1.0"
@@ -3843,29 +3975,29 @@

 nth-check@^2.0.1:
   version "2.1.1"
-  resolved "https://registry.npmmirror.com/nth-check/-/nth-check-2.1.1.tgz#c9eab428effce36cd6b92c924bdb000ef1f1ed1d"
+  resolved "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz#c9eab428effce36cd6b92c924bdb000ef1f1ed1d"
   integrity sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==
   dependencies:
     boolbase "^1.0.0"

 object-assign@^4.1.1:
   version "4.1.1"
-  resolved "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+  resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
   integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==

-object-inspect@^1.13.3:
+object-inspect@^1.13.3, object-inspect@^1.13.4:
   version "1.13.4"
-  resolved "https://registry.npmmirror.com/object-inspect/-/object-inspect-1.13.4.tgz#8375265e21bc20d0fa582c22e1b13485d6e00213"
+  resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz#8375265e21bc20d0fa582c22e1b13485d6e00213"
   integrity sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==

 object-keys@^1.1.1:
   version "1.1.1"
-  resolved "https://registry.npmmirror.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
+  resolved "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
   integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==

 object.assign@^4.1.4, object.assign@^4.1.7:
   version "4.1.7"
-  resolved "https://registry.npmmirror.com/object.assign/-/object.assign-4.1.7.tgz#8c14ca1a424c6a561b0bb2a22f66f5049a945d3d"
+  resolved "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz#8c14ca1a424c6a561b0bb2a22f66f5049a945d3d"
   integrity sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==
   dependencies:
     call-bind "^1.0.8"
@@ -3875,18 +4007,19 @@ object.assign@^4.1.4, object.assign@^4.1.7:
     has-symbols "^1.1.0"
     object-keys "^1.1.1"

-object.entries@^1.1.8:
-  version "1.1.8"
-  resolved "https://registry.npmmirror.com/object.entries/-/object.entries-1.1.8.tgz#bffe6f282e01f4d17807204a24f8edd823599c41"
-  integrity sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==
+object.entries@^1.1.9:
+  version "1.1.9"
+  resolved "https://registry.npmjs.org/object.entries/-/object.entries-1.1.9.tgz#e4770a6a1444afb61bd39f984018b5bede25f8b3"
+  integrity sha512-8u/hfXFRBD1O0hPUjioLhoWFHRmt6tKA4/vZPyckBr18l1KE9uHrFaFaUi8MDRTpi4uak2goyPTSNJLXX2k2Hw==
   dependencies:
-    call-bind "^1.0.7"
+    call-bind "^1.0.8"
+    call-bound "^1.0.4"
     define-properties "^1.2.1"
-    es-object-atoms "^1.0.0"
+    es-object-atoms "^1.1.1"

 object.fromentries@^2.0.8:
   version "2.0.8"
-  resolved "https://registry.npmmirror.com/object.fromentries/-/object.fromentries-2.0.8.tgz#f7195d8a9b97bd95cbc1999ea939ecd1a2b00c65"
+  resolved "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz#f7195d8a9b97bd95cbc1999ea939ecd1a2b00c65"
   integrity sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==
   dependencies:
     call-bind "^1.0.7"
@@ -3896,16 +4029,16 @@ object.fromentries@^2.0.8:

 object.groupby@^1.0.3:
   version "1.0.3"
-  resolved "https://registry.npmmirror.com/object.groupby/-/object.groupby-1.0.3.tgz#9b125c36238129f6f7b61954a1e7176148d5002e"
+  resolved "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz#9b125c36238129f6f7b61954a1e7176148d5002e"
   integrity sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==
   dependencies:
     call-bind "^1.0.7"
     define-properties "^1.2.1"
     es-abstract "^1.23.2"

-object.values@^1.1.6, object.values@^1.2.0, object.values@^1.2.1:
+object.values@^1.1.6, object.values@^1.2.1:
   version "1.2.1"
-  resolved "https://registry.npmmirror.com/object.values/-/object.values-1.2.1.tgz#deed520a50809ff7f75a7cfd4bc64c7a038c6216"
+  resolved "https://registry.npmjs.org/object.values/-/object.values-1.2.1.tgz#deed520a50809ff7f75a7cfd4bc64c7a038c6216"
   integrity sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==
   dependencies:
     call-bind "^1.0.8"
@@ -3922,7 +4055,7 @@ once@^1.3.1, once@^1.4.0:

 optionator@^0.9.3:
   version "0.9.4"
-  resolved "https://registry.npmmirror.com/optionator/-/optionator-0.9.4.tgz#7ea1c1a5d91d764fb282139c88fe11e182a3a734"
+  resolved "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz#7ea1c1a5d91d764fb282139c88fe11e182a3a734"
   integrity sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==
   dependencies:
     deep-is "^0.1.3"
@@ -3934,7 +4067,7 @@ optionator@^0.9.3:

 own-keys@^1.0.1:
   version "1.0.1"
-  resolved "https://registry.npmmirror.com/own-keys/-/own-keys-1.0.1.tgz#e4006910a2bf913585289676eebd6f390cf51358"
+  resolved "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz#e4006910a2bf913585289676eebd6f390cf51358"
   integrity sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==
   dependencies:
     get-intrinsic "^1.2.6"
@@ -3943,14 +4076,14 @@ own-keys@^1.0.1:

 p-limit@^3.0.2:
   version "3.1.0"
-  resolved "https://registry.npmmirror.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b"
+  resolved "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b"
   integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==
   dependencies:
     yocto-queue "^0.1.0"

 p-locate@^5.0.0:
   version "5.0.0"
-  resolved "https://registry.npmmirror.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834"
+  resolved "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834"
   integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==
   dependencies:
     p-limit "^3.0.2"
@@ -3979,14 +4112,14 @@ pac-resolver@^7.0.1:

 parent-module@^1.0.0:
   version "1.0.1"
-  resolved "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
+  resolved "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
   integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==
   dependencies:
     callsites "^3.0.0"

 parse-entities@^4.0.0:
   version "4.0.2"
-  resolved "https://registry.npmmirror.com/parse-entities/-/parse-entities-4.0.2.tgz#61d46f5ed28e4ee62e9ddc43d6b010188443f159"
+  resolved "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz#61d46f5ed28e4ee62e9ddc43d6b010188443f159"
   integrity sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==
   dependencies:
     "@types/unist" "^2.0.0"
@@ -3999,7 +4132,7 @@ parse-entities@^4.0.0:

 parse-json@^5.0.0, parse-json@^5.2.0:
   version "5.2.0"
-  resolved "https://registry.npmmirror.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd"
+  resolved "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd"
   integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==
   dependencies:
     "@babel/code-frame" "^7.0.0"
@@ -4009,12 +4142,12 @@ parse-json@^5.0.0, parse-json@^5.2.0:

 parse-numeric-range@^1.3.0:
   version "1.3.0"
-  resolved "https://registry.npmmirror.com/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz#7c63b61190d61e4d53a1197f0c83c47bb670ffa3"
+  resolved "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz#7c63b61190d61e4d53a1197f0c83c47bb670ffa3"
   integrity sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==

-parse5-htmlparser2-tree-adapter@^7.0.0:
+parse5-htmlparser2-tree-adapter@^7.1.0:
   version "7.1.0"
-  resolved "https://registry.npmmirror.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz#b5a806548ed893a43e24ccb42fbb78069311e81b"
+  resolved "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz#b5a806548ed893a43e24ccb42fbb78069311e81b"
   integrity sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==
   dependencies:
     domhandler "^5.0.3"
@@ -4022,36 +4155,36 @@ parse5-htmlparser2-tree-adapter@^7.0.0:

 parse5-parser-stream@^7.1.2:
   version "7.1.2"
-  resolved "https://registry.npmmirror.com/parse5-parser-stream/-/parse5-parser-stream-7.1.2.tgz#d7c20eadc37968d272e2c02660fff92dd27e60e1"
+  resolved "https://registry.npmjs.org/parse5-parser-stream/-/parse5-parser-stream-7.1.2.tgz#d7c20eadc37968d272e2c02660fff92dd27e60e1"
   integrity sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow==
   dependencies:
     parse5 "^7.0.0"

-parse5@^7.0.0, parse5@^7.1.2:
-  version "7.2.1"
-  resolved "https://registry.npmmirror.com/parse5/-/parse5-7.2.1.tgz#8928f55915e6125f430cc44309765bf17556a33a"
-  integrity sha512-BuBYQYlv1ckiPdQi/ohiivi9Sagc9JG+Ozs0r7b/0iK3sKmrb0b9FdWdBbOdx6hBCM/F9Ir82ofnBhtZOjCRPQ==
+parse5@^7.0.0, parse5@^7.3.0:
+  version "7.3.0"
+  resolved "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz#d7e224fa72399c7a175099f45fc2ad024b05ec05"
+  integrity sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==
   dependencies:
-    entities "^4.5.0"
+    entities "^6.0.0"

 path-exists@^4.0.0:
   version "4.0.0"
-  resolved "https://registry.npmmirror.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
+  resolved "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
   integrity
sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== path-key@^3.1.0: version "3.1.1" - resolved "https://registry.npmmirror.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + resolved "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== path-parse@^1.0.7: version "1.0.7" - resolved "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== path-type@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + resolved "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== pend@~1.2.0: @@ -4061,32 +4194,32 @@ pend@~1.2.0: performance-now@^2.1.0: version "2.1.0" - resolved "https://registry.npmmirror.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + resolved "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== -picocolors@^1.0.0: +picocolors@^1.0.0, picocolors@^1.1.1: version "1.1.1" - resolved "https://registry.npmmirror.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + resolved "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== picomatch@^2.3.1: version "2.3.1" - resolved "https://registry.npmmirror.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + resolved "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== picomatch@^4.0.2: version "4.0.2" - resolved "https://registry.npmmirror.com/picomatch/-/picomatch-4.0.2.tgz#77c742931e8f3b8820946c76cd0c1f13730d1dab" + resolved "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz#77c742931e8f3b8820946c76cd0c1f13730d1dab" integrity sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg== possible-typed-array-names@^1.0.0: version "1.1.0" - resolved "https://registry.npmmirror.com/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz#93e3582bc0e5426586d9d07b79ee40fc841de4ae" + resolved "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz#93e3582bc0e5426586d9d07b79ee40fc841de4ae" integrity sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg== postcss@8.4.31: version "8.4.31" - resolved "https://registry.npmmirror.com/postcss/-/postcss-8.4.31.tgz#92b451050a9f914da6755af352bdc0192508656d" + resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz#92b451050a9f914da6755af352bdc0192508656d" integrity 
sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ== dependencies: nanoid "^3.3.6" @@ -4095,7 +4228,7 @@ postcss@8.4.31: prelude-ls@^1.2.1: version "1.2.1" - resolved "https://registry.npmmirror.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" + resolved "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== progress@^2.0.3: @@ -4105,7 +4238,7 @@ progress@^2.0.3: prop-types@^15.6.0, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1: version "15.8.1" - resolved "https://registry.npmmirror.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + resolved "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== dependencies: loose-envify "^1.4.0" @@ -4114,13 +4247,13 @@ prop-types@^15.6.0, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1: property-information@^6.0.0: version "6.5.0" - resolved "https://registry.npmmirror.com/property-information/-/property-information-6.5.0.tgz#6212fbb52ba757e92ef4fb9d657563b933b7ffec" + resolved "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz#6212fbb52ba757e92ef4fb9d657563b933b7ffec" integrity sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig== property-information@^7.0.0: - version "7.0.0" - resolved "https://registry.npmmirror.com/property-information/-/property-information-7.0.0.tgz#3508a6d6b0b8eb3ca6eb2c6623b164d2ed2ab112" - integrity sha512-7D/qOz/+Y4X/rzSB6jKxKUsQnphO046ei8qxG59mtM3RG3DHgTK81HrxrmoDVINJb8NKT5ZsRbwHvQ6B68Iyhg== + version "7.1.0" + resolved "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz#b622e8646e02b580205415586b40804d3e8bfd5d" + integrity sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ== proxy-agent@^6.5.0: version "6.5.0" @@ -4142,82 +4275,82 @@ proxy-from-env@^1.1.0: integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== pump@^3.0.0: - version "3.0.2" - resolved "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz#836f3edd6bc2ee599256c924ffe0d88573ddcbf8" - integrity sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw== + version "3.0.3" + resolved "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz#151d979f1a29668dc0025ec589a455b53282268d" + integrity sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA== dependencies: end-of-stream "^1.1.0" once "^1.3.1" punycode@^2.1.0: version "2.3.1" - resolved "https://registry.npmmirror.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + resolved "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== -puppeteer-core@24.10.0: - version "24.10.0" - resolved "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-24.10.0.tgz#b44175d90511eb414395ae60c51a13185b391421" - integrity sha512-xX0QJRc8t19iAwRDsAOR38Q/Zx/W6WVzJCEhKCAwp2XMsaWqfNtQ+rBfQW9PlF+Op24d7c8Zlgq9YNmbnA7hdQ== +puppeteer-core@24.12.1: + version "24.12.1" + 
resolved "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-24.12.1.tgz#66338926570ae904ce8ea0acb68b1bda361f5884" + integrity sha512-8odp6d3ERKBa3BAVaYWXn95UxQv3sxvP1reD+xZamaX6ed8nCykhwlOiHSaHR9t/MtmIB+rJmNencI6Zy4Gxvg== dependencies: "@puppeteer/browsers" "2.10.5" chromium-bidi "5.1.0" debug "^4.4.1" - devtools-protocol "0.0.1452169" + devtools-protocol "0.0.1464554" typed-query-selector "^2.12.0" - ws "^8.18.2" + ws "^8.18.3" puppeteer@^24.10.0: - version "24.10.0" - resolved "https://registry.npmjs.org/puppeteer/-/puppeteer-24.10.0.tgz#52bc657c01a96ce2b9e0b4300f1ff5ddb8a644cd" - integrity sha512-Oua9VkGpj0S2psYu5e6mCer6W9AU9POEQh22wRgSXnLXASGH+MwLUVWgLCLeP9QPHHcJ7tySUlg4Sa9OJmaLpw== + version "24.12.1" + resolved "https://registry.npmjs.org/puppeteer/-/puppeteer-24.12.1.tgz#04f23bc65d592277fef5a306c671f464e8ec9042" + integrity sha512-+vvwl+Xo4z5uXLLHG+XW8uXnUXQ62oY6KU6bEFZJvHWLutbmv5dw9A/jcMQ0fqpQdLydHmK0Uy7/9Ilj8ufwSQ== dependencies: "@puppeteer/browsers" "2.10.5" chromium-bidi "5.1.0" cosmiconfig "^9.0.0" - devtools-protocol "0.0.1452169" - puppeteer-core "24.10.0" + devtools-protocol "0.0.1464554" + puppeteer-core "24.12.1" typed-query-selector "^2.12.0" queue-microtask@^1.2.2: version "1.2.3" - resolved "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== raf@^3.0.0: version "3.4.1" - resolved "https://registry.npmmirror.com/raf/-/raf-3.4.1.tgz#0742e99a4a6552f445d73e3ee0328af0ff1ede39" + resolved "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz#0742e99a4a6552f445d73e3ee0328af0ff1ede39" integrity sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA== dependencies: performance-now "^2.1.0" react-copy-to-clipboard@^5.1.0: version "5.1.0" - resolved "https://registry.npmmirror.com/react-copy-to-clipboard/-/react-copy-to-clipboard-5.1.0.tgz#09aae5ec4c62750ccb2e6421a58725eabc41255c" + resolved "https://registry.npmjs.org/react-copy-to-clipboard/-/react-copy-to-clipboard-5.1.0.tgz#09aae5ec4c62750ccb2e6421a58725eabc41255c" integrity sha512-k61RsNgAayIJNoy9yDsYzDe/yAZAzEbEgcz3DZMhF686LEyukcE1hzurxe85JandPUG+yTfGVFzuEw3xt8WP/A== dependencies: copy-to-clipboard "^3.3.1" prop-types "^15.8.1" react-dom@^19.0.0: - version "19.0.0" - resolved "https://registry.npmmirror.com/react-dom/-/react-dom-19.0.0.tgz#43446f1f01c65a4cd7f7588083e686a6726cfb57" - integrity sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ== + version "19.1.0" + resolved "https://registry.npmjs.org/react-dom/-/react-dom-19.1.0.tgz#133558deca37fa1d682708df8904b25186793623" + integrity sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g== dependencies: - scheduler "^0.25.0" + scheduler "^0.26.0" react-is@^16.13.1, react-is@^16.7.0: version "16.13.1" - resolved "https://registry.npmmirror.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + resolved "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== react-is@^19.0.0: - version "19.0.0" - resolved 
"https://registry.npmmirror.com/react-is/-/react-is-19.0.0.tgz#d6669fd389ff022a9684f708cf6fa4962d1fea7a" - integrity sha512-H91OHcwjZsbq3ClIDHMzBShc1rotbfACdWENsmEf0IFvZ3FgGPtdHMcsv45bQ1hAbgdfiA8SnxTKfDS+x/8m2g== + version "19.1.0" + resolved "https://registry.npmjs.org/react-is/-/react-is-19.1.0.tgz#805bce321546b7e14c084989c77022351bbdd11b" + integrity sha512-Oe56aUPnkHyyDxxkvqtd7KkdQP5uIUfHxd5XTb3wE9d/kRnZLmKbDB0GWk919tdQ+mxxPtG6EAs6RMT6i1qtHg== react-scroll@^1.9.3: version "1.9.3" - resolved "https://registry.npmmirror.com/react-scroll/-/react-scroll-1.9.3.tgz#8312831244a7a8f86036e72c71de155a454a78c0" + resolved "https://registry.npmjs.org/react-scroll/-/react-scroll-1.9.3.tgz#8312831244a7a8f86036e72c71de155a454a78c0" integrity sha512-xv7FXqF3k63aSLNu4/NjFvRNI0ge7DmmmsbeGarP7LZVAlJMSjUuW3dTtLxp1Afijyv0lS2qwC0GiFHvx1KBHQ== dependencies: lodash.throttle "^4.1.1" @@ -4225,7 +4358,7 @@ react-scroll@^1.9.3: react-slick@^0.30.3: version "0.30.3" - resolved "https://registry.npmmirror.com/react-slick/-/react-slick-0.30.3.tgz#3af5846fcbc04c681f8ba92f48881a0f78124a27" + resolved "https://registry.npmjs.org/react-slick/-/react-slick-0.30.3.tgz#3af5846fcbc04c681f8ba92f48881a0f78124a27" integrity sha512-B4x0L9GhkEWUMApeHxr/Ezp2NncpGc+5174R02j+zFiWuYboaq98vmxwlpafZfMjZic1bjdIqqmwLDcQY0QaFA== dependencies: classnames "^2.2.5" @@ -4236,7 +4369,7 @@ react-slick@^0.30.3: react-stickynode@^5.0.2: version "5.0.2" - resolved "https://registry.npmmirror.com/react-stickynode/-/react-stickynode-5.0.2.tgz#90be0871ae453188f2c9187f4c59a8092220ea12" + resolved "https://registry.npmjs.org/react-stickynode/-/react-stickynode-5.0.2.tgz#90be0871ae453188f2c9187f4c59a8092220ea12" integrity sha512-aywk3qFTPu5VMKexra5VpDzQCRt3JkBk/Ft6SEzuXcafKz9sYal8QhY8Xfjt6BGmq2Teh90E/QcIfyzKFg9vuw== dependencies: classnames "^2.0.0" @@ -4246,7 +4379,7 @@ react-stickynode@^5.0.2: react-transition-group@^4.4.5: version "4.4.5" - resolved "https://registry.npmmirror.com/react-transition-group/-/react-transition-group-4.4.5.tgz#e53d4e3f3344da8521489fbef8f2581d42becdd1" + resolved "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz#e53d4e3f3344da8521489fbef8f2581d42becdd1" integrity sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g== dependencies: "@babel/runtime" "^7.5.5" @@ -4255,13 +4388,13 @@ react-transition-group@^4.4.5: prop-types "^15.6.2" react@^19.0.0: - version "19.0.0" - resolved "https://registry.npmmirror.com/react/-/react-19.0.0.tgz#6e1969251b9f108870aa4bff37a0ce9ddfaaabdd" - integrity sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ== + version "19.1.0" + resolved "https://registry.npmjs.org/react/-/react-19.1.0.tgz#926864b6c48da7627f004795d6cce50e90793b75" + integrity sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg== read-yaml-file@^2.1.0: version "2.1.0" - resolved "https://registry.npmmirror.com/read-yaml-file/-/read-yaml-file-2.1.0.tgz#c5866712db9ef5343b4d02c2413bada53c41c4a9" + resolved "https://registry.npmjs.org/read-yaml-file/-/read-yaml-file-2.1.0.tgz#c5866712db9ef5343b4d02c2413bada53c41c4a9" integrity sha512-UkRNRIwnhG+y7hpqnycCL/xbTk7+ia9VuVTC0S+zVbwd65DI9eUpRMfsWIGrCWxTU/mi+JW8cHQCrv+zfCbEPQ== dependencies: js-yaml "^4.0.0" @@ -4269,7 +4402,7 @@ read-yaml-file@^2.1.0: recma-build-jsx@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz#c02f29e047e103d2fab2054954e1761b8ea253c4" + resolved 
"https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz#c02f29e047e103d2fab2054954e1761b8ea253c4" integrity sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew== dependencies: "@types/estree" "^1.0.0" @@ -4278,7 +4411,7 @@ recma-build-jsx@^1.0.0: recma-jsx@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/recma-jsx/-/recma-jsx-1.0.0.tgz#f7bef02e571a49d6ba3efdfda8e2efab48dbe3aa" + resolved "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.0.tgz#f7bef02e571a49d6ba3efdfda8e2efab48dbe3aa" integrity sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q== dependencies: acorn-jsx "^5.0.0" @@ -4289,7 +4422,7 @@ recma-jsx@^1.0.0: recma-parse@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/recma-parse/-/recma-parse-1.0.0.tgz#c351e161bb0ab47d86b92a98a9d891f9b6814b52" + resolved "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz#c351e161bb0ab47d86b92a98a9d891f9b6814b52" integrity sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ== dependencies: "@types/estree" "^1.0.0" @@ -4299,7 +4432,7 @@ recma-parse@^1.0.0: recma-stringify@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/recma-stringify/-/recma-stringify-1.0.0.tgz#54632030631e0c7546136ff9ef8fde8e7b44f130" + resolved "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz#54632030631e0c7546136ff9ef8fde8e7b44f130" integrity sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g== dependencies: "@types/estree" "^1.0.0" @@ -4309,7 +4442,7 @@ recma-stringify@^1.0.0: reflect.getprototypeof@^1.0.6, reflect.getprototypeof@^1.0.9: version "1.0.10" - resolved "https://registry.npmmirror.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz#c629219e78a3316d8b604c765ef68996964e7bf9" + resolved "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz#c629219e78a3316d8b604c765ef68996964e7bf9" integrity sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw== dependencies: call-bind "^1.0.8" @@ -4321,14 +4454,9 @@ reflect.getprototypeof@^1.0.6, reflect.getprototypeof@^1.0.9: get-proto "^1.0.1" which-builtin-type "^1.2.1" -regenerator-runtime@^0.14.0: - version "0.14.1" - resolved "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" - integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== - -regexp.prototype.flags@^1.5.3: +regexp.prototype.flags@^1.5.3, regexp.prototype.flags@^1.5.4: version "1.5.4" - resolved "https://registry.npmmirror.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz#1ad6c62d44a259007e55b3970e00f746efbcaa19" + resolved "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz#1ad6c62d44a259007e55b3970e00f746efbcaa19" integrity sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA== dependencies: call-bind "^1.0.8" @@ -4339,9 +4467,9 @@ regexp.prototype.flags@^1.5.3: set-function-name "^2.0.2" rehype-highlight-code-lines@^1.1.3: - version "1.1.3" - resolved "https://registry.npmmirror.com/rehype-highlight-code-lines/-/rehype-highlight-code-lines-1.1.3.tgz#ee34670cc6dd986574da7d1c2c5648e30dd20bca" - integrity sha512-Kq63wvBTkaQn5d5eqVJsqkpAW2fmqwbyGRervWtH32fTY6+cFqzVU5NTBZH/fe9PjjRUTyZfWinEDu7Ewvr+lw== + 
version "1.1.5" + resolved "https://registry.npmjs.org/rehype-highlight-code-lines/-/rehype-highlight-code-lines-1.1.5.tgz#baeca70565f7cb5a8143f3fa6fc131064bacc878" + integrity sha512-U9IAjx7rVizYsHQ3dgnUQ/RSEPqnyHHwX7rannSukwLTW0VVZF7Tg/DxcomRPCVt0V25F1n8bi75Y0qQFjittA== dependencies: "@types/hast" "^3.0.4" parse-numeric-range "^1.3.0" @@ -4349,7 +4477,7 @@ rehype-highlight-code-lines@^1.1.3: rehype-highlight@^7.0.2: version "7.0.2" - resolved "https://registry.npmmirror.com/rehype-highlight/-/rehype-highlight-7.0.2.tgz#997e05e3a336853f6f6b2cfc450c5dad0f960b07" + resolved "https://registry.npmjs.org/rehype-highlight/-/rehype-highlight-7.0.2.tgz#997e05e3a336853f6f6b2cfc450c5dad0f960b07" integrity sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA== dependencies: "@types/hast" "^3.0.0" @@ -4360,7 +4488,7 @@ rehype-highlight@^7.0.2: rehype-recma@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/rehype-recma/-/rehype-recma-1.0.0.tgz#d68ef6344d05916bd96e25400c6261775411aa76" + resolved "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz#d68ef6344d05916bd96e25400c6261775411aa76" integrity sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw== dependencies: "@types/estree" "^1.0.0" @@ -4369,7 +4497,7 @@ rehype-recma@^1.0.0: remark-directive-rehype@^0.4.2: version "0.4.2" - resolved "https://registry.npmmirror.com/remark-directive-rehype/-/remark-directive-rehype-0.4.2.tgz#044040455523a3281a7be7fdde304714d29021b5" + resolved "https://registry.npmjs.org/remark-directive-rehype/-/remark-directive-rehype-0.4.2.tgz#044040455523a3281a7be7fdde304714d29021b5" integrity sha512-T6e+IG+BwqU4++MK54vFb+KDFjs3a+tHeK6E0T0ctR1FSyngolfDtAEzqxHWlRzQZqGi2sB4DFXry6oqH87D/g== dependencies: hastscript "^7.0.2" @@ -4377,7 +4505,7 @@ remark-directive-rehype@^0.4.2: remark-directive@^3.0.1: version "3.0.1" - resolved "https://registry.npmmirror.com/remark-directive/-/remark-directive-3.0.1.tgz#689ba332f156cfe1118e849164cc81f157a3ef0a" + resolved "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.1.tgz#689ba332f156cfe1118e849164cc81f157a3ef0a" integrity sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A== dependencies: "@types/mdast" "^4.0.0" @@ -4387,7 +4515,7 @@ remark-directive@^3.0.1: remark-frontmatter@^5.0.0: version "5.0.0" - resolved "https://registry.npmmirror.com/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz#b68d61552a421ec412c76f4f66c344627dc187a2" + resolved "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz#b68d61552a421ec412c76f4f66c344627dc187a2" integrity sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ== dependencies: "@types/mdast" "^4.0.0" @@ -4397,7 +4525,7 @@ remark-frontmatter@^5.0.0: remark-gfm@^4.0.1: version "4.0.1" - resolved "https://registry.npmmirror.com/remark-gfm/-/remark-gfm-4.0.1.tgz#33227b2a74397670d357bf05c098eaf8513f0d6b" + resolved "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz#33227b2a74397670d357bf05c098eaf8513f0d6b" integrity sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg== dependencies: "@types/mdast" "^4.0.0" @@ -4409,7 +4537,7 @@ remark-gfm@^4.0.1: remark-github-admonitions-to-directives@^2.1.0: version "2.1.0" - resolved 
"https://registry.npmmirror.com/remark-github-admonitions-to-directives/-/remark-github-admonitions-to-directives-2.1.0.tgz#5bdb5deb3e1d6ee7a1427aeea9e27e7b3ea96ff4" + resolved "https://registry.npmjs.org/remark-github-admonitions-to-directives/-/remark-github-admonitions-to-directives-2.1.0.tgz#5bdb5deb3e1d6ee7a1427aeea9e27e7b3ea96ff4" integrity sha512-bI3E4Oj1pKY3ym2IQrrVCdORgEu0+mSrWgpCYFNy8QvytfnLs/nAacVPjkWU/JzDMUiQio2k4nTFP7bsIr9TSA== dependencies: "@types/mdast" "^4.0.0" @@ -4419,27 +4547,27 @@ remark-github-admonitions-to-directives@^2.1.0: remark-heading-id@^1.0.1: version "1.0.1" - resolved "https://registry.npmmirror.com/remark-heading-id/-/remark-heading-id-1.0.1.tgz#5500b8cbc12ba6cc36fc5ccc2a3ff3d6e5cf981e" + resolved "https://registry.npmjs.org/remark-heading-id/-/remark-heading-id-1.0.1.tgz#5500b8cbc12ba6cc36fc5ccc2a3ff3d6e5cf981e" integrity sha512-GmJjuCeEkYvwFlvn/Skjc/1Qafj71412gbQnrwUmP/tKskmAf1cMRlZRNoovV+aIvsSRkTb2rCmGv2b9RdoJbQ== dependencies: lodash "^4.17.21" unist-util-visit "^1.4.0" remark-mdx-frontmatter@^5.0.0: - version "5.0.0" - resolved "https://registry.npmmirror.com/remark-mdx-frontmatter/-/remark-mdx-frontmatter-5.0.0.tgz#22c48c4758963701595082fd89157586caa5a372" - integrity sha512-kI75pshe27TM71R+0iX7C3p4MbGMdygkvSbrk1WYSar88WAwR2JfQilofcDGgDNFAWUo5IwTPyq9XvGpifTwqQ== + version "5.2.0" + resolved "https://registry.npmjs.org/remark-mdx-frontmatter/-/remark-mdx-frontmatter-5.2.0.tgz#fd3738b289248ef9afe84d45767acab8b9e00fcb" + integrity sha512-U/hjUYTkQqNjjMRYyilJgLXSPF65qbLPdoESOkXyrwz2tVyhAnm4GUKhfXqOOS9W34M3545xEMq+aMpHgVjEeQ== dependencies: "@types/mdast" "^4.0.0" - estree-util-is-identifier-name "^3.0.0" estree-util-value-to-estree "^3.0.0" toml "^3.0.0" unified "^11.0.0" + unist-util-mdx-define "^1.0.0" yaml "^2.0.0" remark-mdx@^3.0.0: version "3.1.0" - resolved "https://registry.npmmirror.com/remark-mdx/-/remark-mdx-3.1.0.tgz#f979be729ecb35318fa48e2135c1169607a78343" + resolved "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.0.tgz#f979be729ecb35318fa48e2135c1169607a78343" integrity sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA== dependencies: mdast-util-mdx "^3.0.0" @@ -4447,7 +4575,7 @@ remark-mdx@^3.0.0: remark-parse@^11.0.0: version "11.0.0" - resolved "https://registry.npmmirror.com/remark-parse/-/remark-parse-11.0.0.tgz#aa60743fcb37ebf6b069204eb4da304e40db45a1" + resolved "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz#aa60743fcb37ebf6b069204eb4da304e40db45a1" integrity sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA== dependencies: "@types/mdast" "^4.0.0" @@ -4456,9 +4584,9 @@ remark-parse@^11.0.0: unified "^11.0.0" remark-rehype@^11.0.0: - version "11.1.1" - resolved "https://registry.npmmirror.com/remark-rehype/-/remark-rehype-11.1.1.tgz#f864dd2947889a11997c0a2667cd6b38f685bca7" - integrity sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ== + version "11.1.2" + resolved "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz#2addaadda80ca9bd9aa0da763e74d16327683b37" + integrity sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw== dependencies: "@types/hast" "^3.0.0" "@types/mdast" "^4.0.0" @@ -4468,7 +4596,7 @@ remark-rehype@^11.0.0: remark-stringify@^11.0.0: version "11.0.0" - resolved "https://registry.npmmirror.com/remark-stringify/-/remark-stringify-11.0.0.tgz#4c5b01dd711c269df1aaae11743eb7e2e7636fd3" + resolved 
"https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz#4c5b01dd711c269df1aaae11743eb7e2e7636fd3" integrity sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw== dependencies: "@types/mdast" "^4.0.0" @@ -4482,22 +4610,22 @@ require-directory@^2.1.1: resize-observer-polyfill@^1.5.0: version "1.5.1" - resolved "https://registry.npmmirror.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" + resolved "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg== resolve-from@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + resolved "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== resolve-pkg-maps@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz#616b3dc2c57056b5588c31cdf4b3d64db133720f" + resolved "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz#616b3dc2c57056b5588c31cdf4b3d64db133720f" integrity sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw== resolve@^1.19.0, resolve@^1.22.4: version "1.22.10" - resolved "https://registry.npmmirror.com/resolve/-/resolve-1.22.10.tgz#b663e83ffb09bbf2386944736baae803029b8b39" + resolved "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz#b663e83ffb09bbf2386944736baae803029b8b39" integrity sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w== dependencies: is-core-module "^2.16.0" @@ -4506,7 +4634,7 @@ resolve@^1.19.0, resolve@^1.22.4: resolve@^2.0.0-next.5: version "2.0.0-next.5" - resolved "https://registry.npmmirror.com/resolve/-/resolve-2.0.0-next.5.tgz#6b0ec3107e671e52b68cd068ef327173b90dc03c" + resolved "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz#6b0ec3107e671e52b68cd068ef327173b90dc03c" integrity sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA== dependencies: is-core-module "^2.13.0" @@ -4514,20 +4642,20 @@ resolve@^2.0.0-next.5: supports-preserve-symlinks-flag "^1.0.0" reusify@^1.0.4: - version "1.0.4" - resolved "https://registry.npmmirror.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" - integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + version "1.1.0" + resolved "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz#0fe13b9522e1473f51b558ee796e08f11f9b489f" + integrity sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw== run-parallel@^1.1.9: version "1.2.0" - resolved "https://registry.npmmirror.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + resolved "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== dependencies: queue-microtask "^1.2.2" safe-array-concat@^1.1.3: version "1.1.3" - resolved 
"https://registry.npmmirror.com/safe-array-concat/-/safe-array-concat-1.1.3.tgz#c9e54ec4f603b0bbb8e7e5007a5ee7aecd1538c3" + resolved "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz#c9e54ec4f603b0bbb8e7e5007a5ee7aecd1538c3" integrity sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q== dependencies: call-bind "^1.0.8" @@ -4538,7 +4666,7 @@ safe-array-concat@^1.1.3: safe-push-apply@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/safe-push-apply/-/safe-push-apply-1.0.0.tgz#01850e981c1602d398c85081f360e4e6d03d27f5" + resolved "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz#01850e981c1602d398c85081f360e4e6d03d27f5" integrity sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA== dependencies: es-errors "^1.3.0" @@ -4546,7 +4674,7 @@ safe-push-apply@^1.0.0: safe-regex-test@^1.0.3, safe-regex-test@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/safe-regex-test/-/safe-regex-test-1.1.0.tgz#7f87dfb67a3150782eaaf18583ff5d1711ac10c1" + resolved "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz#7f87dfb67a3150782eaaf18583ff5d1711ac10c1" integrity sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw== dependencies: call-bound "^1.0.2" @@ -4555,17 +4683,17 @@ safe-regex-test@^1.0.3, safe-regex-test@^1.1.0: "safer-buffer@>= 2.1.2 < 3.0.0": version "2.1.2" - resolved "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + resolved "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== -scheduler@^0.25.0: - version "0.25.0" - resolved "https://registry.npmmirror.com/scheduler/-/scheduler-0.25.0.tgz#336cd9768e8cceebf52d3c80e3dcf5de23e7e015" - integrity sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA== +scheduler@^0.26.0: + version "0.26.0" + resolved "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz#4ce8a8c2a2095f13ea11bf9a445be50c555d6337" + integrity sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA== section-matter@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/section-matter/-/section-matter-1.0.0.tgz#e9041953506780ec01d59f292a19c7b850b84167" + resolved "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz#e9041953506780ec01d59f292a19c7b850b84167" integrity sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA== dependencies: extend-shallow "^2.0.1" @@ -4573,27 +4701,22 @@ section-matter@^1.0.0: semver@^6.3.1: version "6.3.1" - resolved "https://registry.npmmirror.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.6.0, semver@^7.6.3: - version "7.7.1" - resolved "https://registry.npmmirror.com/semver/-/semver-7.7.1.tgz#abd5098d82b18c6c81f6074ff2647fd3e7220c9f" - integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA== - -semver@^7.7.2: +semver@^7.6.0, semver@^7.6.3, semver@^7.7.1, semver@^7.7.2: 
version "7.7.2" resolved "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz#67d99fdcd35cec21e6f8b87a7fd515a33f982b58" integrity sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA== server-only@^0.0.1: version "0.0.1" - resolved "https://registry.npmmirror.com/server-only/-/server-only-0.0.1.tgz#0f366bb6afb618c37c9255a314535dc412cd1c9e" + resolved "https://registry.npmjs.org/server-only/-/server-only-0.0.1.tgz#0f366bb6afb618c37c9255a314535dc412cd1c9e" integrity sha512-qepMx2JxAa5jjfzxG79yPPq+8BuFToHd1hm7kI+Z4zAq1ftQiP7HcxMhDDItrbtwVeLg/cY2JnKnrcFkmiswNA== set-function-length@^1.2.2: version "1.2.2" - resolved "https://registry.npmmirror.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + resolved "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== dependencies: define-data-property "^1.1.4" @@ -4605,7 +4728,7 @@ set-function-length@^1.2.2: set-function-name@^2.0.2: version "2.0.2" - resolved "https://registry.npmmirror.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" + resolved "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ== dependencies: define-data-property "^1.1.4" @@ -4615,7 +4738,7 @@ set-function-name@^2.0.2: set-proto@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/set-proto/-/set-proto-1.0.0.tgz#0760dbcff30b2d7e801fd6e19983e56da337565e" + resolved "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz#0760dbcff30b2d7e801fd6e19983e56da337565e" integrity sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw== dependencies: dunder-proto "^1.0.1" @@ -4624,12 +4747,12 @@ set-proto@^1.0.0: shallowequal@^1.0.0: version "1.1.0" - resolved "https://registry.npmmirror.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" + resolved "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== sharp@^0.33.5: version "0.33.5" - resolved "https://registry.npmmirror.com/sharp/-/sharp-0.33.5.tgz#13e0e4130cc309d6a9497596715240b2ec0c594e" + resolved "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz#13e0e4130cc309d6a9497596715240b2ec0c594e" integrity sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw== dependencies: color "^4.2.3" @@ -4658,19 +4781,19 @@ sharp@^0.33.5: shebang-command@^2.0.0: version "2.0.0" - resolved "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== dependencies: shebang-regex "^3.0.0" shebang-regex@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + resolved 
"https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== side-channel-list@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/side-channel-list/-/side-channel-list-1.0.0.tgz#10cb5984263115d3b7a0e336591e290a830af8ad" + resolved "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz#10cb5984263115d3b7a0e336591e290a830af8ad" integrity sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA== dependencies: es-errors "^1.3.0" @@ -4678,7 +4801,7 @@ side-channel-list@^1.0.0: side-channel-map@^1.0.1: version "1.0.1" - resolved "https://registry.npmmirror.com/side-channel-map/-/side-channel-map-1.0.1.tgz#d6bb6b37902c6fef5174e5f533fab4c732a26f42" + resolved "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz#d6bb6b37902c6fef5174e5f533fab4c732a26f42" integrity sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA== dependencies: call-bound "^1.0.2" @@ -4688,7 +4811,7 @@ side-channel-map@^1.0.1: side-channel-weakmap@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz#11dda19d5368e40ce9ec2bdc1fb0ecbc0790ecea" + resolved "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz#11dda19d5368e40ce9ec2bdc1fb0ecbc0790ecea" integrity sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A== dependencies: call-bound "^1.0.2" @@ -4699,7 +4822,7 @@ side-channel-weakmap@^1.0.2: side-channel@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/side-channel/-/side-channel-1.1.0.tgz#c3fcff9c4da932784873335ec9765fa94ff66bc9" + resolved "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz#c3fcff9c4da932784873335ec9765fa94ff66bc9" integrity sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw== dependencies: es-errors "^1.3.0" @@ -4710,14 +4833,14 @@ side-channel@^1.1.0: simple-swizzle@^0.2.2: version "0.2.2" - resolved "https://registry.npmmirror.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" + resolved "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" integrity sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg== dependencies: is-arrayish "^0.3.1" slick-carousel@^1.8.1: version "1.8.1" - resolved "https://registry.npmmirror.com/slick-carousel/-/slick-carousel-1.8.1.tgz#a4bfb29014887bb66ce528b90bd0cda262cc8f8d" + resolved "https://registry.npmjs.org/slick-carousel/-/slick-carousel-1.8.1.tgz#a4bfb29014887bb66ce528b90bd0cda262cc8f8d" integrity sha512-XB9Ftrf2EEKfzoQXt3Nitrt/IPbT+f1fgqBdoxO3W/+JYvtEOW6EgxnWfr9GH6nmULv7Y2tPmEX3koxThVmebA== smart-buffer@^4.2.0: @@ -4735,26 +4858,26 @@ socks-proxy-agent@^8.0.5: socks "^2.8.3" socks@^2.8.3: - version "2.8.4" - resolved "https://registry.npmjs.org/socks/-/socks-2.8.4.tgz#07109755cdd4da03269bda4725baa061ab56d5cc" - integrity sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ== + version "2.8.6" + resolved "https://registry.npmjs.org/socks/-/socks-2.8.6.tgz#e335486a2552f34f932f0c27d8dbb93f2be867aa" + integrity sha512-pe4Y2yzru68lXCb38aAqRf5gvN8YdjP1lok5o0J7BOHljkyCGKVz7H3vpVIXKD27rj2giOJ7DwVyk/GWrPHDWA== 
dependencies: ip-address "^9.0.5" smart-buffer "^4.2.0" source-map-js@^1.0.2: version "1.2.1" - resolved "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + resolved "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== source-map@^0.5.7: version "0.5.7" - resolved "https://registry.npmmirror.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + resolved "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== source-map@^0.7.0: version "0.7.4" - resolved "https://registry.npmmirror.com/source-map/-/source-map-0.7.4.tgz#a9bbe705c9d8846f4e08ff6765acf0f1b0898656" + resolved "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz#a9bbe705c9d8846f4e08ff6765acf0f1b0898656" integrity sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA== source-map@~0.6.1: @@ -4764,7 +4887,7 @@ source-map@~0.6.1: space-separated-tokens@^2.0.0: version "2.0.2" - resolved "https://registry.npmmirror.com/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz#1ecd9d2350a3844572c3f4a312bceb018348859f" + resolved "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz#1ecd9d2350a3844572c3f4a312bceb018348859f" integrity sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q== sprintf-js@^1.1.3: @@ -4774,17 +4897,25 @@ sprintf-js@^1.1.3: sprintf-js@~1.0.2: version "1.0.3" - resolved "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + resolved "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== -stable-hash@^0.0.4: - version "0.0.4" - resolved "https://registry.npmmirror.com/stable-hash/-/stable-hash-0.0.4.tgz#55ae7dadc13e4b3faed13601587cec41859b42f7" - integrity sha512-LjdcbuBeLcdETCrPn9i8AYAZ1eCtu4ECAWtP7UleOiZ9LzVxRzzUZEoZ8zB24nhkQnDWyET0I+3sWokSDS3E7g== +stable-hash@^0.0.5: + version "0.0.5" + resolved "https://registry.npmjs.org/stable-hash/-/stable-hash-0.0.5.tgz#94e8837aaeac5b4d0f631d2972adef2924b40269" + integrity sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA== + +stop-iteration-iterator@^1.1.0: + version "1.1.0" + resolved "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz#f481ff70a548f6124d0312c3aa14cbfa7aa542ad" + integrity sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ== + dependencies: + es-errors "^1.3.0" + internal-slot "^1.1.0" streamsearch@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" + resolved "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" integrity sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg== streamx@^2.15.0, streamx@^2.21.0: @@ -4799,7 +4930,7 @@ streamx@^2.15.0, streamx@^2.21.0: string-convert@^0.2.0: version "0.2.1" - resolved 
"https://registry.npmmirror.com/string-convert/-/string-convert-0.2.1.tgz#6982cc3049fbb4cd85f8b24568b9d9bf39eeff97" + resolved "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz#6982cc3049fbb4cd85f8b24568b9d9bf39eeff97" integrity sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A== string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: @@ -4813,7 +4944,7 @@ string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: string.prototype.includes@^2.0.1: version "2.0.1" - resolved "https://registry.npmmirror.com/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz#eceef21283640761a81dbe16d6c7171a4edf7d92" + resolved "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz#eceef21283640761a81dbe16d6c7171a4edf7d92" integrity sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg== dependencies: call-bind "^1.0.7" @@ -4822,7 +4953,7 @@ string.prototype.includes@^2.0.1: string.prototype.matchall@^4.0.12: version "4.0.12" - resolved "https://registry.npmmirror.com/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz#6c88740e49ad4956b1332a911e949583a275d4c0" + resolved "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz#6c88740e49ad4956b1332a911e949583a275d4c0" integrity sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA== dependencies: call-bind "^1.0.8" @@ -4841,7 +4972,7 @@ string.prototype.matchall@^4.0.12: string.prototype.repeat@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz#e90872ee0308b29435aa26275f6e1b762daee01a" + resolved "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz#e90872ee0308b29435aa26275f6e1b762daee01a" integrity sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w== dependencies: define-properties "^1.1.3" @@ -4849,7 +4980,7 @@ string.prototype.repeat@^1.0.0: string.prototype.trim@^1.2.10: version "1.2.10" - resolved "https://registry.npmmirror.com/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz#40b2dd5ee94c959b4dcfb1d65ce72e90da480c81" + resolved "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz#40b2dd5ee94c959b4dcfb1d65ce72e90da480c81" integrity sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA== dependencies: call-bind "^1.0.8" @@ -4860,9 +4991,9 @@ string.prototype.trim@^1.2.10: es-object-atoms "^1.0.0" has-property-descriptors "^1.0.2" -string.prototype.trimend@^1.0.8, string.prototype.trimend@^1.0.9: +string.prototype.trimend@^1.0.9: version "1.0.9" - resolved "https://registry.npmmirror.com/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz#62e2731272cd285041b36596054e9f66569b6942" + resolved "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz#62e2731272cd285041b36596054e9f66569b6942" integrity sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ== dependencies: call-bind "^1.0.8" @@ -4872,7 +5003,7 @@ string.prototype.trimend@^1.0.8, string.prototype.trimend@^1.0.9: string.prototype.trimstart@^1.0.8: version "1.0.8" - resolved "https://registry.npmmirror.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde" + resolved 
"https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde" integrity sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg== dependencies: call-bind "^1.0.7" @@ -4881,7 +5012,7 @@ string.prototype.trimstart@^1.0.8: stringify-entities@^4.0.0: version "4.0.4" - resolved "https://registry.npmmirror.com/stringify-entities/-/stringify-entities-4.0.4.tgz#b3b79ef5f277cc4ac73caeb0236c5ba939b3a4f3" + resolved "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz#b3b79ef5f277cc4ac73caeb0236c5ba939b3a4f3" integrity sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg== dependencies: character-entities-html4 "^2.0.0" @@ -4896,46 +5027,53 @@ strip-ansi@^6.0.0, strip-ansi@^6.0.1: strip-bom-string@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz#e5211e9224369fbb81d633a2f00044dc8cedad92" + resolved "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz#e5211e9224369fbb81d633a2f00044dc8cedad92" integrity sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g== strip-bom@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + resolved "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" integrity sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA== strip-bom@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878" + resolved "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878" integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w== strip-json-comments@^3.1.1: version "3.1.1" - resolved "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" + resolved "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== -style-to-object@^1.0.0: - version "1.0.8" - resolved "https://registry.npmmirror.com/style-to-object/-/style-to-object-1.0.8.tgz#67a29bca47eaa587db18118d68f9d95955e81292" - integrity sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g== +style-to-js@^1.0.0: + version "1.1.17" + resolved "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.17.tgz#488b1558a8c1fd05352943f088cc3ce376813d83" + integrity sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA== + dependencies: + style-to-object "1.0.9" + +style-to-object@1.0.9: + version "1.0.9" + resolved "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.9.tgz#35c65b713f4a6dba22d3d0c61435f965423653f0" + integrity sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw== dependencies: inline-style-parser "0.2.4" styled-jsx@5.1.6: version "5.1.6" - resolved "https://registry.npmmirror.com/styled-jsx/-/styled-jsx-5.1.6.tgz#83b90c077e6c6a80f7f5e8781d0f311b2fe41499" + resolved 
"https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz#83b90c077e6c6a80f7f5e8781d0f311b2fe41499" integrity sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA== dependencies: client-only "0.0.1" stylis@4.2.0: version "4.2.0" - resolved "https://registry.npmmirror.com/stylis/-/stylis-4.2.0.tgz#79daee0208964c8fe695a42fcffcac633a211a51" + resolved "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz#79daee0208964c8fe695a42fcffcac633a211a51" integrity sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw== subscribe-ui-event@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/subscribe-ui-event/-/subscribe-ui-event-3.0.0.tgz#18f7c3e87a5007bc79550b04d9f0f596277c20e4" + resolved "https://registry.npmjs.org/subscribe-ui-event/-/subscribe-ui-event-3.0.0.tgz#18f7c3e87a5007bc79550b04d9f0f596277c20e4" integrity sha512-Rgop/8yYQjgK47HsPKSxGLJnL/n2ejtAl0PAW9Bvt1kt+pEDN9SB3RP5JVUPjjuYZoFcGb3MOFNIf06uIzrazg== dependencies: eventemitter3 "^5.0.0" @@ -4943,25 +5081,20 @@ subscribe-ui-event@^3.0.0: supports-color@^7.1.0: version "7.2.0" - resolved "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + resolved "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== dependencies: has-flag "^4.0.0" supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" - resolved "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== -tapable@^2.2.0: - version "2.2.1" - resolved "https://registry.npmmirror.com/tapable/-/tapable-2.2.1.tgz#1967a73ef4060a82f12ab96af86d52fdb76eeca0" - integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ== - tar-fs@^3.0.8: - version "3.0.9" - resolved "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.9.tgz#d570793c6370d7078926c41fa422891566a0b617" - integrity sha512-XF4w9Xp+ZQgifKakjZYmFdkLoSWd34VGKcsTCwlNWM7QG3ZbaxnTsaBwnjFZqHRf/rROxaR8rXnbtwdvaDI+lA== + version "3.1.0" + resolved "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.0.tgz#4675e2254d81410e609d91581a762608de999d25" + integrity sha512-5Mty5y/sOF1YWj1J6GiBodjlDc05CUR8PKXrsnFAiSG0xA+GHeWLovaZPYUDXkH/1iKRf2+M5+OrRgzC7O9b7w== dependencies: pump "^3.0.0" tar-stream "^3.1.5" @@ -4985,49 +5118,49 @@ text-decoder@^1.1.0: dependencies: b4a "^1.6.4" -tinyglobby@^0.2.10: - version "0.2.11" - resolved "https://registry.npmmirror.com/tinyglobby/-/tinyglobby-0.2.11.tgz#9182cff655a0e272aad850d1a84c5e8e0f700426" - integrity sha512-32TmKeeKUahv0Go8WmQgiEp9Y21NuxjwjqiRC1nrUB51YacfSwuB44xgXD+HdIppmMRgjQNPdrHyA6vIybYZ+g== +tinyglobby@^0.2.13: + version "0.2.14" + resolved "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz#5280b0cf3f972b050e74ae88406c0a6a58f4079d" + integrity sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ== dependencies: - fdir "^6.4.3" + fdir "^6.4.4" picomatch "^4.0.2" to-regex-range@^5.0.1: version "5.0.1" - resolved 
"https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + resolved "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== dependencies: is-number "^7.0.0" toggle-selection@^1.0.6: version "1.0.6" - resolved "https://registry.npmmirror.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" + resolved "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" integrity sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ== toml@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/toml/-/toml-3.0.0.tgz#342160f1af1904ec9d204d03a5d61222d762c5ee" + resolved "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz#342160f1af1904ec9d204d03a5d61222d762c5ee" integrity sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w== trim-lines@^3.0.0: version "3.0.1" - resolved "https://registry.npmmirror.com/trim-lines/-/trim-lines-3.0.1.tgz#d802e332a07df861c48802c04321017b1bd87338" + resolved "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz#d802e332a07df861c48802c04321017b1bd87338" integrity sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg== trough@^2.0.0: version "2.2.0" - resolved "https://registry.npmmirror.com/trough/-/trough-2.2.0.tgz#94a60bd6bd375c152c1df911a4b11d5b0256f50f" + resolved "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz#94a60bd6bd375c152c1df911a4b11d5b0256f50f" integrity sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw== -ts-api-utils@^2.0.1: - version "2.0.1" - resolved "https://registry.npmmirror.com/ts-api-utils/-/ts-api-utils-2.0.1.tgz#660729385b625b939aaa58054f45c058f33f10cd" - integrity sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w== +ts-api-utils@^2.1.0: + version "2.1.0" + resolved "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz#595f7094e46eed364c13fd23e75f9513d29baf91" + integrity sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ== tsconfig-paths@^3.15.0: version "3.15.0" - resolved "https://registry.npmmirror.com/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz#5299ec605e55b1abb23ec939ef15edaf483070d4" + resolved "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz#5299ec605e55b1abb23ec939ef15edaf483070d4" integrity sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg== dependencies: "@types/json5" "^0.0.29" @@ -5037,19 +5170,19 @@ tsconfig-paths@^3.15.0: tslib@^2.0.1, tslib@^2.4.0, tslib@^2.8.0: version "2.8.1" - resolved "https://registry.npmmirror.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" + resolved "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== type-check@^0.4.0, type-check@~0.4.0: version "0.4.0" - resolved "https://registry.npmmirror.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" + resolved "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" 
integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== dependencies: prelude-ls "^1.2.1" typed-array-buffer@^1.0.3: version "1.0.3" - resolved "https://registry.npmmirror.com/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz#a72395450a4869ec033fd549371b47af3a2ee536" + resolved "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz#a72395450a4869ec033fd549371b47af3a2ee536" integrity sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw== dependencies: call-bound "^1.0.3" @@ -5058,7 +5191,7 @@ typed-array-buffer@^1.0.3: typed-array-byte-length@^1.0.3: version "1.0.3" - resolved "https://registry.npmmirror.com/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz#8407a04f7d78684f3d252aa1a143d2b77b4160ce" + resolved "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz#8407a04f7d78684f3d252aa1a143d2b77b4160ce" integrity sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg== dependencies: call-bind "^1.0.8" @@ -5069,7 +5202,7 @@ typed-array-byte-length@^1.0.3: typed-array-byte-offset@^1.0.4: version "1.0.4" - resolved "https://registry.npmmirror.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz#ae3698b8ec91a8ab945016108aef00d5bff12355" + resolved "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz#ae3698b8ec91a8ab945016108aef00d5bff12355" integrity sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ== dependencies: available-typed-arrays "^1.0.7" @@ -5082,7 +5215,7 @@ typed-array-byte-offset@^1.0.4: typed-array-length@^1.0.7: version "1.0.7" - resolved "https://registry.npmmirror.com/typed-array-length/-/typed-array-length-1.0.7.tgz#ee4deff984b64be1e118b0de8c9c877d5ce73d3d" + resolved "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz#ee4deff984b64be1e118b0de8c9c877d5ce73d3d" integrity sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg== dependencies: call-bind "^1.0.7" @@ -5098,13 +5231,13 @@ typed-query-selector@^2.12.0: integrity sha512-SbklCd1F0EiZOyPiW192rrHZzZ5sBijB6xM+cpmrwDqObvdtunOHHIk9fCGsoK5JVIYXoyEp4iEdE3upFH3PAg== typescript@^5: - version "5.7.3" - resolved "https://registry.npmmirror.com/typescript/-/typescript-5.7.3.tgz#919b44a7dbb8583a9b856d162be24a54bf80073e" - integrity sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw== + version "5.8.3" + resolved "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz#92f8a3e5e3cf497356f4178c34cd65a7f5e8440e" + integrity sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ== unbox-primitive@^1.1.0: version "1.1.0" - resolved "https://registry.npmmirror.com/unbox-primitive/-/unbox-primitive-1.1.0.tgz#8d9d2c9edeea8460c7f35033a88867944934d1e2" + resolved "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz#8d9d2c9edeea8460c7f35033a88867944934d1e2" integrity sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw== dependencies: call-bound "^1.0.3" @@ -5112,24 +5245,24 @@ unbox-primitive@^1.1.0: has-symbols "^1.1.0" which-boxed-primitive "^1.1.1" -undici-types@~6.19.2: - version "6.19.8" - resolved "https://registry.npmmirror.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" - integrity 
sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== - undici-types@~6.21.0: version "6.21.0" resolved "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb" integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ== -undici@^6.19.5: - version "6.21.1" - resolved "https://registry.npmmirror.com/undici/-/undici-6.21.1.tgz#336025a14162e6837e44ad7b819b35b6c6af0e05" - integrity sha512-q/1rj5D0/zayJB2FraXdaWxbhWiNKDvu8naDT2dl1yTlvJp4BLtOcp2a5BvgGNQpYYJzau7tf1WgKv3b+7mqpQ== +undici-types@~7.8.0: + version "7.8.0" + resolved "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz#de00b85b710c54122e44fbfd911f8d70174cd294" + integrity sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw== + +undici@^7.10.0: + version "7.11.0" + resolved "https://registry.npmjs.org/undici/-/undici-7.11.0.tgz#8e13a54f62afa756666c0590c38b3866e286d0b3" + integrity sha512-heTSIac3iLhsmZhUCjyS3JQEkZELateufzZuBaVM5RHXdSBMb1LPMQf5x+FH7qjsZYDP0ttAc3nnVpUB+wYbOg== unified@^11.0.0: version "11.0.5" - resolved "https://registry.npmmirror.com/unified/-/unified-11.0.5.tgz#f66677610a5c0a9ee90cab2b8d4d66037026d9e1" + resolved "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz#f66677610a5c0a9ee90cab2b8d4d66037026d9e1" integrity sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA== dependencies: "@types/unist" "^3.0.0" @@ -5142,12 +5275,12 @@ unified@^11.0.0: uniqolor@^1.1.1: version "1.1.1" - resolved "https://registry.npmmirror.com/uniqolor/-/uniqolor-1.1.1.tgz#ef35e41d930d31b90228a18d82dd151b8106b2f3" + resolved "https://registry.npmjs.org/uniqolor/-/uniqolor-1.1.1.tgz#ef35e41d930d31b90228a18d82dd151b8106b2f3" integrity sha512-HUwezlXCwm5bzsEXW7AP7ybezH13uWENRgYT+3dOdhJPvpYucSqvIGckMiLn+Uy2j0NVf3fPp43uZ4aun3t4Ww== unist-util-find-after@^5.0.0: version "5.0.0" - resolved "https://registry.npmmirror.com/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz#3fccc1b086b56f34c8b798e1ff90b5c54468e896" + resolved "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz#3fccc1b086b56f34c8b798e1ff90b5c54468e896" integrity sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ== dependencies: "@types/unist" "^3.0.0" @@ -5155,54 +5288,67 @@ unist-util-find-after@^5.0.0: unist-util-is@^3.0.0: version "3.0.0" - resolved "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-3.0.0.tgz#d9e84381c2468e82629e4a5be9d7d05a2dd324cd" + resolved "https://registry.npmjs.org/unist-util-is/-/unist-util-is-3.0.0.tgz#d9e84381c2468e82629e4a5be9d7d05a2dd324cd" integrity sha512-sVZZX3+kspVNmLWBPAB6r+7D9ZgAFPNWm66f7YNb420RlQSbn+n8rG8dGZSkrER7ZIXGQYNm5pqC3v3HopH24A== unist-util-is@^6.0.0: version "6.0.0" - resolved "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-6.0.0.tgz#b775956486aff107a9ded971d996c173374be424" + resolved "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz#b775956486aff107a9ded971d996c173374be424" integrity sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw== dependencies: "@types/unist" "^3.0.0" unist-util-map@^3.1.2: version "3.1.3" - resolved "https://registry.npmmirror.com/unist-util-map/-/unist-util-map-3.1.3.tgz#5ae708cafb8400439f0a8e590a20be05223f78e1" + resolved 
"https://registry.npmjs.org/unist-util-map/-/unist-util-map-3.1.3.tgz#5ae708cafb8400439f0a8e590a20be05223f78e1" integrity sha512-4/mDauoxqZ6geK97lJ6n2kDk6JK88Vh+hWMSJqyaaP/7eqN1dDhjcjnNxKNm3YU6Sw7PVJtcFMUbnmHvYzb6Vg== dependencies: "@types/unist" "^2.0.0" +unist-util-mdx-define@^1.0.0: + version "1.1.2" + resolved "https://registry.npmjs.org/unist-util-mdx-define/-/unist-util-mdx-define-1.1.2.tgz#f25c46242ee5d1785ec5d2cd9cc4950cae72a05c" + integrity sha512-9ncH7i7TN5Xn7/tzX5bE3rXgz1X/u877gYVAUB3mLeTKYJmQHmqKTDBi6BTGXV7AeolBCI9ErcVsOt2qryoD0g== + dependencies: + "@types/estree" "^1.0.0" + "@types/hast" "^3.0.0" + "@types/mdast" "^4.0.0" + estree-util-is-identifier-name "^3.0.0" + estree-util-scope "^1.0.0" + estree-walker "^3.0.0" + vfile "^6.0.0" + unist-util-position-from-estree@^2.0.0: version "2.0.0" - resolved "https://registry.npmmirror.com/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz#d94da4df596529d1faa3de506202f0c9a23f2200" + resolved "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz#d94da4df596529d1faa3de506202f0c9a23f2200" integrity sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ== dependencies: "@types/unist" "^3.0.0" unist-util-position@^5.0.0: version "5.0.0" - resolved "https://registry.npmmirror.com/unist-util-position/-/unist-util-position-5.0.0.tgz#678f20ab5ca1207a97d7ea8a388373c9cf896be4" + resolved "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz#678f20ab5ca1207a97d7ea8a388373c9cf896be4" integrity sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA== dependencies: "@types/unist" "^3.0.0" unist-util-stringify-position@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz#449c6e21a880e0855bf5aabadeb3a740314abac2" + resolved "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz#449c6e21a880e0855bf5aabadeb3a740314abac2" integrity sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ== dependencies: "@types/unist" "^3.0.0" unist-util-visit-parents@^2.0.0: version "2.1.2" - resolved "https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-2.1.2.tgz#25e43e55312166f3348cae6743588781d112c1e9" + resolved "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-2.1.2.tgz#25e43e55312166f3348cae6743588781d112c1e9" integrity sha512-DyN5vD4NE3aSeB+PXYNKxzGsfocxp6asDc2XXE3b0ekO2BaRUpBicbbUygfSvYfUz1IkmjFR1YF7dPklraMZ2g== dependencies: unist-util-is "^3.0.0" unist-util-visit-parents@^6.0.0: version "6.0.1" - resolved "https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz#4d5f85755c3b8f0dc69e21eca5d6d82d22162815" + resolved "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz#4d5f85755c3b8f0dc69e21eca5d6d82d22162815" integrity sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw== dependencies: "@types/unist" "^3.0.0" @@ -5210,30 +5356,57 @@ unist-util-visit-parents@^6.0.0: unist-util-visit@^1.4.0: version "1.4.1" - resolved "https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-1.4.1.tgz#4724aaa8486e6ee6e26d7ff3c8685960d560b1e3" + resolved 
"https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-1.4.1.tgz#4724aaa8486e6ee6e26d7ff3c8685960d560b1e3" integrity sha512-AvGNk7Bb//EmJZyhtRUnNMEpId/AZ5Ph/KUpTI09WHQuDZHKovQ1oEv3mfmKpWKtoMzyMC4GLBm1Zy5k12fjIw== dependencies: unist-util-visit-parents "^2.0.0" unist-util-visit@^5.0.0: version "5.0.0" - resolved "https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz#a7de1f31f72ffd3519ea71814cccf5fd6a9217d6" + resolved "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz#a7de1f31f72ffd3519ea71814cccf5fd6a9217d6" integrity sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg== dependencies: "@types/unist" "^3.0.0" unist-util-is "^6.0.0" unist-util-visit-parents "^6.0.0" +unrs-resolver@^1.6.2: + version "1.11.1" + resolved "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.11.1.tgz#be9cd8686c99ef53ecb96df2a473c64d304048a9" + integrity sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg== + dependencies: + napi-postinstall "^0.3.0" + optionalDependencies: + "@unrs/resolver-binding-android-arm-eabi" "1.11.1" + "@unrs/resolver-binding-android-arm64" "1.11.1" + "@unrs/resolver-binding-darwin-arm64" "1.11.1" + "@unrs/resolver-binding-darwin-x64" "1.11.1" + "@unrs/resolver-binding-freebsd-x64" "1.11.1" + "@unrs/resolver-binding-linux-arm-gnueabihf" "1.11.1" + "@unrs/resolver-binding-linux-arm-musleabihf" "1.11.1" + "@unrs/resolver-binding-linux-arm64-gnu" "1.11.1" + "@unrs/resolver-binding-linux-arm64-musl" "1.11.1" + "@unrs/resolver-binding-linux-ppc64-gnu" "1.11.1" + "@unrs/resolver-binding-linux-riscv64-gnu" "1.11.1" + "@unrs/resolver-binding-linux-riscv64-musl" "1.11.1" + "@unrs/resolver-binding-linux-s390x-gnu" "1.11.1" + "@unrs/resolver-binding-linux-x64-gnu" "1.11.1" + "@unrs/resolver-binding-linux-x64-musl" "1.11.1" + "@unrs/resolver-binding-wasm32-wasi" "1.11.1" + "@unrs/resolver-binding-win32-arm64-msvc" "1.11.1" + "@unrs/resolver-binding-win32-ia32-msvc" "1.11.1" + "@unrs/resolver-binding-win32-x64-msvc" "1.11.1" + uri-js@^4.2.2: version "4.4.1" - resolved "https://registry.npmmirror.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + resolved "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== dependencies: punycode "^2.1.0" vfile-message@^4.0.0: version "4.0.2" - resolved "https://registry.npmmirror.com/vfile-message/-/vfile-message-4.0.2.tgz#c883c9f677c72c166362fd635f21fc165a7d1181" + resolved "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz#c883c9f677c72c166362fd635f21fc165a7d1181" integrity sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw== dependencies: "@types/unist" "^3.0.0" @@ -5241,7 +5414,7 @@ vfile-message@^4.0.0: vfile@^6.0.0: version "6.0.3" - resolved "https://registry.npmmirror.com/vfile/-/vfile-6.0.3.tgz#3652ab1c496531852bf55a6bac57af981ebc38ab" + resolved "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz#3652ab1c496531852bf55a6bac57af981ebc38ab" integrity sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q== dependencies: "@types/unist" "^3.0.0" @@ -5249,19 +5422,19 @@ vfile@^6.0.0: whatwg-encoding@^3.1.1: version "3.1.1" - resolved "https://registry.npmmirror.com/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5" + 
resolved "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5" integrity sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ== dependencies: iconv-lite "0.6.3" whatwg-mimetype@^4.0.0: version "4.0.0" - resolved "https://registry.npmmirror.com/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz#bc1bf94a985dc50388d54a9258ac405c3ca2fc0a" + resolved "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz#bc1bf94a985dc50388d54a9258ac405c3ca2fc0a" integrity sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg== which-boxed-primitive@^1.1.0, which-boxed-primitive@^1.1.1: version "1.1.1" - resolved "https://registry.npmmirror.com/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz#d76ec27df7fa165f18d5808374a5fe23c29b176e" + resolved "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz#d76ec27df7fa165f18d5808374a5fe23c29b176e" integrity sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA== dependencies: is-bigint "^1.1.0" @@ -5272,7 +5445,7 @@ which-boxed-primitive@^1.1.0, which-boxed-primitive@^1.1.1: which-builtin-type@^1.2.1: version "1.2.1" - resolved "https://registry.npmmirror.com/which-builtin-type/-/which-builtin-type-1.2.1.tgz#89183da1b4907ab089a6b02029cc5d8d6574270e" + resolved "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz#89183da1b4907ab089a6b02029cc5d8d6574270e" integrity sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q== dependencies: call-bound "^1.0.2" @@ -5291,7 +5464,7 @@ which-builtin-type@^1.2.1: which-collection@^1.0.2: version "1.0.2" - resolved "https://registry.npmmirror.com/which-collection/-/which-collection-1.0.2.tgz#627ef76243920a107e7ce8e96191debe4b16c2a0" + resolved "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz#627ef76243920a107e7ce8e96191debe4b16c2a0" integrity sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw== dependencies: is-map "^2.0.3" @@ -5299,28 +5472,29 @@ which-collection@^1.0.2: is-weakmap "^2.0.2" is-weakset "^2.0.3" -which-typed-array@^1.1.16, which-typed-array@^1.1.18: - version "1.1.18" - resolved "https://registry.npmmirror.com/which-typed-array/-/which-typed-array-1.1.18.tgz#df2389ebf3fbb246a71390e90730a9edb6ce17ad" - integrity sha512-qEcY+KJYlWyLH9vNbsr6/5j59AXk5ni5aakf8ldzBvGde6Iz4sxZGkJyWSAueTG7QhOvNRYb1lDdFmL5Td0QKA== +which-typed-array@^1.1.16, which-typed-array@^1.1.19: + version "1.1.19" + resolved "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz#df03842e870b6b88e117524a4b364b6fc689f956" + integrity sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw== dependencies: available-typed-arrays "^1.0.7" call-bind "^1.0.8" - call-bound "^1.0.3" - for-each "^0.3.3" + call-bound "^1.0.4" + for-each "^0.3.5" + get-proto "^1.0.1" gopd "^1.2.0" has-tostringtag "^1.0.2" which@^2.0.1: version "2.0.2" - resolved "https://registry.npmmirror.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + resolved "https://registry.npmjs.org/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== dependencies: isexe "^2.0.0" word-wrap@^1.2.5: version "1.2.5" - resolved 
"https://registry.npmmirror.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34" + resolved "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34" integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== wrap-ansi@^7.0.0: @@ -5337,10 +5511,10 @@ wrappy@1: resolved "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== -ws@^8.18.2: - version "8.18.2" - resolved "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz#42738b2be57ced85f46154320aabb51ab003705a" - integrity sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ== +ws@^8.18.3: + version "8.18.3" + resolved "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz#b56b88abffde62791c639170400c93dcb0c95472" + integrity sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg== y18n@^5.0.5: version "5.0.8" @@ -5349,13 +5523,13 @@ y18n@^5.0.5: yaml@^1.10.0: version "1.10.2" - resolved "https://registry.npmmirror.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + resolved "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== yaml@^2.0.0: - version "2.7.0" - resolved "https://registry.npmmirror.com/yaml/-/yaml-2.7.0.tgz#aef9bb617a64c937a9a748803786ad8d3ffe1e98" - integrity sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA== + version "2.8.0" + resolved "https://registry.npmjs.org/yaml/-/yaml-2.8.0.tgz#15f8c9866211bdc2d3781a0890e44d4fa1a5fff6" + integrity sha512-4lLa/EcQCB0cJkyts+FpIRx5G/llPxfP6VQU5KByHEhLxY3IJCH0f0Hy1MHI8sClTvsIb8qwRJ6R/ZdlDJ/leQ== yargs-parser@^21.1.1: version "21.1.1" @@ -5377,7 +5551,7 @@ yargs@^17.7.2: yarn@^1.22.22: version "1.22.22" - resolved "https://registry.npmmirror.com/yarn/-/yarn-1.22.22.tgz#ac34549e6aa8e7ead463a7407e1c7390f61a6610" + resolved "https://registry.npmjs.org/yarn/-/yarn-1.22.22.tgz#ac34549e6aa8e7ead463a7407e1c7390f61a6610" integrity sha512-prL3kGtyG7o9Z9Sv8IPfBNrWTDmXB4Qbes8A9rEzt6wkJV8mUvoirjU0Mp3GGAU06Y0XQyA3/2/RQFVuK7MTfg== yauzl@^2.10.0: @@ -5390,20 +5564,20 @@ yauzl@^2.10.0: yocto-queue@^0.1.0: version "0.1.0" - resolved "https://registry.npmmirror.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + resolved "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== zod@^3.24.1: - version "3.25.56" - resolved "https://registry.npmjs.org/zod/-/zod-3.25.56.tgz#4cff0340b7cd3778314dac1e74d6a663ffeef914" - integrity sha512-rd6eEF3BTNvQnR2e2wwolfTmUTnp70aUTqr0oaGbHifzC3BKJsoV+Gat8vxUMR1hwOKBs6El+qWehrHbCpW6SQ== + version "3.25.76" + resolved "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz#26841c3f6fd22a6a2760e7ccb719179768471e34" + integrity sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ== zustand@^5.0.5: - version "5.0.5" - resolved "https://registry.npmmirror.com/zustand/-/zustand-5.0.5.tgz#3e236f6a953142d975336d179bc735d97db17e84" - integrity sha512-mILtRfKW9xM47hqxGIxCv12gXusoY/xTSHBYApXozR0HmQv299whhBeeAcRy+KrPPybzosvJBCOmVjq6x12fCg== + 
version "5.0.6" + resolved "https://registry.npmjs.org/zustand/-/zustand-5.0.6.tgz#a2da43d8dc3d31e314279e5baec06297bea70a5c" + integrity sha512-ihAqNeUVhe0MAD+X8M5UzqyZ9k3FFZLBTtqo6JLPwV53cbRB/mJwBI0PxcIgqhBBHlEs8G45OTDTMq3gNcLq3A== zwitch@^2.0.0: version "2.0.4" - resolved "https://registry.npmmirror.com/zwitch/-/zwitch-2.0.4.tgz#c827d4b0acb76fc3e685a4c6ec2902d51070e9d7" + resolved "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz#c827d4b0acb76fc3e685a4c6ec2902d51070e9d7" integrity sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A== From 4ae10e5dc39d795b8b46477e0a2229a7fc58e3cf Mon Sep 17 00:00:00 2001 From: Shanshan Date: Wed, 16 Jul 2025 13:09:23 +0800 Subject: [PATCH 2/2] chore: translate blogs/userdocs and update script --- blogs/en/redis-on-kb.mdx | 76 +- ...ptimizing-PG-performance-on-kubeblocks.mdx | 545 + blogs/zh/announcing-kubeblocks-v0-7-0.mdx | 69 + blogs/zh/announcing-kubeblocks-v0-8-0.mdx | 105 + blogs/zh/announcing-kubeblocks-v0-9-0.mdx | 149 + blogs/zh/announcing-kubeblocks-v0-9-1.mdx | 96 + blogs/zh/announcing-kubeblocks-v0-9-2.mdx | 54 + blogs/zh/announcing-kubeblocks-v0-9-3.mdx | 82 + blogs/zh/announcing-kubeblocks-v0-9-4.mdx | 73 + blogs/zh/announcing-kubeblocks-v1-0-0.mdx | 189 + ...ommunity-monthly-report-for-april-2024.mdx | 73 + ...unity-monthly-report-for-february-2024.mdx | 61 + ...munity-monthly-report-for-january-2024.mdx | 60 + ...ommunity-monthly-report-for-march-2024.mdx | 61 + .../community-monthly-report-for-may-2024.mdx | 59 + blogs/zh/deploy-harbor-on-kubeblocks.mdx | 366 + blogs/zh/deploy-wordpress-on-kubeblocks.mdx | 214 + blogs/zh/dify-on-kb.mdx | 373 + ...on-affect-the-performance-of-databases.mdx | 502 + ...to-significant-performance-degradation.mdx | 184 + ...to-fix-pod-stuck-in-terminating-status.mdx | 272 + ...-clusters-without-a-dedicated-operator.mdx | 195 + blogs/zh/in-place-updates.mdx | 69 + blogs/zh/instance-template.mdx | 249 + blogs/zh/instanceset-introduction.mdx | 333 + blogs/zh/is-k8s-a-database.mdx | 127 + blogs/zh/kubeblocks-on-kata.mdx | 329 + ...mangage-6k-db-instance-with-kubeblocks.mdx | 184 + ...dis-in-kuaishou-from-bare-metal-to-k8s.mdx | 233 + blogs/zh/moodle-in-kubeblocks-windows.mdx | 258 + blogs/zh/redis-containerization.mdx | 91 + blogs/zh/redis-on-kb.mdx | 602 + ...rom-leading-chinese-internet-companies.mdx | 126 + ...-k8s-kuaishou-solution-with-kubeblocks.mdx | 197 + blogs/zh/take-specified-instances-offline.mdx | 110 + ...to-build-your-aigc-infra-on-amazon-eks.mdx | 570 + docs/zh/preview/cli/cli.mdx | 203 + docs/zh/preview/cli/kbcli.mdx | 74 + docs/zh/preview/cli/kbcli_addon.mdx | 52 + docs/zh/preview/cli/kbcli_addon_describe.mdx | 46 + docs/zh/preview/cli/kbcli_addon_disable.mdx | 63 + docs/zh/preview/cli/kbcli_addon_enable.mdx | 94 + docs/zh/preview/cli/kbcli_addon_index.mdx | 50 + docs/zh/preview/cli/kbcli_addon_index_add.mdx | 56 + .../preview/cli/kbcli_addon_index_delete.mdx | 50 + .../zh/preview/cli/kbcli_addon_index_list.mdx | 53 + .../preview/cli/kbcli_addon_index_update.mdx | 57 + docs/zh/preview/cli/kbcli_addon_install.mdx | 74 + docs/zh/preview/cli/kbcli_addon_list.mdx | 51 + docs/zh/preview/cli/kbcli_addon_purge.mdx | 63 + docs/zh/preview/cli/kbcli_addon_search.mdx | 60 + docs/zh/preview/cli/kbcli_addon_uninstall.mdx | 57 + docs/zh/preview/cli/kbcli_addon_upgrade.mdx | 79 + docs/zh/preview/cli/kbcli_backuprepo.mdx | 48 + .../preview/cli/kbcli_backuprepo_create.mdx | 90 + .../preview/cli/kbcli_backuprepo_delete.mdx | 53 + 
.../preview/cli/kbcli_backuprepo_describe.mdx | 53 + ...kbcli_backuprepo_list-storage-provider.mdx | 56 + docs/zh/preview/cli/kbcli_backuprepo_list.mdx | 57 + .../preview/cli/kbcli_backuprepo_update.mdx | 60 + docs/zh/preview/cli/kbcli_cluster.mdx | 86 + docs/zh/preview/cli/kbcli_cluster_backup.mdx | 71 + .../preview/cli/kbcli_cluster_cancel-ops.mdx | 54 + .../preview/cli/kbcli_cluster_configure.mdx | 71 + docs/zh/preview/cli/kbcli_cluster_connect.mdx | 65 + .../cli/kbcli_cluster_convert-to-v1.mdx | 58 + docs/zh/preview/cli/kbcli_cluster_create.mdx | 68 + .../kbcli_cluster_create_apecloud-mysql.mdx | 86 + .../preview/cli/kbcli_cluster_create_etcd.mdx | 74 + .../cli/kbcli_cluster_create_kafka.mdx | 89 + .../cli/kbcli_cluster_create_mongodb.mdx | 74 + .../cli/kbcli_cluster_create_mysql.mdx | 78 + .../cli/kbcli_cluster_create_oriol.mdx | 79 + .../cli/kbcli_cluster_create_postgresql.mdx | 72 + .../cli/kbcli_cluster_create_qdrant.mdx | 72 + .../cli/kbcli_cluster_create_rabbitmq.mdx | 73 + .../cli/kbcli_cluster_create_redis.mdx | 92 + .../preview/cli/kbcli_cluster_create_tidb.mdx | 75 + .../preview/cli/kbcli_cluster_custom-ops.mdx | 68 + .../kbcli_cluster_custom-ops_kafka-quota.mdx | 67 + .../kbcli_cluster_custom-ops_kafka-topic.mdx | 74 + ...bcli_cluster_custom-ops_kafka-user-acl.mdx | 73 + ...li_cluster_custom-ops_mogdb-switchover.mdx | 65 + ...custom-ops_post-rebuild-for-clickhouse.mdx | 63 + ...ps_post-scale-out-shard-for-clickhouse.mdx | 61 + .../cli/kbcli_cluster_delete-backup.mdx | 60 + .../preview/cli/kbcli_cluster_delete-ops.mdx | 63 + docs/zh/preview/cli/kbcli_cluster_delete.mdx | 65 + .../kbcli_cluster_describe-backup-policy.mdx | 57 + .../cli/kbcli_cluster_describe-backup.mdx | 57 + .../cli/kbcli_cluster_describe-config.mdx | 65 + .../cli/kbcli_cluster_describe-ops.mdx | 53 + .../cli/kbcli_cluster_describe-restore.mdx | 53 + .../zh/preview/cli/kbcli_cluster_describe.mdx | 53 + .../preview/cli/kbcli_cluster_diff-config.mdx | 53 + .../cli/kbcli_cluster_edit-backup-policy.mdx | 53 + .../preview/cli/kbcli_cluster_edit-config.mdx | 67 + .../cli/kbcli_cluster_explain-config.mdx | 67 + docs/zh/preview/cli/kbcli_cluster_expose.mdx | 71 + docs/zh/preview/cli/kbcli_cluster_label.mdx | 73 + .../kbcli_cluster_list-backup-policies.mdx | 61 + .../cli/kbcli_cluster_list-backups.mdx | 64 + .../cli/kbcli_cluster_list-components.mdx | 58 + .../preview/cli/kbcli_cluster_list-events.mdx | 58 + .../cli/kbcli_cluster_list-instances.mdx | 58 + .../preview/cli/kbcli_cluster_list-logs.mdx | 61 + .../zh/preview/cli/kbcli_cluster_list-ops.mdx | 63 + .../cli/kbcli_cluster_list-restores.mdx | 64 + docs/zh/preview/cli/kbcli_cluster_list.mdx | 70 + docs/zh/preview/cli/kbcli_cluster_logs.mdx | 92 + docs/zh/preview/cli/kbcli_cluster_promote.mdx | 62 + .../cli/kbcli_cluster_rebuild-instance.mdx | 75 + .../zh/preview/cli/kbcli_cluster_register.mdx | 64 + docs/zh/preview/cli/kbcli_cluster_restart.mdx | 64 + docs/zh/preview/cli/kbcli_cluster_restore.mdx | 63 + .../zh/preview/cli/kbcli_cluster_scale-in.mdx | 69 + .../preview/cli/kbcli_cluster_scale-out.mdx | 69 + docs/zh/preview/cli/kbcli_cluster_start.mdx | 63 + docs/zh/preview/cli/kbcli_cluster_stop.mdx | 64 + docs/zh/preview/cli/kbcli_cluster_update.mdx | 103 + .../cli/kbcli_cluster_upgrade-to-v1.mdx | 58 + docs/zh/preview/cli/kbcli_cluster_upgrade.mdx | 69 + .../cli/kbcli_cluster_volume-expand.mdx | 63 + docs/zh/preview/cli/kbcli_cluster_vscale.mdx | 67 + .../preview/cli/kbcli_clusterdefinition.mdx | 44 + .../cli/kbcli_clusterdefinition_describe.mdx | 53 + 
.../cli/kbcli_clusterdefinition_list.mdx | 56 + .../preview/cli/kbcli_componentdefinition.mdx | 44 + .../kbcli_componentdefinition_describe.mdx | 53 + .../cli/kbcli_componentdefinition_list.mdx | 59 + .../zh/preview/cli/kbcli_componentversion.mdx | 44 + .../cli/kbcli_componentversion_describe.mdx | 53 + .../cli/kbcli_componentversion_list.mdx | 59 + docs/zh/preview/cli/kbcli_dataprotection.mdx | 54 + .../cli/kbcli_dataprotection_backup.mdx | 67 + .../kbcli_dataprotection_delete-backup.mdx | 59 + ..._dataprotection_describe-backup-policy.mdx | 56 + .../kbcli_dataprotection_describe-backup.mdx | 53 + .../kbcli_dataprotection_describe-restore.mdx | 53 + ...bcli_dataprotection_edit-backup-policy.mdx | 53 + .../kbcli_dataprotection_list-action-sets.mdx | 56 + ...li_dataprotection_list-backup-policies.mdx | 60 + ...rotection_list-backup-policy-templates.mdx | 56 + .../cli/kbcli_dataprotection_list-backups.mdx | 60 + .../kbcli_dataprotection_list-restores.mdx | 57 + .../cli/kbcli_dataprotection_restore.mdx | 57 + docs/zh/preview/cli/kbcli_kubeblocks.mdx | 51 + .../preview/cli/kbcli_kubeblocks_compare.mdx | 57 + .../preview/cli/kbcli_kubeblocks_config.mdx | 58 + .../cli/kbcli_kubeblocks_describe-config.mdx | 60 + .../preview/cli/kbcli_kubeblocks_install.mdx | 81 + .../cli/kbcli_kubeblocks_list-versions.mdx | 58 + .../cli/kbcli_kubeblocks_preflight.mdx | 72 + .../preview/cli/kbcli_kubeblocks_status.mdx | 57 + .../cli/kbcli_kubeblocks_uninstall.mdx | 59 + .../preview/cli/kbcli_kubeblocks_upgrade.mdx | 66 + docs/zh/preview/cli/kbcli_ops-definition.mdx | 44 + .../cli/kbcli_ops-definition_describe.mdx | 53 + .../preview/cli/kbcli_ops-definition_list.mdx | 60 + docs/zh/preview/cli/kbcli_options.mdx | 54 + docs/zh/preview/cli/kbcli_playground.mdx | 44 + .../preview/cli/kbcli_playground_destroy.mdx | 56 + docs/zh/preview/cli/kbcli_playground_init.mdx | 87 + docs/zh/preview/cli/kbcli_plugin.mdx | 55 + docs/zh/preview/cli/kbcli_plugin_describe.mdx | 56 + docs/zh/preview/cli/kbcli_plugin_index.mdx | 50 + .../zh/preview/cli/kbcli_plugin_index_add.mdx | 55 + .../preview/cli/kbcli_plugin_index_delete.mdx | 53 + .../preview/cli/kbcli_plugin_index_list.mdx | 53 + .../preview/cli/kbcli_plugin_index_update.mdx | 46 + docs/zh/preview/cli/kbcli_plugin_install.mdx | 56 + docs/zh/preview/cli/kbcli_plugin_list.mdx | 53 + docs/zh/preview/cli/kbcli_plugin_search.mdx | 58 + .../zh/preview/cli/kbcli_plugin_uninstall.mdx | 53 + docs/zh/preview/cli/kbcli_plugin_upgrade.mdx | 57 + docs/zh/preview/cli/kbcli_report.mdx | 44 + docs/zh/preview/cli/kbcli_report_cluster.mdx | 79 + .../preview/cli/kbcli_report_kubeblocks.mdx | 70 + docs/zh/preview/cli/kbcli_trace.mdx | 47 + docs/zh/preview/cli/kbcli_trace_create.mdx | 63 + docs/zh/preview/cli/kbcli_trace_delete.mdx | 53 + docs/zh/preview/cli/kbcli_trace_list.mdx | 57 + docs/zh/preview/cli/kbcli_trace_update.mdx | 62 + docs/zh/preview/cli/kbcli_trace_watch.mdx | 53 + docs/zh/preview/cli/kbcli_version.mdx | 47 + .../01-overview.mdx | 72 + .../02-quickstart.mdx | 329 + .../04-operations/01-stop-start-restart.mdx | 320 + .../04-operations/02-vertical-scaling.mdx | 179 + .../04-operations/03-horizontal-scaling.mdx | 237 + .../04-operations/04-volume-expansion.mdx | 252 + .../04-operations/05-manage-loadbalancer.mdx | 316 + .../09-decommission-a-specific-replica.mdx | 135 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 199 + .../08-monitoring/_category_.yml | 4 + .../_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 
65 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 36 + .../kubeblocks-for-kafka/01-overview.mdx | 49 + .../kubeblocks-for-kafka/02-quickstart.mdx | 470 + .../04-operations/01-stop-start-restart.mdx | 322 + .../04-operations/02-vertical-scaling.mdx | 178 + .../04-operations/03-horizontal-scaling.mdx | 230 + .../04-operations/04-volume-expansion.mdx | 259 + .../04-operations/05-manage-loadbalancer.mdx | 322 + .../09-decommission-a-specific-replica.mdx | 161 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 249 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-kafka/_category_.yml | 4 + .../kubeblocks-for-kafka/_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 82 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-milvus/01-overview.mdx | 88 + .../kubeblocks-for-milvus/02-quickstart.mdx | 436 + .../03-topologies/01-standlone.mdx | 137 + .../03-topologies/02-cluster.mdx | 527 + .../03-topologies/_category_.yml | 4 + .../04-operations/01-stop-start-restart.mdx | 261 + .../04-operations/02-vertical-scaling.mdx | 192 + .../04-operations/03-horizontal-scaling.mdx | 241 + .../04-operations/05-manage-loadbalancer.mdx | 304 + .../09-decommission-a-specific-replica.mdx | 141 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 227 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-milvus/_category_.yml | 4 + .../kubeblocks-for-milvus/_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 42 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 48 + .../kubeblocks-for-mongodb/01-overview.mdx | 78 + .../kubeblocks-for-mongodb/02-quickstart.mdx | 538 + .../04-operations/01-stop-start-restart.mdx | 288 + .../04-operations/02-vertical-scaling.mdx | 177 + .../04-operations/03-horizontal-scaling.mdx | 276 + .../04-operations/04-volume-expansion.mdx | 219 + .../04-operations/05-manage-loadbalancer.mdx | 345 + .../04-operations/08-switchover.mdx | 186 + .../09-decommission-a-specific-replica.mdx | 142 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 129 + .../02-create-full-backup.mdx | 228 + .../03-scheduled-full-backup.mdx | 153 + .../04-scheduled-continuous-backup.mdx | 184 + .../05-restoring-from-full-backup.mdx | 165 + .../06-restore-with-pitr.mdx | 182 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 138 + .../06-custom-secret/_category_.yml | 4 + .../kubeblocks-for-mongodb/_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 41 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-mysql/01-overview.mdx | 66 + .../kubeblocks-for-mysql/02-quickstart.mdx | 412 + .../03-topologies/01-semisync.mdx | 377 + .../02-semisync-with-proxysql.mdx | 311 + .../03-topologies/03-mgr.mdx | 254 + .../03-topologies/04-mgr-with-proxysql.mdx | 294 + .../03-topologies/05-orchestrator.mdx | 412 + .../06-orchestrator-with-proxysql.mdx | 422 + .../03-topologies/_category_.yml | 4 + .../04-operations/01-stop_start_restart.mdx | 308 + .../04-operations/02-vertical-scaling.mdx | 230 + .../04-operations/03-horizontal-scaling.mdx | 262 + .../04-operations/04-volume-expansion.mdx | 260 + .../04-operations/05-manage-loadbalancer.mdx | 465 + .../06-minior-version-upgrade.mdx | 257 + .../04-operations/07-modify-parameters.mdx | 274 + .../04-operations/08-switchover.mdx | 195 + .../09-decommission-a-specific-replica.mdx | 237 + 
.../04-operations/11-rebuild-replica.mdx | 478 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 143 + .../02-create-full-backup.mdx | 285 + .../03-scheduled-full-backup.mdx | 176 + .../04-scheduled-continuous-backup.mdx | 181 + .../05-restoring-from-full-backup.mdx | 197 + .../06-restore-with-pitr.mdx | 188 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 167 + .../02-custom-password-generation-policy.mdx | 157 + .../06-custom-secret/_category_.yml | 4 + .../07-tls/01-tls-overview.mdx | 167 + .../07-tls/02-tls-custom-cert.mdx | 231 + .../kubeblocks-for-mysql/07-tls/03-mtls.mdx | 314 + .../07-tls/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 286 + .../08-monitoring/_category_.yml | 4 + .../01-custom-scheduling-policy.mdx | 269 + .../02-custom-pod-resources.mdx | 338 + ...03-parallel-pod-management-concurrency.mdx | 305 + .../04-instance-update-strategy-ondelete.mdx | 226 + .../05-gradual-rolling-update.mdx | 246 + .../09-advanced-pod-management/_category_.yml | 4 + .../kubeblocks-for-mysql/_category_.yml | 4 + .../kubeblocks-for-postgresql/01-overview.mdx | 88 + .../02-quickstart.mdx | 572 + .../04-operations/01-stop-start-restart.mdx | 288 + .../04-operations/02-vertical-scaling.mdx | 194 + .../04-operations/03-horizontal-scaling.mdx | 310 + .../04-operations/04-volume-expansion.mdx | 242 + .../04-operations/05-manage-loadbalancer.mdx | 400 + .../06-minior-version-upgrade.mdx | 308 + .../04-operations/07-modify-parameters.mdx | 263 + .../04-operations/08-switchover.mdx | 184 + .../09-decommission-a-specific-replica.mdx | 157 + .../04-operations/11-rebuild-replica.mdx | 331 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 166 + .../02-create-full-backup.mdx | 254 + .../03-scheduled-full-backup.mdx | 153 + .../04-scheduled-continuous-backup.mdx | 329 + .../05-restoring-from-full-backup.mdx | 132 + .../06-restore-with-pitr.mdx | 162 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 142 + .../02-custom-password-generation-policy.mdx | 122 + .../06-custom-secret/_category_.yml | 4 + .../07-tls/01-tls-overview.mdx | 200 + .../07-tls/02-tls-custom-cert.mdx | 207 + .../07-tls/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 271 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-postgresql/_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-pg-replication-cluster.mdx | 37 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-pg-replication-cluster.mdx | 18 + .../kubeblocks-for-qdrant/01-overview.mdx | 63 + .../kubeblocks-for-qdrant/02-quickstart.mdx | 443 + .../04-operations/01-stop-start-restart.mdx | 285 + .../04-operations/02-vertical-scaling.mdx | 178 + .../04-operations/03-horizontal-scaling.mdx | 279 + .../04-operations/04-volume-expansion.mdx | 219 + .../04-operations/05-manage-loadbalancer.mdx | 320 + .../06-minior-version-upgrade.mdx | 275 + .../09-decommission-a-specific-replica.mdx | 135 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 129 + .../02-create-full-backup.mdx | 222 + .../03-scheduled-full-backup.mdx | 153 + .../05-restoring-from-full-backup.mdx | 164 + .../05-backup-restore/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 247 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-qdrant/_category_.yml | 4 + .../kubeblocks-for-qdrant/_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 42 + .../_tpl/_prerequisites.mdx | 11 + 
.../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-rabbitmq/01-overview.mdx | 57 + .../kubeblocks-for-rabbitmq/02-quickstart.mdx | 481 + .../04-operations/01-stop-start-restart.mdx | 286 + .../04-operations/02-vertical-scaling.mdx | 177 + .../04-operations/03-horizontal-scaling.mdx | 240 + .../04-operations/04-volume-expansion.mdx | 219 + .../04-operations/05-manage-loadbalancer.mdx | 330 + .../09-decommission-a-specific-replica.mdx | 149 + .../04-operations/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 226 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-rabbitmq/_category_.yml | 4 + .../_tpl/_category_.yml | 5 + .../_tpl/_create-cluster.mdx | 36 + .../_tpl/_prerequisites.mdx | 11 + .../_tpl/_verify-cluster.mdx | 33 + .../kubeblocks-for-redis/01-overview.mdx | 72 + .../kubeblocks-for-redis/02-quickstart.mdx | 561 + .../03-topologies/01-standlone.mdx | 95 + .../03-topologies/02-replication.mdx | 133 + .../03-topologies/03-sharding.mdx | 239 + .../03-topologies/_category_.yml | 4 + .../04-operations/01-stop-start-restart.mdx | 316 + .../04-operations/02-vertical-scaling.mdx | 181 + .../04-operations/03-horizontal-scaling.mdx | 289 + .../04-operations/04-volume-expansion.mdx | 238 + .../04-operations/05-manage-loadbalancer.mdx | 341 + .../04-operations/07-modify-parameters.mdx | 131 + .../04-operations/08-switchover.mdx | 184 + .../09-decommission-a-specific-replica.mdx | 132 + .../04-operations/_category_.yml | 4 + .../01-create-backuprepo.mdx | 129 + .../02-create-full-backup.mdx | 220 + .../03-scheduled-full-backup.mdx | 153 + .../04-scheduled-continuous-backup.mdx | 163 + .../05-restoring-from-full-backup.mdx | 183 + .../06-restore-with-pitr.mdx | 186 + .../05-backup-restore/_category_.yml | 4 + .../06-custom-secret/01-custom-secret.mdx | 157 + .../06-custom-secret/_category_.yml | 4 + .../01-integrate-with-prometheus-operator.mdx | 267 + .../08-monitoring/_category_.yml | 4 + .../kubeblocks-for-redis/_category_.yml | 4 + .../kubeblocks-for-redis/_tpl/_category_.yml | 5 + .../_create-redis-replication-cluster.mdx | 76 + .../_tpl/_prerequisites.mdx | 11 + .../_verify-redis-replication-cluster.mdx | 33 + .../preview/user_docs/concepts/_category_.yml | 4 + .../backup-and-restore/_category_.yaml | 4 + .../backup-and-restore/backup/_category_.yaml | 4 + .../backup-and-restore/backup/backup-repo.mdx | 651 + .../backup/configure-backuppolicy.mdx | 173 + .../backup/on-demand-backup.mdx | 142 + .../backup/scheduled-backup.mdx | 89 + .../backup-and-restore/introduction.mdx | 33 + .../restore/_category_.yaml | 4 + .../backup-and-restore/restore/pitr.mdx | 118 + .../restore/restore-data-from-backup-set.mdx | 61 + .../zh/preview/user_docs/concepts/concept.mdx | 159 + .../concepts/in-place-update/_category_.yaml | 4 + .../in-place-update/ignore-vertical-scale.mdx | 13 + .../concepts/in-place-update/overview.mdx | 56 + .../concepts/instance-template/_category_.yml | 4 + .../how-to-use-instance-template.mdx | 207 + .../instance-template/introduction.mdx | 27 + .../preview/user_docs/overview/_category_.yml | 4 + .../user_docs/overview/install-kubeblocks.mdx | 543 + .../user_docs/overview/introduction.mdx | 258 + .../user_docs/overview/supported-addons.mdx | 217 + .../user_docs/references/_category_.yml | 4 + .../references/api-reference/_category_.yml | 4 + .../references/api-reference/add-on.mdx | 2545 + .../references/api-reference/cluster.mdx | 57618 ++++++++++++++++ .../api-reference/dataprotection.mdx | 11576 ++++ .../references/api-reference/operations.mdx | 
7998 +++ .../references/api-reference/parameters.mdx | 5770 ++ .../user_docs/references/install-addons.mdx | 170 + .../user_docs/references/install-kbcli.mdx | 285 + .../user_docs/references/install-minio.mdx | 55 + .../install-snapshot-controller.mdx | 78 + .../references/kubeblocks_options.mdx | 211 + .../kubernetes_and_operator_101.mdx | 135 + .../prepare-a-local-k8s-cluster.mdx | 259 + .../user_docs/references/terminology.mdx | 99 + .../user_docs/release_notes/_category_.yml | 4 + .../release_notes/release-09/090.mdx | 126 + .../release_notes/release-09/091.mdx | 98 + .../release_notes/release-09/092.mdx | 58 + .../release_notes/release-09/093.mdx | 77 + .../release_notes/release-09/094.mdx | 71 + .../release_notes/release-09/_category_.yml | 4 + .../release_notes/release-10/100-cn.mdx | 116 + .../release_notes/release-10/100.mdx | 189 + .../release_notes/release-10/_category_.yml | 4 + .../user_docs/troubleshooting/_category_.yml | 4 + .../handle-a-cluster-exception.mdx | 197 + .../troubleshooting/known-issues.mdx | 92 + .../preview/user_docs/upgrade/_category_.yml | 5 + .../user_docs/upgrade/upgrade-to-0_8.mdx | 115 + .../user_docs/upgrade/upgrade-to-0_9_0.mdx | 171 + .../upgrade/upgrade-to-v09-version.mdx | 298 + scripts/python/transalate_mdx.py | 19 +- 459 files changed, 143631 insertions(+), 50 deletions(-) create mode 100644 blogs/zh/a-testing-report-for-optimizing-PG-performance-on-kubeblocks.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-7-0.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-8-0.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-9-0.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-9-1.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-9-2.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-9-3.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v0-9-4.mdx create mode 100644 blogs/zh/announcing-kubeblocks-v1-0-0.mdx create mode 100644 blogs/zh/community-monthly-report-for-april-2024.mdx create mode 100644 blogs/zh/community-monthly-report-for-february-2024.mdx create mode 100644 blogs/zh/community-monthly-report-for-january-2024.mdx create mode 100644 blogs/zh/community-monthly-report-for-march-2024.mdx create mode 100644 blogs/zh/community-monthly-report-for-may-2024.mdx create mode 100644 blogs/zh/deploy-harbor-on-kubeblocks.mdx create mode 100644 blogs/zh/deploy-wordpress-on-kubeblocks.mdx create mode 100644 blogs/zh/dify-on-kb.mdx create mode 100644 blogs/zh/does-containerization-affect-the-performance-of-databases.mdx create mode 100644 blogs/zh/does-running-mysql-on-kubernetes-lead-to-significant-performance-degradation.mdx create mode 100644 blogs/zh/how-to-fix-pod-stuck-in-terminating-status.mdx create mode 100644 blogs/zh/how-to-manage-database-clusters-without-a-dedicated-operator.mdx create mode 100644 blogs/zh/in-place-updates.mdx create mode 100644 blogs/zh/instance-template.mdx create mode 100644 blogs/zh/instanceset-introduction.mdx create mode 100644 blogs/zh/is-k8s-a-database.mdx create mode 100644 blogs/zh/kubeblocks-on-kata.mdx create mode 100644 blogs/zh/mangage-6k-db-instance-with-kubeblocks.mdx create mode 100644 blogs/zh/migrate-redis-in-kuaishou-from-bare-metal-to-k8s.mdx create mode 100644 blogs/zh/moodle-in-kubeblocks-windows.mdx create mode 100644 blogs/zh/redis-containerization.mdx create mode 100644 blogs/zh/redis-on-kb.mdx create mode 100644 blogs/zh/run-databases-on-k8s-insights-from-leading-chinese-internet-companies.mdx create mode 100644 
blogs/zh/run-redis-on-k8s-kuaishou-solution-with-kubeblocks.mdx create mode 100644 blogs/zh/take-specified-instances-offline.mdx create mode 100644 blogs/zh/use-kubeblocks-to-build-your-aigc-infra-on-amazon-eks.mdx create mode 100644 docs/zh/preview/cli/cli.mdx create mode 100644 docs/zh/preview/cli/kbcli.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_disable.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_enable.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_index.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_index_add.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_index_delete.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_index_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_index_update.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_install.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_purge.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_search.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_uninstall.mdx create mode 100644 docs/zh/preview/cli/kbcli_addon_upgrade.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo_create.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo_delete.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo_list-storage-provider.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_backuprepo_update.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_backup.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_cancel-ops.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_configure.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_connect.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_convert-to-v1.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_apecloud-mysql.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_etcd.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_kafka.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_mongodb.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_mysql.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_oriol.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_postgresql.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_qdrant.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_rabbitmq.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_redis.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_create_tidb.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_custom-ops.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_custom-ops_mogdb-switchover.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_custom-ops_post-rebuild-for-clickhouse.mdx create mode 100644 
docs/zh/preview/cli/kbcli_cluster_custom-ops_post-scale-out-shard-for-clickhouse.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_delete-backup.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_delete-ops.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_delete.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_describe-backup-policy.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_describe-backup.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_describe-config.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_describe-ops.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_describe-restore.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_diff-config.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_edit-backup-policy.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_edit-config.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_explain-config.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_expose.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_label.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-backup-policies.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-backups.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-components.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-events.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-instances.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-logs.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-ops.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list-restores.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_logs.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_promote.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_rebuild-instance.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_register.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_restart.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_restore.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_scale-in.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_scale-out.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_start.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_stop.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_update.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_upgrade-to-v1.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_upgrade.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_volume-expand.mdx create mode 100644 docs/zh/preview/cli/kbcli_cluster_vscale.mdx create mode 100644 docs/zh/preview/cli/kbcli_clusterdefinition.mdx create mode 100644 docs/zh/preview/cli/kbcli_clusterdefinition_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_clusterdefinition_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_componentdefinition.mdx create mode 100644 docs/zh/preview/cli/kbcli_componentdefinition_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_componentdefinition_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_componentversion.mdx create mode 100644 docs/zh/preview/cli/kbcli_componentversion_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_componentversion_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_backup.mdx create mode 
100644 docs/zh/preview/cli/kbcli_dataprotection_delete-backup.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_describe-backup-policy.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_describe-backup.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_describe-restore.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_edit-backup-policy.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_list-action-sets.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_list-backup-policies.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_list-backup-policy-templates.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_list-backups.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_list-restores.mdx create mode 100644 docs/zh/preview/cli/kbcli_dataprotection_restore.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_compare.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_config.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_describe-config.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_install.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_list-versions.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_preflight.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_status.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_uninstall.mdx create mode 100644 docs/zh/preview/cli/kbcli_kubeblocks_upgrade.mdx create mode 100644 docs/zh/preview/cli/kbcli_ops-definition.mdx create mode 100644 docs/zh/preview/cli/kbcli_ops-definition_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_ops-definition_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_options.mdx create mode 100644 docs/zh/preview/cli/kbcli_playground.mdx create mode 100644 docs/zh/preview/cli/kbcli_playground_destroy.mdx create mode 100644 docs/zh/preview/cli/kbcli_playground_init.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_describe.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_index.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_index_add.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_index_delete.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_index_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_index_update.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_install.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_search.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_uninstall.mdx create mode 100644 docs/zh/preview/cli/kbcli_plugin_upgrade.mdx create mode 100644 docs/zh/preview/cli/kbcli_report.mdx create mode 100644 docs/zh/preview/cli/kbcli_report_cluster.mdx create mode 100644 docs/zh/preview/cli/kbcli_report_kubeblocks.mdx create mode 100644 docs/zh/preview/cli/kbcli_trace.mdx create mode 100644 docs/zh/preview/cli/kbcli_trace_create.mdx create mode 100644 docs/zh/preview/cli/kbcli_trace_delete.mdx create mode 100644 docs/zh/preview/cli/kbcli_trace_list.mdx create mode 100644 docs/zh/preview/cli/kbcli_trace_update.mdx create mode 100644 docs/zh/preview/cli/kbcli_trace_watch.mdx create mode 100644 docs/zh/preview/cli/kbcli_version.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/01-overview.mdx create mode 100644 
docs/zh/preview/kubeblocks-for-elasticsearch/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-kafka/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-kafka/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-kafka/_tpl/_create-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/03-topologies/01-standlone.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/03-topologies/02-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/03-topologies/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx create mode 100644 
docs/zh/preview/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-milvus/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-milvus/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-milvus/_tpl/_create-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/08-switchover.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/03-topologies/01-semisync.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/03-topologies/02-semisync-with-proxysql.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/03-topologies/03-mgr.mdx create mode 100644 
docs/zh/preview/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/03-topologies/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/07-modify-parameters.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/08-switchover.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/06-custom-secret/02-custom-password-generation-policy.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/06-custom-secret/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/07-tls/01-tls-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/07-tls/03-mtls.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/07-tls/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/01-custom-scheduling-policy.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/02-custom-pod-resources.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/04-instance-update-strategy-ondelete.mdx create mode 100644 docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/05-gradual-rolling-update.mdx create mode 100644 
docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-mysql/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/08-switchover.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/07-tls/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/02-quickstart.mdx create mode 100644 
docs/zh/preview/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/01-overview.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/02-quickstart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/03-topologies/01-standlone.mdx create mode 
100644 docs/zh/preview/kubeblocks-for-redis/03-topologies/02-replication.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/03-topologies/03-sharding.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/03-topologies/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/08-switchover.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/04-operations/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/05-backup-restore/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/06-custom-secret/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/08-monitoring/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/_tpl/_category_.yml create mode 100644 docs/zh/preview/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/_tpl/_prerequisites.mdx create mode 100644 docs/zh/preview/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx create mode 100644 docs/zh/preview/user_docs/concepts/_category_.yml create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/_category_.yaml create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/backup/_category_.yaml create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/introduction.mdx create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/restore/_category_.yaml create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/restore/pitr.mdx 
create mode 100644 docs/zh/preview/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx create mode 100644 docs/zh/preview/user_docs/concepts/concept.mdx create mode 100644 docs/zh/preview/user_docs/concepts/in-place-update/_category_.yaml create mode 100644 docs/zh/preview/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx create mode 100644 docs/zh/preview/user_docs/concepts/in-place-update/overview.mdx create mode 100644 docs/zh/preview/user_docs/concepts/instance-template/_category_.yml create mode 100644 docs/zh/preview/user_docs/concepts/instance-template/how-to-use-instance-template.mdx create mode 100644 docs/zh/preview/user_docs/concepts/instance-template/introduction.mdx create mode 100644 docs/zh/preview/user_docs/overview/_category_.yml create mode 100644 docs/zh/preview/user_docs/overview/install-kubeblocks.mdx create mode 100644 docs/zh/preview/user_docs/overview/introduction.mdx create mode 100644 docs/zh/preview/user_docs/overview/supported-addons.mdx create mode 100644 docs/zh/preview/user_docs/references/_category_.yml create mode 100644 docs/zh/preview/user_docs/references/api-reference/_category_.yml create mode 100644 docs/zh/preview/user_docs/references/api-reference/add-on.mdx create mode 100644 docs/zh/preview/user_docs/references/api-reference/cluster.mdx create mode 100644 docs/zh/preview/user_docs/references/api-reference/dataprotection.mdx create mode 100644 docs/zh/preview/user_docs/references/api-reference/operations.mdx create mode 100644 docs/zh/preview/user_docs/references/api-reference/parameters.mdx create mode 100644 docs/zh/preview/user_docs/references/install-addons.mdx create mode 100644 docs/zh/preview/user_docs/references/install-kbcli.mdx create mode 100644 docs/zh/preview/user_docs/references/install-minio.mdx create mode 100644 docs/zh/preview/user_docs/references/install-snapshot-controller.mdx create mode 100644 docs/zh/preview/user_docs/references/kubeblocks_options.mdx create mode 100644 docs/zh/preview/user_docs/references/kubernetes_and_operator_101.mdx create mode 100644 docs/zh/preview/user_docs/references/prepare-a-local-k8s-cluster.mdx create mode 100644 docs/zh/preview/user_docs/references/terminology.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/_category_.yml create mode 100644 docs/zh/preview/user_docs/release_notes/release-09/090.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-09/091.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-09/092.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-09/093.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-09/094.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-09/_category_.yml create mode 100644 docs/zh/preview/user_docs/release_notes/release-10/100-cn.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-10/100.mdx create mode 100644 docs/zh/preview/user_docs/release_notes/release-10/_category_.yml create mode 100644 docs/zh/preview/user_docs/troubleshooting/_category_.yml create mode 100644 docs/zh/preview/user_docs/troubleshooting/handle-a-cluster-exception.mdx create mode 100644 docs/zh/preview/user_docs/troubleshooting/known-issues.mdx create mode 100644 docs/zh/preview/user_docs/upgrade/_category_.yml create mode 100644 docs/zh/preview/user_docs/upgrade/upgrade-to-0_8.mdx create mode 100644 docs/zh/preview/user_docs/upgrade/upgrade-to-0_9_0.mdx create mode 100644 
docs/zh/preview/user_docs/upgrade/upgrade-to-v09-version.mdx diff --git a/blogs/en/redis-on-kb.mdx b/blogs/en/redis-on-kb.mdx index 24b76787..0db0213a 100644 --- a/blogs/en/redis-on-kb.mdx +++ b/blogs/en/redis-on-kb.mdx @@ -28,7 +28,7 @@ Many of Kubeblocks' clients have a strong demand for Redis Cluster, so we have a 1. Install KubeBlocks 0.9.0. -``` +```bash slc@slcmac kbcli % ./bin/kbcli kubeblocks list-versions --devel VERSION RELEASE-NOTES 0.9.0-beta.8 https://github.com/apecloud/kubeblocks/releases/tag/v0.9.0-beta.8 @@ -40,12 +40,12 @@ slc@slcmac kbcli % kbcli kubeblocks install --version="0.9.0-beta.8" Although Redis cluster addon is installed by default, the network standard compatibility caused some issue, we need to install it manually. -``` +```bash # Disable addon slc@slcmac addons % kbcli addon disable redis # Install the latest addon on the branch slc@slcmac addons % git clone git@github.com:apecloud/kubeblocks-addons.git -slc@slcmac addons % cd kubeblocks-addons/addons/redis +slc@slcmac addons % cd kubeblocks-addons/addons/redis slc@slcmac addons % helm dependency build && cd .. slc@slcmac addons % helm install redis ./redis slc@slcmac addons % helm list @@ -58,7 +58,7 @@ To reproduce the issue, we modify the configuration of the addon before executin The cluster created using NodePort mode with 3 primary nodes and 3 secondary nodes. -``` +```bash slc@slcmac addons % helm install redisc ./redis-cluster --set mode=cluster --set nodePortEnabled=true --set redisCluster.shardCount=3 slc@slcmac addons % kg pods | grep -v job NAME READY STATUS RESTARTS AGE @@ -72,7 +72,7 @@ redisc-shard-5g8-1 3/3 Running 0 14m We can see clearly that 3 primary-secondary pods are created, but the relationship between nodes is not built. Announce ip/port/bus-port -``` +```bash redisc-shard-5g8-0 kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2 kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set re 30039 @@ -89,20 +89,20 @@ kubectl exec -it redisc-shard-xwz-0 -c redis-cluster -- redis-cli -a O3605v7HsS Create Slot -``` +```bash kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster ADDSLOTSRANGE 0 5461 kubectl exec -it redisc-shard-hxx-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster ADDSLOTSRANGE 5462 10922 kubectl exec -it redisc-shard-xwz-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster ADDSLOTSRANGE 10923 16383 ``` Cluster Meet -``` -# login to one of the primary nodes +```bash +# login to one of the primary nodes slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- /bin/bash root@redisc-shard-5g8-0:/# redis-cli -a O3605v7HsS 127.0.0.1:6379> cluster nodes ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 0 0 connected 0-5461 -# Only one node found, we have to meet other two nodes. +# Only one node found, we have to meet other two nodes. slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster meet 172.18.0.2 30182 31879 OK slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster meet 172.18.0.2 31993 30105 @@ -122,7 +122,7 @@ At this moment, a 3-node cluster is created. We use the pod `redisc-shard-5g8-1` as the standby of primary pod `redisc-shard-5g8-0`. Check the link on primary pod, it is not connected to any other primary pod. 
-``` +```bash # Check link root@redisc-shard-5g8-1:/# netstat -anop | grep redis tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -134,7 +134,7 @@ tcp6 0 0 :::6379 :::* LISTEN Headless address of the secondary pod: redisc-shard-5g8-1.redisc-shard-5g8-headless:6379 The complete `join` command is: -``` +```bash slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- /bin/bash root@redisc-shard-5g8-1:/# redis-cli -a O3605v7HsS --cluster add-node redisc-shard-5g8-1.redisc-shard-5g8-headless:6379 172.18.0.2:30039 --cluster-slave --cluster-master-id ff935854b7626a7e4374598857d5fbe998297799 >>> Adding node redisc-shard-5g8-1.redisc-shard-5g8-headless:6379 to cluster 172.18.0.2:30039 @@ -158,7 +158,7 @@ Waiting for the cluster to join 172.18.0.2:30039 is the announced ip/port of the primary pod. Check connection: -``` +```bash root@redisc-shard-5g8-1:/# netstat -anop | grep redis tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -173,7 +173,7 @@ tcp6 0 0 :::6379 :::* LISTEN The secondary pod and other 3 primary pods are connected on announced bus port, and also the secondary pod is connected to its primary pod. Check the cluster topology on the secondary pod. -``` +```bash root@redisc-shard-5g8-1:/# redis-cli -a O3605v7HsS 127.0.0.1:6379> cluster nodes ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713327060494 0 connected 0-5461 @@ -183,7 +183,7 @@ a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 17133 ``` Check the cluster topology on the primary pod, and the newly added secondary pod is missing. -``` +```bash root@redisc-shard-5g8-0:/# redis-cli -a O3605v7HsS 127.0.0.1:6379> cluster nodes ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 1713327106000 0 connected 0-5461 @@ -193,25 +193,25 @@ a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 17133 During the previous `add-node` process, the` cluster meet` operation reported success, but the primary node did not actually see the new replica node. After reviewing the ·/data/running.log·, the following error information was found: -``` +```bash root@redisc-shard-5g8-0:/data# grep 16379 running.log 1:M 17 Apr 2024 04:05:37.610 - Connection with Node 30e6d55c687bfc08e4a2fcd2ef586ba5458d801f at 10.42.0.1:16379 failed: Connection refused **10 times repeated** 30e6d55c687bfc08e4a2fcd2ef586ba5458d801f at 10.42.0.1:16379 failed: Connection refused ``` -Therefore, actually this `cluster meet` operation is failed. But why? +Therefore, actually this `cluster meet` operation is failed. But why? ## Troubleshooting 1. The mysterious IP address. The default Redis Cluster bus port is 16379 = 6379 + 10000. If the bus port is not explicitly announced, Redis Cluster will use this default address. So the issue seems to be that when the primary pod received the meet request, it tried to reconnect to the other pod's default bus port (16379), but was unable to connect. However, the replica pod's IP (10.42.0.237) is not the same as the IP (10.42.0.1) mentioned in the error message. Why would the primary pod try to reconnect to an inconsistent IP? 
-``` +```bash slc@slcmac redis % kg pods -A -o wide | grep redisc-shard-5g8-1 default redisc-shard-5g8-1 3/3 Running 0 72m 10.42.0.237 k3d-k3s-default-server-0 ``` -Continuing the investigation, it was found that 10.42.0.1 is actually the address of the k3d (the Kubernetes version we use in the development environment) CNI0. -``` +Continuing the investigation, it was found that 10.42.0.1 is actually the address of the k3d (the Kubernetes version we use in the development environment) CNI0. +```bash slc@slcmac redis % docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 8f8958df3298 moby/buildkit:buildx-stable-1 "buildkitd --allow-i…" 6 weeks ago Up 6 weeks buildx_buildkit_project-v3-builder0 @@ -247,7 +247,7 @@ It turns out that the link corresponding to the gossip protocol (local 16379 -> Link information of primary- 1 redisc-shard-5g8-0: -``` +```bash root@redisc-shard-5g8-0:/data# netstat -anop | grep redis tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -263,7 +263,7 @@ tcp6 0 0 :::16379 :::* LISTEN ``` Link information of primary-2 redisc-shard-hxx-0: -``` +```bash root@redisc-shard-hxx-0:/# netstat -anop | grep redis tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -278,14 +278,14 @@ tcp6 0 0 :::6379 :::* LISTEN ``` Mapping relationship between two links: -``` +```bash # On primary-1 redisc-shard-5g8-0, capture packets on NodePort 31879 (primary -2 redisc-shard-hxx-0): 05:40:04.817984 IP redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412 > k3d-k3s-default-server-0.31879: Flags [P.], seq 6976:9336, ack 7081, win 10027, options [nop,nop,TS val 4191410578 ecr 867568717], length 2360 05:40:04.818428 IP k3d-k3s-default-server-0.31879 > redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412: Flags [.], ack 9336, win 498, options [nop,nop,TS val 867569232 ecr 4191410578], length 0 05:40:04.819269 IP k3d-k3s-default-server-0.31879 > redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412: Flags [P.], seq 7081:9441, ack 9336, win 501, options [nop,nop,TS val 867569233 ecr 4191410578], length 2360 05:40:04.819309 IP redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412 > k3d-k3s-default-server-0.31879: Flags [.], ack 9441, win 10026, options [nop,nop,TS val 4191410580 ecr 867569233], length 0 -# On primary-2 redisc-shard-hxx-0, capture packets on local Port 24780 (primary-1 redisc-shard-5g8-0): +# On primary-2 redisc-shard-hxx-0, capture packets on local Port 24780 (primary-1 redisc-shard-5g8-0): 05:40:04.818178 IP 10.42.0.1.24780 > redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379: Flags [P.], seq 32624:34984, ack 32937, win 10027, options [nop,nop,TS val 4191410578 ecr 867568717], length 2360 05:40:04.818371 IP redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379 > 10.42.0.1.24780: Flags [.], ack 34984, win 498, options [nop,nop,TS val 867569232 ecr 4191410578], length 0 05:40:04.819239 IP redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379 > 10.42.0.1.24780: Flags [P.], seq 32937:35297, ack 34984, win 501, options [nop,nop,TS val 867569233 ecr 4191410578], length 2360 @@ -305,7 +305,7 @@ Now that the root cause is known, the problem becomes easier to solve. 
For this kind of "meet failure" scenario, we can have secondary-1 explicitly announce its IP/port/bus-port, and then have it actively join the cluster. This way, when primary-1 tries to reconnect, it will use the announced IP to establish the connection. -``` +```bash slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2 slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 31309 slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 31153 @@ -323,7 +323,7 @@ a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 17133 OK ``` Check primary-1 to see the difference after `meet`. -``` +```bash root@redisc-shard-5g8-0:/data# redis-cli -a O3605v7HsS Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 127.0.0.1:6379> cluster nodes @@ -336,9 +336,9 @@ ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713334506133 2 connected 10923-16383 a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713334506233 1 connected 5462-10922 ``` -A gossip link for secondary-1 can be found on primary-1. +A gossip link for secondary-1 can be found on primary-1. -``` +```bash root@redisc-shard-5g8-0:/data# netstat -anop | grep redis tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -355,7 +355,7 @@ tcp6 0 0 :::16379 :::* LISTEN ``` We can see three new gossip links from primary-1/2/3 on secondary-1. -``` +```bash root@redisc-shard-5g8-1:/# netstat -anop | grep redis tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -379,7 +379,7 @@ Those three links are actually the primary pod connecting successfully through t Announce ip/port/bus-port -``` +```bash slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2 slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 30662 slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 30960 @@ -387,12 +387,12 @@ slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- /bin/ ``` Add-node secondary-2 (This process includes `meet` operation) -``` +```bash redis-cli -a O3605v7HsS --cluster add-node 172.18.0.2:30662 172.18.0.2:30182 --cluster-slave --cluster-master-id a54e8fa9474c620154f4c1abc9628116deb3dc7e ``` Check cluster topology on secondary-2. -``` +```bash 127.0.0.1:6379> cluster nodes 3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309@31153 slave ff935854b7626a7e4374598857d5fbe998297799 0 1713335442641 0 connected a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713335442328 1 connected 5462-10922 @@ -402,7 +402,7 @@ ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 17133 ``` Check cluster topology on primary-2. 
-``` +```bash 127.0.0.1:6379> cluster nodes e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713335448690 2 connected 10923-16383 ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713335448892 0 connected 0-5461 @@ -415,7 +415,7 @@ a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 myself,master - Announce ip/port/bus-port and then add node. -``` +```bash slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2 slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 30110 slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 30971 @@ -450,7 +450,7 @@ Waiting for the cluster to join Check cluster topology on any primary pod. -``` +```bash 127.0.0.1:6379> cluster nodes e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713335724101 2 connected 10923-16383 ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713335724101 0 connected 0-5461 @@ -472,7 +472,7 @@ The default CNI used by k3s/k3d is Flannel, and as analyzed above, Flannel will We also tested the scenario of k3s and Calico, where Calico uses vxlan to establish the Pod network. We found that when using NodePort, there is still a NAT problem on Calico. Assuming the NodePort we use is 10.128.0.52:32135, on the inbound direction, the communication to the local port 16379 through the NodePort (10.128.0.52) will still be translated to the address of the Node's vxlan.calico network device (192.168.238.0). This is the network connection of one of the secondary pods: -``` +```bash root@redisc-shard-ffv-1:/# netstat -anop | grep redis tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) @@ -492,9 +492,9 @@ tcp6 0 0 :::16379 :::* LISTEN tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0) ``` -On Node 10.128.0.52, the are two devices. +On Node 10.128.0.52, the are two devices. -``` +```bash ens4: flags=4163 mtu 1460 inet 10.128.0.52 netmask 255.255.255.255 broadcast 0.0.0.0 inet6 fe80::4001:aff:fe80:34 prefixlen 64 scopeid 0x20 @@ -515,7 +515,7 @@ vxlan.calico: flags=4163 mtu 1410 If the NodePort uses the Node that the Pod is located on, it will not be NATed in Calico. -``` +```bash slc@cluster-1:~$ kubectl exec -it redisc-shard-ffv-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 10.128.0.54 // Set the announced IP to the local Node IP where the Pod is located. 
OK slc@cluster-1:~$ kubectl exec -it redisc-shard-ffv-1 -c redis-cluster -- /bin/bash diff --git a/blogs/zh/a-testing-report-for-optimizing-PG-performance-on-kubeblocks.mdx b/blogs/zh/a-testing-report-for-optimizing-PG-performance-on-kubeblocks.mdx new file mode 100644 index 00000000..2969f5bb --- /dev/null +++ b/blogs/zh/a-testing-report-for-optimizing-PG-performance-on-kubeblocks.mdx @@ -0,0 +1,545 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/111858489?v=4 + name: dullboy + url: https://github.com/nayutah +date: 2023-09-21 +description: Kubernetes上优化PostgreSQL性能的测试报告 +image: /img/blogs/thumbnails/blog-pg.png +slug: A-testing-report-for-optimizing-PG-performance-on-Kubernetes +tags: +- KubeBlocks PG +- ECS PG +- optimization +- performance +title: Kubernetes上优化PostgreSQL性能的测试报告 +--- +# Kubernetes 上优化 PostgreSQL 性能的测试报告 + +## 引言 + +如今,越来越多的应用被部署在 Kubernetes 的容器中运行。这一趋势如此显著,以至于有人将 Kubernetes 比作云时代的 Linux,以形容其无处不在的影响力。然而,尽管应用层的容器化增长显而易见,数据管理领域的容器化进程却尚未形成同等规模。数据库作为有状态工作负载,常被称为"最不适合运行在 Kubernetes 上的组件"。这并不令人意外,因为容器化工作负载本身就需要具备应对重启、扩缩容、虚拟化等各种约束的健壮性。 + +但当前关注点正在向数据层转移,开发者希望像管理应用栈一样管理数据基础设施。他们试图对数据库和应用使用相同的工具,以获取一致的收益,例如快速部署和环境一致性。在本博客中,我们将对自托管的 PostgreSQL 解决方案(使用 KubeBlocks PG)和在 ECS 上自托管 PostgreSQL(以下简称 ECS PG)进行测试对比,并探讨如何优化数据库性能,使其在生产环境中的性能和稳定性达到或超越全托管数据库的水平。 + +## 环境准备 +| | 版本 | CPU | 内存 | 磁盘 | 网络 | 实例类型 | 复制协议 | +|--|-------|--------|-----|------|--------|-----|------| +| ECS PG|12.14|16核|64G|ESSD PL1 500G|SLB|专属型|异步复制 | +|KubeBlocks PG|12.14|16核|64G|ESSD PL1 300G|SLB|专属型|异步复制 | + +1. 在ACK上购买K8s集群并部署KubeBlocks(参考[本教程](https://kubeblocks.io/docs/preview/user_docs/installation/install-with-kbcli/install-kbcli))。采用Terway网络模式,使Pod IP即为VPC IP。这能确保VPC内网络连通性,简化网络管理并降低应用开发成本。将节点规格设置为16核64G。 + +2. 在生产环境中,开发者可能无法在16核64G规格的专属节点上创建实例。这通常是由于kubelet等代理进程占用了资源。为解决此问题,需将资源请求和限制设置为14核56G。 + +现在通过kubectl edit编辑PG集群的资源规格。移除对资源请求和限制的约束,以确保在压力测试期间集群能充分利用16核CPU。将缓冲区设置为16GB,然后使用以下命令创建PG实例: + +```bash +kbcli cluster create --cluster-definition=postgresql +``` + + + + +## 测试计划 +Sysbench 读密集型测试:80% 读取 + 20% 写入。 + +在测试场景中,读取请求多于写入请求,这与实际生产场景相似。 + +## 第一轮压力测试:TPS 降为 0 + +本次测试由 ECS 发起,通过 VPC IP 访问 PG 集群。 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 线程数 | KubeBlocks PG 吞吐量 | ECS PG 吞吐量 | KubeBlocks PG 延迟(ms) | ECS PG 延迟(ms) |
|--------|----------------------|---------------|--------------------------|------------------|
| 25  | 87264  | 91310  | 31.94  | 28.67  |
| 50  | 111063 | 140559 | 55.82  | 40.37  |
| 100 | 83032  | 159386 | 132.49 | 92.42  |
| 150 | 61865  | 140938 | 272.27 | 186.54 |
| 175 | 56487  | 134933 | 350.33 | 240.02 |
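作为参考,本轮读密集型压测所用的 sysbench 命令大致如下。此处仅为示意:主机地址、口令、表数量与数据量均为假设值;`oltp_read_write` 默认读写比约为 78/22,可通过 `--point-selects` 等参数微调至接近 80/20;线程数按各轮次在 25~175 之间调整。

```bash
# 首次压测前先准备数据(示意,连接参数与表规模均为假设值)
sysbench oltp_read_write \
  --db-driver=pgsql \
  --pgsql-host=<PG 访问地址> --pgsql-port=5432 \
  --pgsql-user=sysbenchrole --pgsql-password=<密码> --pgsql-db=pgbenchtest \
  --tables=64 --table-size=1000000 \
  prepare

# 读密集型压测:--threads 按测试轮次在 25~175 之间调整
sysbench oltp_read_write \
  --db-driver=pgsql \
  --pgsql-host=<PG 访问地址> --pgsql-port=5432 \
  --pgsql-user=sysbenchrole --pgsql-password=<密码> --pgsql-db=pgbenchtest \
  --tables=64 --table-size=1000000 \
  --threads=100 --time=600 --report-interval=10 \
  run
```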
+ +出现的问题: + +1. CPU 未能满载:当数据库由 ECS 测试时,数据库所在节点的 CPU 无法完全负载。 +2. 并发性能快速衰减:随着并发数增加,KubeBlocks PG 的性能下降速度比 ECS PG 更快。 +3. TPS 间歇性降为 0:在测试过程中(从 307 秒开始)频繁观察到 TPS 降为 0 的情况。 + +TPS dropped to 0 + +由于客户端和服务端的 CPU 均未满载,我怀疑网络是否存在问题,特别是 SLB 规格是否已达到上限。因此,我将 SLB 规格从默认的 'slb.s2.small' 更改为 'slb.s3.large' 并重新发起压力测试。 + +Change spec + +然而,问题依然存在。 + +## 第二轮压力测试:排查网络链路问题 +为了测试SLB性能,我们设计了使用'sysbench select 1'模拟端到端网络延迟的测试用例。虽然简单的ping测试可以反映部分延迟问题,但存在诸多局限性,无法保证完全端到端穿透。例如,SLB设备可能直接响应ping测试产生的ICMP包,导致无法检测从SLB到Pod的后续链路。 + +测试再次通过ECS发起: + +1. ECS -> Pod IP:使用VPC IP直接网络访问 +2. ECS -> SLB IP -> Pod IP:中间增加SLB层 +3. ECS -> ECS SLB IP:前端默认嵌入SLB层的PG + +测试结果如下: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 线程数 | Pod IP 吞吐量 | SLB IP 吞吐量 | ECS SLB IP 吞吐量 | Pod IP 延迟(ms) | SLB IP 延迟(ms) | ECS SLB IP 延迟(ms) |
|--------|----------------|----------------|--------------------|-------------------|-------------------|---------------------|
| 25 | 107309 | 105298 | 92163 | 0.30 | 0.30 | 0.32 |
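上表中的 "select 1" 延迟可以用类似下面的 sysbench 自定义脚本测得。脚本基于 sysbench 1.x 的 Lua API,仅执行 `SELECT 1` 以测量端到端往返延迟;脚本名与连接参数均为假设值,分别将 `--pgsql-host` 指向 Pod IP、SLB IP 和 ECS SLB IP 即可对比三条链路。

```bash
# select1.lua:每个 event 只执行一次 SELECT 1(示意脚本)
cat > select1.lua <<'EOF'
function thread_init()
    drv = sysbench.sql.driver()
    con = drv:connect()
end

function event()
    con:query("SELECT 1")
end

function thread_done()
    con:disconnect()
end
EOF

sysbench ./select1.lua \
  --db-driver=pgsql \
  --pgsql-host=<待测 IP> --pgsql-port=5432 \
  --pgsql-user=sysbenchrole --pgsql-password=<密码> --pgsql-db=pgbenchtest \
  --threads=25 --time=60 --report-interval=10 \
  run
```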
+ +结果表明,ACK和SLB网络均表现良好,不太可能是性能波动的原因。因此,我们继续进行压力测试。 + +## 第三轮压测:调整IO带宽 +随后我们继续按原测试计划,通过观察ECS监控图表对系统进行定性分析。 + +CPU繁忙率 + +从监控图表可以看出: +1. 磁盘读写带宽已达瓶颈。ESSD带宽与磁盘容量直接相关,计算公式为min`{120+0.5*容量, 350}`。对于300GB磁盘,对应带宽为270MB,数据显示已触及瓶颈。 + +2. 检查日志时发现,当TPS降至0时,CPU繁忙率同步下降。 + +CPU繁忙率 + +由于带宽达到上限,我们追加了一组500GB磁盘的性能测试。500GB磁盘带宽为350MB(min{120+0.5*500, 350})。压力测试期间,即使磁盘写满时CPU仍出现间歇性波动,这种波动可能与检查点相关,但依然不应导致TPS完全降为0。 + +提升磁盘带宽后,TPS骤降问题有所缓解。因此我们决定采用ESSD PL2 1TB磁盘(带宽620MB)来最大化磁盘带宽。结果显示波动虽仍存在,但幅度大幅减小,CPU繁忙率下降区间也明显收窄。 + +CPU繁忙率 + +我们采取了更激进的调整方案——升级至ESSD PL3 2TB磁盘(带宽700MB)。 + +CPU繁忙率 + +此次TPS下跌和CPU波动得到改善,但问题仍未根除。在8183秒时,TPS从2400骤降至1400,跌幅约40%,且CPU波动范围虽缩小但仍持续存在。 + +TPS下跌与CPU波动 + +结论:IO带宽对CPU和TPS均有显著影响。随着IO带宽提升,波动现象减轻,TPS归零的情况消失。但即便在没有IO带宽限制的情况下,TPS仍会出现40%的下跌。排除硬件限制因素后,该问题很可能与PG集群自身相关。 + +## 第四轮压测:检查点与锁分析 + +在这一轮测试中,我深入研究了检查点机制,并分析了I/O限流对检查点和事务的影响。 + +1. 为什么PostgreSQL的检查点相比其他数据库受到更严重的影响?在MySQL上进行类似测试时,我观察到的波动较弱。 + +2. 即使I/O限流生效时,结果显示I/O仍处于满载状态,因此TPS不应降至0。这是否因为带宽完全被检查点进程占用? + +为了更好地监控数据库,我启用了KubeBlocks内置的Node Exporter并开始测试。 +结果显示,当TPS降至0时,我观察到单次操作中有10GB内存被回收。在没有Huge Pages的情况下,如果页帧大小约为4KB,那么10GB大约相当于250万页。如此大规模的页面遍历和回收操作可能对操作系统内核的页面回收模块造成巨大压力。在那个特定时刻,操作系统出现了数十秒的冻结,导致上层所有进程挂起。 + +这类回收行为通常与dirty_background_ratio参数设置不当有关。于是我执行了`sysctl -a | grep dirty_background_ratio`命令,发现`vm.dirty_background_ratio = 10`。 + +dirty_background_ratio = 10 + + +通过命令`sysctl -w vm.dirty_background_ratio=5`将后台比例调整为5%。这个调整可以促使部分脏页缓存被刷写。 + +这个设置非常关键,且与PostgreSQL的机制密切相关。PostgreSQL依赖操作系统页缓存,这与Oracle和MySQL的I/O架构不同。MySQL使用DirectIO,因此对内存管理的压力较小。但在某些场景下,DirectIO相比写入缓冲缓存可能会带来稍高的延迟。 + +另一个发现是关于PostgreSQL内核和日志的。登录到Pod后,我发现WAL日志默认大小为16MB: + +```bash +root@postgres-cluster-postgresql-0:/home/postgres/pgdata/pgroot/data/pg_wal# du -sh 0000000A000001F300000077 16M 0000000A000001F300000077 +``` + +此外,PostgreSQL的后台进程会清理pg_wal目录下的WAL日志以释放空间。通过strace命令,我发现单次操作最多会删除数百个WAL文件,总大小为12GB。 + +(由于时区问题,日志中的时间需要加8小时调整,例如5:42对应北京时间13:42。) + +```bash +2023-05-18 05:42:42.352 GMT,,,129,,64657f66.81,134,,2023-05-18 01:29:10 GMT,,0,LOG,00000,"checkpoint complete: wrote 680117 buffers (32.4%); 0 WAL file(s) added, 788 removed, 0 recycled; write=238.224 s, sync=35.28 6 s, total=276.989 s; sync files=312, longest=1.348 s, average=0.114 s; distance=18756500 kB, estimate=19166525 kB",,,,,,,,,"" 2023-05-18 05:42:42.362 GMT,,,129,,64657f66.81,135,,2023-05-18 01:29:10 GMT,,0,LOG,00000,"checkpoint starting: wal",,,,,,,,,"" 2023-05-18 05:42:44.336 GMT,"sysbenchrole","pgbenchtest",65143,"::1:43962",6465928f.fe77,1157,"SELECT",2023-05-18 02:50:55 GMT,36/46849938,0,LOG,00000,"duration: 1533.532 ms execute sbstmt1641749330-465186528: SEL ECT c FROM sbtest46 WHERE id=$1","parameters: $1 = '948136'",,,,,,,,"" 2023-05-18 05:42:44.336 GMT,"sysbenchrole","pgbenchtest",65196,"::1:44028",6465928f.feac,1137,"UPDATE",2023-05-18 02:50:55 GMT,57/43973954,949436561,LOG,00000,"duration: 1533.785 ms execute sbstmt493865735-6481814 15: UPDATE sbtest51 SET k=k+1 WHERE id=$1","parameters: $1 = '996782'",,,,,,,,"" +``` + +当执行检查点时,CPU空闲率飙升至80%(对应TPS降至0)。 + +CPU idle spiked to 80% + +日志中部分事务的持续时间延长至超过1秒。 + +TPS下降问题也在13:44:20结束。 + +```bash +2023-05-18 05:44:20.693 GMT,"sysbenchrole","pgbenchtest",65145,"::1:43964",6465928f.fe79,1178,"SELECT",2023-05-18 02:50:55 GMT,48/45617265,0,LOG,00000,"duration: 1942.633 ms execute sbstmt-1652152656-473838068: SE LECT c FROM sbtest37 WHERE id=$1","parameters: $1 = '1007844'",,,,,,,,"" +``` + +在 13:45:41,vacuum 进程启动。 + + + +```bash +2023-05-18 05:45:41.512 GMT,,,87995,,646596d6.157bb,71,,2023-05-18 03:09:10 GMT,64/3879558,0,LOG,00000,"automatic aggressive vacuum of table 
""pgbenchtest.public.sbtest45"": index scans: 1 pages: 0 removed, 66886 remain, 0 skipped due to pins, 2328 skipped frozen tuples: 14166 removed, 2005943 remain, 15904 are dead but not yet removable, oldest xmin: 944519757 +``` + +在 13:47:04,检查点最终完成。 + +```bash +2023-05-18 05:47:04.920 GMT,,,129,,64657f66.81,136,,2023-05-18 01:29:10 GMT,,0,LOG,00000,"checkpoint complete: wrote 680483 buffers (32.4%); 0 WAL file(s) added, 753 removed, 0 recycled; write=226.176 s, sync=32.53 +``` + +整个过程在监控图表中清晰呈现。 + +Entire process + +CPU波动与检查点期间的脏页刷新过程高度吻合。 + +同时磁盘带宽始终保持饱和状态。 + +Disk bandwidth saturated + +当TPS降为0的时间段,恰好与检查点刷写脏页的时刻完全对应。 + +Checkpoint flush + +通过观察内存波动可以发现,内存回收导致的卡顿问题已得到有效解决,这表明调整`dirty_background_ratio`参数确实有效。 + +此外在刷写过程中,锁数量始终保持较高水平,这与非刷写状态形成鲜明对比。 + +Number of locks + +涉及的锁类型包括: + +Locks +Locks + +有时会出现多个进程争抢同一把锁的情况。 + +Lock contentions +Lock contentions + +在常规I/O操作中,即便磁盘带宽满载,事务间也极少出现锁争用,TPS保持稳定。但当锁争用显著时,TPS极易跌至0值,这与检查点过程直接相关。 + +Lock contentions + +## 第五轮压力测试:分析PG核心代码与追踪 + +在继续调查的过程中,我研究了一些与PostgreSQL检查点和WAL相关的代码,并对PostgreSQL后端进程进行了追踪。随后发现了一些WAL日志创建的问题,这些问题的时间数据是通过脚本日志分析计算得出的。 + +```bash +duration:550 ms 11:50:03.951036 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002EE000000E7.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 22 +duration:674 ms 11:50:09.733902 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002EF00000003.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 22 +duration:501 ms 11:50:25.263054 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002EF0000004B.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 23 +duration:609 ms 11:50:47.875338 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002EF000000A8.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 25 +duration:988 ms 11:50:53.596897 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002EF000000BD.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 29 +duration:1119 ms 11:51:10.987796 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002EF000000F6.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 29 +duration:1442 ms 11:51:42.425118 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F000000059.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 45 +duration:1083 ms 11:51:52.186613 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F000000071.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 51 +duration:503 ms 11:52:32.879828 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F0000000D8.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 75 +duration:541 ms 11:52:43.078011 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F0000000EB.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 84 +duration:1547 ms 11:52:56.286199 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F10000000C.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 84 +duration:1773 ms 11:53:19.821761 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F10000003D.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 94 +duration:2676 ms 11:53:30.398228 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F10000004F.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 101 +duration:2666 ms 11:54:05.693044 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F100000090.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 122 +duration:658 ms 11:54:55.267889 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F1000000E5.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 139 +duration:933 ms 11:55:37.229660 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F200000025.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 163 +duration:2681 ms 11:57:02.550339 openat(AT_FDCWD, "pg_wal/archive_status/00000010000002F200000093.ready", O_WRONLY|O_CREAT|O_TRUNC, 0666) = 197 
```

这些 WAL 文件准备就绪耗时超过 500 毫秒,有些甚至长达 2.6 秒。这就是部分事务持续时间超过 2 秒的原因:事务必须等待 WAL 文件就绪后才能继续写入。

创建 WAL 文件的过程如下:

1. `stat(pg_wal/00000010000002F200000093)` - 未找到文件
2. 使用 `pg_wal/xlogtemp.129` 创建 WAL 文件
3. 将 `pg_wal/xlogtemp.129` 清零
4. 在 `pg_wal/xlogtemp.129` 和 `pg_wal/00000010000002F200000093` 之间创建符号链接
5. 打开 `pg_wal/00000010000002F200000093`
6. 在文件末尾写入元数据
7. 加载并应用 WAL 文件

从 PostgreSQL 日志判断,当时部分客户端连接被重置,某些事务执行耗时超过 10 秒。

```bash
2023-05-22 11:56:08.355 GMT,,,442907,"100.127.12.1:23928",646b5858.6c21b,1,"",2023-05-22 11:56:08 GMT,,0,LOG,08006,"could not receive data from client: Connection reset by peer",,,,,,,,,""
2023-05-22 11:56:10.427 GMT,,,442925,"100.127.12.1:38942",646b585a.6c22d,1,"",2023-05-22 11:56:10 GMT,,0,LOG,08006,"could not receive data from client: Connection reset by peer",,,,,,,,,""
2023-05-22 11:56:12.118 GMT,,,442932,"100.127.13.2:41985",646b585c.6c234,1,"",2023-05-22 11:56:12 GMT,,0,LOG,08006,"could not receive data from client: Connection reset by peer",,,,,,,,,""
2023-05-22 11:56:13.401 GMT,"postgres","pgbenchtest",3549,"::1:45862",646ae5d3.ddd,3430,"UPDATE waiting",2023-05-22 03:47:31 GMT,15/95980531,1420084298,LOG,00000,"process 3549 still waiting for ShareLock on transaction 1420065380 after 1000.051 ms","Process holding the lock: 3588. Wait queue: 3549.",,,,"while updating tuple (60702,39) in relation ""sbtest44""","UPDATE sbtest44 SET k=k+1 WHERE id=$1",,,""
```

我对比日志后发现,每当 WAL 段文件需要较长时间才能就绪时,客户端就会产生一批慢查询(>1 秒)日志。

下面是 PG 内核中创建并清零 WAL 文件的相关代码:

```c
/* do not use get_sync_bit() here --- want to fsync only at end of fill */
fd = BasicOpenFile(tmppath, open_flags);
if (fd < 0)
    ereport(ERROR,
            (errcode_for_file_access(),
             errmsg("could not create file \"%s\": %m", tmppath)));

pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_WRITE);
save_errno = 0;
if (wal_init_zero)
{
    ssize_t     rc;

    /*
     * Zero-fill the file. With this setting, we do this the hard way to
     * ensure that all the file space has really been allocated. On
     * platforms that allow "holes" in files, just seeking to the end
     * doesn't allocate intermediate space. This way, we know that we
     * have all the space and (after the fsync below) that all the
     * indirect blocks are down on disk. Therefore, fdatasync(2) or
     * O_DSYNC will be sufficient to sync future writes to the log file.
     */
    rc = pg_pwrite_zeros(fd, wal_segment_size, 0); // buffer write

    if (rc < 0)
        save_errno = errno;
}
else
{
    /*
     * Otherwise, seeking to the end and writing a solitary byte is
     * enough.
     */
    errno = 0;
    if (pg_pwrite(fd, "\0", 1, wal_segment_size - 1) != 1)
    {
        /* if write didn't set errno, assume no disk space */
        save_errno = errno ?
errno : ENOSPC; + } + } + pgstat_report_wait_end(); + + if (save_errno) + { + /* + * If we fail to make the file, delete it to release disk space + */ + unlink(tmppath); + + close(fd); + + errno = save_errno; + + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not write to file \"%s\": %m", tmppath))); + } + + pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_SYNC); + if (pg_fsync(fd) != 0) // fsync data to disk + { + save_errno = errno; + close(fd); + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not fsync file \"%s\": %m", tmppath))); + } + pgstat_report_wait_end(); +``` + +如代码所示,清理操作始于异步写入,每次写入针对一个页块执行,直至整个循环完成。随后执行一次fsync操作。 + +通常情况下,异步写入速度较快,在系统负载较低时响应时间可达微秒级。但在高系统负载下,异步I/O操作的延迟可能超过30毫秒。这与操作系统内核中的I/O路径密切相关。当内存压力较大时,异步写入可能会转变为同步写入。此外,I/O过程与页回收的慢路径相互交织,理论上会导致较长的持续时间。这一现象在实际跟踪记录中确实被观测到。 + +以下是监控到的两次连续清理操作,其中两次异步IO操作之间的间隔超过了30毫秒: + +```bash +11:56:57.238340 write(3, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 8192) = 8192 +11:56:57.271551 write(3, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 8192) = 8192 +``` + +磁盘带宽如下: + +Disk bandwidth + +对于一个16MB的WAL段,如果需要进行2000次操作且每次耗时1毫秒,那么完成整体清零至少需要2秒。 + +例如: + +```bash +# I traced a PostgreSQL backend process that was executing a transaction. It took 1.5s to wait for the lock. +02:27:52.868356 recvfrom(10, "*\0c\304$Es\200\332\2130}\32S\250l\36\202H\261\243duD\344\321p\335\344\241\312/"..., 92, 0, NULL, NULL) = 92 +02:27:52.868409 getrusage(RUSAGE_SELF, {ru_utime={tv_sec=232, tv_usec=765624}, ru_stime={tv_sec=59, tv_usec=963504}, ...}) = 0 +02:27:52.868508 futex(0x7f55bebf9e38, FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, 0, NULL, FUTEX_BITSET_MATCH_ANY) = 0 +02:27:54.211960 futex(0x7f55bebfa238, FUTEX_WAKE, 1) = 1 +02:27:54.215049 write(2, "\0\0\36\1\377\334\23\0T2023-05-23 02:27:54.215"..., 295) = 295 +02:27:54.215462 getrusage(RUSAGE_SELF, {ru_utime={tv_sec=232, tv_usec=765773}, ru_stime={tv_sec=59, tv_usec=963504}, ...}) = 0 +``` + +对应的 SQL 语句为: + +```bash +2023-05-23 02:27:54.215 GMT,"postgres","pgbenchtest",1301759,"::1:56066",646c1ef3.13dcff,58,"SELECT",2023-05-23 02:03:31 GMT,43/198458539,0,LOG,00000,"duration: 1346.558 ms execute sbstmt-13047857631771152290: SEL ECT c FROM sbtest39 WHERE id=$1","parameters: $1 = '1001713'",,,,,,,,"" +``` + +至此可以得出结论:TPS降至0和CPU波动与WAL清零操作有关。其机制如下: + +WAL创建 -> WAL清零 -> 脏页刷写与清零过程的I/O争用 -> 事务执行时间延长 -> 锁持有时间增加 -> 更多事务被阻塞 -> 事务超时。 + +TPS drops mechanism + +清零操作的最大问题在于会产生大量IO,且所有事务必须等待新WAL文件准备就绪后才能同步数据。在此过程中,所有事务都需等待WALWrite和wal_insert锁,这是造成性能抖动的最主要原因。 + +由于问题的根本原因仍是IO争用,若IO负载较低且清零速度相对较快,抖动就不会如此显著,问题也不会暴露。目前严重抖动仅出现在压力测试期间,因此在前几轮测试中增加IO带宽确实有助于缓解TPS下降和CPU问题。 + +此外,由于创建新WAL文件时需要加锁,调整WAL文件大小以降低加锁频率也是解决方案之一。 + +## 第六轮压力测试:禁用 wal_init_zero + +现在我尝试解决这个问题。 + +清零WAL日志依赖于WAL日志槽是否正常工作,这是一种次优但某种程度上有效的方法。理想情况下WAL日志应该是自描述的,不应依赖清零来确保正确性。不过这种解决方案需要修改PG内核,这并不现实。另一种方法是通过文件系统清除WAL日志,而无需显式的PG内核调用。此解决方案要求文件系统支持此功能。 + +ZFS和XFS恰好具有这种COW特性。更多细节请参考[Reddit上的这个回答](https://www.reddit.com/r/bcachefs/comments/fhws6h/the_state_of_linux_cow_file_systems_what_to_choose/?rdt=58971)。 + +由于测试使用的EXT4文件系统没有这个特性,我切换到了ZFS文件系统。 + +然而在ZFS测试过程中,我多次遇到文件系统挂起的情况: + +```bash +root@pgclusterzfs-postgresql-0:~# cat /proc/4328/stack +[<0>] zil_commit_impl+0x105/0x650 [zfs] +[<0>] zfs_fsync+0x71/0xf0 [zfs] +[<0>] zpl_fsync+0x63/0x90 [zfs] +[<0>] do_fsync+0x38/0x60 +[<0>] __x64_sys_fsync+0x10/0x20 +[<0>] do_syscall_64+0x5b/0x1d0 +[<0>] entry_SYSCALL_64_after_hwframe+0x44/0xa9 +[<0>] 0xffffffffffffffff +``` + 
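ZFS 在压测中多次挂起(见上方栈信息),因此下文改用 XFS 并关闭 `wal_init_zero`。相关参数的调整方式示意如下:数据目录沿用上文出现的 `/home/postgres/pgdata/pgroot/data`;`wal_segment_size` 只能在 initdb 初始化实例时通过 `--wal-segsize` 指定(单位 MB),`wal_init_zero` 与 `full_page_writes`(即下文的 `full_page_write`)则是普通 GUC,可用 `ALTER SYSTEM` 调整。命令均为假设性示例:

```bash
# 初始化实例时将 WAL 段大小设为 1GB(--wal-segsize 单位为 MB)
initdb --wal-segsize=1024 -D /home/postgres/pgdata/pgroot/data

# 关闭 wal_init_zero 后,PG 不再对新 WAL 段做全量清零(需要文件系统配合)
psql -c "ALTER SYSTEM SET wal_init_zero = off;"
# 第七轮将尝试关闭 full_page_writes(需确保存储原子写或有完善的备份恢复方案)
psql -c "ALTER SYSTEM SET full_page_writes = off;"
psql -c "SELECT pg_reload_conf();"
```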
出于稳定性考虑,我最终选择了XFS文件系统并设置`wal_init_zero=off`。同时,为了降低WAL文件的创建频率,我将`wal_segment_size`从16MB调整为1GB,以减少加锁频率。

这次调整后,TPS下降和CPU抖动现象得到了显著缓解。

Disable wal_init_zero

虽然避免清零操作和降低锁频率已见成效,但在检查点期间,脏页刷盘和WAL日志写入仍会引发带宽和锁资源的竞争,导致性能波动。为进一步优化该问题,我将重点转向减少单个事务的IO量。

出于数据安全考虑,之前的压力测试都启用了`full_page_write`功能,这是为了确保在因断电可能导致数据块损坏时仍能恢复数据。具体原理可参考[这篇文章](http://mysql.taobao.org/monthly/2015/11/05/)。如果存储设备能保证原子写入(不会出现部分成功/失败),或者PostgreSQL可以通过备份集恢复(完整的基础数据+增量WAL重放),那么在保证数据安全的前提下,或许可以考虑禁用`full_page_write`功能。

## 第七轮压力测试:禁用 full_page_write

本次测试中,在禁用 `full_page_write` 前后,CPU 和 IO 带宽表现出了截然不同的性能特征。

CPU performance with full_page_write off
IO bandwidth performance with full_page_write off

可以看出 IO 争用对 PG 产生了显著影响:禁用 `full_page_write` 后,检查点期间的 CPU 抖动几乎消失。

随后我又进行了三组对比测试:
1. 启用 `full_page_write` 并设置 16MB WAL 段大小
2. 启用 `full_page_write` 并设置 1GB WAL 段大小
3. 禁用 `full_page_write` 并设置 1GB WAL 段大小

KubeBlocks PG wal_segment 1GB vs 16MB

当 `full_page_write` 启用时,1GB 段大小相比 16MB 略有性能提升,这验证了增大段尺寸确实能降低锁竞争频率。而在禁用 `full_page_write` 后,PG 性能表现非常优异。

最终我选择以下组合配置进行测试:
(`wal_init_zero off` + XFS) + (`full_page_write` 禁用) + (`wal_segment_size` 1GB)

测试结果如下:

CPU performance
Disk R/W data performance

在检查点期间系统运行非常平稳,没有任何抖动。PG 也从 IO 瓶颈转变为 CPU 瓶颈。至此可以确定,问题的核心在于 PG 的锁机制。

## 第八轮压力测试:最终性能对比

然而根据我的经验,PG作为进程模型(一个会话对应一个进程),在并发较高时会产生较大的页表和进程上下文切换开销,因此需要引入pgBouncer。用户自建ECS PG实例时通常会开启Huge Pages来解决并发问题,而KubeBlocks PG部署在ACK上并未启用Huge Page。

为保证测试公平性,我在后续测试中为KubeBlocks开启了`full_page_write`参数。

KubeBlocks PG vs ECS PG throughput

引入pgBouncer后,PG能够处理更多连接且性能无明显下降。KubeBlocks PG与ECS PG表现相当,但在低并发场景下略胜一筹,整体稳定性更优。

KubeBlocks PG vs ECS PG latency

## 结论

1. 清零 WAL 段对 PG 的性能和稳定性有显著影响。若文件系统支持清零操作,可关闭 `wal_init_zero` 选项以有效降低 CPU 和 TPS 波动。
2. `full_page_write` 同样对 PG 的性能和稳定性影响较大。若存储或备份方案能确保数据安全,可考虑关闭该功能以有效减少 CPU 和 TPS 抖动。
3. 增大 WAL 段尺寸可降低日志轮换时的锁竞争频率,也能缓解 CPU 和 TPS 抖动,但效果可能不如前两项显著。
4. PG 采用多进程模型。引入 pgBouncer 可支持更大并发连接数并显著提升稳定性。若条件允许,启用 Huge Page 也能达到类似效果,尽管原理不同。
5. 默认情况下 PG 受 IO 限制,但经过上述优化后会转为受 CPU 限制。
6. ACK 和 SLB 网络表现稳健,满足性能与稳定性需求。
7. K8s 可便捷调整文件系统及 PG 参数,并能快速有效地进行不同组合测试。此外,在 K8s 上运行数据库不会造成性能降级,经过常规调优即可呈现优异表现。K8s 为用户提供了更多自由度和自主权,限制更少。
\ No newline at end of file
diff --git a/blogs/zh/announcing-kubeblocks-v0-7-0.mdx b/blogs/zh/announcing-kubeblocks-v0-7-0.mdx
new file mode 100644
index 00000000..b604d17a
--- /dev/null
+++ b/blogs/zh/announcing-kubeblocks-v0-7-0.mdx
@@ -0,0 +1,69 @@
+---
+date: 2023-11-06
+description: 本博客介绍KubeBlocks v0.7.0版本的主要变更。
+image: /img/blogs/thumbnails/blog-banner.png
+slug: announcing-kubeblocks-v0-7-0
+tags:
+- KubeBlocks v0.7.0
+- release notes
+title: KubeBlocks v0.7.0 正式发布
+---
+# KubeBlocks v0.7.0 正式发布
+
+我们很高兴地宣布 KubeBlocks v0.7.0 正式发布!
+ +在此版本中,KubeBlocks 已支持 31 种开源数据库引擎,新增了包括 MariaDB、Elasticsearch、Pulsar 和 Pika 等组件。这为 Kubernetes 用户提供了更广泛的选择,同时保持了相同的用户体验。 + +## 核心亮点 + +### 外部组件 + +某些数据库集群依赖元数据存储来实现分布式协调和动态配置。然而,随着数据库集群数量的增加,元数据存储本身可能消耗大量资源。例如 Pulsar 中的 Zookeeper 就属于这类组件。为降低开销,用户现在可以在多个数据库集群中引用同一个外部组件。 + +### 备份 API + +数据库集群的部分生命周期管理功能依赖于备份恢复能力,而该能力又依赖对象存储。若对象存储缺失,KubeBlocks 的某些生命周期管理功能可能无法正常工作。例如创建新副本或将数据恢复到其他节点等操作可能受到影响。 + +为解决此问题,我们计划将集群生命周期管理功能与备份恢复能力解耦。第一步是分离 API。通过新的备份 API,备份和恢复操作被抽象化,允许用户自定义备份方式。此外,该 API 现已支持 GCS、OBS 和 COS 对象存储。 + +### Pika v3.5 + +Pika 是由奇虎开发的开源 NoSQL 数据库,支持 Redis 协议,在处理超过 100GB 数据量时具有成本优势。从 Redis 迁移到 Pika 的过程非常平滑,因为 Pika 保留了相同的操作模式和使用习惯,确保对现有工作流的影响最小。截至目前,KubeBlocks 已支持部署 Pika v3.5 分片集群。 + +## 集成引擎概览 + +下表概述了 KubeBlocks 中集成的引擎及其功能。 + +| v0.7.0 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 备份/恢复 | 日志 | 配置 | 升级(数据库引擎版本) | 账户 | 故障转移 | 主从切换 | 监控 | +|---------------------------------------|--------|--------|--------------|------------|---------|----------------|------|--------|-----------------------------|---------|----------|------------|---------| +| apecloud-mysql | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | ✔️ | ✔️ | ✔️ | ✔️ | +| postgresql | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | +| redis | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | ✔️ | ✔️ | 不适用 | ✔️ | +| mongodb | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用 | ✔️ | ✔️ | ✔️ | +| kafka | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | ✔️ | +| pulsar | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | ✔️ | +| weaviate | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | ✔️ | +| qdrant | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | ✔️ | +| greptimedb | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| nebula | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| risingwave | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| starrocks | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| etcd | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| oceanbase | | ✔️ | ✔️ | 不适用 | 不适用 | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| foxlake | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| orioledb | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| oracle-mysql | ✔️ | 不适用 | ✔️ | ✔️ | ✔️ | ✔️ | 不适用| ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| official-postgresql | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| mysql (主从复制) | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | ✔️ | +| openldap | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| neon | ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| opensearch | ✔️ | 不适用 | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| vllm | 不适用 | 不适用 | 不适用 | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| ggml | | 不适用 | 不适用 | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| milvus | ✔️ | 不适用 | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| elasticsearch | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| tdengine | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| clickhouse | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| PolarDB-X | ✔️ | ✔️ | 不适用 | ✔️ | 不适用 | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | ✔️ | +| Zookeeper | ✔️ | ✔️ | ✔️ 
| ✔️ | ✔️ | 不适用 | ✔️ | ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | +| MariaDB | ✔️ | 不适用 | ✔️ | ✔️ | ✔️ | 不适用 | 不适用| 不适用 | 不适用 | 不适用 | \ No newline at end of file diff --git a/blogs/zh/announcing-kubeblocks-v0-8-0.mdx b/blogs/zh/announcing-kubeblocks-v0-8-0.mdx new file mode 100644 index 00000000..9c22a9ea --- /dev/null +++ b/blogs/zh/announcing-kubeblocks-v0-8-0.mdx @@ -0,0 +1,105 @@ +--- +date: 2024-01-12 +description: 本博客介绍KubeBlocks v0.8.0版本的主要变更。 +image: /img/blogs/thumbnails/blog-banner.png +slug: announcing-kubeblocks-v0-8-0 +tags: +- KubeBlocks v0.8.0 +- release notes +title: KubeBlocks v0.8.0 正式发布 +--- +# KubeBlocks v0.8.0 正式发布 + +激动人心的消息!KubeBlocks v0.8.0 正式发布了!🚀 🎉 🎈 + +KubeBlocks v0.8.0 在用户体验方面迈出了一大步,引入了组件级 API,这使得标准化构建块变得更小且更易于复用。 + +例如,ETCD 和 ZK 等广泛用于各类数据库集群的元数据库组件,经过组件模块化后现在可以直接引用。 + +我们还使 Vitess Proxy 成为了标准组件,这样开发者在定义各种发行版中 MySQL 或 PostgreSQL 引擎的读写分离拓扑时,就无需重复工作(繁重任务)。 + +此外,Add-on 机制也得到了显著改进。数据库引擎的 Helm Chart 已从 KubeBlocks 代码库中分离出来。从此,数据库引擎或版本的变更将与 KubeBlocks 发布解耦。 + +## 核心亮点 + +### 独立组件 API + +在集成新数据库引擎时,我们发现 KubeBlocks 的抽象设计存在不足。v0.8.0 版本将 Component 从 Cluster 定义中拆分出来,以更好地支持具有多组件的数据库类型。它支持组件之间的变量引用,包括 ConfigMap、Secret、Service、ServiceReference 等变量引用类型,可以更好地连接组件间的关系,为构建不同拓扑结构的集群奠定基础。 + +### 从 KubeBlocks 仓库移除插件 Helm Chart + +在之前的版本中,数据库引擎的 Helm Chart 位于 "deploy" 目录中,与 KubeBlocks Operator 紧密耦合。但这导致了两个问题:首先,升级 KubeBlocks 会触发数据库引擎的升级;其次,升级数据库引擎会覆盖现有的 CD/CV,导致所有集群重启。 + +为解决这些问题,KubeBlocks v0.8.0 将数据库引擎放入名为 "kubeblocks-addon" 的独立仓库中,并为数据库引擎及相关资源添加了版本号。这确保新安装不会覆盖现有资源,从而避免集群重启。同时 KubeBlocks 提供了 "kbcli addon" 命令,允许用户下载、安装、使用和卸载特定引擎版本。 + +### 支持多版本数据库引擎定义 + +在 v0.8.0 之前,KubeBlocks 升级可能会触发数据库集群重启。KubeBlocks v0.8.0 通过新的 Component API 和插件 Helm Chart 存储机制,在一定程度上解决了该问题。我们将在未来持续优化多版本设计,最终实现无负担升级。 + +## 变更内容 + +### 新功能 + +#### Pika + +支持多个 Pulsar 集群共享一个 zookeeper 组件。 + +#### Clickhouse + +集成监控、水平扩展和高可用功能。 + +#### Oceanbase + +新增主备集群模式,支持完整生命周期管理,并集成备份、恢复、监控和切换功能。 + +#### MySQL + +- MySQL 5.7 和 8.0 社区版支持完整生命周期管理,包括备份恢复、监控和高可用。 +- 在 ApeCloud MySQL 上新增日志审计功能。 + +#### PostgreSQL + +支持 wal-g 全量备份和 PITR(时间点恢复)。 + +#### OpsRequest + +支持自定义 OpsRequest,可执行特定操作动作。例如创建和删除 Kafka 主题。 + +#### NodePort + +为 Redis 启用 NodePort 访问方式。 + +### 兼容性 + +兼容重构前的 ClusterDefinition 和 ClusterVersion API。 + +### 易用性 + +- OpsRequest 支持任务排队,允许用户一次性提交多个任务。对于无法并发执行的任务,系统会在前一个任务完成后自动执行下一个任务。 +- 在 KubeBlocks 安装过程中,现在可以指定镜像仓库地址,加速镜像拉取。 + +### 可观测性 + +统一日志和指标收集的配置管理。 + +### API + +- 在 ComponentDefinition 中添加接口定义 +- 新增 OpsDefinition API +- 为 ActionSet 添加 PreDelete 动作。可在删除备份前执行该动作 + +### 稳定性 + +稳定性测试及相关改进。[进行中] KubeBlocks 故障测试。 + +### kbcli + +增强 addon 子命令功能。可从索引仓库安装插件。 + +## 不兼容变更 + +- 在 KubeBlocks 0.8.0 中,我们对 OceanBase 进行了功能增强(新增主备集群创建、支持主机网络和动态端口、支持备份恢复、监控、日志等功能)。v0.7.0 版本创建的集群与 v0.8.0 版本不兼容,因此如果您正在使用 v0.7.0 管理 OceanBase,建议升级至 v0.8.0。请先升级 KubeBlocks,再升级 OceanBase Addon。建议使用 OceanBase 官方数据导入导出工具(OBLOADER 和 OBDUMPER)进行数据迁移。 + +- KubeBlocks 0.8.0 精简了部署 KubeBlocks 时默认安装的数据引擎,移除了 greptime、influxdb、neon、oracle-mysql、oroledb、tdengine、mariadb、nebula、risingwave、starrocks、tidb、zookeeper。您可以通过 kbcli addon 子命令或 kubectl apply 命令从 addon 索引仓库按需安装;如果是从低版本升级,请遵循升级手册操作,避免删除正在使用的 addon,这可能会影响运行中的集群。 + +- KubeBlocks 0.8.0 的 Helm Chart 不再包含依赖的 CRD。使用 helm 命令安装或升级 KubeBlocks 时,需要先安装对应的 CRD 再安装或升级 KubeBlocks,详情请参考升级手册。 \ No newline at end of file diff --git a/blogs/zh/announcing-kubeblocks-v0-9-0.mdx b/blogs/zh/announcing-kubeblocks-v0-9-0.mdx new file mode 100644 index 00000000..9859e0b7 --- /dev/null +++ b/blogs/zh/announcing-kubeblocks-v0-9-0.mdx @@ -0,0 +1,149 @@ +--- +date: 2024-07-09 +description: 本博客介绍KubeBlocks v0.9.0版本的主要变更。 +image: 
/img/blogs/thumbnails/blog-release-0.9.png +slug: announcing-kubeblocks-v0-9-0 +tags: +- KubeBlocks v0.9.0 +- release notes +title: KubeBlocks v0.9.0 正式发布 +--- +# KubeBlocks v0.9.0 版本发布公告 + +我们非常高兴地宣布 KubeBlocks v0.9.0 版本正式发布,这标志着我们向备受期待的 v1.0 版本又迈进了一步。该版本引入了多项重大改进和新特性,全面提升了 KubeBlocks 平台的功能性和用户体验。 + + + +## API 亮点 + +- 在 KubeBlocks v0.9 中,随着 KubeBlocks 引入拓扑支持,集群构建体验变得更加灵活直观,就像用积木搭建集群一样。ClusterDefinition API 新增了 `topologies` 字段,允许开发者提供具有不同拓扑结构的多种部署模式。数据库用户可以在创建 Cluster 时通过 `topology` 字段选择拓扑结构。例如,Redis Addon 提供了三种拓扑:Standalone(单机)、Replication(复制)和 Proxy(代理)。Standalone 拓扑仅包含一个 Component - RedisServer,Replication 拓扑包含 RedisServer 和 Sentinel 两个 Components,而 Proxy 拓扑则额外添加了第三个 Component,例如 Twemproxy。 +- KubeBlocks 现在支持管理分布式数据库的水平扩展(Reshard)。您可以用一个 Component 表示一个水平分片,并通过增加或减少 Components 来扩展或收缩这个水平分片。这种扩展能力也将用于 Redis 和 Pika 的分布式部署。 +- KubeBlocks 现在使用 InstanceSet 替代 StatefulSet 来管理 Pods。InstanceSet 支持指定 Pod 下线以及 Pod 原地更新,并且在数据库复制架构中主从数据库可以采用不同的 Pod 规格(StatefulSet 不支持这些特性)。 +- 开发者可以为 Components 添加更多自定义事件处理器!v0.8 引入的 ComponentDefinition API 包含 `lifeCycleActions` 字段,允许您定义各种自定义事件处理器。在此基础上,KubeBlocks v0.9 为自定义 addon 实现提供了更多处理器,包括 roleprobe(节点角色探测)、memberLeave(节点下线)、preTerminate(Component 下线)和 postProvision(Component 上线)。事件处理器的扩展增强了 KubeBlocks 的表达能力。例如,preTerminate 和 postProvision 可用于在分布式数据库中执行跨分片数据重分配(Rebalance),或向 Sentinel 和 Orchestrator 等第三方 HA 管理器发起注册。 + +## 插件亮点 + +- KubeBlocks 支持 Redis 集群模式(分片模式) + + Redis 集群模式旨在提供水平写入扩展能力和智能客户端高可用策略,同时具备出色的故障转移能力。Redis 集群将数据分布在多个 Redis 节点上,显著提升系统容量、性能和可用性。 + +- KubeBlocks 引入 MySQL 复制模式 + + 相比 MGR 集群,MySQL 复制拓扑结构所需资源更少(仅需两个数据库副本)且数据复制开销更低。当对服务可用性和数据可靠性没有极端要求时,复制拓扑是更具成本效益的选择。您可以使用 `kbcli` 主动切换 MySQL 副本角色,或通过 `kubectl` 删除指定的 Kubernetes Pod 触发被动故障转移。如果没有长事务和大表 DDL 操作,故障转移通常可在 30 秒内完成。 + +## 变更内容 + +### 新功能 + +#### KubeBlocks + +- ClusterDefinition API + - 支持拓扑 API,允许开发者自定义多种拓扑结构。 +- Cluster API + - 支持 ShardingSpec API。 + - 支持分片扩缩容。 +- ComponentDefinition API + - `lifecycleActions` API 支持用户自定义操作动作,包括 roleprobe、memberLeave、preTerminate、postProvision。 + - 新增 Vars API 用于引用实例相关的动态资源和信息,包括 secret、service 和 service 引用。 + - 支持 Vars API。 + - 支持跨 Component 的 Vars 引用。 + - 优化 ServiceRef 引用。 + - 支持动态配置,在垂直扩缩容或水平扩缩容后重新生成指定变量。 +- Component + - 支持删除 Component。 + - 支持 ComponentVersion。 +- InstanceSet API + - InstanceSet 替代 StatefulSet 来管理 Pod。 + - 支持实例模板。 + - 支持指定实例缩容。 + - 支持原地更新。 +- OpsRequest API + - 支持重建故障备份实例。 + - 支持 force 标志控制并发。 + - 支持自定义多任务顺序执行。 +- 支持 NodeCountScaler。 +- 支持 PITR(时间点恢复)。 +- 支持跨 Namespace 恢复。 + +#### kbcli + +- 支持 PostgreSQL 时间点恢复。 +- `cluster` 支持 `rebuild-instance` 子命令以重建实例。 +- `cluster create` 子命令支持 `elasticsearch`。 +- 支持在创建备份仓库时指定路径前缀。 + +#### 插件 + +##### Redis + +- 支持官方 Redis Cluster 拓扑。 +- 增强 Redis 的功能和稳定性。 + - 适配 ComponentDefinition 和 ComponentVersion 等新 API,支持多种拓扑形式。 + - 优化 Redis Replication Cluster 初始化逻辑,移除对 DownwardAPI 的依赖。 + - 支持 Redis v7.2.4。 +- Redis 分片集群支持备份和恢复。 + +##### MySQL + +- 新增开源组件 Orchestrator Addon 用于管理 MySQL。 + +##### PostgreSQL + +- 支持 PostgreSQL PITR。 +- 支持 PostgreSQL v15.7。 + +##### Qdrant + +- Qdrant 分片集群支持备份和恢复。 + +##### MogDB + +- 支持在 v5.0.5 中创建、扩缩容、备份和切换 MogDB Replication Cluster。 + +##### ElasticSearch + +- 支持 Elasticsearch v7.7.1、v7.10.1 和 v8.8.2。 + +##### Pulsar + +- 支持 v3.0.2。 +- 支持 NodePort。 + +##### VictoriaMetrics + +- 支持 VictoriaMetrics v1.100.1。 + +### Release 0.9 中的 API 弃用及其他变更 + +- ConfigConstraint API 稳定并从 v1alpha1 升级到 v1beta1。 +- StorageProvider 的组变更,从 `storage.kubeblocks.io` 迁移到 `dataprotection.kubeblocks.io`。 +- ClusterVersion v1alpha1 CRD 将在 Release 1.0 中移除。 +- ComponentClassDefinition v1alpha1 CRD 将在 Release 1.0 中移除。 
+- ComponentResourceConstraint v1alpha1 CRD 将在 Release 1.0 中移除。 +- ClusterDefinition API + - `type`、`componentDefs`、`connectionCredential` 将在 Release 1.0 中移除。 +- Cluster API + - 调度:`tenancy` 和 `availabilityPolicy` 将在 Release 1.0 中移除。 + - API 简化:`replicas`、`resources`、`storage` 和 `network` 将在 Release 1.0 中移除。 +- ComponentDefinition API + - switchPolicy 将在 Release 1.0 中移除。相同功能可通过 `componentDefinition.spec.lifecycleActions.switchover` API 实现。 +- ServiceRef API + - `cluster` 将在 Release 1.0 中移除。相同功能可通过 `serviceRef.clusterServiceSelector` 实现。 + +此外,所有引用上述 API 的字段也被标记为弃用,并将在 Release 1.0 中移除。 + +- `clusterVersionRef` +- `componentDefRef` +- `classDefRef` + +KubeBlocks Release 0.9 仍保持与标记为弃用的 API 的兼容性。 + +### 弃用功能 + +#### kbcli 弃用功能 + +- 移除 `bench`、`fault` 和 `migration` 子命令以精简功能。 + +## 升级至 v0.9 + +请参阅 [升级至 KubeBlocks v0.9](https://kubeblocks.io/docs/release-0.9/user_docs/installation/upgrade/upgrade-kubeblocks-to-0.9)。 \ No newline at end of file diff --git a/blogs/zh/announcing-kubeblocks-v0-9-1.mdx b/blogs/zh/announcing-kubeblocks-v0-9-1.mdx new file mode 100644 index 00000000..f31ba38a --- /dev/null +++ b/blogs/zh/announcing-kubeblocks-v0-9-1.mdx @@ -0,0 +1,96 @@ +--- +date: 2024-10-18 +description: 本博客介绍KubeBlocks v0.9.1版本的主要变更。 +image: /img/blogs/thumbnails/blog-release-0.9.1.png +slug: announcing-kubeblocks-v0-9-1 +tags: +- KubeBlocks v0.9.1 +- release notes +title: KubeBlocks v0.9.1 正式发布 +--- +# KubeBlocks v0.9.1 版本发布公告 + +我们很高兴地宣布 KubeBlocks v0.9.1 正式发布! + +在此版本中,KubeBlocks 进一步优化了其 API 和插件功能,为您带来新特性和更好的用户体验。本次发布包含通过 Cluster API 启停集群、OpsRequest 中的实例重建能力、Redis 的时间点恢复(PITR)和基于键的恢复等新功能。我们还修复了一些错误并进行了多项改进以增强整体功能。 + +阅读完整[版本说明](https://kubeblocks.io/docs/preview/user_docs/upgrade/upgrade-with-kbcli/upgrade-kubeblocks-to-0.9.1)并升级至 KubeBlocks v0.9.1,探索更多功能! 
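若您此前是通过 kbcli 安装的 KubeBlocks,升级入口大致如下(仅为示意;完整的前置检查与参数请以上文链接的升级文档为准):

```bash
# 示意:使用 kbcli 将 KubeBlocks 升级到指定版本
kbcli kubeblocks upgrade --version 0.9.1
```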
+

## 亮点功能

### KubeBlocks

- 支持通过 Cluster API 启停集群

  该特性提供了新选项以满足不同场景下的多样化需求。

- 增强 OpsRequest 中的实例重建能力

  结合 KubeBlocks 的 [InstanceSet](https://kubeblocks.io/blog/instanceset-introduction),该功能显著提升了系统在故障场景下的恢复能力。

### 插件

- Redis

  支持时间点恢复(PITR)和基于键的恢复。

- ZooKeeper

  支持备份功能。

- 新版本支持

  MySQL 和 PostgreSQL 插件支持更多版本。有关插件最新版本信息,请参阅[插件列表](https://github.com/apecloud/kubeblocks-addons?tab=readme-ov-file#supported-add-ons)。

## 变更内容

### 新特性

#### KubeBlocks

- OpsDefinition 和 BackupPolicyTemplate 支持组件名前缀和正则匹配 [#8174](https://github.com/apecloud/kubeblocks/pull/8174)

  OpsDefinition 和 BackupPolicyTemplate 现在支持组件名前缀和正则表达式匹配,提供更大灵活性。

- 高可用性(HA)记录 [#8089](https://github.com/apecloud/kubeblocks/pull/8089)

  KubeBlocks 支持 HA 记录,增强容错能力和系统可靠性。

- 支持通过 Cluster API 启停集群 [#7783](https://github.com/apecloud/kubeblocks/pull/7783)

  KubeBlocks 支持通过 Cluster API 启停集群,简化集群管理。

- 支持实例重建时的水平扩展 [#7710](https://github.com/apecloud/kubeblocks/pull/7710)

  您可以通过 OpsRequest API 中的 inPlace 字段选择实例重建方式:原地重建或远程重建。

- 自动清理失败的 OpsRequests [#7796](https://github.com/apecloud/kubeblocks/pull/7796)

  KubeBlocks 新增自动清理失败 OpsRequests 的机制,优化资源管理。

- 备份失败时的日志收集 [#8208](https://github.com/apecloud/kubeblocks/pull/8208)

  KubeBlocks 支持在备份操作失败时收集日志。

#### 插件

- Redis 插件
  - 单机副本的时间点恢复(PITR)[#7998](https://github.com/apecloud/kubeblocks/pull/7998)
  - Redis 基于键的恢复 [#8129](https://github.com/apecloud/kubeblocks/pull/8129)
- 支持 Loki [#707](https://github.com/apecloud/kubeblocks-addons/pull/707)
- 支持 MinIO [#926](https://github.com/apecloud/kubeblocks-addons/pull/926)
- 支持 RabbitMQ [#746](https://github.com/apecloud/kubeblocks-addons/pull/746)
- 支持 MySQL 8.4 [#987](https://github.com/apecloud/kubeblocks-addons/pull/987)
- 支持 PostgreSQL 16 [#973](https://github.com/apecloud/kubeblocks-addons/pull/973)
- ZooKeeper 插件
  - 支持备份功能 [#794](https://github.com/apecloud/kubeblocks-addons/pull/794), [#851](https://github.com/apecloud/kubeblocks-addons/pull/851)

### 其他改进

- ComponentDefinition 不可变性检查

  KubeBlocks 为 ComponentDefinition 添加了默认的不可变性检查,避免意外修改,增强系统稳定性。

- 移除 Application 插件 (#7866)

  Application 插件已从代码库中移除,默认不再安装。如有需要,您仍可手动安装。
\ No newline at end of file
diff --git a/blogs/zh/announcing-kubeblocks-v0-9-2.mdx b/blogs/zh/announcing-kubeblocks-v0-9-2.mdx
new file mode 100644
index 00000000..42a1859a
--- /dev/null
+++ b/blogs/zh/announcing-kubeblocks-v0-9-2.mdx
@@ -0,0 +1,54 @@
+---
+date: 2024-12-03
+description: 本博客介绍KubeBlocks v0.9.2版本的主要变更。
+image: /img/blogs/thumbnails/blog-release-0.9.2.png
+slug: announcing-kubeblocks-v0-9-2
+tags:
+- KubeBlocks v0.9.2
+- release notes
+title: KubeBlocks v0.9.2 正式发布
+---
+# KubeBlocks v0.9.2 正式发布

我们很高兴地宣布 KubeBlocks v0.9.2 版本正式发布。本次更新包含多项新功能、错误修复以及各种改进。

阅读完整[发布说明](https://kubeblocks.io/docs/release-0.9/user_docs/upgrade/upgrade-with-kbcli/upgrade-kubeblocks-to-0.9.1)并[升级至 KubeBlocks v0.9.2](https://kubeblocks.io/docs/release-0.9/user_docs/upgrade/upgrade-with-kbcli/upgrade-kubeblocks-to-0.9.1)以探索更多功能!v0.9.2 的升级流程与 v0.9.1 完全相同,只需按照 v0.9.1 的教程操作,将版本号更新为 v0.9.2 即可完成升级。

## KubeBlocks

- 新增容器镜像滚动更新支持,实现最小停机时间的无缝更新 ([#8389](https://github.com/apecloud/kubeblocks/pull/8389))
- 引入组件级停止/启动功能,实现对集群组件的细粒度控制 ([#8480](https://github.com/apecloud/kubeblocks/pull/8480))
- 增强分片集群的主机网络支持 ([#8517](https://github.com/apecloud/kubeblocks/pull/8517), [#8502](https://github.com/apecloud/kubeblocks/pull/8502))
- 改进分片集群的水平扩缩容 OpsRequest ([#8530](https://github.com/apecloud/kubeblocks/pull/8530))
- 新增 Pod 重建更新策略支持,增强更新策略灵活性 ([#8466](https://github.com/apecloud/kubeblocks/pull/8466))
- KubeBlocks 安装改进:支持定义额外注解和环境变量 ([#8454](https://github.com/apecloud/kubeblocks/pull/8454))

## 插件

### MySQL

- 新增 Jemalloc 支持以改进内存管理 ([#1158](https://github.com/apecloud/kubeblocks-addons/pull/1158))

### Redis

- 为 Redis Sentinel 新增 NodePort 通告模式支持 ([#1227](https://github.com/apecloud/kubeblocks-addons/pull/1227))
- 引入固定 Pod IP、自定义主节点名称和完整 FQDN 域名支持 ([#1222](https://github.com/apecloud/kubeblocks-addons/pull/1222))
- 更新 PITR 备份中的用户 ACL 备份频率 ([#1180](https://github.com/apecloud/kubeblocks-addons/pull/1180))

### RabbitMQ

- 新增成员离开操作支持,实现缩容场景 ([#1229](https://github.com/apecloud/kubeblocks-addons/pull/1229))
- 通过配置约束和基于文件的日志增强 RabbitMQ 配置 ([#1199](https://github.com/apecloud/kubeblocks-addons/pull/1199))

### MongoDB

- 新增主机网络支持 ([#1152](https://github.com/apecloud/kubeblocks-addons/pull/1152))

### PostgreSQL

- 增强原生 PostgreSQL 集成并新增 PostgreSQL 15 支持 ([#1092](https://github.com/apecloud/kubeblocks-addons/pull/1092))
- 新增 Supabase PostgreSQL 支持 ([#1154](https://github.com/apecloud/kubeblocks-addons/pull/1154))

### Xinference

- 新增 Xinference v0.15.4 支持 ([#1248](https://github.com/apecloud/kubeblocks-addons/pull/1248))

您可以查看[完整变更日志](https://github.com/apecloud/kubeblocks/compare/v0.9.1...v0.9.2)。
\ No newline at end of file
diff --git a/blogs/zh/announcing-kubeblocks-v0-9-3.mdx b/blogs/zh/announcing-kubeblocks-v0-9-3.mdx
new file mode 100644
index 00000000..a64b9231
--- /dev/null
+++ b/blogs/zh/announcing-kubeblocks-v0-9-3.mdx
@@ -0,0 +1,82 @@
+---
+date: 2025-02-19
+description: 本博客介绍KubeBlocks v0.9.3版本的主要变更。
+image: /img/blogs/thumbnails/blog-release-0.9.3.png
+slug: announcing-kubeblocks-v0-9-3
+tags:
+- KubeBlocks v0.9.3
+- release notes
+title: KubeBlocks v0.9.3 正式发布
+---
+# KubeBlocks v0.9.3 版本发布公告

我们很高兴地宣布 KubeBlocks v0.9.3 版本正式发布。本次更新包含多项新功能、错误修复及各类改进。

## KubeBlocks 核心功能

### 新特性

- **成员加入动作**:生命周期动作新增支持 `memberjoin` 操作。
- **增量备份**:数据保护功能支持增量备份 ([#8757](https://github.com/apecloud/kubeblocks/pull/8757))。
- **分片集群优化**
  - 支持从备份重建分片集群实例 ([#8777](https://github.com/apecloud/kubeblocks/pull/8777))。
  - 支持在 OpsRequest 中进行分片组件切换 ([#8786](https://github.com/apecloud/kubeblocks/pull/8786))。

## KubeBlocks 插件

### MySQL

- **功能改进**:
  - 支持配置 MySQL 的 `lower_case_table_names` 参数 ([#1335](https://github.com/apecloud/kubeblocks-addons/pull/1335))。
  - 更新 MySQL 配置以支持 `default_time_zone` 参数动态更新 ([#1377](https://github.com/apecloud/kubeblocks-addons/pull/1377))。
- **问题修复**:
  - 修复 MySQL 备份策略未生效的问题 ([#1310](https://github.com/apecloud/kubeblocks-addons/pull/1310))。

### PostgreSQL

- **问题修复**:
  - 修复因 WAL 日志备份缺失导致的 PITR(时间点恢复)失败问题 ([#1280](https://github.com/apecloud/kubeblocks-addons/pull/1280))。
  - 解决 PITR 恢复失败问题 ([#1290](https://github.com/apecloud/kubeblocks-addons/pull/1290))。
  - 优化 WAL 日志归档机制。

### Redis

- **功能改进**:
  - 更新 Redis 版本以修复 [CVE-2024-46981](https://access.redhat.com/security/cve/cve-2024-46981) 漏洞 ([#1405](https://github.com/apecloud/kubeblocks-addons/pull/1405))。
  - 支持为 Redis 自定义 Secret 密码 ([#1406](https://github.com/apecloud/kubeblocks-addons/pull/1406))。
  - 支持为 Redis Sentinel 配置 `storageClassName` 参数 ([#1418](https://github.com/apecloud/kubeblocks-addons/pull/1418))。
- **问题修复**:
  - 修复 Redis 集群分片扩容时的 FQDN 解析问题 ([#1283](https://github.com/apecloud/kubeblocks-addons/pull/1283))。

### MongoDB

- **功能改进**:
  - 新增 MongoDB 版本支持:v5.0.30/6.0.20/7.0.16/8.0.4 ([#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431))。
  - 更新 MongoDB 集群创建示例
([#1363](https://github.com/apecloud/kubeblocks-addons/pull/1363))。 + +### ClickHouse + +- **功能改进**: + - 新增 ClickHouse 重配置示例 ([#1401](https://github.com/apecloud/kubeblocks-addons/pull/1401))。 + - 修复 ClickHouse 分片初始化问题 ([#1402](https://github.com/apecloud/kubeblocks-addons/pull/1402))。 + - 新增存储配置,修复水平扩展失败问题,并禁用分片功能 ([#1450](https://github.com/apecloud/kubeblocks-addons/pull/1450))。 +- **问题修复**: + - 解决分布式表跨分片查询失败的问题 ([#1411](https://github.com/apecloud/kubeblocks-addons/pull/1411))。 + +### Zookeeper + +- **功能改进**: + - 使用 `nc` 工具替代 Java 实现 Zookeeper 探针检测。 +- **问题修复**: + - 修复备份大小显示异常问题。 + - 解决恢复操作失败问题。 + - 修复快照日志挂载错误。 + +### TiDB + +- **功能改进**: + - 新增支持 TiDB v8.4 版本 ([#1275](https://github.com/apecloud/kubeblocks-addons/pull/1275))。 + +## 升级至 v0.9.3 + +请参考 [升级至 KubeBlocks \ No newline at end of file diff --git a/blogs/zh/announcing-kubeblocks-v0-9-4.mdx b/blogs/zh/announcing-kubeblocks-v0-9-4.mdx new file mode 100644 index 00000000..e240689b --- /dev/null +++ b/blogs/zh/announcing-kubeblocks-v0-9-4.mdx @@ -0,0 +1,73 @@ +--- +date: 2025-06-30 +description: 本博客介绍KubeBlocks v0.9.4版本的主要变更。 +image: /img/blogs/thumbnails/blog-release-0.9.4.png +slug: announcing-kubeblocks-v0-9-4 +tags: +- KubeBlocks v0.9.4 +- release notes +title: KubeBlocks v0.9.4 正式发布 +--- +# 宣布 KubeBlocks v0.9.4 发布 + +我们很高兴地宣布 KubeBlocks v0.9.4 版本发布。本次发布包含多项新功能、错误修复和各种改进。以下是详细的更新内容。 + +## KubeBlocks + +### 新功能 + +* **集群支持指定 PVC 注解和标签** 通过为不同类型的 PVC 应用不同的注解/标签,CSI 驱动可以为不同存储卷设置不同的 IO 配额。[(#8799)](https://github.com/apecloud/kubeblocks/pull/8799) + +* **添加跳过预终止操作的注解键** 注解:`apps.kubeblocks.io/skip-pre-terminate` [(#9121)](https://github.com/apecloud/kubeblocks/pull/9121) + +* **支持分片组件水平扩展时重新渲染配置** 在分片组件水平扩展后重新渲染配置。[(#9195)](https://github.com/apecloud/kubeblocks/pull/9195) + +## KubeBlocks 插件 + +### MySQL + +* 更新 `innodb_redo_log_capacity` 和 `secure_file_priv` 参数作用域 [(#1510)](https://github.com/apecloud/kubeblocks-addons/pull/1510) [(#1585)](https://github.com/apecloud/kubeblocks-addons/pull/1585) + +### PostgreSQL + +* 修复 PostgreSQL 15 & 16 缺少 `backupPolicy` 的问题 [(#1546)](https://github.com/apecloud/kubeblocks-addons/pull/1546) + +### Redis + +* 改进 Redis 集群的停止-启动流程 [(#1554)](https://github.com/apecloud/kubeblocks-addons/pull/1554) +* 改进成员离开逻辑(不再依赖公告地址)[(#1548)](https://github.com/apecloud/kubeblocks-addons/pull/1548) +* 优化 Redis 主机网络变量 [(#1603)](https://github.com/apecloud/kubeblocks-addons/pull/1603) + +### MongoDB + +* 添加对 MongoDB exporter 的支持 [(#1721)](https://github.com/apecloud/kubeblocks-addons/pull/1721) + +### RabbitMQ + +* 修复 RabbitMQ 启动失败问题 [(#1479)](https://github.com/apecloud/kubeblocks-addons/pull/1479) +* 修复 RabbitMQ 成员离开问题 [(#1657)](https://github.com/apecloud/kubeblocks-addons/pull/1657) + +### ZooKeeper + +* 为 ZooKeeper 添加新的 CMPD 定义 [(#1514)](https://github.com/apecloud/kubeblocks-addons/pull/1514) +* 修复快照日志丢失问题 [(#1509)](https://github.com/apecloud/kubeblocks-addons/pull/1509) +* 添加 `minSessionTimeout` 参数 [(#1535)](https://github.com/apecloud/kubeblocks-addons/pull/1535) +* 修复备份和恢复逻辑 [(#1550)](https://github.com/apecloud/kubeblocks-addons/pull/1550) +* 改进 ZooKeeper 角色探测 [(#1542)](https://github.com/apecloud/kubeblocks-addons/pull/1542) +* 增强 ZooKeeper 跟踪日志 [(#1693)](https://github.com/apecloud/kubeblocks-addons/pull/1693) + +### VictoriaMetrics + +* 支持创建 VictoriaMetrics 集群时使用环境变量 [(#1622)](https://github.com/apecloud/kubeblocks-addons/pull/1622) + +### ClickHouse + +* 持久化 ClickHouse 和 Keeper 的日志文件 [(#1560)](https://github.com/apecloud/kubeblocks-addons/pull/1560) + +## 升级至 v0.9.4 + +参考[升级至 KubeBlocks 
v0.9.x](https://kubeblocks.io/docs/preview/user_docs/upgrade/upgrade-to-v09-version)。 + +## 完整变更日志 + +您可以查看[完整变更日志](https://github.com/apecloud/kubeblocks/compare/v0.9.3...v0.9.4)获取更多变更详情。 \ No newline at end of file diff --git a/blogs/zh/announcing-kubeblocks-v1-0-0.mdx b/blogs/zh/announcing-kubeblocks-v1-0-0.mdx new file mode 100644 index 00000000..e483d5a4 --- /dev/null +++ b/blogs/zh/announcing-kubeblocks-v1-0-0.mdx @@ -0,0 +1,189 @@ +--- +date: 2025-05-28 +description: 本博客介绍KubeBlocks v1.0.0的主要变更。 +image: /img/blogs/thumbnails/blog-release-1.0.0.png +slug: announcing-kubeblocks-v1-0-0 +tags: +- KubeBlocks v1.0.0 +- release notes +title: KubeBlocks v1.0.0 正式发布 +--- +# KubeBlocks v1.0.0 正式发布 + +我们很高兴地宣布 KubeBlocks 1.0.0 版本正式发布。 + +KubeBlocks 1.0.0 标志着一个重要里程碑,其核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现了重大改进,为生产环境带来了更高的灵活性和可靠性。 + +## 核心亮点 + +### 稳定版 API + +KubeBlocks 0.9 版本引入了灵活的拓扑结构、高级 Pod 管理(InstanceSet)和生命周期钩子等功能。如今这些特性已**正式升级为稳定版本**。 + +以下 CRD 现已升级至 **`v1` 稳定版本**,并获得长期支持: + +**`apps.kubeblocks.io` API 组:** + +* `ClusterDefinition` +* `Cluster` +* `ComponentDefinition` +* `Component` +* `ComponentVersion` +* `ServiceDescriptor` +* `ShardingDefinition` +* `SidecarDefinition` + +**`workloads.kubeblocks.io` API 组:** + +* `InstanceSet` + +### KubeBlocks 功能特性 + +* **滚动更新**:通过 Cluster API 支持滚动升级,最大限度减少更新期间的停机时间 +* **增量备份**:新增增量备份支持,提升性能并降低存储消耗 + +### KubeBlocks 插件亮点 + +* **MySQL 增强**:新增 TLS 支持、基于 ProxySQL 的组复制以及 WAL-G 实现的 PITR(时间点恢复),显著提升安全性和恢复能力 +* **MongoDB PITR 与多版本支持**:为 MongoDB 引入时间点恢复功能并新增版本兼容性 +* **Kafka 改进**:支持外部 ZooKeeper、自定义 Prometheus 指标以及多网络访问,提升灵活性和可观测性 +* **Redis 增强**:新增集群切换、实例重建和外部配置支持,强化运维健壮性 + +--- + +## 变更内容 + +### KubeBlocks + +#### 集群管理 + +* **滚动升级**:通过 Cluster API 实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) +* **动态镜像仓库**:支持动态替换镜像仓库以实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) +* **分片 Pod 反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) +* **Pod 元数据更新**:允许更新底层 Pod 的标签和注解 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) +* **PVC 存储卷属性**:支持为 PVC 设置 volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) +* **组件定义的 PolicyRules**:添加细粒度的策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) +* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) + +#### 数据保护 + +* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) +* **备份/恢复参数**:确保备份/恢复期间的配置一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) +* **保留最新备份**:新增保留最新备份的选项 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) + +#### 运维 + +* **OpsRequest 验证**:引入验证策略以确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) + +--- + +### KubeBlocks 插件 + +#### MySQL + +* **TLS 支持**:新增安全连接的 TLS 支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) +* **基于 ProxySQL 的组复制**:通过 ProxySQL 集成增强高可用性 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) +* **PITR 支持**:使用 WAL-G 实现时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) +* **持续与增量备份**:通过 WAL-G 改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) + +#### Redis + +* **集群切换与 Sentinel 优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) +* **实例重建**:支持重建 Redis 实例 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) + +#### MongoDB + +* **PITR 支持**:新增时间点恢复功能 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) +* **新版本支持**:新增对 MongoDB 8.0.8 和 8.0.6 版本的支持 
[#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) + +#### Kafka + +* **外部 ZooKeeper**:为 Kafka 2.7 添加外部 ZooKeeper 支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) +* **自定义 Prometheus 指标**:允许配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) +* **跳过广告端口解析**:使用 Pod IP 时避免解析 [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) +* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) + +#### RabbitMQ + +* **新版本支持**:新增对 RabbitMQ 4.0.9 版本的支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) + +#### ClickHouse + +* **22.9.4 支持**:新增对 ClickHouse 22.9.4 的兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) + +#### TiDB + +* **8.4 版本支持**:新增对 TiDB 8.4 的支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) +* **升级至 6.5.12**:将 TiDB 6 更新至 v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) + +--- + +### API 正式发布、弃用与移除 + +#### GA (v1) API 升级 + +以下 CRD 现已**升级至 `v1` 并视为稳定版本**,将获得长期支持: + +**`apps.kubeblocks.io` API 组** + +- `ClusterDefinition` +- `Cluster` +- `ComponentDefinition` +- `Component` +- `ComponentVersion` +- `ServiceDescriptor` +- `ShardingDefinition` +- `SidecarDefinition` + +**`workloads.kubeblocks.io` API 组** + +- `InstanceSet` + +> 这些资源的 `v1alpha1` 和 `v1beta1` 版本现已弃用,可能在未来的版本中移除。 + +#### 弃用声明 + +以下 CRD 已被弃用,并将在**后续版本中移除**。请相应迁移您的配置: + +* `ConfigConstraint` +* `Configuration` + +> 这些资源将不再维护或接收更新。 + +#### 新增 Alpha API(实验性) + +新的 **`parameters.kubeblocks.io`** API 组引入了细粒度配置 API: + +* `ComponentParameter` +* `ParamConfigRenderer` +* `Parameter` +* `ParametersDefinition` + +> 这些 API 旨在替代已弃用的 `ConfigConstraint` 和 `Configuration`。 + +#### API 组重组 + +部分 API 已迁移至新的组别以更好地体现其领域职责。请相应更新您的清单: + +| 资源 | 原 API 组 | 新 API 组 | +| ----------------------------- | ------------------- | ----------------------------- | +| `OpsDefinition` / `OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | +| `BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | + +## 升级说明 + +:::note +请注意,目前不支持从 0.9 版本直接升级到 1.0 版本。 +::: + +我们正在积极开发一个经过充分测试的可靠升级路径,该功能将在即将发布的版本中提供。 + +## 致谢 + +在庆祝 KubeBlocks 1.0.0 发布之际,我们要向所有工程师、贡献者和合作伙伴致以诚挚的谢意,正是你们的努力将该项目塑造成今天的模样。 + +特别感谢快手、中国移动云、唯品会、腾讯和 360 的技术贡献、深度反馈和真实场景用例,这些极大地推动了项目架构、性能和生产就绪度的进步。 + +我们同样深深感激社区贡献者们 —— 你们的代码提交、问题报告、讨论和评审,为项目的高质量发展和创新提供了关键动力。 + +1.0.0 版本标志着我们在构建健壮的云原生数据库平台道路上迈出了重要一步。我们期待与各位继续同行 —— 共同扩展生态、迎接新挑战、突破数据基础设施的边界。 \ No newline at end of file diff --git a/blogs/zh/community-monthly-report-for-april-2024.mdx b/blogs/zh/community-monthly-report-for-april-2024.mdx new file mode 100644 index 00000000..3bdb0199 --- /dev/null +++ b/blogs/zh/community-monthly-report-for-april-2024.mdx @@ -0,0 +1,73 @@ +--- +date: 2024-05-08 +description: OSPP 2024,即将发布的v0.9.0版本,了解KubeBlocks社区的最新动态。 +image: /img/blogs/thumbnails/blog-2024-04-monthly-report.png +slug: Community-Monthly-Report-for-April-2024 +tags: +- Monthly Report +- v0.9.0 +- Community +title: 2024年4月社区月度报告 +--- +# 2024年4月社区月报 + +## 概述 + +4月,KubeBlocks持续推进v0.9.0版本的开发工作。新版本预计将于5月发布,敬请期待 🌟。 + +在过去的一个月里,社区参与了6场线下活动,合并了157个PR,解决了133个issue,共有27位开发者参与贡献。更令人振奋的是,KubeBlocks和WeScale已宣布参与2024年度"开源之夏"(OSPP)活动,项目现已开放学生报名通道。详情请参阅: +- [KubeBlocks社区主页](https://summer-ospp.ac.cn/org/orgdetail/833ca537-91a2-44a8-9965-5eee8f34aceb?lang=en) +- [WeScale社区主页](https://summer-ospp.ac.cn/org/orgdetail/5d8efb0a-7f0d-4705-b253-00bb162ef507?lang=en) + +截至目前,KubeBlocks已收获1.7k星标。感谢大家的支持。 + +## 重点更新 + +- 
支持将备份CR保存至备份仓库,允许用户使用保存的文件手动恢复集群 (#7002) +- 支持ServiceRef引用新的API对象,实现与ConnCredential的解耦 (#7006) +- RSM支持Pod原地更新:新增Kubernetes版本获取能力,改进Pod模板版本生成算法,并支持IgnorePodVerticalScaling开关 (#7000) +- 支持集群RuntimeClassName配置 (#7001) +- 支持指定实例缩容,适用于节点故障、数据损坏或实例不可用等场景 (#6958) +- 在集群API中支持可覆盖的组件服务 (#6934) + +## 问题修复 + +- 修复从v0.8.2升级到v0.9后集群始终处于"Deleting"状态的问题 (#6985) +- 修复垂直扩缩容(vscale)后集群始终处于"Updating"状态的问题 (#6971) +- 修复v0.9版本中stop/hscale操作请求持续运行的问题 (#6972) + +## 新晋贡献者 + +👏 让我们欢迎 + +💙 @Chiwency 💙! + +很高兴你加入KubeBlocks大家庭。Chiwency在上月成功合并了一个PR。 + +![2024-04-certificate](/img/blogs/2024-04-certificate.jpg) + +## 活动动态 + +KubeBlocks & WeScale已加入2024开源之夏。快来和我们一起探索技术难题的解决方案!更有丰厚奖金、礼品和证书等你来拿! +点击下方链接查看详情并联系导师: +- 基础项目:[KubeBlocks支持etcd插件](https://summer-ospp.ac.cn/org/prodetail/248330234?lang=en&list=pro) +- 进阶项目:[KubeBlocks支持Redis时间点恢复(PITR)及特定键值恢复](https://summer-ospp.ac.cn/org/prodetail/248330238?lang=en&list=pro) +- 进阶项目:[基于Wescale实现表级读写一致性优化策略](https://summer-ospp.ac.cn/org/prodetail/245d80005?lang=en&list=pro) + +## 新手任务 + +我们呼吁更多开发者参与KubeBlocks的协同开发。从[good first issues](https://github.com/apecloud/kubeblocks/contribute)开始,期待与您共同打造下一代数据库管理平台!立即认领 👇 + +- [#7229 [改进] 通过名称指定备份/恢复端口](https://github.com/apecloud/kubeblocks/issues/7229) +- [#7031 [改进] 文档化API `opsrequest.spec.restoreSpec.restoreTimeStr`](https://github.com/apecloud/kubeblocks/issues/7031) + +![2024-04-good-first-issues](/img/blogs/2024-04-good-first-issues.jpg) + +相关链接: +- [新手任务](https://github.com/apecloud/kubeblocks/contribute) +- [贡献指南](https://github.com/apecloud/kubeblocks/blob/main/docs/CONTRIBUTING.md) +- [开发手册](https://github.com/apecloud/kubeblocks/blob/main/docs/00%20-%20index.md) + +欢迎加入GitHub讨论区或[KubeBlocks Slack群组](https://join.slack.com/t/kubeblocks/shared_invite/zt-29tx52d8n-vli24S6gtD5ODJlNUqLqbQ)! + +![2024-04-overview](/img/blogs/2024-04-overview.jpg) \ No newline at end of file diff --git a/blogs/zh/community-monthly-report-for-february-2024.mdx b/blogs/zh/community-monthly-report-for-february-2024.mdx new file mode 100644 index 00000000..04380cb8 --- /dev/null +++ b/blogs/zh/community-monthly-report-for-february-2024.mdx @@ -0,0 +1,61 @@ +--- +date: 2024-03-07 +description: 分片API、Camellia Redis代理... 
来看看KubeBlocks社区的最新动态。 +image: /img/blogs/thumbnails/blog-2024-02-monthly-report.png +slug: Community-Monthly-Report-for-February-2024 +tags: +- Monthly Report +- Sharding API +- Community +title: 2024年2月社区月度报告 +--- +# 2024年2月社区月报 + +## 概述 + +2月份,KubeBlocks 持续推进 v0.8.2 和 v0.9.0 版本的开发工作,重点实现了分片(sharding)API 功能,该特性将在 v0.8.2 版本中发布。 + +在过去一个月里,社区共合并了 76 个 PR,解决了 57 个 issue。共有 21 位社区成员参与贡献,其中包括 1 位新贡献者。 + +## 重点进展 + +- 支持 ShardingSpec API 用于定义具有分片拓扑结构的数据库集群(将在 v0.8.2 版本发布) +- 支持基于 KubeBlocks 分片 API 创建和删除官方 Redis 集群(将在 v0.8.2 版本发布) +- 支持 Camellia Redis Proxy 的一键部署、配置管理、水平扩展、重启等操作(将在 v0.8.2 版本发布) +- 支持声明主机网络能力 (#6705) +- 支持指定 Operator 应管理的命名空间 (#6641) +- MongoDB 支持通过主机网络从 K8s 外部连接 (#6689) +- Lorry 支持 action 命令 (#6474) +- 兼容 CronJob v1beta1,解决 K8s v1.20 上的备份失败问题 (#6687) +- 【外部贡献】为 Addon spec 添加版本验证。Addon CRD 支持 provider 和 version 字段,控制器可将 provider 信息添加到标签以便快速查询 (#6603) + +## 缺陷修复 + +- 修复了 componentName 为空时暴露 SVC 的问题 (#6712) +- 调整 OceanBase 切换功能以适配 Oracle 租户模式 (#6710) +- 解决了使用主机网络时备份恢复失败的问题 (#6715) +- 修复参数重配置失败问题 (#6664 #6665) +- 修复动态参数不生效问题。在 Patroni PostgreSQL 中,如果同时修改动态参数和静态参数,动态参数不会生效 (#6648) +- 移除暴露 SVC 时生成的重复端口 (#6631) +- 修复 ServiceAccount 相关 bug。如果 RestoreCR 指定了 ServiceAccount,备份和恢复 Pod 将使用指定的 SA (#6605) + +## 新贡献者 + +👏 热烈欢迎 + +💙 @Shuimo03 💙 + +加入 KubeBlocks 社区!Shuimo03 在上月成功合并了一个 PR (#6603)。 + +我们呼吁更多开发者参与 KubeBlocks 的协同开发。从 [good first issues](https://github.com/apecloud/kubeblocks/contribute) 开始,我们期待与您共同打造下一代数据库管理平台! + +![2024-02-good-first-issues](/img/blogs/2024-02-good-first-issues.png) + +相关链接: +- Good first issue: https://github.com/apecloud/kubeblocks/contribute +- 贡献指南: https://github.com/apecloud/kubeblocks/blob/main/docs/CONTRIBUTING.md +- 开发指南: https://github.com/apecloud/kubeblocks/blob/main/docs/00%20-%20index.md + +欢迎加入 GitHub Discussion 或 [KubeBlocks Slack 群组](https://join.slack.com/t/kubeblocks/shared_invite/zt-29tx52d8n-vli24S6gtD5ODJlNUqLqbQ)! + +![2024-02-overview](/img/blogs/2024-02-overview.png) \ No newline at end of file diff --git a/blogs/zh/community-monthly-report-for-january-2024.mdx b/blogs/zh/community-monthly-report-for-january-2024.mdx new file mode 100644 index 00000000..5034f63c --- /dev/null +++ b/blogs/zh/community-monthly-report-for-january-2024.mdx @@ -0,0 +1,60 @@ +--- +date: 2024-02-07 +description: 'v0.8.x 版本发布、新贡献者、客户端 SDK... 一览 KubeBlocks 社区最新动态' +image: /img/blogs/thumbnails/blog-2024-01-monthly-report.png +slug: Community-Monthly-Report-for-January-2024 +tags: +- Monthly Report +- v0.8.0 +- Community +title: 2024年1月社区月度报告 +--- +# 2024年1月社区月报 + +## 概览 + +- **KubeBlocks v0.8.0发布** + + KubeBlocks [v0.8.0](./announcing-kubeblocks-v0.8.0.md) 本月正式发布,当前最新稳定版本为v0.8.1。 + +- **KubeBlocks v0.8.0亮点** + + KubeBlocks v0.8.0引入了Component API,简化了数据库引擎的集成。插件(Addons)也已从KubeBlocks仓库中分离出来独立存在,使用更加便捷。 + +- **社区贡献** + + 过去一个月社区共合并114个PR,解决100个issue。共有24位社区成员参与贡献,其中包括5位新贡献者。 + +## 重点更新 + +- KubeBlocks新增支持TiDB、Xinference、openGauss、InfluxDB、OceanBase(主备模式)和Flink等插件。各引擎支持能力请查看[官方文档](https://kubeblocks.io/docs/release-0.8/user_docs/overview/supported-addons) +- KubeBlocks客户端SDK现已可用(查看[代码库](https://github.com/apecloud/kubeblocks-client)),目前支持Java、Python和Rust语言 +- MySQL复制集群现已支持切换(Switchover)功能,可通过`kbcli promote`命令执行 + +## 新晋贡献者 + +1月份KubeBlocks迎来了5位新贡献者,合并了5个拉取请求。 + +他们是 💙 @earayu @driverby @kissycn @LiuG-lynx @lispking 💙。 + +感谢你们为构建下一代数据库管理平台KubeBlocks做出的贡献。 + +开发者可以通过认领"good first issues"开始参与贡献,只需在对应issue留言即可。维护者分配后即可开始工作! 
+ +此外,由于插件现已独立且客户端SDK发布,您也可以积极参与这些领域的贡献。 + +相关链接: +- 新手任务: https://github.com/apecloud/kubeblocks/contribute +- 贡献指南: https://github.com/apecloud/kubeblocks/tree/main/docs +- KubeBlocks插件仓库: https://github.com/apecloud/kubeblocks-addons +- KubeBlocks客户端SDK仓库: https://github.com/apecloud/kubeblocks-client + +欢迎加入GitHub讨论区或[KubeBlocks Slack群组](https://join.slack.com/t/kubeblocks/shared_invite/zt-29tx52d8n-vli24S6gtD5ODJlNUqLqbQ)! + +## 社区活动 + +1月10日,社区宣布启动第二期开发者训练营招募。为保证训练营效果,本期共招募50名开发者。 + +![2024-01-overview](/img/blogs/2024-01-overview.png) + +另外,中国传统春节即将到来 🎉。祝大家新年快乐! \ No newline at end of file diff --git a/blogs/zh/community-monthly-report-for-march-2024.mdx b/blogs/zh/community-monthly-report-for-march-2024.mdx new file mode 100644 index 00000000..7c438116 --- /dev/null +++ b/blogs/zh/community-monthly-report-for-march-2024.mdx @@ -0,0 +1,61 @@ +--- +date: 2024-04-07 +description: KubeBlocks v0.8.2,新贡献者加入,快来了解社区最新动态。 +image: /img/blogs/thumbnails/blog-2024-03-monthly-report.png +slug: Community-Monthly-Report-for-March-2024 +tags: +- Monthly Report +- v0.8.2 +- Community +title: 2024年3月社区月报 +--- +# 2024年3月社区月报 + +## 概述 + +3月份,KubeBlocks正式发布了v0.8.2版本。本次更新显著增加了对Redis集群、camellia-redis-proxy的支持,并与Pulsar v3.0.2实现了兼容。此外,开发团队正在积极筹备即将发布的v0.9.0版本。 + +在过去一个月中,社区共合并了91个PR,解决了92个issue。共有19位社区成员参与了贡献,其中包括6位新贡献者。 + +截至目前,KubeBlocks在GitHub上已获得1607个star。我们衷心感谢您的支持,并期待更多互动🌟。 + +## 重点更新 + +- 新增Redis集群支持 +- 为Cluster API新增分片拓扑支持,用户可通过ShardingSpec定义分片数量和组件规格 +- 支持camellia-redis-proxy。当前运行于K8s v1.14版本,支持配置外部Redis实例(包括K8s集群外的实例) +- 支持从外部K8s集群高可用访问MongoDB副本集地址 +- 支持Pulsar v3.0.2版本 +- 支持为每个Pulsar broker暴露nodeport地址 +- 为StarRocks新增IPv4/IPv6双栈支持。私有地址和公共地址均可使用IPv4和IPv6(但StarRocks内核仍需支持IPv6) +- Lorry支持自定义角色探测命令。该命令可通过YAML脚本声明 + +## 问题修复 + +- 修复SVC恢复失败问题 (#6768) +- 解决更新KubeBlocks导致PG重启的问题 (#6771) +- 修复集群删除时ConfigMap找不到及代码优化问题 (#6793) +- 解决创建OSS备份仓库时自定义端点无效的问题 (#6819) +- 修复升级过程中etcd角色探测失败问题 (#6839) +- 解决协调工作节点无效的问题 (#6805) + +## 新贡献者 + +👏 热烈欢迎 + +💙 @Aayush Sharma, rustover, luoyuLianga, dingshun-cmss, Yunyinzi, bliubiu 💙 + +加入KubeBlocks社区!他们上个月成功合并了PR。 + +我们呼吁更多开发者参与KubeBlocks的协作开发。从[good first issues](https://github.com/apecloud/kubeblocks/contribute)开始,期待与您共同打造下一代数据库管理平台! + +![2024-03-good-first-issues](/img/blogs/2024-03-good-first-issues.jpg) + +相关链接: +- Good first issue: https://github.com/apecloud/kubeblocks/contribute +- 贡献指南: https://github.com/apecloud/kubeblocks/blob/main/docs/CONTRIBUTING.md +- 开发指南: https://github.com/apecloud/kubeblocks/blob/main/docs/00%20-%20index.md + +欢迎加入GitHub Discussion或[KubeBlocks Slack群组](https://join.slack.com/t/kubeblocks/shared_invite/zt-29tx52d8n-vli24S6gtD5ODJlNUqLqbQ)! 
+ +![2024-03-overview](/img/blogs/2024-03-overview.jpg) \ No newline at end of file diff --git a/blogs/zh/community-monthly-report-for-may-2024.mdx b/blogs/zh/community-monthly-report-for-may-2024.mdx new file mode 100644 index 00000000..9c2ce8b6 --- /dev/null +++ b/blogs/zh/community-monthly-report-for-may-2024.mdx @@ -0,0 +1,59 @@ +--- +date: 2024-06-06 +description: 了解 KubeBlocks 社区的最新动态。 +image: /img/blogs/thumbnails/blog-2024-05-monthly-report.png +slug: Community-Monthly-Report-for-May-2024 +tags: +- Monthly Report +- Community +title: 2024年5月社区月报 +--- +# 2024年5月社区月报 + +## 概述 + +5月份,KubeBlocks发布了v0.8.3版本并持续推进v0.9.0版本的开发。主要更新包括支持跨集群引用配置对象和NodeCountCluster功能。社区还修复了相关缺陷。 + +在过去一个月里,社区合并了170个PR,解决了133个issue,共有24位开发者参与贡献。 + +## 重点更新 + +- [#7153](https://github.com/apecloud/kubeblocks/pull/7153) 支持跨集群引用配置 +- [#7258](https://github.com/apecloud/kubeblocks/pull/7258) 支持NodeCountScaler功能 + +## 缺陷修复 + +- [#7475](https://github.com/apecloud/kubeblocks/pull/7475) 修复MongoDB执行切换后崩溃的问题 +- [#7447](https://github.com/apecloud/kubeblocks/pull/7447) 适配rolecheck以支持部分数据库引擎的集群初始化 +- [#7365](https://github.com/apecloud/kubeblocks/pull/7365) 支持PVC创建幂等性 +- [#7352](https://github.com/apecloud/kubeblocks/pull/7352) 添加标签区分KubeBlocks和数据保护组件 +- [#7323](https://github.com/apecloud/kubeblocks/pull/7323) 修复configmap 'xxx-rsm-env'未找到的问题 +- [#7299](https://github.com/apecloud/kubeblocks/pull/7299) 为host网络添加lorry端口 +- [#7256](https://github.com/apecloud/kubeblocks/pull/7256) 移除webhook中对集群组件spec的验证 +- [#7266](https://github.com/apecloud/kubeblocks/pull/7267) 修复config-manager容器中存在重复volume挂载的问题 + +## 新晋贡献者 + +👏 让我们欢迎 + +💙 @[d976045024](https://github.com/d976045024), [duiniwukenaihe](https://github.com/duiniwukenaihe), [starnop](https://github.com/starnop) 💙! + +很高兴你们加入KubeBlocks社区。上个月他们都完成了首个PR合并。感谢你们的贡献! + +## 新手友好任务 + +我们呼吁更多开发者参与KubeBlocks的协作开发。从[good first issues](https://github.com/apecloud/kubeblocks/contribute)开始,期待与您共同打造下一代数据库管理平台!立即认领👇 + +- [#7229 [改进] 通过名称指定备份/恢复端口](https://github.com/apecloud/kubeblocks/issues/7229) +- [#7031 [改进] 文档API `opsrequest.spec.restoreSpec.restoreTimeStr`](https://github.com/apecloud/kubeblocks/issues/7031) + +![2024-04-good-first-issues](/img/blogs/2024-04-good-first-issues.jpg) + +相关链接: +- [新手友好任务](https://github.com/apecloud/kubeblocks/contribute) +- [贡献指南](https://github.com/apecloud/kubeblocks/blob/main/docs/CONTRIBUTING.md) +- [开发指南](https://github.com/apecloud/kubeblocks/blob/main/docs/00%20-%20index.md) + +欢迎加入GitHub讨论或[KubeBlocks Slack群组](https://join.slack.com/t/kubeblocks/shared_invite/zt-29tx52d8n-vli24S6gtD5ODJlNUqLqbQ)! 
+ +![2024-05-overview](/img/blogs/2024-05-overview.png) \ No newline at end of file diff --git a/blogs/zh/deploy-harbor-on-kubeblocks.mdx b/blogs/zh/deploy-harbor-on-kubeblocks.mdx new file mode 100644 index 00000000..dbbab25b --- /dev/null +++ b/blogs/zh/deploy-harbor-on-kubeblocks.mdx @@ -0,0 +1,366 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/81566017?v=4 + name: Keyu Liang + url: https://github.com/koriyyy +date: 2024-07-15 +description: 本博客介绍如何在5分钟内通过KubeBlocks在Kubernetes上部署高可用Harbor集群。 +image: /img/blogs/thumbnails/blog-harbor.png +slug: deploy-harbor-on-kubeblocks +tags: +- Kubernetes +- operator +- PostgreSQL +- Redis +- KubeBlocks +title: 5分钟内在Kubernetes上通过KubeBlocks(PostgreSQL与Redis的Operator)部署高可用Harbor集群 +--- +# 5分钟内在Kubernetes上通过KubeBlocks(PostgreSQL和Redis的Operator)部署高可用Harbor集群 + +当需要构建自托管的Docker镜像仓库时,Harbor通常是一个备受推荐的解决方案。然而,**Harbor本身并不内置HA(高可用性)集成**,这使得其服务可靠性相对较低。**要创建HA Harbor集群,开发者通常需要自行搭建HA Redis和PostgreSQL集群**,这一过程相当繁琐。 + +![Harbor架构图](/img/blogs/blog-harbor-1.png) + +图1. [Harbor架构](https://goharbor.io/docs/2.1.0/install-config/harbor-ha-helm/#architecture) + +幸运的是,现在您只需几个步骤就能使用KubeBlocks搭建一个高可用的Harbor集群。 + +## 为什么选择 KubeBlocks + +KubeBlocks 是一款开源的管控平面软件,用于在 Kubernetes 上运行和管理数据库、消息队列等数据基础设施,它能够管理多种类型的引擎,包括关系型数据库(MySQL、PostgreSQL)、缓存(Redis)、NoSQL(MongoDB)、消息队列(Kafka、Pulsar)等。 + +在本篇博客中,我们将介绍如何通过 KubeBlocks 在短短 5 分钟内构建一个高可用的 Harbor 集群。 + +## 环境准备 + +在开始之前,请确保您的环境满足 [KubeBlocks](https://kubeblocks.io/docs/preview/user_docs/installation/install-with-kbcli/install-kubeblocks-with-kbcli#environment-preparation) 和 [Harbor](https://goharbor.io/docs/2.11.0/install-config/installation-prereqs/) 的要求。 + +## 安装 kbcli 和 KubeBlocks + +1. 安装 kbcli。 + +```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash + ``` + +2. 安装 KubeBlocks。 + +```bash + kbcli kubeblocks install + ``` + +3. 检查 KubeBlocks 是否安装成功。 + +```bash + kbcli kubeblocks status + ``` + +4. 启用 `postgresql` 和 `redis` 插件。默认情况下,这两个插件已启用。您可以通过运行以下命令检查插件状态。如果未启用,请按照[此处指南](https://kubeblocks.io/docs/preview/user_docs/overview/supported-addons#use-addons)进行启用。 + +```bash + kbcli addon list + ``` + +## 创建 PostgreSQL 和 Redis 集群 + +1. 为保持环境隔离,创建一个名为 `demo` 的独立命名空间。 + +```bash + kubectl create namespace demo + ``` + +2. 创建 PostgreSQL 集群。此处我们使用复制模式,该模式会创建一个支持自动故障转移的复制集群。更多详细信息,请参阅[创建 PostgreSQL 集群](https://kubeblocks.io/docs/preview/user_docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster)。 + +```bash + kbcli cluster create postgresql mypg --mode replication --namespace demo + ``` + +3. 创建 Redis 集群。此处我们创建一个 Redis 复制集群并指定版本为 `redis-7.0.6`,KubeBlocks 将创建一个带哨兵的主从 Redis 集群。更多详细信息,请参考[创建 Redis 集群](https://kubeblocks.io/docs/preview/user_docs/kubeblocks-for-redis/cluster-management/create-and-connect-a-redis-cluster)。 + +```bash + kbcli cluster create redis myredis --mode replication --version redis-7.0.6 --namespace demo + ``` + +4. 查看集群状态。等待两个集群的状态都变为 `Running`。 + +```bash + kbcli cluster list --namespace demo + ``` + + + + +## 连接集群 + +您也可以根据不同的场景,按照[这里的详细指南](https://kubeblocks.io/docs/preview/api_docs/connect_database/overview-of-database-connection)连接集群。为了方便演示,我们将使用测试环境进行说明。 + +### 连接 PostgreSQL 集群 + +1. 连接到 PostgreSQL 集群。 + +```bash + kbcli cluster connect mypg --namespace demo + ``` + +2. 在 PostgreSQL CLI 中,创建新用户。 + +```bash + create user test with password 'password'; + ``` + +3. 为 Harbor 创建新的数据库注册表。 + +```bash + CREATE DATABASE registry OWNER test; + ``` + +在此创建的用户和数据库将在后续安装Harbor时使用。 + +### 连接Redis集群 + +1. 
连接到Redis集群。

```bash
   kbcli cluster connect myredis --namespace demo
   ```

2. 创建用户。

```bash
   ACL SETUSER test on >password ~* +@all
   ```

## 安装 Harbor

1. 下载 Harbor Helm Chart。

```bash
   helm repo add harbor https://helm.goharbor.io

   helm fetch harbor/harbor --untar
   ```

2. 获取集群中服务的信息。`mypg-postgresql` 和 `myredis-redis-redis` 的 ClusterIP 是 Harbor 连接到的地址。

```bash
   kubectl get service -n demo
   >
   NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
   mypg-postgresql       ClusterIP   172.16.155.121                 5432/TCP,6432/TCP   74m
   myredis-redis-redis   ClusterIP   172.16.190.126                 6379/TCP            66m
   ```

3. 在 `values.yaml` 中配置 PostgreSQL 数据库。使用 KubeBlocks 提供的外部数据库,并填写必要的数据库信息。其他配置项(如 `expose.type`)请参考[官方文档](https://goharbor.io/docs/2.11.0/install-config/configure-yml-file/)。

```yaml
   database:
     type: external
     ...
     external:
       host: "172.16.155.121"   # PostgreSQL的ClusterIP
       port: "5432"
       username: "test"         # 用户名
       password: "password"     # 密码
       coreDatabase: "registry" # 数据库名称
       existingSecret: ""
       sslmode: "disable"
   ```

4. 在 `values.yaml` 中配置 Redis 数据库。

```yaml
   redis:
     type: external
     ...
     external:
       addr: "172.16.190.126:6379" # Redis的ClusterIP:端口
       sentinelMasterSet: ""
       coreDatabaseIndex: "0"
       jobserviceDatabaseIndex: "1"
       registryDatabaseIndex: "2"
       trivyAdapterIndex: "5"
       username: "test"     # 用户名
       password: "password" # 密码
       existingSecret: ""
   ```

5. 安装 Harbor。

```bash
   helm install myharbor . -n demo
   ```

6. 检查 Pod 状态,确保所有服务均处于运行状态。

```bash
   kubectl get pods
   >
   NAME                                   READY   STATUS    RESTARTS   AGE
   myharbor-core-66d95c9f45-vpcnn         1/1     Running   0          44m
   myharbor-jobservice-85b5676456-kl5r9   1/1     Running   0          44m
   myharbor-nginx-55dd86f5d8-s78gn        1/1     Running   0          44m
   myharbor-portal-869c6656c5-5dtsc       1/1     Running   0          44m
   myharbor-registry-c66cd79b-77k5j       2/2     Running   0          44m
   myharbor-trivy-0                       1/1     Running   0          44m
   mypg-postgresql-0                      4/4     Running   0          65m
   mypg-postgresql-1                      4/4     Running   0          82s
   myredis-redis-0                        3/3     Running   0          57m
   myredis-redis-1                        3/3     Running   0          57m
   myredis-redis-sentinel-0               1/1     Running   0          58m
   myredis-redis-sentinel-1               1/1     Running   0          58m
   myredis-redis-sentinel-2               1/1     Running   0          58m
   ```

现在您可以像往常一样访问 Harbor UI。

## 高可用性

为了展示通过 KubeBlocks 构建的 Harbor 集群的高可用性,我们将模拟 PostgreSQL 集群主 Pod 发生故障的场景。

1. 查看 PostgreSQL 集群及 Pod 的初始状态。当前 `mypg-postgresql-0` 为主 Pod,`mypg-postgresql-1` 为从 Pod。

```bash
   kubectl -n demo get pod -L kubeblocks.io/role
   >
   NAME                READY   STATUS    RESTARTS   AGE   ROLE
   ...
   mypg-postgresql-0   4/4     Running   0          66m   primary
   mypg-postgresql-1   4/4     Running   0          66m   secondary
   ...
   ```

2. 将测试镜像 `busybox` 推送至 Harbor 镜像仓库。

```bash
   docker tag busybox harbor.domain.com/library/busybox
   docker push harbor.domain.com/library/busybox
   ```

3. 您可以看到镜像已成功推送至 Harbor 镜像仓库。

   ![Figure 2](/img/blogs/blog-harbor-2.png)

4. 现在,模拟 PostgreSQL 集群主节点 Pod 的故障。

```bash
   # Enter the primary pod
   kubectl exec -it mypg-postgresql-0 -n demo -- bash

   # Delete the data directory of PostgreSQL to simulate an exception
   root@mycluster-postgresql-0:/home/postgres# rm -fr /home/postgres/pgdata/pgroot/data
   ```
5. 查看日志以观察异常发生时 Pod 角色的切换情况。

```bash
   # View the primary pod logs
   kubectl logs mypg-postgresql-0 -n demo
   ```

   在日志中,主节点 Pod 的领导者锁被释放,并发生了高可用切换。已使用备份数据创建了一个新副本。服务在几秒内恢复。

```bash
   2024-06-26 08:00:51,759 INFO: no action. I am (mypg-postgresql-0), the leader with the lock
   2024-06-26 08:01:01,726 INFO: Lock owner: mypg-postgresql-0; I am mypg-postgresql-0
   2024-06-26 08:01:01,802 INFO: Leader key released
   2024-06-26 08:01:01,824 INFO: released leader key voluntarily as data dir empty and currently leader
   2024-06-26 08:01:01,825 INFO: Lock owner: mypg-postgresql-1; I am mypg-postgresql-0
   ...
   2024-06-26 08:01:04,475 INFO: replica has been created using basebackup_fast_xlog
   2024-06-26 08:01:04,475 INFO: bootstrapped from leader 'mypg-postgresql-1'
   2024-06-26 08:01:04,476 INFO: closed patroni connection to the postgresql cluster
   ```

```bash
   # View secondary pod logs
   kubectl logs mypg-postgresql-1 -n demo
   ```

   原从节点 Pod `mypg-postgresql-1` 已获取领导者锁并升级为主节点 Pod。

```bash
   2024-06-26 08:02:13,638 INFO: no action. I am (mypg-postgresql-1), the leader with the lock
   ```

6. 查看 PostgreSQL 集群和 Pod 的状态。故障转移后,`mypg-postgresql-0` 成为从节点 Pod,`mypg-postgresql-1` 成为主节点 Pod。

```bash
   kubectl -n demo get pod -L kubeblocks.io/role
   >
   NAME                READY   STATUS    RESTARTS   AGE   ROLE
   ...
   mypg-postgresql-0   4/4     Running   0          89m   secondary
   mypg-postgresql-1   4/4     Running   0          26m   primary
   ...
   ```

7. 连接到 PostgreSQL 集群以查看主节点 Pod 中的复制信息。

```bash
   postgres=# select * from pg_stat_replication;
   ```

   ![Figure 3](/img/blogs/blog-harbor-3.png)

   结果显示 `mypg-postgresql-0` 已成为从节点。

8. 验证 Harbor 集群的服务。此处我们拉取先前推送的 `busybox` 镜像。该镜像已成功从 Harbor 镜像仓库中拉取。我们还推送了一个新镜像 `hello-world`,该镜像也成功推送至 Harbor 镜像仓库。故障转移后,Harbor 集群的读写功能均已恢复,这证明了 KubeBlocks 提供的高可用性功能的有效性。

   ![Figure 4](/img/blogs/blog-harbor-4.png)

## 扩缩容集群

KubeBlocks 提供垂直扩展和水平扩展两种能力。您可以通过执行以下命令轻松实现集群扩缩容。

- 垂直扩展

```bash
  kbcli cluster vscale mypg \
    --components="postgresql" \
    --memory="4Gi" --cpu="2" \
    --namespace demo
  ```

- 水平扩展

```bash
  kbcli cluster hscale mypg \
    --replicas 3 \
    --namespace demo \
    --components postgresql
  ```

## 结论

通过集成 KubeBlocks,您可以在短短 5 分钟内搭建一个高可用性的 Harbor 集群,确保您的 Harbor 集群提供持续可靠的服务。KubeBlocks 简化了整个搭建流程,让您可以专注于更重要的任务,而无需担心底层基础设施的配置和管理。

## 参考

- [KubeBlocks](https://github.com/apecloud/kubeblocks)
- [KubeBlocks.io](https://kubeblocks.io/)
- [KubeBlocks 插件](https://github.com/apecloud/kubeblocks-addons)
- [Harbor](https://github.com/goharbor/harbor/wiki/Architecture-Overview-of-Harbor)
\ No newline at end of file
diff --git a/blogs/zh/deploy-wordpress-on-kubeblocks.mdx b/blogs/zh/deploy-wordpress-on-kubeblocks.mdx
new file mode 100644
index 00000000..60d9b383
--- /dev/null
+++ b/blogs/zh/deploy-wordpress-on-kubeblocks.mdx
@@ -0,0 +1,214 @@
+---
+authors:
  image_url: https://avatars.githubusercontent.com/u/57531827?v=4
  name: skyrise-l
  url: https://github.com/skyrise-l
+date: 2024-07-16
+description: 本博客介绍如何使用KubeBlocks MySQL Operator在Kubernetes上部署高可用WordPress站点。
+image: /img/blogs/thumbnails/blog-wordpress.png
+slug: deploy-wordpress-on-kubeblocks
+tags:
+- Kubernetes
+- operator
+- MySQL
+- KubeBlocks
+- WordPress
+title: 使用KubeBlocks MySQL Operator在Kubernetes上部署高可用性WordPress站点
+---
+# 使用 KubeBlocks MySQL Operator 在 Kubernetes 上部署高可用 WordPress 站点

## 简介

### WordPress

WordPress 是全球最受欢迎的内容管理系统(CMS)。自 2003 年发布以来,它已成为构建网站的首选工具。其庞大的插件和主题生态系统让用户可以轻松扩展功能并增强网站外观。活跃的 WordPress
社区还提供了丰富的资源和支持,进一步降低了开发和维护的难度。

因此,WordPress 成为全球数百万用户的选择,在网站构建领域占据主导地位。

### 什么是 KubeBlocks?

KubeBlocks 是一个开源的 Kubernetes Operator,用于管理多种数据库和有状态中间件。它支持超过 30 种数据库系统,包括 MySQL、PostgreSQL、Redis、MongoDB、Kafka、ClickHouse 和 Elasticsearch。KubeBlocks 的核心理念是使用一组通用的抽象 API(CRD)来描述这些不同数据库引擎之间的共享属性。这使得数据库供应商和开发者可以通过插件(addons)来适配引擎间的差异。

### 为什么使用 KubeBlocks?

当使用 Bitnami 镜像部署 WordPress 时,内置的 MariaDB 数据库提供了开箱即用的解决方案。然而,这种方式存在以下缺点:

- **高可用性限制**:Bitnami 镜像中的 MariaDB 实例通常部署在单个节点上。如果该节点出现问题,可能导致网站服务中断。此外,内置的 MariaDB 缺乏自动故障转移机制。
- **资源竞争**:将 MariaDB 数据库和 WordPress 服务托管在同一个 Pod 中会导致资源争用,使资源分配变得复杂。
- **扩展性差**:虽然 MariaDB 支持扩展,但水平扩展数据库(通过添加实例来提高性能和容量)非常复杂,需要额外的管理和配置工具。
- **监控和管理不足**:内置的 MariaDB 缺乏全面的监控和管理功能,难以及时发现和解决性能问题或故障。

而 KubeBlocks 可以有效解决这些不足:

- **高可用性**:KubeBlocks 可以为 WordPress 和数据库分别提供高可用性解决方案,提升整体系统可靠性。
- **资源隔离**:KubeBlocks 将 WordPress 和数据库运行在独立的 Pod 中,提供更好的资源隔离,避免争用。
- **强大的扩展性**:KubeBlocks 支持独立扩展 WordPress 和数据库副本,允许根据需求动态调整资源。
- **易于管理**:KubeBlocks 只需一条命令即可为 WordPress 创建所需的数据库集群。同时,它还提供了数据库的内置备份和监控功能,提高管理效率。

## 部署

### 安装 KubeBlocks

KubeBlocks 包含一个专用的命令行工具 kbcli。如果您尚未安装 KubeBlocks,只需几条简单命令即可同时安装 kbcli 和 KubeBlocks。在安装前,请确保您的环境满足 [KubeBlocks 的要求](https://kubeblocks.io/docs/preview/user_docs/installation/install-with-kbcli/install-kubeblocks-with-kbcli#environment-preparation)。

1. 安装 kbcli。

```bash
   curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash
   ```

2. 安装 KubeBlocks。

```bash
   kbcli kubeblocks install
   ```

3. 检查 KubeBlocks 是否安装成功。

```bash
   kbcli kubeblocks status
   ```

您可以参考官方文档了解详细操作步骤。

- [安装 kbcli](https://kubeblocks.io/docs/preview/user_docs/installation/install-with-kbcli/install-kbcli)
- [安装 KubeBlocks](https://kubeblocks.io/docs/preview/user_docs/installation/install-with-kbcli/install-kubeblocks-with-kbcli)

### 创建高可用集群

在部署 WordPress 之前,您需要创建一个数据库集群来管理 WordPress 的后端数据。您可以使用 [kbcli](https://kubeblocks.io/docs/preview/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster) 或 [kubectl](https://kubeblocks.io/docs/preview/api_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster) 创建该集群。

1. 创建高可用集群。

   此处我们使用 KubeBlocks 提供的 `apecloud-mysql` 插件为 WordPress 创建一个 MySQL 集群。通过 kbcli,您可以快速创建一个具有多副本、高可用且生产就绪的 MySQL 数据库集群。

   以下示例将副本数设置为 3,这将启用 RaftGroup 模式并创建一个包含三个副本的 MySQL 集群。

```bash
   # Enable addon (enabled by default)
   kbcli addon install apecloud-mysql

   # Create a cluster and you can set parameters, such as creating a cluster with three replicas by --set replicas=3
   kbcli cluster create apecloud-mysql --cluster-definition=apecloud-mysql --set replicas=3
   ```

   检查集群状态并等待所有 Pod 处于运行状态。

```bash
   kubectl get pods
   ```

   ![Figure 1](/img/blogs/blog-wordpress-1.png)

2. 获取访问地址。

   您可以直接通过 Pod 或服务访问 MySQL 集群。本示例将演示通过服务访问集群的方式。

   运行以下命令获取服务地址,其格式为`apecloud-mysql.default(命名空间).svc.cluster.local(默认后缀)`。

```bash
   kubectl get services
   ```

### 创建数据库和用户

1. 连接集群。

```bash
   kbcli cluster connect apecloud-mysql
   ```

2. 在数据库中,执行以下SQL语句创建用户并授予权限。根据需要设置数据库权限。

```bash
   CREATE USER 'myadmin'@'%' IDENTIFIED BY 'password';
   GRANT ALL PRIVILEGES ON *.* TO 'myadmin'@'%';
   FLUSH PRIVILEGES;
   create database wordpress;
   ```
运行以下命令创建 `mysql-secret` 并设置键值 `mariadb-password=password`。在安装过程中,WordPress 将使用该键对应的值作为数据库密码。请注意,密码键名必须为 `mariadb-password`,且用户名不会从此 secret 中读取。

```bash
 kubectl create secret generic mysql-secret --from-literal=mariadb-password=password
 ```

:::note

您可以创建一个 Secret 以便在后续 WordPress 安装过程中引用,从而避免以明文形式传输密码。

:::

2. 安装 WordPress。

   1. 安装 WordPress 并配置参数。

```bash
 helm install my-release oci://registry-1.docker.io/bitnamicharts/wordpress \
 --set mariadb.enabled=false \
 --set externalDatabase.host=apecloud-mysql.default.svc.cluster.local \
 --set externalDatabase.database=wordpress \
 --set externalDatabase.port=3306 \
 --set externalDatabase.user="myadmin" \
 --set externalDatabase.existingSecret="mysql-secret" \
 --set replicaCount=2
 ```

参数说明:

   - `mariadb.enabled`:设置为 `false` 可禁用内置 MariaDB 安装,转而使用外部数据库服务。
   - `host`:使用先前获取的 MySQL 服务地址来访问 MySQL 服务,例如 `apecloud-mysql.default.svc.cluster.local`。
   - `user`、`database`、`port`:根据实际需求设置这些参数。
   - `existingSecret`:这是安全传输密码的推荐方式。您可以在此引用先前创建的 secret 来传输密码,以避免明文传输。注意 secret 中必须包含连接密码。设置 `existingSecret` 后,`password` 字段将被忽略。
   - `password`:可选参数。本文推荐使用 `existingSecret` 传递密码,以避免明文传输。
   - `replicaCount`:表示要启动的 WordPress 实例 Pod 数量。

   2. 检查 Pod 状态并确保所有 Pod 均已就绪且正在运行。

```bash
 kubectl get pods
 ```

![Figure 3](/img/blogs/blog-wordpress-3.png)

   3. 进入 WordPress 容器。您可以远程连接数据库,并查看 WordPress 的数据库信息。

```bash
 kubectl exec -it wordpress-584444f68b-sxcss -- bash
 mysql -h apecloud-mysql.default.svc.cluster.local -u myadmin -p
 ```

![Figure 4](/img/blogs/blog-wordpress-4.png)

现在,WordPress 和数据库集群已成功部署。

### 高可用性演示

本博客将通过删除一个 Pod 来模拟故障转移。

```bash
kubectl delete pod apecloud-mysql-0
```

删除 Pod `apecloud-mysql-0` 后,可以看到它有 3/4 个容器已就绪,其中 MySQL 容器不可用。

![Figure 5](/img/blogs/blog-wordpress-5.png)

但这不会影响数据库的整体连接性。

此外,可以观察到 `apecloud-mysql-1` 成为了新的主 Pod,这展示了 KubeBlocks 强大的故障转移能力。

![Figure 6](/img/blogs/blog-wordpress-6.png)

### 集群扩缩容

当遇到性能问题或类似情况时,可能需要对数据库进行扩容。KubeBlocks 提供了便捷的扩缩容命令 `kbcli cluster vscale`,可以轻松增加计算资源。

```bash
kbcli cluster vscale apecloud-mysql --components=apecloud-mysql --cpu=500m --memory=500Mi
```

有关更多数据库参数设置,请参考[官方文档](https://github.com/bitnami/containers/tree/main/bitnami/wordpress#connect-wordpress-container-to-an-existing-database)。
\ No newline at end of file
diff --git a/blogs/zh/dify-on-kb.mdx b/blogs/zh/dify-on-kb.mdx
new file mode 100644
index 00000000..61985dec
--- /dev/null
+++ b/blogs/zh/dify-on-kb.mdx
@@ -0,0 +1,373 @@
---
authors:
  image_url: https://avatars.githubusercontent.com/u/1765402?v=4
  name: iziang
  url: https://github.com/iziang
date: 2024-07-19
description: 本博客介绍如何利用KubeBlocks(面向PostgreSQL、Redis和Qdrant的K8s Operator)与Dify,在Kubernetes上部署生产就绪的AIGC应用。
image: /img/blogs/thumbnails/blog-dify.png
slug: deploy-aigc-applications-using-kubeblocks-and-dify
tags:
- Kubernetes
- operator
- PostgreSQL
- Qdrant
- Redis
- KubeBlocks
- Dify
- AIGC
title: 使用KubeBlocks(适用于PostgreSQL、Redis和Qdrant的K8s Operator)与Dify在Kubernetes上部署生产就绪的AIGC应用
---
# 使用 KubeBlocks(PostgreSQL、Redis 和 Qdrant 的 K8s Operator)与 Dify 在 Kubernetes 上部署生产级 AIGC 应用

## 简介

AI生成内容(AIGC)技术正以前所未有的速度改变着我们的世界。AIGC不仅为内容创作者提供了强大的工具,也为企业带来了前所未有的商业机遇。通过AIGC,应用可以自动生成文本、图像、音频甚至视频,极大提升了内容生产的效率和质量。更重要的是,AIGC能够根据用户的特定需求实时生成定制化内容,显著改善用户体验。

然而,要充分发挥AIGC的潜力,开发者面临着一系列挑战,如高技术门槛、复杂的模型集成以及困难的运维管理等。正是在这样的背景下,Dify应运而生。Dify是一个开源的大语言模型(LLM)应用开发平台,它巧妙融合了Backend as 
Service和LLMOps的理念,旨在为开发者提供从创意到产品的快速通道。Dify提供了一系列LLM、直观的提示词设计工具、强大的Agent框架以及灵活的流程编排能力,所有这些都通过友好的用户界面和API呈现,大幅降低了技术门槛,使得没有技术背景的人也能参与构建AI应用。

尽管Dify极大简化了AI应用的开发过程,但如何高效管理这些应用所依赖的基础设施,在部署和运维阶段仍是重要课题。AIGC应用通常会使用多种数据库,例如用PostgreSQL等关系型数据库存储应用元数据,用Redis等内存数据库保存会话历史,用Qdrant等向量数据库实现RAG召回。确保这些关键组件的稳定运行、数据一致性与安全性,并满足快速增长的业务需求,对任何团队来说都是不小的挑战。

这正是KubeBlocks大显身手的领域。KubeBlocks是一个基于Kubernetes的数据基础设施管理平台,提供完整的解决方案,帮助自动化管理和调度数据基础设施。无论是OLTP、OLAP、NoSQL、消息队列、流引擎,还是新兴的向量数据库和LLM,KubeBlocks都能轻松管理,大幅提升有状态工作负载的管理效率。KubeBlocks支持多云环境,提供一键部署、无缝扩缩容和自动故障恢复,确保生产级应用的高性能、高弹性和可观测性。

通过将Dify快速应用开发和迭代的能力与KubeBlocks保障基础设施稳定可管理的能力相结合,本方案不仅能提高开发效率,还能保证应用的可靠性和可扩展性。本篇博客将演示如何基于KubeBlocks + Dify方案构建生产级AIGC应用。

## 要求

- Kubernetes 集群,版本 >= 1.21
- 已安装 Ingress Controller 的 Kubernetes 集群

## 部署

### 安装 KubeBlocks

1. 安装最新版本的 kbcli。

   kbcli 是 KubeBlocks 提供的命令行工具,可以直观且高效地管理 KubeBlocks 相关资源。

   对于 KubeBlocks 用户,kbcli 提供了更直接和高效的操作选项,使 Kubernetes 新手能够快速上手。

```bash
 curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash
 ```

2. 安装最新版本的 KubeBlocks。

```bash
 kbcli kubeblocks install
 ```

3. 运行 `kbcli addon list` 命令查看已启用的数据库。

   KubeBlocks 默认会安装一些常见数据库,例如 MySQL、PostgreSQL、Redis 和 MongoDB。

```bash
 kbcli addon list
 >
 NAME             VERSION         PROVIDER    STATUS     AUTO-INSTALL
 llm              0.9.0           community   Disabled   false
 minio            12.8.12         community   Disabled   false
 prometheus       15.16.1         community   Disabled   false
 qdrant           0.9.0           community   Disabled   false
 apecloud-mysql   0.9.0-beta.10   apecloud    Enabled    true
 elasticsearch    0.9.0           community   Enabled    true
 kafka            0.9.0           community   Enabled    true
 mongodb          0.9.0           apecloud    Enabled    true
 mysql            0.9.1           community   Enabled    true
 postgresql       0.9.0           community   Enabled    true
 pulsar           0.9.0           community   Enabled    true
 redis            0.9.0           community   Enabled    true
 ```

4. 运行以下命令启用 Qdrant 插件。

   Dify 依赖的向量数据库(如 Qdrant)默认未启用。

```bash
 kbcli addon enable qdrant
 ```

5. 运行 `kbcli addon describe qdrant` 并等待状态变为 "Enabled"。

```bash
 kbcli addon describe qdrant
 >
 Name:         qdrant
 Description:  Qdrant is an open source (Apache-2.0 licensed), vector similarity search engine and vector database. 
 Labels:       addon.kubeblocks.io/model=vector,addon.kubeblocks.io/name=qdrant,addon.kubeblocks.io/provider=community,addon.kubeblocks.io/version=0.9.0
 Type:         Helm
 Status:       Enabled
 Auto-install: false

 Installed Info:

 NAME   REPLICAS   STORAGE   CPU (REQ/LIMIT)   MEMORY (REQ/LIMIT)   STORAGE-CLASS   TOLERATIONS   PV-ENABLED
 main              /         /
 ```

默认情况下,KubeBlocks 中的 Qdrant 插件使用官方 Docker Hub 镜像 docker.io/qdrant/qdrant。如果从官方 Docker Hub 拉取镜像时遇到问题,您可以在启用插件时改用从 KubeBlocks 官方仓库同步的镜像。

```bash
 kbcli addon enable qdrant --set image.registry=apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com --set image.repository=apecloud/qdrant
 ```

除了安装 KubeBlocks 外,kbcli 还提供了一套丰富的数据库集群操作功能。您可以通过简单的命令轻松创建数据库集群、检查集群状态、执行水平和垂直扩缩容、扩展存储卷,以及启动、停止或重启集群。这显著降低了用户的学习曲线。对于希望快速部署和测试 KubeBlocks 的用户,kbcli 提供了 [Playground](https://kubeblocks.io/docs/preview/user_docs/try-out-on-playground/try-kubeblocks-on-your-laptop),使其成为学习和实验场景的理想选择。

对于熟悉 Kubernetes 并希望深入了解 KubeBlocks 与 Kubernetes 集成细节的高级用户,KubeBlocks 提供了声明式 API。您可以使用 kubectl 像管理 Kubernetes 原生资源一样管理数据库,为观察和操作数据库集群提供了更全面和底层的视角。

### 为元数据创建 PostgreSQL 集群

Dify 将其元数据存储在 PostgreSQL 中。这里我们首先创建一个 PostgreSQL 复制集群。该集群由两个副本组成,每个副本配置为 1C2G 和 20 GiB 存储。

```bash
# kbcli
kbcli cluster create postgresql postgresql --cpu=1 --memory=2 --storage=20 --mode=replication --version=postgresql-14.8.0

# or kubectl
kubectl apply -f https://kubeblocks.io/yamls/dify/postgresql.yaml
```

### 创建用于向量存储的 Qdrant 集群

Dify 使用向量数据库来存储用户上传的文档及其对应的特征向量。KubeBlocks 支持多种向量数据库,包括专用向量数据库如 Qdrant、Milvus 和 Weaviate,以及具备向量能力的传统数据库如带有 pgvector 插件的 PostgreSQL。本文演示如何通过 Qdrant 创建向量数据库。我们将创建一个具有三个副本的 Qdrant 集群,每个副本配置为 1C2G 和 20 GiB 存储。

```bash
# kbcli
kbcli cluster create qdrant --cluster-definition=qdrant --cluster-version=qdrant-1.8.4 --set cpu=1,memory=2Gi,storage=20Gi,replicas=3

# or kubectl
kubectl apply -f https://kubeblocks.io/yamls/dify/qdrant.yaml
```

### 创建用于会话存储的 Redis 集群

Dify 使用 Redis 存储用户会话历史。这里我们创建一个高可用的 Redis 集群,包含 Redis 和 Sentinel 组件。

- Redis 组件配置为 2 个副本(一个主节点,一个从节点),每个副本分配 2 个 CPU、1 GiB 内存和 20 GiB 存储空间。
- Sentinel 组件配置为 3 个副本,每个副本分配 0.2 个 CPU、0.2 GiB 内存和 20 GiB 存储空间。

```bash
# kbcli
kbcli cluster create redis redis --version=redis-7.0.6 --mode=replication --cpu=2 --memory=1 --storage=20 --replicas=2 --sentinel.cpu=0.2 --sentinel.memory=0.2 --sentinel.replicas=3 --sentinel.storage=20

# or kubectl
kubectl apply -f https://kubeblocks.io/yamls/dify/redis.yaml
```

### 部署 Dify

在部署 Dify 之前,请先运行 `kubectl get cluster` 并等待 PostgreSQL、Redis 和 Qdrant 集群全部处于 "Running" 状态。

```bash
kubectl get cluster
>
NAME         CLUSTER-DEFINITION   VERSION             TERMINATION-POLICY   STATUS    AGE
postgresql   postgresql           postgresql-14.8.0   Delete               Running   20m
qdrant       qdrant               qdrant-1.8.4        Delete               Running   11m
redis        redis                redis-7.0.6         Delete               Running   73s
```

我们还需要手动初始化 Dify 元数据。

1. 连接到 PostgreSQL 集群。

```bash
 kbcli cluster connect postgresql
 ```

2. 创建名为 `dify` 的数据库。

```bash
 create database dify;
 ```

3. 获取默认 Redis 账户的密码,稍后将其配置到 Dify 中。

```bash
 # Get the Redis password
 kubectl get secret redis-redis-account-default -o jsonpath="{.data.password}" |base64 -d
 ```

:::note

在本博客中,我们使用 Kubernetes 的流行包管理器 Helm 来部署 Dify。

:::

4. 添加相关的 Helm 仓库。

```bash
 helm repo add douban https://douban.github.io/charts/
 ```

5. 
将以下内容保存到 `values.yaml` 文件中,该文件设置了相关数据库的访问信息,包括连接地址、用户名和密码。

   - `global.host` 是访问 Dify 的域名。如果该域名无法公开访问,后续需要配置本地静态解析。为了方便起见,请禁用 TLS。
   - KubeBlocks 保存了 PostgreSQL 集群的账号和密码,`values.yaml` 文件直接通过 `env` 引用这些信息,因此无需修改。
   - Dify 使用 Redis 作为 Celery 的消息代理。由于代理连接地址是动态构建的,无法通过引用 Secret 来配置。因此,请先运行 `kubectl get secret redis-redis-account-default -o jsonpath="{.data.password}" | base64 -d` 命令获取 Redis 的默认账号和密码,并将 `values.yaml` 文件中的 `${REDIS_PASSWORD}` 替换为从该命令获取的密码。
   - 将 `ingress.className` 替换为 Kubernetes 集群实际的 Ingress 类别。可以通过运行 `kubectl get ingressclass` 查看集群中可用的 Ingress 类别。
   - `env.SECRET_KEY` 用于登录认证和加密敏感信息。出于安全考虑,请替换为足够强度的密钥,可以使用 `openssl rand -base64 42` 生成。

```yaml
 global:
   host: "mydify.example.com"
   enableTLS: false

 image:
   # Set to the latest version of dify
   # Check the version here: https://github.com/langgenius/dify/releases
   # If not set, the default value in Chart.yaml is used
   tag: "0.6.11"
 extraBackendEnvs:
 - name: CELERY_BROKER_URL
   value: redis://default:${REDIS_PASSWORD}@redis-redis-redis:6379/1
 - name: REDIS_USERNAME
   value: default
 - name: REDIS_PASSWORD
   value: ${REDIS_PASSWORD}
 - name: REDIS_HOST
   value: redis-redis-redis
 - name: REDIS_PORT
   value: "6379"
 - name: REDIS_DB
   value: "0"
 - name: DB_USERNAME
   valueFrom:
     secretKeyRef:
       name: postgresql-conn-credential
       key: username
 - name: DB_PASSWORD
   valueFrom:
     secretKeyRef:
       name: postgresql-conn-credential
       key: password
 - name: DB_HOST
   value: postgresql-postgresql
 - name: DB_PORT
   value: "5432"
 - name: DB_DATABASE
   value: dify
 - name: VECTOR_STORE
   value: "qdrant"
 - name: QDRANT_URL
   value: "http://qdrant-qdrant:6333"
 - name: SECRET_KEY
   value: "PleaseReplaceThisToYourSecretOrUse"

 ingress:
   enabled: true
   className: "nginx"

 minio:
   embedded: true

 redis:
   embedded: false

 postgresql:
   embedded: false

 api:
   envs:
   - name: "MIGRATION_ENABLED"
     value: "true"
 ```

6. 部署 Dify。

```bash
 helm upgrade -i dify douban/dify -f values.yaml
 ```

7. 查看 Pod 状态,等待所有与 Dify 相关的 Pod 都处于 "Running" 状态。

```bash
 kubectl get pods -l app.kubernetes.io/name=dify
 >
 NAME                           READY   STATUS    RESTARTS   AGE
 dify-worker-5f5f99b7b7-p6qk8   1/1     Running   0          97s
 dify-sandbox-7bf987566c-2fj6w  1/1     Running   0          97s
 dify-frontend-c9df5ddb4-4v8jp  1/1     Running   0          97s
 dify-api-7c98b9847c-4tgjx      1/1     Running   0          97s
 ```

### 访问 Dify

1. 运行以下命令确认 Dify Ingress 的访问地址,因为 Dify 通过 Ingress 对外暴露服务。

```bash
 kubectl get ingress -l app.kubernetes.io/name=dify
 >
 NAME   CLASS   HOSTS                ADDRESS        PORTS   AGE
 dify   nginx   mydify.example.com   10.43.65.209   80      2m48s
 ```

如果 Ingress 域名无法公开解析(例如自定义私有域名),您需要在访问 Dify 的客户端上配置域名 `mydify.example.com` 的静态解析。

   - 将 10.43.65.209 替换为您环境中 Dify Ingress 的实际 IP 地址。

```bash
 echo '10.43.65.209 mydify.example.com' | sudo tee -a /etc/hosts
 ```

2. 在浏览器中输入 `http://mydify.example.com` 打开 Dify 控制台。

   ![Figure 1](/img/blogs/blog-dify-1.png)

3. 
注册管理员账号。登录后即可看到 Dify 操作界面。

   ![Figure 2](/img/blogs/blog-dify-2.png)

## 基础设施运维

随着业务扩展,您需要对数据库执行各种运维操作。KubeBlocks 为升级、扩缩容和重启等日常运维(Day-2 Operations)提供全面支持。以下是一些示例。

### 扩缩容

当知识库中用户上传的文件数量增加时,现有三个副本的 Qdrant 集群可能无法处理负载。此时有两种选择:

- **垂直扩展**集群,为每个 Qdrant 节点增加更多 CPU 和内存。
- **水平扩展**集群,添加更多 Qdrant Pod 以将负载分布到更多 Pod 上。

KubeBlocks 支持这两种扩缩容方式。

以垂直扩展为例,您可以将 CPU 增加到 8 核,内存增加到 32 GiB。

```bash
kbcli cluster vscale qdrant --components qdrant --cpu 8 --memory 32Gi
```

对于水平扩展,您可以添加更多 Pod。

```bash
kbcli cluster hscale qdrant --replicas 5
```

如果业务需求下降,例如用户删除了大量文档,您也可以同时进行垂直和水平缩容。

### 存储卷扩容

如果注册用户数量增加且 PostgreSQL 元数据数据库存储不足,您可以扩展存储空间,例如扩容至 50 GiB。

```bash
kbcli cluster volume-expand postgresql --components postgresql --storage=50Gi --volume-claim-templates=data
```

### 重启

如果 PostgreSQL 集群出现异常,可以尝试重启集群来解决问题。

```bash
kbcli cluster restart postgresql
```

## 总结

本篇博客展示了如何利用 KubeBlocks 和 Dify 构建生产级 AIGC 应用。KubeBlocks 在数据基础设施管理方面的强大能力与 Dify 在 AIGC 应用开发方面的功能相结合,显著提升了 AIGC 应用的开发和部署效率,其高度灵活的架构提供了强大的可扩展性,并降低了生产环境中的运维复杂度。
\ No newline at end of file
diff --git a/blogs/zh/does-containerization-affect-the-performance-of-databases.mdx b/blogs/zh/does-containerization-affect-the-performance-of-databases.mdx
new file mode 100644
index 00000000..f03f24b3
--- /dev/null
+++ b/blogs/zh/does-containerization-affect-the-performance-of-databases.mdx
@@ -0,0 +1,502 @@
---
authors:
  image_url: https://avatars.githubusercontent.com/u/111858489?v=4
  name: dullboy
  url: https://github.com/nayutah
date: 2024-01-26
description: 容器化会影响数据库的性能吗?
image: /img/blogs/thumbnails/blog-containerization.png
slug: Does-containerization-affect-the-performance-of-databases
tags:
- containerization
- database performance
- Kubernetes
title: 容器化会影响数据库的性能吗?
---
# 容器化会影响数据库性能吗?

数据库容器化的浪潮正在兴起,如图1所示。数据库和分析技术已成为技术领域的重要组成部分。然而,一个普遍的困境依然存在:容器化是否会影响数据库性能?如果是,哪些因素会起作用?我们该如何应对容器化带来的性能和稳定性问题?

图 1. 按类别划分的容器化工作负载使用情况[4]

## 容器化的优势与技术原理

容器化是一种将应用程序及其所有必要组件打包成独立、可移植且不可变的运行时环境的智能方式。可以将其视为简化应用打包、部署和管理过程的技术魔法。这种魔法由Docker或Containerd等容器运行时引擎实现,这些引擎负责创建、部署和监管容器。

Kubernetes(K8s)是容器编排领域的颠覆者。这个开源平台作为管理容器的中央枢纽,提供了可扩展的基础架构,能自动化多种操作。作为主流的容器编排工具,它处理从无缝部署到高效扩缩容、全面管理和智能调度的所有环节。

### 容器化的优势

1. 灵活性与可移植性

   数据库的部署和迁移变得更简单可靠。通过容器化,可以以基础设施即代码(IaC)的形式,通过声明式API指定数据库的运行时环境和版本。

2. 资源隔离与可扩展性

   借助容器运行时引擎,容器化确保每个数据库实例在具有专用资源的独立环境中运行。这种隔离最大限度地减少了工作负载间的干扰,实现了计算资源的高效利用,同时提升了性能和可靠性。

3. 更友好的调度策略

   容器化精细的资源管理为智能调度策略铺平了道路。它支持针对不同场景定制部署策略,例如混合离线和在线工作负载以平衡资源使用,或组合多种数据库工作负载以提高整体效率。此外,提高部署密度可以显著降低计算成本。

### 容器化的技术原理与分类

#### 虚拟化

谈到容器,虚拟化不可忽视。虚拟化是一种抽象和隔离计算资源的技术,允许多个虚拟实例在同一物理服务器上同时运行。这是通过在硬件和操作系统之间使用称为Hypervisor的软件层实现的。该层将物理服务器划分为多个虚拟机,每个虚拟机都有自己独立的操作系统和资源。

容器化则是一种更轻量级的虚拟化技术。它利用操作系统级虚拟化创建隔离空间,使应用程序及其所需环境能够运行。容器化常与虚拟化结合使用,以满足不同计算场景中对隔离的各种需求。

#### 虚拟化+容器技术分类

根据资源隔离和虚拟化方式,主流的虚拟化+容器技术可分为以下几类:

1. 标准容器(Standard Containers)遵循开放容器倡议(OCI)标准,如Docker/Containerd,使用runC作为运行时,是当前K8s工作负载的首选方案。
2. 用户态内核容器(User-Space Kernel Containers)如gVisor同样符合OCI标准,采用runsc作为运行时,以更好的隔离性和安全性著称,但会牺牲部分性能,适合低要求工作负载。
3. 微内核容器(Microkernel Containers)采用Firecracker和Kata-Container等虚拟机管理程序,也符合OCI规范,使用runC或runv作为运行时,在安全性、隔离性和性能之间取得平衡,介于标准容器和用户态内核容器之间。
4. 虚拟机(Virtual Machines)包括KVM、Xen、VMWare等,构成主流云服务商服务器的虚拟化基础层,通常作为K8s中的节点运行,比容器更接近底层。

图2. 各类轻量级虚拟化方法的系统架构对比。橙色代表内核空间,绿色代表用户空间。[2]

#### OCI合规容器化技术探究

以下段落分析了几种主流的符合OCI规范的容器化技术。

1. 
RunC + + RunC是符合OCI规范的容器运行时,作为Docker/Containerd核心容器引擎的关键组件。它利用Linux的Namespace和Cgroup功能为容器创建安全隔离环境。 + + 在容器运行时,RunC通过Namespaces隔离容器的进程、网络、文件系统和进程间通信(IPC),同时使用Cgroups限制容器进程的资源消耗。这种隔离方式确保容器内的应用在相对独立的环境中运行,与主机系统及其他容器相互隔离。 + + 虽然RunC的隔离方式会引入一定开销,但这种开销仅限于namespace映射、约束检查及部分统计流程,理论上微乎其微。此外,当系统调用涉及长耗时操作时,统计开销可忽略不计。总体而言,基于Namespace+Cgroup的隔离方式对CPU、内存和I/O性能影响极小。 + + +RunC架构图 + +图3. RunC架构图 + +2. Kata Containers + + 想象一个安全的气泡,每个应用都在自己的空间运行,与外界隔绝——这正是Kata Containers通过虚拟机技术实现的场景。基于Intel的Clear Containers创新,Kata Containers将虚拟机监控器的轻量级管控与容器运行时的敏捷性相结合。 + + 每个容器都拥有独立的虚拟机,包含专属内核和用户空间,确保应用被隔离在各自的安全区间。这种方式增强了隔离性,使容器化应用难以窥探主机资源。但需要权衡的是:与传统容器运行时相比,虚拟机的启动和管理额外步骤可能导致系统调用和I/O操作略有延迟。 + +Kata Containers架构图 + + +图4. Kata Containers架构图 + + +3. gVisor + +gVisor 是一种前沿的容器运行时,它利用用户态虚拟化技术来提供增强的安全性和隔离性。gVisor 的核心是一个独特的"沙箱化内核",运行在容器内部,模拟并管理操作系统的接口。 + + 这种巧妙的设计确保容器化应用与主机内核保持隔离,防止它们直接干扰或访问主机资源。虽然这种方法显著提升了安全性,但需要注意的是,与标准容器运行时相比,它可能会导致系统调用和 I/O 性能开销的增加。 + +Architecture of gVisor + + +图 5. gVisor 架构 + + +4. Firecracker + + Firecracker 是专为无服务器计算和轻量级工作负载定制的虚拟化解决方案。它采用微虚拟机(micro-VM)概念,将每个容器视为独立的虚拟机。 + + Firecracker 核心使用 KVM(基于内核的虚拟机)实现虚拟化。每个容器运行在自己的虚拟机中,拥有独立的内核和根文件系统,并通过单独的虚拟设备模拟器与主机系统交互。这种方法确保了更高层级的安全性和隔离性。然而,与传统容器运行时相比,Firecracker 可能会导致更高的系统调用和 I/O 操作开销。 + +Architecture of Firecracker + + +图 6. Firecracker 架构 + + +#### 基础原理对比 + +表 1. 容器化中虚拟化与隔离实现方式概览 + +| | Containerd-RunC | Kata-容器 |gVisor |FireCracker-Containerd | +|:----------------|:---------| :---------| :---------| :---------| +| 隔离机制 | 命名空间 + Cgroup| 客户机内核| 沙箱化内核| 微虚拟机| +| OCI 运行时| RunC| Clear Container + runv| runsc| RunC| +| 虚拟化方式| 命名空间| QEMU/Cloud Hypervisor+KVM| 基于规则的执行| rust-VMM + KVM| +| vCPU| Cgroup| Cgroup| Cgroup| Cgroup| +| 内存| Cgroup| Cgroup| Cgroup| Cgroup| +| 系统调用| 主机| 客户机 + 主机| Sentry| 客户机 + 主机| +| 磁盘 I/O| 主机| virtio| Gofer| virtio| +| 网络 I/O| 主机 + veth| tc + veth| netstack| tap + virtio-net + +此外,已有分析探讨了 Containerd 和 CRI-O 等容器引擎在实现方式上的差异[3][5]。不过这些比较超出了本文讨论范围,留给感兴趣的读者自行探索。 + +## Kubernetes 与容器化对数据库的影响 + +如前所述,容器化为数据库带来诸多优势。它简化了数据库的部署和管理流程,提供了统一且隔离的运行环境。这项技术使得数据库能够在多样且复杂的环境中轻松部署和灵活迁移,同时也为版本控制提供了更标准化、用户友好的方式。此外,借助 Kubernetes 的支持,数据库内部的各类角色与组件能够实现无缝、动态的整合。 + +### 容器化为数据库带来的挑战 + +然而,Kubernetes 与容器化的结合也为数据库带来了诸多挑战,这些挑战源于数据库运行方式的本质特性。与常见的无状态应用不同,数据库具有以下特征: + +1. **数据库是由多角色构成的复杂应用** + + 一个功能完备的数据库由多个角色组成,每个角色承担特定功能。例如在 MySQL 主从架构中,存在两个 MySQL 容器:一个作为主节点(Primary),另一个作为从节点(Secondary)。主节点提供读写能力,而从节点为只读状态并作为热备节点。这些角色具有差异性,准确表达其不对等关系至关重要。此外,在创建、重启、删除、备份和高可用性维护等操作中,如何正确管理这些角色也极为关键。其核心在于如何管理跨容器的数据依赖关系,而当前容器和 Kubernetes 尚未提供完善的抽象方案来解决此类相互依赖性问题。 + +2. **数据库要求强大的数据持久性与一致性** + + 它们对存储有严苛要求,仅靠容器化技术无法满足,生产级负载还需要依赖容器存储接口(CSI)和持久卷(PersistentVolume)等附加组件。存储介质的选择也直接决定了数据库可支持的操作范围。例如云盘具备高持久性、快照备份能力,以及跨计算节点灵活挂载/卸载的特性,这对数据库备份、恢复和高可用保障非常有利;而本地盘在这些方面则存在局限。例如当节点故障时,本地盘上的数据副本可能永久丢失,这对维持高可用性构成重大挑战,且备份方案也会受限。不同的存储解决方案意味着不同级别的持久性、不同的数据库功能集和架构设计。 + +3. **数据库是追求极致性能的资源消耗大户** + + 数据库存在多样化的性能需求,可分为 CPU、内存、网络和存储等类别。例如在处理海量数据分析时,ClickHouse 和 Greenplum 这类产品对 CPU 和存储 I/O 的需求极高;而 Redis 和 Memcached 等数据库则更依赖内存和网络 I/O;还有 MySQL 和 PostgreSQL 这类经典的传统 OLTP 数据库,同样对 CPU 和存储 I/O 有强需求。此外,即使在单一数据库内部,不同查询类型对资源的需求也可能存在巨大差异。 + +4. 
**数据库具备特有的安全需求** + + 数据库中存储的数据通常具有高价值和高机密性,因此需要严格的环境隔离、数据访问控制和审计机制。 + +总结来说,在容器与Kubernetes结合的平台上运行数据库时,数据库本身和容器+K8s系统都面临着一系列严峻挑战。数据库需要足够灵活以应对容器的短暂生命周期、浮动IP、底层基础设施的持续升级,以及不同环境下的性能复杂性。与此同时,容器化和K8s必须解决诸如引入角色、编排容器与全局一致数据状态的底层需求、满足高性能期望,以及符合严格安全措施等问题。 + +鉴于前文提到的1、2、4点,KubeBlocks已制定了一套全面的解决方案。如需了解更多详情,可访问http://kubeblocks.io。现在回到讨论的核心,本文后续部分将更详细探讨容器化如何影响数据库性能。 + +### Kubernetes与容器化如何影响数据库性能 + +如前所述,数据库性能依赖于CPU、内存、存储和网络等关键要素。本节将深入探讨K8s和容器化可能如何从这些方面影响数据库性能。值得一提的是,虽然K8s具有某些可能影响性能的调度和亲和性策略,但这些策略与容器化并无本质关联,因此不在本文讨论范围内。 + +接下来的部分将从上述视角展示容器化如何影响应用程序(尤其是数据库)的性能。这些章节汇集了大量行业研究论文和最新测试结果,以剖析数据背后的原因和差异。我们还进行了额外测试以填补空白,重点关注先前被忽视的特定领域,例如K8s的容器网络接口(CNI)如何影响网络效率。 + +#### CPU + +测试服务器配置:四核超线程Intel Core i5-7500处理器,8GB内存,1TB硬盘,Ubuntu 18.04 LTS系统。 + +测试案例:本实验数据和场景基于文献[1]的研究。案例1中,使用sysbench以四个并发线程执行质数计算,性能以每秒处理的事件数衡量。该测试案例旨在模拟纯计算型工作负载,大部分操作发生在用户空间,系统调用可忽略不计。因此理论上预期不同容器技术的性能表现应相近。 + +结果:不同容器间的CPU性能差异可忽略不计,与裸金属系统相比仅有约4%的性能下降。 + +分析:观察到的4%性能下降很可能源于Cgroup施加的CPU限制。当Sysbench的并发进程数与超线程数相同时,极易触发Cgroup限流。此时进程因限流必须等待一个CFS周期(默认为100ms)。由于Cgroup基于jiffies而非秒级分配资源,配置4个vCPU的容器几乎不可能达到400%的利用率。预期会出现一定性能损失,且可通过Cgroup内的cpu.stat文件追踪此类限流频率。 + +CPU性能(Sysbench基准测试) + +图7. CPU性能(Sysbench基准测试)(王星宇 2022) + +案例:使用Davi1d进行视频解码,视频文件大小为数百兆字节。该测试涉及大量系统调用,因为需要从磁盘读取数据。这些系统调用会在一定程度上影响应用程序的性能。 + +结果:runC和Kata-QEMU的性能下降约4%,这与质数测试中观察到的结果一致。gVisor-ptrace表现出更显著的性能下降(13%),而gVisor-KVM提供的性能与裸机设置相当。 + +分析:视频解码涉及顺序读取,Linux对顺序读取有预读优化。因此,大多数I/O操作直接从页缓存读取数据。RunC主要受Cgroup限制约束,而其他三种解决方案更多受系统调用执行方式的影响。论文未进一步分析gVisor-ptrace和gVisor-KVM之间的差异。gVisor使用名为gofer的组件进行文件系统操作,该组件有其独特的缓存方法。进一步分析可能需要关注gVisor的系统调用过程及其缓存机制。 + +CPU性能(Dav1d基准测试) + +图8. CPU性能(Dav1d基准测试)(Xingyu Wang 2022) + +#### 内存 + +案例:RAMSpeed,包含4个子场景(复制、缩放、加法、三元组)。此处未详细说明底层原理的具体细节。 + +结果:各种解决方案的性能相似。 + +分析:一旦内存分配并处理了页错误,理论上容器化不应显著影响内存访问。真正影响内存性能的因素是mmap和brk等系统调用。但在本测试中,此类系统调用的比例极小。 + +内存访问性能 + + +图9. 内存访问性能(Xingyu Wang 2022) + + +案例:Redis-Benchmark,包含子场景(GET、SET、LPUSH、LPOP、SADD)。 + +结果:K8s+容器化对runC和Kata-QEMU影响极小,而gVisor性能显著下降。gVisor-ptrace性能下降约95%,gVisor-KVM性能下降约56%。 + +分析:Redis运行单线程应用程序,网络I/O负载较重。所有网络I/O操作均通过系统调用执行,这严重影响了gVisor的性能。原论文错误地将性能损失主要归因于内存分配。然而,Redis内部使用用户空间内存管理工具jemalloc。Jemalloc利用mmap系统调用从操作系统请求大块内存,然后在本地分配较小块。由于jemalloc成熟的内存分配和缓存机制,mmap系统调用的频率极低。当Redis满载时,网络I/O的CPU系统使用率约为70%。因此,gVisor在此场景下性能问题的主要原因是拦截系统调用的开销及其内部网络栈(称为netstack)。此评估还表明,gVisor不适合网络I/O需求密集的环境。 + +不同容器运行时的Redis性能 + +图10. 不同容器运行时的Redis性能(Xingyu Wang 2022) + +#### 磁盘I/O + +案例:IOZone读写16GB文件。 + +结果:K8s + 容器化对顺序读写性能影响可忽略不计。但Kata-QEMU表现出显著性能下降,降幅在12-16%之间。 + +分析:大块数据的读写本质上是顺序操作。如前所述,顺序读取受益于操作系统预取数据的优化能力,且大部分顺序读写任务由页缓存处理。原研究检测了Kata-QEMU的影响,发现virtio-9p文件系统是根源。virtio-9p系统最初为网络应用设计,缺乏针对虚拟化环境的专门优化。 + +Disk read and write performance + +图11. 磁盘读写性能(王星宇 2022) + +案例:在tmpfs(共享内存中的临时文件存储)上进行测试,以隔离并评估系统调用和内存拷贝对性能的影响。 + +结果:除gVisor外,其他解决方案性能相近。 + +分析:gVisor的系统调用开销更高,导致与redis-benchmark场景中观察到的类似性能下降。 + +Disk read and write performance (tmpfs overlay) + +图12. 磁盘读写性能(tmpfs覆盖层)(王星宇 2022) + +案例:单线程SQLite数据插入基准测试,执行时间越短越好。 + +结果:RunC表现与裸金属相当,Kata执行时间增加17%,gVisor执行时间增加125%。 + +分析:数据库工作负载复杂,涉及CPU、内存、网络和磁盘I/O的组合,且频繁进行系统调用。在此类复杂环境中,gVisor可能并非最优选择。 + +Database record insertion performance + +图13. 数据库记录插入性能(王星宇 2022) + +#### 网络I/O + +案例:TCP流吞吐量测试,吞吐量越高越好。 + +结果:gVisor网络性能较差,与redis-benchmark案例观察到的现象类似。其他解决方案受影响极小。 + +分析:gVisor受限于其系统调用机制和netstack实现,导致整体吞吐量较低。 + +TCP_STREAM network performance + +图14. TCP_STREAM网络性能(王星宇 2022) + +案例:本案例评估TCP_RR、TCP_CRR和UDP_RR。RR代表请求与响应,TCP连接仅建立一次并复用后续请求。CRR表示每次测试创建新TCP连接。TCP_RR对应长连接场景,TCP_CRR对应短连接场景。 + +结果:RunC表现与裸金属相当;Kata有轻微损耗;gVisor仍存在大幅性能下降,其底层原理与前述相同。 + +TCP_RR, TCP_CRR and UDP_RR performance + +图15. 
TCP_RR、TCP_CRR和UDP_RR性能(王星宇 2022) + +#### CNI网络 + +容器常与K8s配合使用,基于K8s的容器编排已成为事实标准。在K8s环境中,网络通常通过CNI与容器技术组合实现。市面上有多种广受欢迎的CNI方案,例如Calico、Flannel、Cilium等。最新版本中,Calico和Cilium都大量运用了eBPF(扩展版伯克利包过滤器)技术。尽管具体实现存在差异,这两种CNI在多数测试场景中表现出相近的性能。关于性能细节,请参阅[CNI基准测试:理解Cilium网络性能](https://cilium.io/blog/2021/05/11/cni-benchmark/)[6]。 + +以下测试通过对比Cilium eBPF传统主机路由模式与Cilium eBPF模式,来考察CNI对数据库性能的具体影响。 + +传统主机路由: + +在Cilium eBPF的传统主机路由模式中,iptables在数据包过滤和定向方面起着关键作用。它仍是设置和控制网络流量路由规则的重要工具。在此框架下,Cilium通过iptables规则将数据流导向自身的代理,随后由代理接管并进行流量处理和转发。 + +该模式下,Cilium利用iptables的NAT(网络地址转换)功能来实现地址转换和服务负载均衡。 + +基于eBPF的主机路由: + +在新的eBPF路由模式中,Cilium不再依赖iptables,而是利用Linux内核的扩展伯克利包过滤器(eBPF)进行数据包过滤和转发。eBPF主机路由允许绕过主机命名空间内所有iptables及上层协议栈开销,同时减少穿越虚拟网络接口时的部分上下文切换开销。网络数据包从面向网络的设备早期捕获后,直接递送至K8s Pod的网络命名空间。对于出站流量,数据包虽仍经过veth pair,但会被eBPF快速捕获并直接发往外部网络接口。eBPF直接查询路由表,确保该优化完全透明,并能与系统上运行的其他路由服务无缝集成。 + +传统与eBPF容器网络对比 + + +图16. 传统与eBPF容器网络对比[6] + + +测试环境: + +Kubernetes: v1.25.6 +CNI: cilium:v1.12.14 + +节点CPU: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz +内存: 128G + +Redis: 7.0.6, 2 vCPU, 最大内存: 2Gi + +测试用例: + +表2. K8s中不同服务路由路径概览 + +| | 网络类型 | 源 |目标 | +|:----------------|:---------| :---------| :---------| +| NodeLocal2HostPod | 主机网络 | 节点 | 本地Pod | +| NodeLocal | 以太网 | 节点 | 本地进程 | +| PodLocal2Pod| Pod| Pod |本地Pod| +|Node2HostPod |主机网络| 节点 |远端Pod| +|NodeLocal2NodePort |NodePort |节点 |本地NodePort| +|Node2Node |以太网| 节点| 远端进程| +|NodeLocal2Pod| Pod |节点 |本地Pod| +|Pod2Pod |Pod| Pod |远端Pod| +|Node2NodePort| NodePort |节点| 远端NodePort| +|Pod2NodePort |Pod + NodePort |Pod |远端NodePort| +|Node2Pod| Pod| 节点 |远端Pod| + +测试结果: + +传统iptables主机路由模式: + +iptables传统主机路由下的Redis基准测试 + +图17. iptables传统主机路由下的Redis基准测试 + +Comparison between Host network and Pod network under legacy host-routing + + +图 18. 传统主机路由模式下主机网络与Pod网络的性能对比 + + +基于eBPF的主机路由: + +Redis benchmark under eBPF-based host-routing + + +图 19. 基于eBPF主机路由的Redis基准测试 + + +Comparison between Host network and Pod network under eBPF-based host-routing + + +图 20. 
基于eBPF主机路由模式下主机网络与Pod网络的性能对比 + + +分析:传统主机路由方式会拖累网络效率,导致Pod网络与主机网络之间存在高达40%的性能差距。而采用eBPF实现主机路由后,两者性能表现趋于接近。无论路由规则多么复杂,这种改进都能有效弥合两类网络之间的性能鸿沟。这一技术进步具有颠覆性意义,尤其对于Redis这类重度依赖网络性能的应用而言。 + +#### 总结 + +在CPU、内存和磁盘I/O方面,runC的表现最接近裸金属。Kata Containers性能略逊于runC,但提供了更好的安全性和隔离性。由于系统调用实现方式的差异,gVisor性能表现最差——这可能与其侧重安全特性有关,不过新版gVisor正在持续优化性能表现。 + +网络性能需要特别关注,因为它受Kubernetes CNI(容器网络接口)影响。在将Cilium eBPF与runC结合的测试中发现,容器网络性能可以达到与主机网络相当的水平。此外,Cilium虽然支持Kata-containers,但与其他容器技术的兼容性存在一定限制。 + +总体而言,runC在多方面都能提供媲美传统裸金属的性能表现,因此成为运行Kubernetes工作负载的首选方案。Kata Containers虽然在速度上稍逊于runC,但通过增强隔离性在效率与安全之间取得了良好平衡。而gVisor虽然能实现更灵活的隔离,却以性能下降为代价,更适合安全性优先于速度的场景。Firecracker通常适用于与Kata Containers相似的场景。 + +因此,对于数据库工作负载,我们推荐优先考虑runC和Kata-containers方案。 + +### 常见数据库性能问题 + +数据库性能问题困扰着众多用户。本节将深入分析导致此类问题的典型场景,剖析数据库及其支撑基础设施的复杂工作机制,并重点介绍我们团队正在着力改进的领域。 + +#### 磁盘I/O挂起 + +设想这样一种场景:MySQL 正忙于将临时文件写入页缓存(page cache),这涉及到对Ext4文件系统元数据的频繁更新。在此类高强度操作期间,CPU和I/O都可能处于高负载状态。MySQL进程可能会频繁遭遇CPU节流(throttling),导致脏页(dirty pages)不断堆积。最终系统会尝试通过刷写这些脏页来清理缓存,但这可能使硬件通道被脏I/O操作完全占满。如果恰巧此时持有Ext4日志锁(Journal Lock)的进程被CPU暂停,就会导致使用同一文件系统的其他进程陷入冻结状态。若此类暂停频繁发生且持续时间较长,就可能引发IO挂起(IO hang)。该问题在共享本地磁盘的环境中尤为突出,例如裸金属系统或使用hostpath CSI存储的场景。目前公认的解决方案是通过Cgroup V2提供的BufferedIO流控功能来调节写入流量。 + +如图所示,瓶颈往往并非源于单一问题,而是由多个相互关联的要素复杂交织形成。就磁盘I/O挂起而言,涉及多个组件:页缓存与内存和磁盘I/O交互;CPU节流与CPU调度机制相关;而Ext4日志系统则与锁机制紧密相连。正是这些因素相互影响的复杂网络,最终导致了全面的IO挂起。 + +值得一提的是,许多数据库厂商推荐使用XFS作为首选文件系统来优化I/O操作。若想深入了解磁盘I/O对数据库的深远影响,可参阅[《Kubernetes上优化PG性能的测试报告》](./../blog/a-testing-report-for-optimizing-PG-performance-on-kubeblocks.md)[7]。 + +#### 内存不足(OOM) + +采用Cgroup进行内存隔离后,操作系统的内存管理方式与传统裸金属环境截然不同。这种变化使得系统在内存分配和回收方面面临更多挑战和更高要求。 + +假设某个Pod被配置为请求和限制均为1GB内存。在这1GB物理内存范围内,所有页面的分配与回收都必须完成。考虑到数据库本身就是内存消耗大户,即便启动一个空数据库就可能占用数百MB内存,这导致实际应用可用的内存空间极为有限。如果再加入监控、日志采集等通常以边车(sidecar)形式运行的附加任务,数据库很快就会面临极高的内存耗尽风险。 + +但真正的恐怖之处并不在于内存不足(OOM)错误本身,而在于OOM发生前那段漫长而痛苦的性能衰退过程。对于数据库和同节点上的其他Pod而言,这无异于无尽的噩梦。在系统最终因OOM崩溃前,页面回收机制会陷入低效的慢路径(slow path),徒劳地反复尝试回收足够内存,直到达到设定阈值才放弃。在此期间,连接到数据库的客户端可能会遭遇大量事务超时和连接中断问题。 + +被称为"页面回收慢路径"的过程不仅会干扰单个Cgroup命名空间,还会对整个操作系统产生更广泛的影响。这是因为操作系统在主机级别共享许多数据结构。以Pod的内存为例:理论上它可能属于特定的Cgroup命名空间,但实际上主机内核通过依赖全局锁的统一伙伴系统(Buddy System)来管理它。这意味着如果一个Pod面临严重的内存压力并触发页面回收慢路径,可能会无意中拖累其他运行良好的Pod的内存管理子系统。极端情况下,这可能导致整个节点上的数据库性能下降,而原因仅仅是一个Pod的内存限制设置过于严格。 + +要彻底解决这个问题,需要更精细的隔离策略,例如采用微内核或虚拟机技术,为不同Pod分配独立的内存管理空间。此外,另一种方法是在OOM不可避免时,主动监控和评估数据库内的各项性能指标,从而确保采取"快速失败"策略。 + +#### 连接数过多 + +OLTP数据库通常具有专门预分配的缓冲池,其内存分配相对稳定。而容易波动的组件包括连接结构体、中间计算的工作内存、页表、页缓存等。 + +对于PostgreSQL和Oracle等多进程模型数据库,每个数据库连接本质上都是一个独立进程。假设您有一个大型缓冲池(内存中的数据存储区),当创建新进程时,系统需要建立映射来跟踪所有这些数据,而这个映射表并不小。缓冲池中每4KB数据需要8字节的映射条目,因此页表与缓冲池的比例为8/4K=1/512。如果有512个连接,这些条目所需的内存就与缓冲池本身一样大!这会严重限制数据库的扩展能力,特别是在需要同时处理大量用户时,导致许多用户可能注意不到的巨大隐性内存成本。 + +通常有两种策略可以解决这个问题。第一种策略是在数据库前部署代理层。该层拦截大量传入连接,但仅维持少量到实际数据库后端的连接。例如,如果代理保持P个到后端数据库的连接,但可以处理来自应用程序的C个连接(C >> P),这种连接复用能显著减轻数据库负载。第二种策略使用大页(Hugepages),如果大小为2M,页表与缓冲池的比例将变为1/256k(从8/2M计算得出)。这种调整几乎消除了页表开销,使多进程模型能够支持更多连接。然而,大页技术自身也存在复杂性,会给资源管理带来额外压力。因此,基于代理的方案通常被视为更优且用户友好的选择。 + +多线程处理主要有两种方式。第一种方式为每个连接分配一个线程。虽然这避免了连接数增加时复制页表的问题,但可能导致资源冲突和过多上下文切换,进而降低性能。不过,通过引入代理可以缓解这些问题。第二种方式使用线程池,由较少的线程(P)处理较多的连接(C)(C >> P),Percona MySQL等系统采用这种方式。 + +无论是代理还是线程池,都旨在通过不同的实现方式实现连接复用。此外,组合使用这些策略可以提升系统容量并降低总体负载。 + +表 3. 不同数据库进程-连接模型概览 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
连接数:进程数 页表 备注
多进程代理C:P*PC >> P
直连C:C*C
多线程线程池C:P*1C >> P
每线程C:C*1
#### TCP 重传

网络主要从两个方面影响数据库性能。

一是延迟。网络延迟会影响数据传输时长,进而影响客户端的整体响应时间。随着延迟增加,在相同时间内处理相同数量的请求需要更多连接,导致内存使用量增加、上下文切换频繁和资源争用加剧,这些都会随时间推移降低性能。

二是带宽。网络传输质量和相关延迟很大程度上取决于单个TCP连接的可用带宽,以及网络设备和交换机端口的峰值带宽能力。这些环节中任何一处出现拥塞都可能导致操作系统内核或硬件层面的丢包,进而引发重传和乱序问题,进一步增加延迟并引发连锁性能问题。

除性能外,网络问题还会影响系统可用性和稳定性,例如因高延迟导致心跳超时触发故障转移,或造成主备系统间数据复制出现显著延迟。

#### CPU 调度等待

在某些基于虚拟机的容器化方案中,容器内运行的进程在宿主机内核中没有直接对应实体。对宿主机内核而言,它只能看到属于虚拟机虚拟化层的进程。如果在虚拟机内发现某个进程处于"运行中"状态,并不代表它在宿主机上实际运行。这是因为宿主机和虚拟机运行在两个独立的CPU调度系统上。只有当虚拟机内进程处于"运行中"状态,且宿主机上对应的虚拟机进程也处于活跃状态时,该进程才真正开始执行。

从进程被设为"运行中"到真正执行之间的间隔称为额外调度等待时间。这种延迟会影响数据库性能,在性能关键场景中,可以通过降低宿主机负载或配置虚拟机CPU亲和性等策略来减轻影响。

#### 锁与闩锁

在数据库技术中,锁(Lock)用于保护资源,而闩锁(Latch)用于保护临界区。尽管目的不同,但两者在操作系统层面依赖相同的底层机制。例如在Linux中,常用futex来实现更高层次的互斥锁和条件变量。

当CPU、I/O和内存等资源充足时,数据库的可扩展性通常受限于其自身的事务和锁系统。以TPC-C基准测试为例:大多数单机数据库的可扩展性上限在32核(64超线程)~64核(128超线程)之间。超过32核后,额外CPU对数据库整体性能的提升效果会逐渐减弱。

此问题的讨论与容器关系不大,本文不再赘述。

#### 各类性能瓶颈

表 4. 不同数据库性能瓶颈概览

| | 存储引擎 |磁盘 I/O |I/O 单元 |进程模型 |性能瓶颈 |
|:----------------|:---------| :---------| :---------|:---------|:---------|
|MySQL|InnoDB|DirectIO + BufferedIO|页|多线程|I/O 带宽 + 锁 + 连接数|
|PostgreSQL|HeapTable|BufferedIO|页|多进程|I/O 带宽 + 锁 + 连接数|
|MongoDB|WiredTiger|BufferedIO/DirectIO|页|多线程|I/O 带宽 + 锁 + 连接数|
|Redis|RDB + Aof|BufferedIO|键值对|单线程*|CPU 系统态(网络)|

- 对于 MySQL,监控溢出临时文件的管理至关重要。这些文件通过 BufferedIO 管理,如果没有通过 Cgroup 进行适当限制,可能会导致操作系统中脏页快速累积。这将形成一个性能瓶颈:刷脏页操作几乎会耗尽存储设备的全部带宽,导致常规请求处理变慢甚至停滞——这是典型的磁盘 I/O 挂起案例。
- PostgreSQL 采用多进程模型,因此需要重点监控连接数和页表大小。虽然 Hugepages 可以缓解页表的部分压力,但其本身也存在缺陷。使用 pgBouncer 等代理实现连接池是更好的解决方案。当启用全页写入时,PostgreSQL 对 I/O 带宽有极高需求,此时 I/O 带宽会成为瓶颈。当 I/O 和连接数都运作良好时,在高并发场景下 PostgreSQL 的内部锁机制可能成为瓶颈。更多细节可参考[《Kubernetes 上优化 PG 性能的测试报告》](./../blog/a-testing-report-for-optimizing-PG-performance-on-kubeblocks.md)[7]。
- MongoDB 通常能提供稳定的性能表现,但容易遇到磁盘 I/O 和连接数限制的问题。WiredTiger 存储引擎在缓存与 I/O 之间的流量管理方面表现优异,即使在高 I/O 资源需求下也能最大限度避免 I/O 挂起。但需要注意,OLTP(在线事务处理)数据库的工作负载比 MongoDB 更为复杂,保持平衡更具挑战性。
- Redis 的性能瓶颈通常出现在网络层面,因此必须密切关注应用与 Redis 服务器之间的延迟。延迟质量取决于网络连接状况。当 Redis 满负荷运行时,网络栈会消耗超过 70% 的 CPU 资源。为解决这一难题并提升网络性能,Redis 6.0 推出了新特性:网络 I/O 多线程。尽管有此升级,Redis 的核心工作线程仍保持单线程设计,在提升整体效率的同时保留了平台标志性的简洁性。

## 摘要

本文基于对行业研究的全面梳理,通过测试容器与网络CNI的组合填补了研究空白,深入探讨了容器化对CPU、内存、磁盘I/O和网络性能的影响机制,并提出了解决方案。测试数据分析表明,runC + cilium eBPF提供了接近裸金属性能的容器化方案。对于追求更高安全隔离性的场景,Kata-containers展现出卓越的替代价值。

进一步地,本文在容器化基础上对数据库常见性能瓶颈进行了理论分析,指出重负载数据库对Host Kernel的复杂依赖关系,特别关注了页表、日志锁、TCP重传和CPU调度等待等易被忽视的因素。这些问题大多并非容器化特有,而是普遍存在的共性问题。最后,本文对几款热门数据库进行了定性分析,并基于团队多年运维经验总结了常见问题,希望这些问题能持续获得关注并在架构层面得到解决。

数据库容器化已成为高频讨论话题。做,还是不做?这个问题萦绕在每个决策者心头。从我们的视角看,数据库容器化在性能、稳定性、有状态依赖等方面的关键挑战正被逐个击破。只要有需求,每个挑战都会得到解答。

# 参考文献

[1] Wang, Xing et al. "Performance and isolation analysis of RunC, gVisor and Kata Containers runtimes." Cluster Computing 25 (2022): 1497-1513.

[2] Goethals, Tom et al. "A Functional and Performance Benchmark of Lightweight Virtualization Platforms for Edge Computing." 2022 IEEE International Conference on Edge Computing and Communications (EDGE) (2022): 60-68.

[3] Espe, Lennart et al. "Performance Evaluation of Container Runtimes." International Conference on Cloud Computing and Services Science (2020).

[4] 10 insights on real-world container use: https://www.datadoghq.com/container-report/.

[5] Kube container Performance CRI-O vs containerD maybe alternatives: https://www.reddit.com/r/kubernetes/comments/x75sb4/kube_container_performance_crio_vs_containerd/.

[6] CNI Benchmark: Understanding Cilium Network Performance: https://cilium.io/blog/2021/05/11/cni-benchmark/. 
+ +[7] A testing report for optimizing PG performance on Kubernetes: https://kubeblocks.io/blog/A-testing-report-for-optimizing-PG-performance-on-Kubernetes. \ No newline at end of file diff --git a/blogs/zh/does-running-mysql-on-kubernetes-lead-to-significant-performance-degradation.mdx b/blogs/zh/does-running-mysql-on-kubernetes-lead-to-significant-performance-degradation.mdx new file mode 100644 index 00000000..814b8e93 --- /dev/null +++ b/blogs/zh/does-running-mysql-on-kubernetes-lead-to-significant-performance-degradation.mdx @@ -0,0 +1,184 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/1814084?v=4 + name: Thomas + url: https://github.com/realzyy +date: 2023-08-18 +description: 在Kubernetes上运行MySQL会导致显著的性能下降吗? +image: /img/blogs/thumbnails/blog-mysql.png +slug: Does-running-MySQL-on-Kubernetes-lead-to-significant-performance-degradation +tags: +- benchmark +- MySQL database operator +title: 在Kubernetes上运行MySQL会导致显著的性能下降吗? +--- +# 在Kubernetes上运行MySQL会导致显著的性能下降吗? + +在Kubernetes上运行MySQL会导致显著的性能下降吗?尽管最近有人提出了这个担忧,但很少有人进行测试,结果也很少公开分享。 +为了回答这个问题,我们使用了一个流行的基准测试工具,在以下典型场景中评估MySQL的吞吐量和延迟,并尝试给出报告和我们的见解: +- MySQL和基准测试工具部署在同一个K8s集群中,模拟应用和数据库运行在同一个K8s集群的情况。 +- MySQL和基准测试工具部署在两个K8s集群中,模拟应用和数据库运行在两个K8s集群的情况。 + +此外,我们使用相同的测试方法获取了Amazon RDS MySQL的性能数据。通过比较Amazon RDS MySQL的性能数据,用户可以更全面地了解K8s上的MySQL性能是否能满足其生产需求。 + +## 方法论 + +### 应用负载 +作为LAMP技术栈的一部分,MySQL常被用于构建网站和Web应用。用户通常不会在MySQL中运行长事务或复杂查询,因此我们采用包含相对简单事务和查询的OLTP工作负载来测试性能。以下是几种具有代表性的工作负载: +- 读密集型负载:80%操作为读取,20%为写入 +- 读写均衡负载:50%操作为读取,50%为写入 +- 写密集型负载:20%操作为读取,80%为写入 + +我们选择[sysbench](https://github.com/akopytov/sysbench)作为基准测试工具。这是一个广泛使用的工具,支持多线程脚本化运行。Sysbench能够模拟上述应用负载,并以每秒查询数(QPS)输出吞吐量指标,以99百分位数(ms)输出延迟指标。 + +### 开源MySQL Operator + +对于K8s而言,MySQL是一个复杂的有状态应用,需要通过operator进行部署和配置。我们选取了以下几个开源MySQL operator进行测试,其信息如下: +- [MySQL Operator for Kubernetes](https://github.com/mysql/mysql-operator) by the Oracle team +该Operator用于管理Kubernetes集群内的MySQL InnoDB Cluster设置,提供包含自动化升级和备份在内的完整生命周期管理。 + +- [XtraDB Cluster Operator](https://github.com/percona/percona-xtradb-cluster-operator) by the Percona team +基于Percona XtraDB Cluster的最佳实践,该Operator提供了在本地或云端的Kubernetes环境中快速、一致地部署和扩展Percona XtraDB Cluster实例所需的全部功能。 + +- [KubeBlocks](https://github.com/apecloud/kubeblocks) by the ApeCloud团队 +KubeBlocks是一个开源Kubernetes operator,用于管理公有云或本地环境中的关系型、NoSQL、向量和流式数据库。它专为生产环境设计,在大多数场景下提供可靠、高性能、可观测且经济高效的数据基础设施。 + +### 基础设施 - 计算、存储与网络 + +我们采用全球最大的云计算供应商AWS来提供基准测试所需的计算、存储和网络资源。通过测试Amazon RDS多可用区双备用实例建立了基准性能。 + +我们测试了三种主流实例类型:4 vCPU/16GB内存、8 vCPU/32GB内存以及16 vCPU/64GB内存。选择这些实例类型的原因是:更小规格的实例通常用于开发环境且不存在性能瓶颈,而更大规格的实例使用范围有限、代表性不足。我们选择了最新的Intel CPU平台实例类型,虽然AMD和ARM平台在特定场景下可能表现更优,但测试结论与本测试保持一致。 + +我们指定gp3作为主要块存储选项。该选择可确保3000 IOPS的稳定基线性能,同时支持根据卷大小弹性扩展IOPS和吞吐量。在gp3不适用的情况下,我们会考虑使用io2作为替代方案。虽然gp3和io2都能提供卓越性能,但需要注意成本差异。我们为不同vCPU数量配置了多种卷大小,并调整IOPS以在CPU密集型与IO密集型场景下均获得最佳性能。 + +为满足生产环境的灾难恢复需求,我们在同一区域但不同可用区的EC2实例上部署了MySQL。这使得我们能够观察跨可用区容器网络对MySQL性能的影响。此外,我们在MySQL上层配置了四层负载均衡器,用于处理来自其他K8s集群上部署的sysbench的请求。 + +## 洞察分析 + +### 参数默认值对吞吐量影响最大 + +无论是全托管MySQL服务还是自建MySQL,都提供了参数的自动配置并允许用户手动调整。由于大多数用户对MySQL参数并不熟悉,这些参数的默认值就显得尤为重要。本报告测试中,对MySQL性能影响较大的参数包括:innodb_flush_log_at_trx_commit、innodb_redo_log_capacity和sync_binlog。它们的默认值如下: + +| | AWS RDS MySQL集群 | KubeBlocks operator | Percona operator | MySQL operator | +|--------------------------------|-------------------|---------------------|------------------|----------------| +| innodb_flush_log_at_trx_commit | 1 | 1 | 0 | 1 | +| innodb_redo_log_capacity | 100M | 2G | 100M | 100M | +| sync_binlog | 1 | 1 | 1 | 1 | + 
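为便于核对各产品实际生效的参数值,下面给出一个查验与调整的示意(假设可以通过 mysql 客户端直连实例,`<host>`、`<user>` 为需要替换的占位符;`innodb_redo_log_capacity` 自 MySQL 8.0.30 起才提供,且支持在线动态修改):

```bash
# Check the current values of the three parameters discussed above
mysql -h <host> -u <user> -p -e "SHOW VARIABLES WHERE Variable_name IN ('innodb_flush_log_at_trx_commit', 'innodb_redo_log_capacity', 'sync_binlog');"

# Increase the redo log capacity to 2G (2147483648 bytes) for better throughput
mysql -h <host> -u <user> -p -e "SET GLOBAL innodb_redo_log_capacity = 2147483648;"
```
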
+为了实现MySQL的AZ级故障恢复能力,用户必须将innodb_flush_log_at_trx_commit和sync_binlog都设置为1,但这可能会带来一定的性能损失。AWS RDS MySQL集群、KubeBlocks operator和MySQL operator的参数默认值遵循这一最佳实践,而Percona operator由于其数据复制的实现方式采用了不同的默认值。 +为了获得更好的性能,用户应将innodb_redo_log_capacity设置为2G。这可能需要牺牲一些存储空间,但可以获得更好的吞吐量。AWS RDS MySQL集群、Percona operator和MySQL operator在这方面还有提升空间。通过手动修改AWS RDS MySQL集群的innodb_redo_log_capacity设置,可以大幅提升吞吐量。 + +### IOPS对响应时间影响最大 + +无论是全托管MySQL服务还是自建MySQL,响应时间(RT)都与并发线程数呈正相关。增加并发线程数会导致RT上升。当负载较重时,增加并发线程数会带来额外负担,降低整体吞吐量,甚至导致数据库崩溃。通常情况下,大多数MySQL连接处于空闲状态,活跃连接数并不高。在10个并发线程的测试中,MySQL的RT数据如下: + +
+ mysql rt +
+ +本文选择的三个工作负载都涉及对EBS的大量读写操作。通过将innodb_redo_log_capacity设置为2G,KubeBlocks在IO密集型场景下具有更低的RT。将IOPS调整到12000后,AWS RDS MySQL集群的RT有明显改善。为了获得更好的性能,用户应购买足够的IOPS。但需注意AWS的IOPS价格较高,并非越多越好。 + +### 在K8s上运行MySQL不一定会导致性能下降 + +本报告还测试了在IOPS充足情况下的其他场景,以验证缓冲池命中率和跨K8s集群网络可能产生的影响。具体测试数据如下: + +
+ max throughput +
+ +当数据未完全加载到内存时,AWS RDS MySQL集群的吞吐量略低于由KubeBlocks operator管理的MySQL。随后,将sysbench部署到另一个K8s集群以访问AWS RDS MySQL集群和由KubeBlocks operator管理的MySQL。sysbench模拟的三种工作负载均实现了一定性能提升,但AWS RDS MySQL集群的吞吐量仍略低于由KubeBlocks operator管理的MySQL。 + +我们可以得出以下结论: +1. 参数配置不当会导致MySQL性能下降。 +2. IOPS不足会导致MySQL性能下降。 +3. 缓冲池命中率低会导致MySQL性能下降。 +4. 如果以上三种情况均未发生,在K8s上运行MySQL不会对性能产生负面影响。 + +以上结论已在AWS上得到验证,很可能在其他公有云上仍然成立。但请注意,本报告未在裸金属服务器上进行验证,因此结果可能存在偏差。 + +## 详细基准测试 + +### MySQL 和 sysbench 部署在同一个 K8s 集群中 + +
+ 在同一个K8s集群中部署MySQL和sysbench +
+ +#### 读密集型工作负载 + +
+ 图1 + 图3 + 图6 +
+
+ 图2 + 图5 + 图7 +
+ +#### 读写均衡工作负载 + +
+ 图1 + 图3 + 图5 +
+
+ 图2 + 图4 + 图6 +
+ +#### 写密集型工作负载 + +
+ 图1 + 图3 + 图5 +
+
+ 图2 + 图4 + 图6 +
+ +### MySQL 和 sysbench 部署在两个 K8s 集群中 + +
+ MySQL和sysbench部署在两个K8s集群中 +
+ +#### 读密集型工作负载 + +
+ 图1 + 图3 + 图5 +
+
+ 图2 + Image 1 + Image 3 + Image 5 +
+
+ Image 2 + Image 4 + Image 6 +
+ +#### 读写均衡型工作负载 + +
+ Image 1 + Image 3 + Image 5 +
+
+ Image 2 + Image 4 + Image 6 +
\ No newline at end of file diff --git a/blogs/zh/how-to-fix-pod-stuck-in-terminating-status.mdx b/blogs/zh/how-to-fix-pod-stuck-in-terminating-status.mdx new file mode 100644 index 00000000..94cec155 --- /dev/null +++ b/blogs/zh/how-to-fix-pod-stuck-in-terminating-status.mdx @@ -0,0 +1,272 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/28781141?v=4 + name: free6om + url: https://github.com/free6om +date: 2024-08-19 +description: Pod 处于 Terminating 状态无法终止是 Kubernetes 用户常遇到的难题。本文将通过场景复现问题成因,深入分析其根本原因,并提供解决方案。 +image: /img/blogs/thumbnails/blog-pod-terminating.png +slug: how-to-fix-pods-stuck-in-terminating-status +tags: +- pod +- pods stuck in terminating +- K8s +title: 如何修复 Kubernetes 中 Pod 卡在 Terminating 状态的问题? +--- +# 如何修复 Kubernetes 中 Pod 卡在 Terminating 状态的问题? + +遇到 Pod 卡在 "Terminating" 状态是每个 Kubernetes 用户都会经历的偶尔但几乎不可避免的问题之一。 + +最近,在协助一位社区用户测试从 KubeBlocks v0.8 升级到 v0.9 时,我们遇到了一个集群无法删除的情况。经过一整天的反复排查,我们找出了几个奇怪问题的原因,但有一个问题始终存在 —— 一个 Pod 卡在 Terminating 状态。巧合的是,我当时也在与 KubeBlocks SIG 成员讨论 Pod 生命周期管理,因此决定借此机会深入探究问题的根源。 + +## 问题复现 + +经过回忆和多次尝试,复现该问题的步骤如下: + +1. 在 KubeBlocks v0.8 中运行 `kbcli playground init`。 +2. 运行 `kbcli cluster create xxxx`。 +3. 将 kbcli 升级至 v0.9 版本。 +4. 在 KubeBlocks v0.9 中运行 `kbcli kubeblocks upgrade`,此时 `helm upgrade job` 超时并失败。 +5. 运行 `helm uninstall kubeblocks`。 +6. 运行 `Delete cluster xxxx`。 +7. 运行 `helm install kubeblocks`。 +8. 此时 `Cluster xxxx` 仍保持 Deleting 状态。 + +## 问题排查 + +### 定位问题根源 + +根据过往经验,当集群删除失败时,通常是由于某些附属资源无法被移除。我们的社区用户也观察到,Pod 和 PVC 都卡在了 Terminating 状态。由于 PVC 在关联的 Pod 被删除前无法移除(受 finalizer 保护机制影响),问题焦点自然转向了 Pod 为何无法删除。 + +首先检查 Pod 对象的 YAML 文件。根据 Kubernetes 官方文档中关于 Pod 终止流程的说明,我们筛选出与终止相关的字段: +相关字段仍然较多,但我们需要系统性地逐项分析。幸运的是,分析过程很快有了结论: + +1. `deletionTimestamp` 已设置 +2. `finalizer` 为空 +3. 已超过 `terminationGracePeriodSeconds(设置为30秒)` +4. 所有容器均已 `terminated` +5. 两个容器以非零代码退出:137 和 143 +根据 K8s 文档,此时 kubelet 应该将 pod phase 更新为 Failed,但当前仍显示为 `Running` + +看起来 kubelet 的实际行为与文档描述不符,我们需要进一步查看 kubelet 日志来了解具体情况。 + +### Kubelet 日志分析 + +再次面对冗长的日志文件,但幸运的是错误信息出现在末尾附近: +![](/img/blogs/blog-pod-terminating-1.png) + +日志显示 kubelet 在尝试卸载数据卷时,由于找不到对应的 Hostpath CSI Driver 而报错。经确认,Hostpath CSI Driver 确实在之前的某个步骤中被移除。我们立即重新安装了该驱动。 + +```bash +kbcli addon enable csi-hostpath-driver +``` + +很遗憾,虽然卸载过程不再报错,但Pod和PVC仍卡在Terminating状态: + +![](/img/blogs/blog-pod-terminating-2.jpeg) + +![](/img/blogs/blog-pod-terminating-3.jpeg) + +![](/img/blogs/blog-pod-terminating-4.jpeg) + +kubelet日志未能提供更多线索。 + +![](/img/blogs/blog-pod-terminating-5.png) + +### Kubelet详细日志 + +正如任何维护良好的开源项目一样,健全的日志机制至关重要。 + +查阅文档后,我们确认kubelet的启动命令和配置文件都支持设置日志级别。该值默认为0,最高可设为7。 + +于是我们尝试将日志级别提升至6以获取更详细的日志。 + +遗憾的是,尽管多次尝试,我们无法在K3d中修改日志级别,甚至连GPT-4o也无计可施。 + +随后我们额外花费时间在minikube中复现该问题,并成功将日志级别设置为6: + +```bash +minikube start --extra-config=kubelet.v=6 +``` + +然后我们获取到了日志: + +```bash +minikube logs -f +``` + +### 简化复现流程 + +最初的复现步骤相当耗时。根据先前的分析,该问题与KubeBlocks并无直接关联,因此我们可以按以下方式简化流程: + +1. 创建一个Pod和PVC。 +2. 禁用Hostpath CSI驱动。 +3. 
删除Pod和PVC以触发问题。

随后我们创建了以下Pod和PVC对象:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-xxxxxx
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: hello-xxxxxx
spec:
  containers:
    - name: nginx
      image: nginx:latest
      volumeMounts:
        - name: volume-xxxxxx
          mountPath: /data
  volumes:
    - name: volume-xxxxxx
      persistentVolumeClaim:
        claimName: data-xxxxxx
```

我们已将 CSI 驱动切换为使用 minikube 内置的 Hostpath:

```bash
minikube addons enable csi-hostpath-driver
```

通过简化的步骤,重现问题并分析冗长的 kubelet 日志变得更容易且更高效。

### 发现更多信息

由于日志量庞大,需要极大的耐心进行梳理。经过一段时间后,浮现出两个疑点。

#### 疑点 1:每 100 毫秒就会出现一条消息报告 "Pod 已终止,但部分存储卷尚未清理"

```bash
Jul 15 03:49:33 minikube kubelet[1432]: I0715 03:49:33.850919 1432 kubelet.go:2168] "Pod is terminated, but some volumes have not been cleaned up" pod="default/hello-xxxxxx" podUID="9a42f711-028a-4ca2-802a-0e4db734592d"
```

#### 疑点 2:我们注意到频繁出现指数级增长的 "failed to remove dir ...: device or resource busy" 错误消息

```bash
Jul 15 03:49:34 minikube kubelet[1432]: E0715 03:49:34.053463 1432 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/hostpath.csi.k8s.io^e9216f9a-425b-11ef-9abf-4e607aa5fc75 podName:9a42f711-028a-4ca2-802a-0e4db734592d nodeName:}" failed. No retries permitted until 2024-07-15 03:51:36.0534518 +0000 UTC m=+881.487053596 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "volume-xxxxxx" (UniqueName: "kubernetes.io/csi/hostpath.csi.k8s.io^e9216f9a-425b-11ef-9abf-4e607aa5fc75") pod "9a42f711-028a-4ca2-802a-0e4db734592d" (UID: "9a42f711-028a-4ca2-802a-0e4db734592d") : kubernetes.io/csi: Unmounter.TearDownAt failed to clean mount dir [/var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount]: kubernetes.io/csi: failed to remove dir [/var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount]: remove /var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount: device or resource busy
```

疑点 1 表明kubelet会定期检查Pod关联的资源是否已释放,但发现部分存储卷未被清理。

疑点 2 指出kubelet尝试删除PVC对应的目录时,该目录仍在使用中。

两者都指向存储卷资源未被释放的问题。

于是我们登录minikube检查目录未被释放的原因:

```bash
root@minikube:/var/lib/kubelet/pods# lsof +D /var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount
root@minikube:/var/lib/kubelet/pods# mount | grep /var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount
/dev/vda1 on /var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount type ext4 (rw,relatime)
```

其中,`lsof`命令未返回任何结果,表明没有进程在使用该目录;而`mount`命令返回了结果,说明该目录仍处于挂载状态。

这进一步缩小了问题范围:与PVC对应的目录未被卸载或卸载失败。

### 直接得出结论

至此,我们可以得出结论:在删除Pod和PVC的过程中,CSI Driver被重启,导致与PVC对应的目录未能正确卸载,从而阻塞了Pod的终止流程。

解决方案是手动卸载该目录。

### 还有更多疑问?

但等等,CSI Driver本质上不是存储资源的控制平面吗?控制平面的重启不应阻碍存储资源的释放——Kubernetes不可能如此脆弱。

这表明存在更深层次的问题:为什么CSI Driver的重启会导致PVC目录无法卸载?
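在继续深挖根因之前,先补充一个应急处理的示意:如果需要让卡住的 Pod 立即完成终止,可以登录节点手动卸载残留的挂载点(以下路径沿用前文的输出,实际操作时请替换为您环境中的 Pod UID 和 PVC 目录):

```bash
# On the node (minikube in this case), locate the leftover mount of the terminating pod
mount | grep 9a42f711-028a-4ca2-802a-0e4db734592d

# Unmount it manually; kubelet's periodic cleanup can then remove the directory,
# and the pod terminates normally
sudo umount /var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount
```
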
### 深入挖掘

这无疑是kubelet或CSI Driver中的一个bug。下一步是定位这个bug。由于日志未能提供更多线索,我们需要深入代码层面。

| 组件名称 | 版本 |
| :------------- | :------ |
| K8s | 1.29.3 |
| Hostpath CSI Driver | 1.11.0 |

基于先前的疑点,我们利用错误信息追踪到对应的代码位置。

代码位置1:

![](/img/blogs/blog-pod-terminating-6.png)

这里可以清楚地看到,在Pod终止过程中,kubelet每100毫秒检查一次存储卷是否已被清理。直到该方法返回`true`,这个循环才会结束,意味着kubelet会一直卡在这个点。因此,Pod删除流程无法继续,Pod保持Terminating状态。

这一行为与我们观察到的现象以及"疑点 1"中的日志完全吻合。

代码位置2:

![](/img/blogs/blog-pod-terminating-7.png)

这里我们看到kubelet确实尝试删除与PVC对应的目录。继续向上查看代码,我们发现如果删除失败,kubelet会采用指数退避的重试机制。

但一个新的发现是:`removeMountDir`只有在`csi.NodeUnpublishVolume`成功返回(即卸载操作成功)时才会执行。

因此,问题根源被锁定在Hostpath CSI Driver上。

### 真凶浮现

接下来我们检查Hostpath日志,其中报告 "Volume ... is not published at ..."(卷未在该路径发布)。

```bash
I0715 03:49:34.052729 1 server.go:101] GRPC call: /csi.v1.Node/NodeUnpublishVolume
I0715 03:49:34.052749 1 server.go:105] GRPC request: {"target_path":"/var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount","volume_id":"e9216f9a-425b-11ef-9abf-4e607aa5fc75"}
I0715 03:49:34.053026 1 nodeserver.go:228] Volume "e9216f9a-425b-11ef-9abf-4e607aa5fc75" is not published at "/var/lib/kubelet/pods/9a42f711-028a-4ca2-802a-0e4db734592d/volumes/kubernetes.io~csi/pvc-ea3178bf-fc12-432c-a01b-8389b04c508e/mount", nothing to do.
```

原来目录并未发布——难道是重启后元数据丢失了?会不会元数据只存储在内存中?这看起来不太可靠。

不过我们必须考虑到,该CSI驱动是Hostpath,根据官方文档说明,它仅用于测试用途。真的会这么不可靠吗?

为了确认这一点,我们进行了进一步的代码调查。

![](/img/blogs/blog-pod-terminating-8.png)
![](/img/blogs/blog-pod-terminating-9.png)

看起来状态元数据确实存储在文件中。打开文件后,我们发现条目显示为:`"Published": null`。

```bash
minikube ssh
cat /var/lib/csi-hostpath-data/state.json

{"Volumes":[{"VolName":"pvc-be2792ca-6937-4296-a234-381bd7c94f1d","VolID":"f595ce89-4283-11ef-ba81-6a2e54b029dc","VolSize":2147483648,"VolPath":"/csi-data-dir/f595ce89-4283-11ef-ba81-6a2e54b029dc","VolAccessType":0,"ParentVolID":"","ParentSnapID":"","Ephemeral":false,"NodeID":"","Kind":"","ReadOnlyAttach":false,"Attached":false,"Staged":null,"Published":null}],"Snapshots":null}
```

### 解决问题

谜底揭晓:Hostpath CSI Driver 在持久化元数据时存在一个缺陷。
是时候向官方仓库提交 issue 和 PR 了。但在提交前,我检查了现有的 issue 列表,发现[该问题已被报告过](https://github.com/kubernetes-csi/csi-driver-host-path/issues/457)。

好消息!已经有人报告了这个问题,并且它已被修复。我们只需要按照[这个 PR](https://github.com/apecloud/helm-charts/pull/21) 中的说明升级驱动即可。

## 总结

实际的故障排查过程远比描述的更加复杂且耗费了大量精力。为了让解释更易于理解,叙述中省略了一些分析步骤和分支路径。

例如,设计一个高效的复现流程花费了相当长的时间,整理kubelet日志也是如此。我们还花费了大量时间阅读kubelet和存储卷管理相关的代码。

一些关键收获:

1. 在Pod终止期间,Kubernetes会无限期等待所有存储卷资源被释放。
2. kubelet通过为每个Pod分配一个goroutine来处理Pod,该goroutine负责管理其整个生命周期,包括创建、运行、终止和已终止状态。
3. kubelet中处理存储卷相关任务的模块称为volume manager(存储卷管理器),它会为每个存储卷分配一个goroutine来处理挂载、卸载、分离等操作。
4. 
Hostpath CSI Driver绝对不适合生产环境。
\ No newline at end of file
diff --git a/blogs/zh/how-to-manage-database-clusters-without-a-dedicated-operator.mdx b/blogs/zh/how-to-manage-database-clusters-without-a-dedicated-operator.mdx
new file mode 100644
index 00000000..b56a9ad1
--- /dev/null
+++ b/blogs/zh/how-to-manage-database-clusters-without-a-dedicated-operator.mdx
@@ -0,0 +1,195 @@
---
authors:
  name: Shanshan Ying & Shun Ding
date: 2024-09-03
description: 这是由ApeCloud与中国移动云在KubeCon China 2024上联合呈现的演讲。本次演讲将介绍KubeBlocks的诞生背景与实现原理,并分享中国移动云如何在不依赖专用Operator的情况下运行其云数据库服务。
image: /img/blogs/thumbnails/blog-kubecon-china-mobile-cloud.png
slug: how-to-manage-database-clusters-without-a-dedicated-operator
tags:
- database management
- operator
- K8s
title: 如何在没有专用Operator的情况下管理数据库集群?
---
# 如何在没有专用Operator的情况下管理数据库集群?

随着云计算和数据库技术的发展,寻找一种高效且经济的方式来管理数据库集群变得至关重要。我希望本次演讲能为您提供有价值的见解和实用解决方案。

在深入细节之前,请允许我介绍一下自己和共同演讲者。我是来自KubeBlocks的Shanshan,ApeCloud是KubeBlocks背后的初创公司。在加入ApeCloud之前,我曾在阿里云数据库团队工作多年,担任SQL优化和SQL执行方向的数据库开发人员,更专注于数据库而非云原生技术。我的共同演讲者Shun Ding是来自中国移动云的系统架构师,同时也是KubeBlocks的贡献者。他在今年早些时候提交了第一个commit,几个月后便将他们的内部数据库作为插件集成到了KubeBlocks中。

今天,我将介绍KubeBlocks及其设计理念,而Shun将分享他们选择KubeBlocks的原因、KubeBlocks如何帮助他们减少工作量以及他们与KubeBlocks的下一步计划。本次演讲中我们不会深入探讨具体的API或特定的数据库技术。

## KubeBlocks 的设计初衷与实现方式

### 如何在 K8s 上管理多种数据库?

我们启动 KubeBlocks 项目的动机非常简单:如何更高效地管理数据库。
我们的团队由兼具数据库和 K8s 背景的成员组成,包括数据库开发者、数据库管理员和 SRE。我们每天处理数百个云数据库问题,从数据库崩溃、慢查询、高可用性到数据迁移、资源调度等。

我们团队的有趣之处在于,我们处理不同类型的数据库,例如广泛使用的开源数据库 MySQL、PostgreSQL、Redis、MongoDB。当我们交流如何管理特定数据库时,发现这些数据库实际上有许多共同点。如果我们能改进一个数据库的管理方式,其他数据库是否也能受益?此外,数据库正处于快速发展的时代,新的数据产品不断涌现。这些新数据库在某种程度上仍然继承了那些共同特性。

因此我们面临的挑战是:能否设计一个统一平台,不仅能管理一种,还能管理一批广泛使用的数据库,甚至进一步管理那些新兴数据库?更重要的是,如果有这样的平台,我们希望它既能部署在云端也能部署在本地基础设施上。

于是,Kubernetes 成为自然选择,因为它已成为容器编排的事实标准,并且可以将平台带到任何有 Kubernetes 的地方,如公有云、私有云、边缘云等。因此挑战在于如何在 Kubernetes 上管理多种数据库。

"在 Kubernetes 上"的答案很直接,即开发一个 Operator。但"多种数据库"呢?

首先让我们看看"管理"指的是什么。当我们谈论"管理数据库"时,通常包括:
部署、扩缩容(水平或垂直)、备份与恢复、配置(特别是参数和性能调优)、次版本升级、存储卷扩容和监控。

要开发一个涵盖所有这些功能的专用 Operator,需要一个由数据库专家和云原生专家组成的团队,数月的投入,最重要的是要有大量用户基础。用户越多,Operator 就越好。但这可能相当具有挑战性,并非每个数据库团队都能承担这样的工作。而且 Operator 的成熟度可能参差不齐。

与蓬勃发展的 Kubernetes 相比,数据库领域似乎相对小众。我们能否找到一种方法,将数据库专家的知识快速转化为生产力,即转化为代码?这样越来越多的数据库就能轻松运行在 Kubernetes 上。

这里我们以备份和恢复为例。备份和恢复对灾难恢复至关重要。让我们回顾经典的备份和恢复流程。

1. 备份方法:该流程通常有一种或多种备份方法,例如定期全量备份和持续归档日志。要进行定期全量备份,可以采用一些广泛使用的数据库专用备份工具,或者简单地拍摄存储卷快照。
2. 备份调度器:然后有一个备份调度器定期触发备份,比如每小时、每天、每周或在用户指定的时间。
3. 备份仓库:之后,必须配置一个备份仓库,以存储每个备份及其元数据一段时间。备份仓库可以是本地 PV,也可以是对象存储,如 S3、OSS 或测试环境中的 MinIO。
4. 
恢复:当从备份恢复数据库集群时,必须设置数据恢复流程,并决定是在 Pod 运行之前还是之后恢复数据。 + +![回顾备份与恢复](/img/blogs/dedicated-operator-1.png) + +在整个流程中,我们发现数据库专家的工作非常专注且有限,主要包括: + +- 如何使用数据库专用备份工具; +- 如何设置归档日志; +- 以及如何配置恢复命令(如有)。 + +其他列出的任务可以交给云原生专家处理。云原生专家只需开发支持该流程的框架,该框架可被不同数据库专家复用,或用于同一数据库的不同版本。 + +### KubeBlocks 架构 + +基于这些理念,KubeBlocks 应运而生。KubeBlocks 是一个开源且云中立的项目,去年六月开源,目前已有 2k star。KubeBlocks 是一个数据库类型无关的 Operator,设计上具备可扩展性、灵活性和可插拔性。 + +其可扩展性源于统一的 API。KubeBlocks 提供了集成数据库引擎的插件机制,这也是中国移动云能在两个月内将其自研和开源数据库集成到 KubeBlocks 的关键。目前我们已有约 30 个插件,大部分由社区根据自身需求贡献。 + +它具有灵活性,意味着支持非常灵活的集群拓扑。用户可以自定义集群拓扑,每个组件都来自插件市场。 + +通过模块化设计实现可插拔性。备份恢复、监控、配置等模块均可插拔,可根据具体需求选择使用哪些模块。 + +若对 KubeBlocks 感兴趣,可访问我们的 GitHub 仓库,获取更多技术细节、文档以及完整的数据库插件列表。 + +所有这些特性都得益于我们对数据库的精准简洁建模,这是从数据库视角的关键所在。我们通过以下三个步骤完成建模。 + +**第一步:构建分层模型** + +我们首先将每个独立数据库系统或服务建模为组件,例如提供 MySQL 服务的 MySQL 组件,以及负责 MySQL 高可用和复制管理的编排器组件。这种建模非常直观。 + +而数据库集群本质上是协同处理特定数据库任务的组件集合。例如,由 MySQL 组件和编排器组件组成的集群可构成高可用的 MySQL 复制集群。此外,您还可以使用任意偏好的代理组件(如 HAProxy 和 ProxySQL)自定义该集群。 + +这两个层级——独立的数据库组件和整体集群——提供了清晰易懂的结构。但数据库的特殊性何在?它不仅是有状态的,更重要的是:它具有角色。 + +我们专门为数据库设计了一种工作负载类型 InstanceSet,提供基于角色的管理能力。 + +KubeBlocks InstanceSet 是改进版的 StatefulSet,它按特定角色顺序管理数据库以提升可用性,而非采用 StatefulSet 的升序或降序数字编号。InstanceSet 支持资源与配置各异的异构副本,并能将单个数据库实例主动下线进行维护(例如当副本异常或节点需要升级时)。 + +后天将有 KubeBlocks 与快手的另一场分享,我的同事薛强将详细介绍 instanceset。 + +![第一步:构建分层模型](/img/blogs/dedicated-operator-2.png) + +**第二步:分离集群模板 API 与集群 API** + +我们注意到对于仅想创建集群的数据库用户而言,这仍然过于复杂。 +除了四层建模外,我们进一步将每层分为两部分:一部分面向数据库专家,另一部分面向数据库用户。 + +数据库专家负责处理集群拓扑定义、描述引擎特定行为的组件定义,以及描述引擎镜像并确保兼容性的组件版本。 +数据库用户只需了解所需的集群和组件,对于每个组件,他们只需关注资源配置,例如 CPU、内存、存储卷大小和副本数量。 + +![Step 2. Separate the cluster template API and the cluster API](/img/blogs/dedicated-operator-3.png) + +步骤 3. 引入数据库接口。 + +随着越来越多的数据库集成到 KubeBlocks 中,我们发现初始化脚本远不足以描述引擎的特定行为。 + +为此,我们提出了一套数据库接口来管理副本或组件生命周期,在 KubeBlocks 中称为生命周期操作(Lifecycle Actions)。没有这些操作,就无法良好地管理数据库。每个操作的具体细节因数据库而异。我将这些操作分为三类: + +- 用于角色管理的角色探测(role probe)和切换(switchover); +- 用于水平扩缩容的成员加入(memberJoin)、成员退出(memberLeave)、数据转储(dataDump)和数据加载(dataLoad); +- 用于组件级管理的后置部署(post-provision)和前置终止(pre-termination)操作。 + +目前最广泛使用的操作是角色探测(roleProbe)。它会定期触发,检查每个副本的角色,并通过事件或 API 调用将结果报告给 Pod。这样数据库服务就能按预期路由到正确的副本。 + +随着更多插件的加入,我们将持续完善这些操作,目前仍有未覆盖的操作,例如重新配置(reconfiguring)和账户供应(account provisioning)。 + +![Lifecycle actions](/img/blogs/dedicated-operator-4.png) + +总结来说,KubeBlocks 是一个与数据库类型无关的 Operator。它提供了统一的 API,数据库专家可以通过我们的插件机制集成特定引擎,专注于数据库专业知识。而数据库用户或管理员可以使用相同的 API 与不同数据库交互。这显著降低了数据库管理的复杂性和学习曲线。 + +![Summary](/img/blogs/dedicated-operator-5.png) + +KubeBlocks 1.0 版本将于两个月内正式发布,欢迎加入我们的社区。 + +## 中国移动云如何运用KubeBlocks + +大家下午好,我是来自中国移动云的Shun,担任高级系统架构师。今天很高兴能与各位分享我们如何在不编写专用Operator的情况下,通过KubeBlocks管理云数据库。 + +### 中国移动云DBaaS系统面临的挑战 + +在进入正题前,请允许我先介绍中国移动云DBaaS系统的概况。这个系统管理着我们所有的云数据库,产品线涵盖事务型数据库、分析搜索型数据库、NoSQL数据库等。我们不仅为热门开源数据库和第三方数据库提供服务,还在自主研发数据库引擎并基于其构建服务。目前我们为超过3.5万家企业客户提供服务,覆盖政务、通信、金融、医疗、教育等9大行业。 + +在我们的15个一级节点和31个二级节点上,运行着超过13万个数据库集群实例。除数据库供给外,我们还构建了完善的管理生态,包括数据迁移工具、数据库管理控制台以及具备AIOps能力的工具集。中国移动云DBaaS平台采用云原生架构,这意味着大多数数据库实例都运行在Kubernetes集群中。 + +管理如此大规模的数据库实例充满挑战。虽然现有DBaaS系统能有效管理各类数据库,但在系统维护方面我们正面临难题。当前DBaaS系统主要分为API层和Operator层,其中Operator层是核心组件。 + +我们遇到的第一个挑战是:针对不同数据库引擎需要开发不同的Operator。这些Operator差异显著,导致开发人员难以在引擎A和引擎B的Operator开发间灵活切换,造成人力资源分配僵化。 + +其次,这对开发者的能力要求极高,他们需要同时精通数据库引擎原理和Operator框架。尽管有现成框架可用,但开发门槛仍然很高,难以快速扩充高效团队。更重要的是,我们正在自研数据库引擎,希望能快速为其构建DBaaS系统。 + +但由于前述挑战,我们无法快速为新引擎开发DBaaS系统。要实现这个目标,必须组建同时掌握数据库引擎和Operator框架的高水平团队。接着开发者需要从零构建新Operator,因为是自研引擎,没有现成Operator可用。这种从零开发模式会产生大量重复工作,即便某些逻辑与其他数据库Operator相似也无法复用。 + +于是我们开始寻找解决方案:如何统一Operator接口?如何降低DBaaS系统开发门槛?如何实现新数据库引擎的快速集成? 
+ +![挑战](/img/blogs/dedicated-operator-6.png) + +### 为什么选择KubeBlocks? + +正是在这个阶段,我们发现了能有效解决这些痛点的KubeBlocks项目。KubeBlocks是专为数据库工作负载设计的通用Operator框架,开发者可以通过编写不同数据库引擎的插件将其集成到KubeBlocks体系中。这个项目在以下几个方面尤为突出: + +首先,KubeBlocks 是一个通用的 Operator 框架,这意味着单个 Operator 可以支持所有类型的数据库引擎。开发者只需维护一个 Operator 和一套 CRD,从而更容易在团队内部共享 Operator 层面的知识。这也使得开发者可以灵活分配到不同的引擎团队。 + +此外,该框架采用低代码开发模式,通过创建插件(addon)来集成不同的数据库引擎,而非从零开发专用的 Operator。 + +这些插件本质上是包含 KubeBlocks 框架 CR 对象的 Helm Chart。开发插件时,我们只需编写所需 CR 对象的 YAML 文件和少量功能脚本。后续我们会深入探讨这些细节。插件中的 CR 对象采用声明式定义,开发者可以像描述其他 Kubernetes 对象一样描述运行中数据库集群的期望状态,而由 KubeBlocks 框架负责协调(reconciliation)。这种低代码开发模式大幅降低了为新数据库引擎构建 DBaaS 系统的门槛——开发者只需了解引擎的工作原理即可快速上手。 + +同时,更少的代码意味着更少的潜在错误,以及更快的新数据库引擎集成速度,这完全符合我们的需求。 + +更重要的是,KubeBlocks 是专为数据库工作负载设计的通用框架。它完整覆盖了 Kubernetes 上数据库的所有基础管理操作,例如生命周期管理、备份恢复、配置管理、高可用性等。 + +此外,KubeBlocks 还包含可扩展机制,允许特定数据库引擎的管理操作无缝集成到整体框架中。 + +![Why KubeBlocks](/img/blogs/dedicated-operator-7.png) + +### 基于 KubeBlocks 构建 H-DB 插件 + +经过深入研究后,我们决定尝试 KubeBlocks。当时我们需要将一个内部研发的数据库引擎(代号 H-DB)集成到 DBaaS 系统中,这成为测试 KubeBlocks 集成能力的理想场景。 + +首先简要介绍我们的 H-DB:这是一个完全自研的云原生分布式数据库引擎,采用计算存储分离架构。通常为如此复杂的数据库系统编写 DB Operator 具有挑战性,更不用说快速实现。但得益于 KubeBlocks 项目,我们可以通过低代码方式实现。以下是构建完整 KubeBlocks 插件的过程: + +以下是严格遵循您要求的翻译结果,保持所有格式、间距和换行不变: + +- 第一步是设计集群拓扑结构,并搭建一个插件脚手架。通常,初始插件仅包含一个粗略的ClusterDefinition脚手架和一个非常基础的ClusterVersion,用于指定所有组件容器的镜像。回到我们的案例,H-DB集群包含两个组件:计算节点和数据节点。因此我们定义了一个包含这两个组件的集群定义对象,在ClusterVersion中为每个组件配置了镜像,并暂时在ClusterDefinitions中设置了一个虚拟启动命令。然后我们编写了一个简单的Cluster CR对象进行测试,以确保所有插件可以无问题安装且Pod能够成功启动。 +- 下一步是完善ClusterDefinition,在ConfigMap中设置正确的配置参数,同时编写引导集群的脚本。我们调整配置和脚本使集群正常运行。这是关键步骤,因为它意味着第一个可工作的插件已完成。 +- 接下来的部分是支持备份和恢复能力。我们需要编写关于备份和恢复的功能脚本,并将其集成到KubeBlocks的ActionSet CR对象中。我们可以创建备份OpsRequest和恢复OpsRequest来测试功能。 +- 然后,我们在插件上编写ConfigConstraint,以控制哪些参数可以修改、是否可以动态重新加载以及重新加载命令。这使得插件能够修改数据库引擎中的某些配置参数。 +- 之后,我们对该插件进行了进一步调整。下一步是启用高可用性和角色检测,向数据库集群添加可观测性边车以从实例收集指标和日志,最后添加更多集群版本以映射不同的内核版本。 + +现在,一个完整的KubeBlocks插件为我们的H-DB就完成了。通过使用KubeBlocks,我们仅用两个月时间、仅由一人就构建了首个H-DB的DBaaS系统。这甚至可以更快,因为构建插件的后续步骤可以并行化。这是我们在中国移动云中首个成功的KubeBlocks集成案例。 + +以下是开发KubeBlocks插件与开发专用Operator的总结对比。我们将KubeBlocks插件开发过程与为类似数据库引擎编写专用Operator的过程进行比较。 + +在开发资源方面,KubeBlocks插件仅需2人月,而具有Operator的类似产品需要约6人月。 + +![Comparison](/img/blogs/dedicated-operator-8.png) + +H-DB案例是一个很好的起点,展示了我们如何利用KubeBlocks解决当前DBaaS系统面临的问题。 + +### 关于特性合作 + +我们的下一步是通过KubeBlocks进一步集成更多引擎以进行评估,并升级到新版本的KubeBlocks,评估我们感兴趣的新特性。 + +在中国移动云中,我们的理想目标是构建统一的云原生DBaaS平台。在这个平台上,我们旨在实现统一的多云架构、API和Operator层的统一接口,支持不同架构的数据库集群,并且数据库实例可以按需部署在无服务器Kubernetes集群上。这将形成一个支持公有云、私有云、专有云、边缘云等不同基础设施的统一数据库编排和通用管理平台。 + +![Cooperation](/img/blogs/dedicated-operator-9.png) + +随着KubeBlocks的持续发展和完善,一旦达到足够的成熟度,我们将考虑基于KubeBlocks框架重构现有的DBaaS引擎。尽管可能涉及初始的重构工作,但从长远来看,我们预计可以节省约50%在不同DB引擎上的开发资源。 \ No newline at end of file diff --git a/blogs/zh/in-place-updates.mdx b/blogs/zh/in-place-updates.mdx new file mode 100644 index 00000000..4b0448ce --- /dev/null +++ b/blogs/zh/in-place-updates.mdx @@ -0,0 +1,69 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/28781141?v=4 + name: free6om + url: https://github.com/free6om +date: 2024-11-19 +description: 本博客介绍KubeBlocks如何在Kubernetes中实现原地更新。 +image: /img/blogs/thumbnails/blog-in-place-update.png +slug: in-place-update-in-k8s +tags: +- statefulset +- kubernetes +- instance +- instanceset +- in-place update +- high availability +title: 如何在Kubernetes中实现Pod原地更新? +--- +# 如何在K8s中实现Pod原地更新? + +## 为什么需要原地更新? 
+ +在早期版本中,如"[下线指定实例](https://kubeblocks.io/blog/take-specified-instances-offline)"所述,KubeBlocks 使用 StatefulSet 作为最终的工作负载类型,继承了其局限性。 + +StatefulSet 的一个关键缺陷在于其更新机制:对 PodTemplate 的变更会触发所有 Pod 的更新,且采用 Recreate 策略。该过程会删除并重建 Pod,这对于数据库等需要高可用性的系统来说远非理想方案。 + +为解决此问题,从 v0.9 版本开始,KubeBlocks 用 InstanceSet 替代了 StatefulSet,并引入了原地更新功能。该功能允许 InstanceSet 在实例模板的某些字段更新时,通过原地 Pod 更新或扩展 PVC 存储卷来更新实例,从而显著降低对系统可用性的影响。 + +## 哪些字段支持原地更新? + +KubeBlocks 利用 Kubernetes 原生的 Pod API 实现原地更新功能。支持的字段包括: + +- `annotations` +- `labels` +- `spec.activeDeadlineSeconds` +- `spec.initContainers[*].image` +- `spec.containers[*].image` +- `spec.tolerations`(仅支持新增 Toleration) + +从 Kubernetes v1.27 开始,InPlacePodVerticalScaling 特性支持对 CPU 和内存资源进行原地更新。KubeBlocks 也集成了该特性以进一步支持以下能力: + +对于 Kubernetes v1.27 或更高版本,且启用了 InPlacePodVerticalScaling 时,支持的字段如下: + +- `spec.containers[*].resources.requests["cpu"]` +- `spec.containers[*].resources.requests["memory"]` +- `spec.containers[*].resources.limits["cpu"]` +- `spec.containers[*].resources.limits["memory"]` + +请注意,调整资源后,部分应用可能需要重启才能感知更新后的资源配置。您可以通过 ClusterDefinition 或 ComponentDefinition 中的容器 `restartPolicy` 来管理此行为。 + +对于 PVC(持久卷声明),KubeBlocks 利用 PVC API 支持存储卷扩容。若扩容失败,PVC 可回退至原始大小。相比之下,StatefulSet 在声明后不允许修改 `VolumeClaimTemplate`。尽管 Kubernetes 计划解决此限制,但预计要到 v1.32 版本之后才会实现。 + +## 哪些API字段会触发原地更新? + +KubeBlocks API(包括`Cluster`、`ClusterDefinition`、`ClusterVersion`、`ComponentDefinition`和`ComponentVersion`)中包含可能直接或间接触发原地更新的字段。这些字段用于渲染实例对象并决定更新行为。 + +此类字段数量众多,下表提供了简明概览。注意,已弃用和不可变字段未包含在内。 + +| API | 字段 | 描述 | +|:-----|:-------|:-----------| +|Cluster| `annotations`,

`labels`,

`spec.tolerations`,

`spec.componentSpecs[*].serviceVersion`,

`spec.componentSpecs[*].tolerations`,

`spec.componentSpecs[*].resources`,

`spec.componentSpecs[*].volumeClaimTemplates`,

`spec.componentSpecs[*].instances[*].annotations`,

`spec.componentSpecs[*].instances[*].labels`,

`spec.componentSpecs[*].instances[*].image`,

`spec.componentSpecs[*].instances[*].tolerations`,

`spec.componentSpecs[*].instances[*].resources`,

`spec.componentSpecs[*].instances[*].volumeClaimTemplates`,

`spec.shardingSpecs[*].template.serviceVersion`,

`spec.shardingSpecs[*].template.tolerations`,

`spec.shardingSpecs[*].template.resources`,

`spec.shardingSpecs[*].template.volumeClaimTemplates`

| 资源相关字段指:

`requests["cpu"]`,

`requests["memory"]`,

`limits["cpu"]`,

`limits["memory"]` | +| ComponentVersion | `spec.releases[*].images` | 是否触发原地更新取决于对应镜像是否发生变更。 | +| KubeBlocks 内置 | `annotations`, `labels` | | + +## IgnorePodVerticalScaling 功能开关 + +对原地资源更新的需求一直很高。在 v1.27 之前的 Kubernetes 版本中,许多 Kubernetes 发行版通过自定义实现提供对 Resources 的原地更新。这些方法在不同发行版中各有差异。 + +为确保兼容性,KubeBlocks 引入了 IgnorePodVerticalScaling 开关。当启用时,KubeBlocks 会在实例更新期间忽略 Resources 中的 CPU 和内存更新,确保运行中的 Pod 的 Resources 配置保持不变。这有助于保持不同 Kubernetes 发行版之间的一致性。 \ No newline at end of file diff --git a/blogs/zh/instance-template.mdx b/blogs/zh/instance-template.mdx new file mode 100644 index 00000000..8492b2da --- /dev/null +++ b/blogs/zh/instance-template.mdx @@ -0,0 +1,249 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/28781141?v=4 + name: free6om + url: https://github.com/free6om +date: 2024-12-17 +description: 本博客介绍如何在KubeBlocks中配置实例模板。 +image: /img/blogs/thumbnails/blog-instance-template.png +slug: configure-instance-template +tags: +- instance +- instance template +- kubernetes +- instanceset +title: 如何在KubeBlocks中配置实例模板 +--- +# 如何在 KubeBlocks 中配置实例模板 + +## 为何引入实例模板? + +在 KubeBlocks 中,一个 **Cluster** 由多个 **Component** 组成,每个 Component 管理多个 Pod 及其辅助对象。 + +在 v0.9 版本之前,这些 Pod 是从共享的 **PodTemplate** 渲染而来,该模板定义在 **ClusterDefinition** 或 **ComponentDefinition** 中。然而,这种设计无法完全满足以下几个关键用例: + +1. 从同一个 **Addon** 渲染的 **Cluster** 需要能够配置独立的调度设置,例如 **NodeName**、**NodeSelector** 或 **Tolerations**。 +2. 从同一个 **Addon** 渲染的 **Component** 需要能够灵活地为其管理的 Pod 应用自定义的 **Annotations**、**Labels** 或 **Environment Variables**(环境变量)。 +3. 由同一个 **Component** 管理的 **Pod** 需要能够定义不同的 **CPU**、**Memory** 以及其他 **Resource Request** 和 **Limits**。 + +随着这些需求变得越来越普遍,KubeBlocks v0.9 版本在 Cluster API 中引入了 **Instance Template** 功能。该功能提供了对 Pod 配置更精细的控制,有助于解决上述用例。 + +## 什么是实例模板? + +**Instance**(实例)是 KubeBlocks 中的基本单元,由一个 Pod 和若干辅助对象组成。为简化理解,您可以将 **Instance** 视为 **Pod**。为了保持术语一致性,下文中我们将继续使用 "**Instance**" 这一称谓。 + +自 v0.9 版本起,KubeBlocks 支持为 **Cluster**(集群)中的某个 **Component**(组件)定义多个 **instance templates**(实例模板)。这些实例模板包含多个字段,例如 **Name**(名称)、**Replicas**(副本数)、**Annotations**(注解)、**Labels**(标签)、**Env**(环境变量)、**Tolerations**(容忍度)、**NodeSelector**(节点选择器)。模板中定义的值将覆盖默认模板(即在 **ClusterDefinition** 和 **ComponentDefinition** 中定义的 **PodTemplate**)中的对应值,最终生成用于渲染实例的配置。 + +## 如何应用实例模板? + +实例模板可应用于多种场景。本节以 [RisingWave 集群](https://github.com/risingwavelabs/risingwave) 为例进行说明。 + +KubeBlocks 现已支持管理 RisingWave 集群。RisingWave Addon 是由 RisingWave 团队贡献的组件。为实现最佳性能,RisingWave 需要依赖外部存储解决方案(如 AWS S3 或阿里云 OSS)作为其状态后端。在配置 RisingWave 集群时,必须设置凭证及其他存储相关参数。这些配置可能因集群而异,因此确保正确设置对集群的平稳运行至关重要。 + +在 RisingWave 的官方镜像中,这些参数可通过环境变量注入。KubeBlocks v0.9 支持直接在实例模板中配置这些环境变量,使您能够在每次创建集群时指定必要的凭证和存储参数,将所需信息无缝注入 RisingWave 容器。 + +在 RisingWave Addon 的默认模板中,[环境变量](https://github.com/apecloud/kubeblocks-addons/blob/main/addons/risingwave/templates/cmpd-compute.yaml#L26) 的配置如下: + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterDefinition +metadata: + name: risingwave +... +spec: + componentDefs: +name: compute +... 
+ podSpec: + containers: + - name: compute + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + command: + - /risingwave/bin/risingwave + - compute-node + env: + - name: RUST_BACKTRACE + value: "1" + - name: RW_CONFIG_PATH + value: /risingwave/config/risingwave.toml + - name: RW_LISTEN_ADDR + value: 0.0.0.0:5688 + - name: RW_ADVERTISE_ADDR + value: $(KB_POD_FQDN):5688 + - name: RW_META_ADDR + value: load-balance+http://$(metaSvc)-headless:5690 + - name: RW_METRICS_LEVEL + value: "1" + - name: RW_CONNECTOR_RPC_ENDPOINT + value: $(connectorSvc):50051 + - name: RW_PROMETHEUS_LISTENER_ADDR + value: 0.0.0.0:1222 +... +``` + +在将实例模板添加到[集群资源](https://github.com/apecloud/kubeblocks-addons/blob/main/addons-cluster/risingwave/templates/cluster.yaml)后,环境变量配置如下: + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: {{ include "risingwave-cluster.name" . }} + namespace: {{ .Release.Namespace }} +... +spec: + componentSpecs: +componentDefRef: compute + name: compute + replicas: {{ .Values.risingwave.compute.replicas }} + instances: + - name: instance + replicas: {{ .Values.risingwave.compute.replicas }} + env: + - name: RW_STATE_STORE + value: "hummock+s3://{{ .Values.risingwave.stateStore.s3.bucket }}" + - name: AWS_REGION + value: "{{ .Values.risingwave.stateStore.s3.region }}" + {{- if eq .Values.risingwave.stateStore.s3.authentication.serviceAccountName "" }} + - name: AWS_ACCESS_KEY_ID + value: "{{ .Values.risingwave.stateStore.s3.authentication.accessKey }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ .Values.risingwave.stateStore.s3.authentication.secretAccessKey }}" + {{- end }} + - name: RW_DATA_DIRECTORY + value: "{{ .Values.risingwave.stateStore.dataDirectory }}" + {{- if .Values.risingwave.stateStore.s3.endpoint }} + - name: RW_S3_ENDPOINT + value: "{{ .Values.risingwave.stateStore.s3.endpoint }}" + {{- end }} + {{- if .Values.risingwave.metaStore.etcd.authentication.enabled }} + - name: RW_ETCD_USERNAME + value: "{{ .Values.risingwave.metaStore.etcd.authentication.username }}" + - name: RW_ETCD_PASSWORD + value: "{{ .Values.risingwave.metaStore.etcd.authentication.password }}" + {{- end }} + - name: RW_ETCD_ENDPOINTS + value: "{{ .Values.risingwave.metaStore.etcd.endpoints }}" + - name: RW_ETCD_AUTH + value: "{{ .Values.risingwave.metaStore.etcd.authentication.enabled}}" +... 
+``` + +在上面的示例中,我们通过`instances`字段添加了一个名为`instance`的实例模板。该模板定义了多个环境变量,包括`RW_STATE_STORE`和`AWS_REGION`。这些环境变量将由KubeBlocks追加到默认模板定义的环境变量列表中。因此,最终渲染的实例将同时包含来自默认模板和此实例模板的环境变量。 + +此外,实例模板中的`replicas`字段与`componentSpec`中定义的字段完全相同(两者均为`{{ .Values.risingwave.compute.replicas }}`)。这确保了在覆盖默认模板后,此实例模板将用于渲染该组件内的所有实例。 + +## 实例模板详情 + +每个组件(Component)可以定义多个实例模板,每个模板应通过 Name 字段配置唯一名称。同一组件内的实例模板名称必须保持唯一。 + +每个模板中的 `Replicas` 字段决定了从该模板渲染的实例数量,默认值为 1。组件内所有**实例模板**的 `Replicas` 总和不得超过**组件**定义的 `Replicas` 值。如果实例模板渲染的实例数量不足**组件**所需总数,剩余实例将使用默认模板渲染。 + +基于实例模板渲染的实例命名模式为 `(集群名)-(组件名)-$(实例模板名)-序号`。例如在上述 RisingWave 集群中,集群名为 **risingwave**,组件名为 **compute**,实例模板名为 **instance**,**Replicas** 数量为 3,则渲染的实例名称为: + +- `risingwave-compute-instance-0` +- `risingwave-compute-instance-1` +- `risingwave-compute-instance-2` + +实例模板可在创建集群时使用,也可在运行阶段进行更新,包括添加、删除或修改模板。在执行更新前需谨慎评估变更可能带来的影响,因为这可能会更新、删除或重建实例。 + +### 注解 + +实例模板中的 `Annotations` 字段用于覆盖默认模板中的 `Annotations` 字段。如果实例模板的 `Annotations` 中的某个 Key 已存在于默认模板中,则实例模板的 `value` 将优先使用。如果该 Key 在默认模板中不存在,则该键值对将被添加到最终的 `Annotations` 中。 + +例如,默认模板中的 `annotations` 为: + +```yaml +annotations: + "foo0": "bar0" + "foo1": "bar" +``` + +以及实例模板中的 `annotations` 为: + +```yaml +annotations: + "foo1": "bar1" + "foo2": "bar2" +``` + + + +```yaml +annotations: + "foo0": "bar0" + "foo1": "bar1" + "foo2": "bar2" +``` + +请注意,KubeBlocks 会添加系统级别的 `Annotations`,这些注解无法被实例模板覆盖。 + +### 标签(Labels) + +在 KubeBlocks v0.9 之前,标签可以在集群或组件级别的默认模板中定义。此外,KubeBlocks 会自动添加一些系统默认标签。这些标签共同构成资源的最终 `Labels` 集合。 + +从 KubeBlocks v0.9 开始,您可以通过实例模板设置 `Labels`。 + +与 `Annotations` 类似,实例模板中的 `Labels` 遵循相同的覆盖逻辑应用于现有的 `Labels`。 + +实例模板的 `Labels` 具有最高优先级。但是,它们无法覆盖由 KubeBlocks 添加的系统 `Labels`。 + +### 镜像(Image) + +实例模板中的 `Image` 字段用于覆盖默认模板中第一个容器的 `Image` 字段。 + +使用 `Image` 字段时应格外谨慎:对于像数据库这样的 StatefulSet 应用,更改 `Image` 通常会引入兼容性问题,尤其是数据格式方面。更改此字段时,请确保实例模板中的镜像版本与默认模板中的版本完全兼容。 + +从 KubeBlocks v0.9 开始,`ComponentVersion` 字段用于更有效地管理镜像版本。建议使用 `ComponentVersion` 来管理版本,以避免兼容性问题。 + +### 调度策略.调度器名称(SchedulingPolicy.SchedulerName) + +实例模板中的 `SchedulerName` 会覆盖默认模板中的相同字段。 + +### 调度策略.节点名称(SchedulingPolicy.NodeName) + +实例模板中的 `NodeName` 会覆盖默认模板中的相同字段。 + +### 调度策略.节点选择器(SchedulingPolicy.NodeSelector) + +实例模板中的 `NodeSelector` 会覆盖默认模板中的相同字段。 + +### 调度策略.容忍(SchedulingPolicy.Tolerations) + +实例模板中的 `Tolerations` 字段会覆盖默认模板中的相同字段。 + +如果实例模板中的 `Toleration` 与默认模板中的 `Toleration` 完全相同(所有属性——`Key`、`Operator`、`Value`、`Effect` 和 `TolerationSeconds`——都相同),则该 `Toleration` 将被忽略。否则,它将被添加到默认模板的 `Tolerations` 列表中。 + +### 调度策略.亲和性(SchedulingPolicy.Affinity) + +实例模板中的 `Affinity` 字段会覆盖默认模板中的相同字段。 + +### 调度策略.拓扑分布约束(SchedulingPolicy.TopologySpreadConstraints) + +实例模板中的 `TopologySpreadConstraints` 字段会覆盖默认模板中的相同字段。 + +### 资源(Resources) + +在 v0.9 之前,KubeBlocks 支持在多个级别定义 `Resource` 值:默认模板、集群和组件级别。最终资源值通过级联覆盖机制确定,组件级别优先级最高。从 v0.9 开始,实例模板可以定义 `Resource` 值,并且优先级高于组件级别设置。 + +### 环境变量(Env) + +在 v0.9 之前,KubeBlocks 中的环境变量可以在默认模板和组件级别设置,直接或间接地。此外,KubeBlocks 通过 ConfigMaps 使用 `EnvVarSource` 机制提供某些系统默认环境变量。这些变量共同构成最终的 Env 列表。 + +从 v0.9 开始,实例模板引入了一种新的定义环境变量的方式。实例模板中定义的值将覆盖其他 Env 变量,除了 KubeBlocks 的系统默认环境变量。覆盖逻辑与 `Annotations` 和 `Labels` 相同:如果 Env 的 `Name` 匹配,则实例模板中的 `Value` 或 `ValueFrom` 优先;如果不匹配,则新的 Env 将被添加到列表中。 + +### 存储卷(Volumes) + +实例模板中的 `Volumes` 字段会覆盖默认模板中第一个容器的相同字段。如果存储卷的 `Name` 匹配,则应用实例模板中的 `VolumeSource`;如果不匹配,则该存储卷将作为新条目添加到最终存储卷列表中。 + +### 存储卷挂载(VolumeMounts) + +实例模板中的 `VolumeMounts` 字段会覆盖默认模板中第一个 Container 的同名字段。如果 VolumeMount 的 `Name` 匹配,则应用实例模板中的 `MountPath` 及其他相关值。如果名称不匹配,则该 VolumeMount 会作为新条目添加。 + +### VolumeClaimTemplates + +实例模板中的 `VolumeClaimTemplates` 字段会覆盖 Component 中由 
`ClusterComponentVolumeClaimTemplates` 生成的同名字段。如果 `PersistentVolumeClaim Name` 相同,则应用实例模板中的 `PersistentVolumeClaimSpec` 值;否则会作为新的 `PersistentVolumeClaim` 添加。 \ No newline at end of file diff --git a/blogs/zh/instanceset-introduction.mdx b/blogs/zh/instanceset-introduction.mdx new file mode 100644 index 00000000..974b0521 --- /dev/null +++ b/blogs/zh/instanceset-introduction.mdx @@ -0,0 +1,333 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/28781141?v=4 + name: free6om + url: https://github.com/free6om +date: 2024-08-28 +description: KubeBlocks 在 v0.9.0 版本中引入了 InstanceSet。本文介绍什么是 InstanceSet 及其相关特性。 +image: /img/blogs/thumbnails/blog-instanceset.png +slug: instanceset-introduction +tags: +- pod +- pods stuck in terminating +- K8s +title: 什么是InstanceSet? +--- +# InstanceSet + +在 KubeBlocks 中,实例(instance)是最基础的单元,由一个 Pod 和其他辅助对象组成。简单来说,你可以将其视为一个 Pod,但在本文中我们统一称之为"实例"。 + +**InstanceSet 是一个通用工作负载 API,负责管理一组实例。KubeBlocks 中的所有工作负载最终都是通过 InstanceSet 来管理的**。 + +与 Kubernetes 原生的工作负载 API(如 StatefulSet 和 Deployment)相比,**InstanceSet 额外包含了与数据库相关的特性和设计考量,例如角色和高可用性**。**这使得它能够更好地支持像数据库这样的复杂有状态工作负载**。 + +## 如何使用 InstanceSet? + +InstanceSet 为它管理的每个实例生成固定名称,并创建一个 Headless Service,为每个实例提供稳定的网络标识符。通过该标识符,同一个 InstanceSet 内的实例可以相互发现,同一个 Kubernetes 集群内的其他系统也可以发现该 InstanceSet 下的各个实例。 + +InstanceSet 使用 VolumeClaimTemplates 为每个实例生成具有固定标识符的存储卷。其他实例或系统可以通过固定标识符定位到某个实例,并访问其存储卷中的数据。 + +在更新方面,**InstanceSet 支持以确定性的顺序对所有实例进行滚动更新(RollingUpdate)**,并且可以配置滚动更新的各种行为。同样地,在水平扩缩容时,InstanceSet 也会以确定性的顺序添加或删除实例。 + +基于这些基础特性,为了满足支持数据库高可用性的需求,InstanceSet 进一步支持更多特性,如**原地更新**、**实例模板**、**指定实例下线**、**基于角色的服务**以及**基于角色的更新策略**。 + +下面我们将更详细地探讨这些特性。 + +## 如何生成实例名称? + +InstanceSet 通过使用实例模板来渲染实例对象,实例数量由 `Replicas` 字段控制。 + +```yaml +apiVersion: workloads.kubeblocks.io/v1alpha1 +kind: InstanceSet +metadata: + name: mydb +spec: + replicas: 3 + template: + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: mydb + image: registry.kubeblocks.io/mydb:15.1 + ports: + - containerPort: 5123 + name: db + volumeMounts: + - name: data + mountPath: /var/mydb/ + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 10Gi +``` + +在上面的示例中,声明了一个名为 `mydb` 的 InstanceSet,由三个实例组成(`replicas=3`)。每个实例都是从由 `template` 和 `volumeClaimTemplates` 组成的实例模板渲染而来。`template` 用于渲染实例内的 Pod,而 `volumeClaimTemplates` 用于渲染实例内的 PVC。 + +实例名称的生成模式为 `$(instanceSet.name)-$(instanceID)`。默认情况下,`instanceID` 是一个序数值。在此示例中,`instanceSet.name` 是 `mydb`,序号从 0 开始,每个实例递增。生成的实例名称为:`mydb-0`、`mydb-1` 和 `mydb-2`。当使用多实例模板功能时,`instanceID` 的生成规则扩展为 `$(template.name)-$(ordinal)`。更多详情请参阅[实例模板文档](https://kubeblocks.io/docs/preview/api_docs/instance-template/introduction)。 + +为了提供固定的网络标识符,每个 InstanceSet 会生成一个 Headless Service 对象。该 Service 的名称模式为 `$(instanceSet.name)-headless`。在此示例中,最终的 Headless Service 名称为 `mydb-headless`。通过这种模式,该 InstanceSet 下的三个实例获得了三个固定的网络标识:`mydb-0.mydb-headless.default.local`、`mydb-1.mydb-headless.default.local` 和 `mydb-2.mydb-headless.default.local`。 + +由于 InstanceSet 名称成为固定网络标识符的一部分,因此必须符合 [DNS 标签标准](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names)。 + +## 如何获取InstanceSet下的实例? 
+ +当InstanceSet生成次级资源时,会为它们添加两个标签:`workloads.kubeblocks.io/managed-by=InstanceSet` 和 `workloads.kubeblocks.io/instance=`。这些标签可用于获取特定InstanceSet下的所有次级资源,包括Pod和PVC。 + +在上面的示例中,获取对应Pod的标签应为: + +- `workloads.kubeblocks.io/managed-by=InstanceSet` +- `workloads.kubeblocks.io/instance=mydb` + +如果想自定义用于获取InstanceSet下Pod的标签,可以通过设置`spec.selector`字段来实现。例如: + +```yaml +apiVersion: workloads.kubeblocks.io/v1alpha1 +kind: InstanceSet +metadata: + name: mydb +spec: + selector: + matchLabels: + db: mydb +``` + +通过 `spec.selector` 的 `MatchLabels` 设置的标签将自动添加到由 InstanceSet 生成的 Pod 中。 + +## 创建/删除实例 + +默认情况下,InstanceSet 会按升序创建实例。创建新实例时,必须等待前一个实例的 Pod 达到 `Ready` 状态后,才会生成下一个实例。 + +删除实例则遵循相反的顺序。在删除实例前,其中的 Pod 必须处于 `Ready` 状态。这里主要考虑的是,如果 Pod 未处于 `Ready` 状态,其附加的 PVC 中可能存在数据问题。InstanceSet 会确保在数据问题解决前不执行进一步操作。 + +InstanceSet 在初始设置和水平扩容时也采用相同的创建逻辑。相反地,水平缩容时则应用删除逻辑。 + +InstanceSet 还支持通过 `spec.podManagementPolicy` 配置实例创建和删除策略。目前支持两种策略:`Ordered`(默认)和 `Parallel`。`Parallel` 策略允许同时创建或删除多个实例。 + +### 缩容指定实例 + +在某些场景下,您可能需要在缩容操作时删除特定的实例。 + +例如,当某个节点因物理机故障需要卸载时,应删除该节点上的所有实例(Pod)。这可以通过指定实例缩容功能来实现。 + +以前文提到的 `mydb` InstanceSet 为例,您可以通过删除序号为 `1` 的实例进行缩容,同时保留序号为 `0` 和 `2` 的实例: + +```yaml +apiVersion: workloads.kubeblocks.io/v1alpha1 +kind: InstanceSet +metadata: + name: mydb +spec: + replicas: 2 + offlineInstances: ["mydb-1"] +# ... +``` + +如需更详细的说明,请参阅[卸载指定实例功能](https://kubeblocks.io/docs/preview/api_docs/maintenance/scale/horizontal-scale)。 + +## 更新实例 + +当实例模板中的字段更新时,InstanceSet 下的所有实例也会随之更新。 + +默认情况下,InstanceSet 会按序号降序依次更新每个实例。在更新某个实例前,必须确保前一个实例已完成更新并达到 `Ready` 状态。 + +如果实例被分配了角色(将在后续章节讨论),InstanceSet 会按照角色优先级从低到高的顺序更新实例。若角色优先级相同,则进一步按序号降序更新。 + +通过配置 `spec.updateStrategy`,InstanceSet 支持更多更新行为。例如,您可以通过 `spec.updateStrategy.rollingUpdate.partition` 控制待更新实例的总数,并通过 `spec.updateStrategy.rollingUpdate.maxUnavailable` 管理更新过程中不可用实例的最大数量。更多细节请参考 [`spec.updateStrategy` API 文档](../user_docs/references/api-reference/cluster#apps.kubeblocks.io/v1alpha1.UpdateStrategy)。 + +### 原地更新 + +数据库应用通常对高可用性有严格要求。通常情况下,当 Pod 需要更新时,系统会采取重建操作,这需要一定时间并可能导致数据库服务暂时不可用。 + +为了最小化更新对数据库服务可用性的影响,InstanceSet 支持**原地更新**。当实例模板中的特定字段更新时,InstanceSet 会**原地更新 Pod 或扩展 PVC**,确保对实例的干扰最小化。 + +原则上,原地更新能力利用了 Kubernetes 原生 Pod API 的原地更新特性。具体支持的字段如下: + +- `spec.template.metadata.annotations` +- `spec.template.metadata.labels` +- `spec.template.spec.activeDeadlineSeconds` +- `spec.template.spec.initContainers[*].image` +- `spec.template.spec.containers[*].image` +- `spec.template.spec.tolerations`(仅支持添加 Toleration) +- `spec.instances[*].annotations` +- `spec.instances[*].labels` +- `spec.instances[*].image` + +从 Kubernetes v1.27 开始,启用 `PodInPlaceVerticalScaling` 特性门控可进一步支持 CPU 和内存的原地更新。InstanceSet 会自动检测 Kubernetes 版本和特性门控状态,并为以下字段提供额外支持: + +对于 Kubernetes 1.27 及以上版本且启用 `PodInPlaceVerticalScaling` 时,以下字段支持原地更新: + +- `spec.template.spec.containers[*].resources.requests["cpu"]` +- `spec.template.spec.containers[*].resources.requests["memory"]` +- `spec.template.spec.containers[*].resources.limits["cpu"]` +- `spec.template.spec.containers[*].resources.limits["memory"]` +- `spec.instances[*].resources.requests["cpu"]` +- `spec.instances[*].resources.requests["memory"]` +- `spec.instances[*].resources.limits["cpu"]` +- `spec.instances[*].resources.limits["memory"]` + +对于 PVC,InstanceSet 同样利用 PVC API 的能力,仅支持存储卷扩容。 + +更多细节请参阅[原地更新特性文档](https://kubeblocks.io/docs/preview/api_docs/maintenance/in-place-update/overview)。 + +## 实例模板 + +默认情况下,**InstanceSet 会基于单一模板生成所有实例**。 + +在某些场景中,您可能需要同一个 InstanceSet 内的实例具有不同的配置,例如不同的资源分配或环境变量。InstanceSet **支持在默认模板基础上定义额外的实例模板**来满足这些需求。 + +以前文提到的 `mydb` 
InstanceSet 为例,若需要将其配置为一个大型主实例和两个小型从实例的集群,可以按如下方式配置:
+
+```yaml
+apiVersion: workloads.kubeblocks.io/v1alpha1
+kind: InstanceSet
+metadata:
+  name: mydb
+spec:
+  replicas: 3
+  template:
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+        - name: mydb
+          image: registry.kubeblocks.io/mydb:15.1
+          ports:
+            - containerPort: 5123
+              name: db
+          volumeMounts:
+            - name: data
+              mountPath: /var/mydb/
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        storageClassName: "my-storage-class"
+        resources:
+          requests:
+            storage: 10Gi
+  instances:
+    - name: primary
+      replicas: 1
+      resources:
+        limits:
+          cpu: 8
+          memory: 16Gi
+    - name: secondary
+      replicas: 2
+      resources:
+        limits:
+          cpu: 4
+          memory: 8Gi
+```
+
+如需更详细的说明,请参阅[实例模板功能](https://kubeblocks.io/docs/preview/api_docs/instance-template/introduction)。
+
+## 角色
+
+大多数数据库系统支持多实例部署,其中每个实例承担不同的角色,通常由它们内部的数据复制关系决定。例如,在 PostgreSQL 中有 Primary 和 Secondary 角色,而在 etcd 中则有 leader、follower 和 learner 等角色。
+
+在数据库系统中,不同角色的实例通常具有不同的特性。例如,在服务能力方面,主节点通常支持读写能力,而其他节点提供只读能力。在运维操作时,遵循数据库管理的最佳实践,通常会先逐个升级从节点实例,最后升级主节点实例。在升级主节点实例前,通常会执行切换操作,以确保数据完整性并最小化服务停机时间。
+
+针对这些特性,InstanceSet 包含了若干与数据库角色相关的功能。这些角色相关功能包括**角色定义**、**角色探测**、**基于角色的服务**以及**基于角色的更新策略**。
+
+**角色定义** 描述了系统中的角色及其属性。
+
+**角色探测** 通过配置的探测方法定期检查每个实例的角色,并更新相应实例的标签。
+
+**基于角色的服务** 允许服务根据每个实例的角色标签筛选特定角色,从而提供适当的服务能力。
+
+**基于角色的更新策略** 根据角色优先级决定实例的更新顺序。
+
+### 角色定义
+
+InstanceSet 允许通过 `spec.roles` 定义所有角色信息,包括角色名称、读写能力、是否参与选举以及是否为领导者。
+
+例如,PostgreSQL 可以配置如下:
+
+```yaml
+spec:
+  roles:
+    - name: "primary"
+      accessMode: ReadWrite
+      isLeader: true
+    - name: "secondary"
+      accessMode: Readonly
+```
+
+### 角色探测
+
+InstanceSet 包含一个预配置的**角色探测 Sidecar**,该容器会定期执行配置的角色探测脚本,并与 InstanceSet Controller 协同工作,更新对应实例标签中的角色名称。
+
+![Role detection](/img/blogs/instanceset-role-detection.png)
+
+角色探测脚本可按如下方式配置:
+
+```yaml
+spec:
+  roleProbe:
+    customHandler:
+      - image: probe.kubeblocks.io/sample-probe:1.0
+        cmd: ["probe"]
+        args: ["redis"]
+    periodSeconds: 5
+    roleUpdateMechanism: DirectAPIServerEventUpdate
+```
+
+在本示例中,配置的角色探测 Sidecar 会每隔 5 秒在 `sample-probe` 镜像中运行探测命令,并将封装在 K8s Event 中的探测结果发送给 InstanceSet Controller。InstanceSet Controller 在接收到该事件后,会解析每个实例的角色信息并更新到实例的角色标签中。角色标签的格式为:`kubeblocks.io/role=<角色名>`。InstanceSet Controller 还会在标签中更新实例的读写能力,格式为:`workloads.kubeblocks.io/access-mode=<读写能力>`。
+
+### 基于角色的服务
+
+通过配置 Service 的选择器来匹配实例上不同的角色标签和读写能力标签,Service 可以提供不同的服务能力。
+
+例如,PostgreSQL 的读写服务可以配置如下:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: pg-readwrite-svc
+spec:
+  selector:
+    workloads.kubeblocks.io/managed-by: InstanceSet
+    workloads.kubeblocks.io/instance: mydb
+    kubeblocks.io/role: primary
+```
+
+### 基于角色的更新策略
+
+如前所述,当更新实例时,一旦配置了角色,InstanceSet 会考虑角色优先级。
+
+具体而言,InstanceSet 通过 `spec.memberUpdateStrategy` 支持三种角色更新策略:`Serial`(串行)、`Parallel`(并行)和 `BestEffortParallel`(尽力并行)。
+
+`Serial` 按照角色优先级从低到高的顺序依次更新实例。如果两个实例具有相同的角色优先级,则按照序号降序进一步排序更新。
+
+`Parallel` 会同时更新所有实例,遵循 `spec.updateStrategy` 中指定的更新策略。
+
+`BestEffortParallel` 在确保系统可用性的前提下,按照角色优先级从低到高分批次更新实例。该策略同样遵循 `spec.updateStrategy` 中指定的更新策略。
+
+## 管理大规模实例
+
+InstanceSet 最多可管理 **10,000 个实例**。当管理大量实例时,您可以通过设置 `KUBEBLOCKS_RECONCILE_WORKERS` 环境变量来配置 InstanceSet Controller 的并发 worker 数量,以提高处理速度。
+
+## 参考文档
+
+[1] 实例模板: https://kubeblocks.io/docs/preview/api_docs/instance-template/introduction
+
+[2] DNS 标签: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
+
+[3] 卸载指定实例: https://kubeblocks.io/docs/preview/api_docs/maintenance/scale/scale-for-specified-instance
+
+[4]
Kubernetes Pod API 原地更新: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement + +[5] 原地更新: https://kubeblocks.io/docs/preview/api_docs/maintenance/in-place-update/overview + diff --git a/blogs/zh/is-k8s-a-database.mdx b/blogs/zh/is-k8s-a-database.mdx new file mode 100644 index 00000000..d501cc0d --- /dev/null +++ b/blogs/zh/is-k8s-a-database.mdx @@ -0,0 +1,127 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/1701782?v=4 + name: Lei Zhang + url: https://github.com/resouer +date: 2024-09-26 +description: 以“Kubernetes是数据库吗?”这一问题为切入点,本文揭示了K8s背后的核心概念,例如声明式应用管理和基础设施即数据(IaD)。 +image: /img/blogs/thumbnails/blog-k8s-database.png +slug: is-k8s-a-database +tags: +- Kubernetes +- database +- declarative application management +- Infrastructure as Data +title: Kubernetes 是数据库吗? +--- +# Kubernetes 是数据库吗? + +> 本文最初由张磊于2020年发表,但四年后的今天,这篇文章依然发人深省。"Kubernetes 是数据库吗?"这个问题值得深思。张磊以这个问题为切入点,揭示了 K8s 背后的核心概念,如声明式应用管理和基础设施即数据(IaD)。通过将 K8s 与数据库进行类比,本文引导我们从数据库的角度重新解读 K8s。阅读本文后,您将对 K8s 以及"Kubernetes 是数据库吗?"这个问题有全新的理解。 +> +> 希望您享受阅读! + +*作者:张磊,CNCF TOC 成员(2021-2023),Kubernetes 维护者* + +最近,关于"Kubernetes 就是新型数据库"的言论在 Kubernetes 社区引起了广泛关注。更准确地说,这个观点是指 Kubernetes 的运行方式与数据库类似,而不是建议您将 Kubernetes 当作数据库使用。 + +![](/img/blogs/k8s-as-database-twitter.png) + +乍看之下,将 Kubernetes 与数据库相提并论有些牵强。毕竟,Kubernetes 的典型工作方式,如控制器模式、声明式 API 等,似乎与数据库没有直接关联。然而,这个说法背后蕴含着一个基本概念,可以追溯到 Kubernetes 采用的核心理论之一。 + +## Kubernetes 声明式应用管理基础 + +当谈及 Kubernetes 时,声明式应用管理的概念经常被提及。实际上,这种设计正是 Kubernetes 区别于其他基础设施项目的关键所在,因为这是 Kubernetes 独有的能力。但你是否思考过,在 Kubernetes 中究竟什么是声明式应用管理? + +### 1. 声明式应用管理不仅仅是声明式 API + +如果我们审视 Kubernetes 的核心原则,会发现 Kubernetes 中的大多数功能——无论是运行容器的 kubelet、应用 iptables 规则的 kube-proxy、管理 Pod 调度的 kube-scheduler,还是处理 ReplicaSets 的 Deployment——都遵循我们强调的 Controller 模式。这意味着你可以通过 YAML 文件定义期望的最终状态(无论是网络、存储等),而 Kubernetes 中的组件会努力使集群状态与期望状态保持一致,最终实现完全对齐。这个实际状态逐步与期望状态对齐的过程,被称为调和(reconciliation)。这也是 Operator 和自定义 Controller 运作的根本方式。 + +通过声明式描述驱动 Controller 进行实际状态与期望状态调和的方式,直观地展现了声明式应用管理。这个过程包含两个关键方面: + +- **声明的期望状态**:这种描述必须代表你想要的最终状态。如果你声明了一个中间状态,或期望动态调整期望状态,可能会破坏这种声明式语义的精确执行。 +- **调和驱动的对齐过程**:调和过程理论上确保系统状态与期望状态保持一致。具体来说,调和持续执行"检查 -> 差异分析 -> 执行"的循环,使系统能够检测当前状态与期望状态之间的差异,然后采取必要行动。仅有声明式描述是不够的。这很容易理解:即使系统在首次应用描述时达到了期望状态,也不能保证一小时后仍保持该状态。许多人将"声明式应用管理"与"声明式 API"混淆,很大程度上是因为他们没有认识到调和的重要性。 + +你可能想知道,这种声明式应用管理模式为 Kubernetes 带来了哪些好处。 + +### 2. 声明式应用管理的本质:基础设施即数据 + +实际上,声明式应用管理系统背后的理论基础是一个被称为"基础设施即数据"(Infrastructure as Data,IaD)的概念。这一理念认为,基础设施管理不应绑定任何特定编程语言或配置方法,而应表示为纯粹的、结构化的、系统可读的数据,完整捕获用户期望的系统状态。 + +> 注意: +> +> 基础设施即数据有时也被称为配置即数据(Configuration as Data),但它们共享相同的核心理念。 + +这种方法的优势在于,对基础设施的任何操作最终都变成了对数据的操作——无论是创建、读取、更新还是删除(CRUD)。更重要的是,管理数据的方式与基础设施本身无关。因此,与基础设施交互不会将你束缚于特定的编程语言、特定的远程过程调用协议或 SDK。只要你能生成正确格式的数据,就可以自由使用任何你喜欢的方法与基础设施交互。 + +在 Kubernetes 中,这意味着如果你想执行任何操作,只需提交一个 YAML 文件并根据需要进行修改。你不需要使用 Kubernetes 的 RESTful API 或 SDK。**这个 YAML 文件的内容,实际上就是 Kubernetes IaD 系统中的数据。** + +因此,Kubernetes 将其所有功能都定义为 API,这些 API 本质上就是结构化的数据。这使得 Kubernetes 用户可以通过管理数据而非绑定特定语言或 SDK 来实现目标。更重要的是,与专用的命令式 API 或 SDK 相比,由 YAML 表示的声明式数据更易于抽象并与现有基础设施能力集成。这正是 Kubernetes 生态能够爆发式增长的关键原因之一:IaD(Infrastructure as Data)、声明式 API 和 Controller 模式的结合,让社区开发插件和集成各种能力变得容易得多。此外,这些插件和能力具有高度的可移植性和可复用性,这使得 Kubernetes 与 Mesos、OpenStack 等项目区分开来。本质上,IaD 赋予了 Kubernetes 成为"平台的平台"的能力。 + +现在更清晰的是,Kubernetes 的 IaD 设计中的数据表现为声明式 API,而 Kubernetes 的控制循环确保系统始终遵循这些数据描述的状态。**从这个意义上说,Kubernetes 本质上是一个协调系统(Reconciliation system),它将目标状态表达为数据,并通过控制器使系统与该目标状态保持一致。** + +等等。这种将系统维持在目标状态的理念听起来很熟悉,不是吗? 
+ +没错。Kubernetes 背后的基础概念,大多数有工程背景的读者可能都学习过:它被称为现代控制系统(Modern Control Systems)。 + +![](/img/blogs/modern-control-systems.png) + +现在,这篇博客开头的陈述似乎更有道理了? + +理解了 Kubernetes 的本质后,一些原本难以理解的概念突然变得清晰起来。 + +例如,我们在使用 Kubernetes 时需要编写大量 YAML 文件,是因为我们需要向 Kubernetes 控制系统提交数据。在这个过程中,YAML 只是定义数据的一种人类可读格式。YAML 就像我们小时候用来练字的方格纸,而方格内实际书写的内容才是 Kubernetes 真正关心的数据——整个系统运作的核心。 + +一些读者可能已经意识到:既然 Kubernetes 需要处理数据,那么数据本身是否需要固定格式以便 Kubernetes 解析?确实如此。Kubernetes 中的这种格式被称为 API 的 Schema(模式)。如果你经常编写自定义控制器,可能会在实践中熟悉这个 Schema:CRD(Custom Resource Definition)就是一种用于定义 Schema 的特殊 API。 + +## YAML工程师?不!你是数据库工程师! + +我们已得出结论:Kubernetes的本质是IaD(基础设施即数据),这决定了其工作原理更像数据库而非传统分布式系统。这种差异可能是Kubernetes学习曲线陡峭的根本原因。 + +从这个视角看,Kubernetes暴露给你的各种API本质上就是预定义的Schema表。我们精心编写的YAML文件,不过是对这些表中数据的操作——增删改查(CRUD)。YAML本身就像SQL,是帮助你管理数据库中数据的工具。与传统数据库唯一的区别在于,Kubernetes并不旨在持久化接收到的数据,而是利用这些数据驱动Controller执行特定操作,逐步使系统状态与Data中声明的期望状态对齐——这又回到了我们之前讨论的控制系统。 + +正是因为Kubernetes以Data为核心实体,编写和管理YAML文件几乎成了Kubernetes工程师的日常全部工作。但通过本文介绍的IaD理念,你完全可以开始将自己视为数据库工程师——这个头衔可比YAML工程师贴切得多。 + +## Kubernetes 的视图层 + +如前所述,如果你从数据库的视角重新审视 Kubernetes 的设计,会发现许多概念背后都蕴含着优雅的思考。例如, + +- **数据模型** – Kubernetes API 及 CRD 机制 +- **数据拦截与校验** – Kubernetes Admission Hooks +- **数据驱动机制** – Kubernetes Controller/Operator +- **数据变更监听与索引** – Kubernetes Informer 机制 +- ...... + +另一方面,随着 Kubernetes 基础设施日趋复杂以及第三方插件和能力的增加,社区注意到 Kubernetes 内部表(API 资源)的规模和复杂度呈现爆发式增长。这促使 Kubernetes 社区早期就展开了关于为 Kubernetes 设计视图(类似数据库视图)的讨论,目标是: + +`CREATE VIEW AS ` + +在 Kubernetes 内置 API 资源之上引入视图层,能获得与数据库视图相似的收益。例如: + +1. **简化数据格式与表达** + Kubernetes 视图层应向开发者和运维工程师暴露更简单、抽象的应用层 API,而非原始的基础设施层 API。用户应能自由定义这些视图层对象,而不受底层 Kubernetes 对象 Schema 的约束。 +2. **简化复杂数据操作(简化版 SQL)** + 视图层对象不仅应在 UI 层面更简单,还应定义和管理底层的复杂资源拓扑,从而降低管理 Kubernetes 应用的操作复杂度。 +3. **保护底层表结构** + 由于开发者和运维工程师仅与视图层对象交互,底层 Kubernetes 对象得到了保护。这使得 Kubernetes 对象可以在用户无感知的情况下演进或升级。 +4. **复用数据操作(可重用的 SQL)** + 因为视图层与基础设施完全解耦,通过视图层声明的应用或操作可以在任意 Kubernetes 集群间迁移,而无需担心支持能力的差异。 +5. **视图仍是表——支持标准表操作** + Kubernetes 视图层对象必须仍是标准的 Kubernetes 对象,这样所有 Kubernetes 的 API 操作和原语都适用于视图层对象。我们不应为 Kubernetes API 模型引入额外的认知负担。 + +虽然 Kubernetes 视图层的构想尚未在上游实现,但已成为大规模用户的普遍实践。例如 Pinterest 设计了 `PinterestService` CRD 来描述和定义他们的应用,本质上就是一个视图层对象。不过对大多数企业而言,这种方式仍较为原始。需要注意的是,视图不仅仅是数据的简单抽象或转译。要在生产环境中大规模使用视图,还需解决几个关键挑战: + +- 如何定义和管理视图层对象与底层 K8s 对象间的映射关系?请记住这不是简单的一对一映射,因为一个视图层对象可能对应多个 K8s 对象。 +- 如何建模和抽象运维能力?真实应用不仅是一个简单的 Deployment 或 Operator,而是运行程序与关联运维能力(如容器化应用及其水平扩展策略)的组合。这些运维能力如何体现在应用定义中?将所有内容定义为注解是否可行? +- 如何管理运维能力与运行程序间的绑定关系?如何将此绑定映射到 K8s 内的实际执行关系? +- 如何通过视图层对象标准化云资源定义(例如阿里云 RDS 实例)? 
+- …… + +这些正是阻碍 Kubernetes 上游实现视图层的关键挑战,也是 [Open Application Model (OAM)](https://github.com/oam-dev/spec) 等应用层开源项目重点关注的领域。需要注意的是,仅靠 OAM 作为规范本身并不足以解决所有这些挑战。创建 Kubernetes 视图层需要标准化视图层依赖库的支持来确保其实现。只有这样,我们才能真正享受到 Kubernetes 中数据视图带来的优势和便利。目前社区中最健壮的 Kubernetes 视图层库来自 Crossplane 团队,名为 [oam-kubernetes-runtime](https://github.com/crossplane/oam-kubernetes-runtime)。 + +## 总结 + +Kubernetes 以 IaD(Infrastructure-as-Database)为核心的设计理念及其类数据库架构,一直是其社区蓬勃发展的关键理论基础。但 IaD 理念是一把双刃剑:一方面它催生了繁荣的生态,另一方面也导致了无数独立的 Controller 和 Operator 的诞生,以及由这些 Controller 拼凑而成的高度复杂的 Kubernetes 集群。这个生产级别的 Kubernetes 集群以其巨大的复杂性,距离成为真正受开发者和运维人员喜爱的云原生应用管理平台仍有很长的路要走。 + +过去五年间,Kubernetes 的巨大成功实际上是一个将基础设施能力(如网络、存储、容器等)通过声明式 API 进行标准化和统一的过程。随着 OAM(Open Application Model)等 Kubernetes 应用层技术被更广泛地采用,我们现在正看到一个标准化应用层生态的崛起。越来越多的团队正在努力通过更易用的数据视图层(Data View Layer)暴露用户友好的 API,同时为基础设施工程师提供具备横向连通性和模块化能力的更强大平台能力。 + +与此同时,类数据库 Kubernetes 的其他缺失部分也将继续从社区中涌现。例如,如今快速成熟的 Open Policy Agent(OPA)项目可以被视为数据拦截、校验和修改机制的演进成果。同样地,阿里巴巴万级节点集群中控制平面的性能调优,其底层理论和实践与现代数据库性能优化有着惊人的相似性。 \ No newline at end of file diff --git a/blogs/zh/kubeblocks-on-kata.mdx b/blogs/zh/kubeblocks-on-kata.mdx new file mode 100644 index 00000000..61723af4 --- /dev/null +++ b/blogs/zh/kubeblocks-on-kata.mdx @@ -0,0 +1,329 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/111858489?v=4 + name: dullboy + url: https://github.com/nayutah +date: 2024-05-28 +description: 如何通过Kata增强容器安全性? +image: /img/blogs/thumbnails/blog-kata.png +slug: kubeblocks-on-kata +tags: +- containerization +- database performance +- runtime +- kata +title: 使用Kata Containers保障工作负载安全——在Kata上运行KubeBlocks +--- +# 使用Kata Containers保障工作负载安全:在Kata上运行KubeBlocks + +传统容器运行在同一个操作系统内核上,可能存在一些安全隐患,例如权限提升和内核漏洞。如果您关注容器安全性,在Kata Containers上运行KubeBlocks可能是一个解决方案。我们已经构建了Kata Containers环境并完成了一些基础功能验证。 + +Kata Containers(简称Kata)是一个开源项目,提供安全且高性能的容器运行时环境。Kata Containers的目标是将虚拟化技术与容器技术相结合,在提供类似轻量级容器用户体验的同时,实现更高的隔离性和安全性。 + +## Kata Containers 的核心特性 + +1. 安全隔离:每个容器运行在独立的虚拟机中,提供硬件级别的隔离,从而带来更高的安全性和隔离性。这使得 Kata Containers 更适合多租户环境和安全敏感型工作负载。 +2. 性能与资源效率:尽管运行在虚拟机中,Kata Containers 仍能提供接近轻量级容器的性能和资源效率。Kata Containers 充分利用硬件虚拟化的优势,并借助硬件加速等技术实现快速启动和高性能。 +3. 生态兼容性:Kata Containers 兼容容器生态系统,支持 Docker 和 Kubernetes,并能与现有的容器工具和平台无缝集成。 +4. 灵活性与可扩展性:Kata Containers 可在多种虚拟化平台上运行,包括基于 KVM、Firecracker 等的平台。这使得用户能够根据具体需求选择合适的虚拟化方案。 + +## 虚拟化环境准备 +1. 虚拟化要求 +在安装Kata Containers之前,需要准备一台支持虚拟化的服务器。通常有以下两种来源: + A. 裸金属服务器:这类服务器上的CPU通常支持虚拟化,这种虚拟化称为L1虚拟化。 + B. 
支持嵌套虚拟化的虚拟机:这类虚拟机支持虚拟化指令集,可用于L2虚拟化。 + [img](/static/img/hypervisor.png) +要确认服务器是否支持虚拟化,可以执行以下命令: +`grep vmx /proc/cpuinfo` + + + +``` +slc@kata-1:~$ grep vmx /proc/cpuinfo +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat vnmi md_clear arch_capabilities +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest vapic_reg vid shadow_vmcs +``` + +如果服务器支持虚拟化,执行 `grep vmx /proc/cpuinfo` 命令后输出会显示与 'vmx' 标志相关的信息,这表明支持硬件辅助虚拟化(Intel VT-x)。 + +2.在GCP上创建虚拟机 +不同云服务提供商对嵌套虚拟化的支持程度各不相同。其中,GCP(Google Cloud Platform)据称对嵌套虚拟化的支持最佳。更多详情请参阅GCP嵌套虚拟化文档。 +要在GCP上创建3台嵌套虚拟化服务器,可以使用以下gcloud命令: + + + +``` +slc@bogon aws % gcloud compute instances create kata-1 \ + --enable-nested-virtualization \ + --project=apecloud-labs \ + --zone=us-central1-c \ + --machine-type=n1-standard-4 \ + --network-interface=network-tier=PREMIUM,stack-type=IPV4_ONLY,subnet=default \ + --maintenance-policy=MIGRATE \ + --provisioning-model=STANDARD \ + --service-account=822787046197-compute@developer.gserviceaccount.com \ + --scopes=https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring.write,https://www.googleapis.com/auth/servicecontrol,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/trace.append \ + --create-disk=auto-delete=yes,boot=yes,device-name=instance-20240407-035747,image=projects/ubuntu-os-cloud/global/images/ubuntu-2204-jammy-v20240319,mode=rw,size=100,type=projects/apecloud-labs/zones/us-central1-c/diskTypes/pd-balanced \ + --no-shielded-secure-boot \ + --shielded-vtpm \ + --shielded-integrity-monitoring \ + --labels=goog-ec-src=vm_add-gcloud \ + --reservation-affinity=any +``` + +要创建支持 `vmx` 标志的 3 台虚拟机,您可以执行 3 次 gcloud 命令。 +这将创建支持 `vmx` 标志的 3 台虚拟机。此外,您还需要配置 VPC 防火墙规则以允许 Kubernetes 内部节点之间的 `ipip` 和 `dns53` 流量,确保正常通信。这可以通过 GCP 控制台或使用 `gcloud` 命令行工具完成。 + +(严格保持原文格式与换行,技术术语按规范翻译) + +## 为 Kubernetes 和 Kata 准备环境 + +我们选择使用 K3s 作为 Kubernetes 基础进行部署。K3s 是一个轻量级、生产就绪的 Kubernetes 发行版,安装简单快捷。详见 https://k3s.io/。 + +***步骤:*** + +1. 安装 k3s。 + +``` +curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 +``` + +2. 创建引导令牌。 + +``` +sudo k3s token create +``` + +3. 加入其他节点。 + +``` + curl -sfL https://get.k3s.io | K3S_URL=https://10.128.0.49:6443 K3S_TOKEN="K1027e02d430f1a7c8a4e9a67b9a8a354875ff92c366a830d66ceada5784518e8c8::2bhamg.xd7vwu3dlkqyipvh" sh - + ``` + +4. 验证集群状态。 + +``` +kubectl get nodes +slc@kata-1:~$ kg nodes +NAME STATUS ROLES AGE VERSION +kata-2 Ready 100m v1.28.8+k3s1 +kata-1 Ready control-plane,master 104m v1.28.8+k3s1 +kata-3 Ready 99m v1.28.8+k3s1 +``` + +5. 
安装 Kata 环境。 + +此处我们使用 cloud hypervisor 作为默认的 hypervisor。详情参见 kata-deploy。 + +``` +git clone https://github.com/kata-containers/kata-containers.git +cd kata-containers/tools/packaging/kata-deploy/ +kubectl apply -f kata-rbac/base/kata-rbac.yaml +kubectl apply -k kata-deploy/overlays/k3s +kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml +kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-clh.yaml +``` + +6. 验证 kata 环境。 + +``` +kubectl get pods -A +slc@kata-1:~$ kg pods -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system local-path-provisioner-6c86858495-z4fm5 1/1 Running 0 106m +kube-system svclb-traefik-5010304c-zldjh 2/2 Running 0 106m +kube-system svclb-traefik-5010304c-wlmtb 2/2 Running 0 103m +kube-system svclb-traefik-5010304c-49jz5 2/2 Running 0 102m +kube-system kata-deploy-6hw8t 1/1 Running 0 98m +kube-system kata-deploy-g4j9s 1/1 Running 0 98m +kube-system kata-deploy-drn8w 1/1 Running 0 98m +kube-system helm-install-traefik-zl849 0/1 Completed 1 106m +kube-system helm-install-traefik-crd-rhfl4 0/1 Completed 0 106m +default php-apache-kata-clh-67f67d6f89-h2dw6 1/1 Running 0 94m +kube-system metrics-server-54fd9b65b-hqpm7 1/1 Running 0 106m +kube-system coredns-6799fbcd5-68wxv 1/1 Running 0 106m +kube-system traefik-f4564c4f4-4crfq 1/1 Running 0 106m +``` + +我们可以看到对应的运行时。 + +``` +root 6902 0.0 0.2 1419432 39560 ? Sl 07:22 0:03 /opt/kata/bin/containerd-shim-kata-v2 -namespace k8s.io -address /run/k3s/containerd/containerd.sock -publish-binary -id 9e7c835d4868398b062d9735e +root 6914 0.0 0.0 2612 1920 ? S 07:22 0:00 \_ /opt/kata/libexec/virtiofsd --syslog --cache=auto --shared-dir=/run/kata-containers/shared/sandboxes/9e7c835d4868398b062d9735eb28115be58e5ab8ce +root 6919 0.0 0.1 2106124 26756 ? Sl 07:22 0:00 | \_ /opt/kata/libexec/virtiofsd --syslog --cache=auto --shared-dir=/run/kata-containers/shared/sandboxes/9e7c835d4868398b062d9735eb28115be58e5a +root 6915 0.3 0.9 2385428 151808 ? Sl 07:22 0:18 \_ /opt/kata/bin/cloud-hypervisor --api-socket /run/vc/vm/9e7c835d4868398b062d9735eb28115be58e5ab8ce7111f0e729f651b7a3c2c3/clh-api.sock +``` + + + + +## 安装 KubeBlocks + +``` +curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash +kbcli kubeblocks list-versions +kbcli kubeblocks install --version="0.8.2" +``` + +错误信息: + +``` +slc@kata-1:~$ kbcli kubeblocks install --version="0.8.2" +The connection to the server localhost:8080 was refused - did you specify the right host or port? +``` + +该问题的原因是 kbcli 工具在枚举 Kubernetes 上下文时未能定位到 kubeconfig 文件。因此,它最终会尝试作为最后手段连接到 localhost:8080。 + +要解决此问题,我们可以创建一个指向 `kubeconfig` 文件的符号链接(软链接),这应该能让 `kbcli` 正确检测并使用正确的 Kubernetes 上下文。 + +``` +ln -sf /etc/rancher/k3s/k3s.yaml /home/slc/.kube/.kubeconfig +``` + +等待 KubeBlocks 安装完成。 + +## 创建数据库集群 + +***步骤:*** + +1. 创建普通集群 + 该集群由 runc 托管。 + +``` +kbcli cluster create --cluster-definition=apecloud-mysql +kbcli cluster connect aloe03 + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +mysql> show databases; ++--------------------+ +| Database | ++--------------------+ +| information_schema | +| kubeblocks | +| mydb | +| mysql | +| performance_schema | +| sys | ++--------------------+ +6 rows in set (0.00 sec) +``` + +运行时信息: + +``` +root 11801 0.1 0.0 1238384 15136 ? 
Sl 08:21 0:02 /var/lib/rancher/k3s/data/ef92c15b8f1c59f266ea4a9589be87b1b799d1682673f29721e8a1f7b0a4a97b/bin/containerd-shim-runc-v2 -namespace k8s.io -id 604c9d603425ef5c172e94a7598af3fd1b7e2004dfef4e7517e5e731e5123fc7 -address /run/k3s/containerd/containerd.sock +65535 11822 0.0 0.0 972 512 ? Ss 08:21 0:00 \_ /pause +ubuntu 12032 0.9 1.4 1105756 217004 ? Ssl 08:21 0:25 \_ mysqld +root 12181 0.0 0.0 22716 2320 ? Ss 08:21 0:00 | \_ crond -i -s +root 12590 0.0 0.0 1604 896 ? Ss 08:21 0:00 \_ /bin/sh /scripts/agamotto.sh +root 12602 0.1 0.6 885824 99328 ? Sl 08:21 0:03 | \_ /bin/agamotto --config=/opt/agamotto/agamotto-config.yaml +root 12752 0.0 0.0 2388 1664 ? Ss 08:22 0:00 \_ /bin/bash /scripts/vttablet.sh +root 14978 0.0 0.0 2312 1280 ? S 09:06 0:00 | \_ sleep 60 +65532 12911 0.1 0.3 1287692 52068 ? Ssl 08:22 0:03 \_ lorry --port 3501 --grpcport 50001 +65532 12950 0.0 0.2 1271036 35072 ? Ssl 08:22 0:00 \_ /bin/reloader --log-level info --operator-update-enable --tcp 9901 --config /opt/config-manager/config-manager.yaml +``` + +2. 创建 kata mysql 集群 +如需启用 Kata Containers 支持,您需要修改 Kubernetes 配置中的 Pod 规范(podSpec)。 + + + +``` + podSpec: + runtimeClassName: kata-clh +``` + +下载 kubeblocks-addon 版本 0.8.3 + +``` +wget https://github.com/apecloud/kubeblocks-addons/archive/refs/tags/v0.8.3-beta.5.tar.gz +tar zxf v0.8.3-beta.5.tar.gz +``` + +修改 apecloud-mysql 插件的 ClusterDefinition + +``` +cd kubeblocks-addons-0.8.3-beta.5/addons +vi apecloud-mysql/templates/clusterdefinition.yaml +All podSpec support kata + podSpec: + runtimeClassName: kata-clh + containers: + - name: mysql +``` + +保存并渲染新的 apecloud-mysql 插件。 + +``` +helm template apecloud-mysql --set resourceNamePrefix="kata-apecloud-mysql" > /tmp/a +kubectl apply -f /tmp/a +slc@kata-1:~$ kubectl get cd +NAME MAIN-COMPONENT-NAME STATUS AGE +redis redis Available 105m +clickhouse clickhouse Available 105m +mongodb-sharding mongos Available 105m +mongodb mongodb Available 105m +kafka kafka-server Available 105m +pulsar-zookeeper zookeeper Available 105m +postgresql postgresql Available 105m +apecloud-mysql mysql Available 105m +pulsar pulsar-broker Available 105m +kata-apecloud-mysql mysql Available 13m +``` + +在Kata上创建apecloud-mysql集群。 + +``` +slc@kata-1:~$ kbcli cluster create --cluster-definition=kata-apecloud-mysql +Info: --cluster-version is not specified, ClusterVersion kata-apecloud-mysql-8.0.30 is applied by default +Cluster cornel23 created +``` + +登录到 kata mysql。 + +``` +kbcli cluster connect cornel23 +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +mysql> show databases; ++--------------------+ +| Database | ++--------------------+ +| information_schema | +| kubeblocks | +| mydb | +| mysql | +| performance_schema | +| sys | ++--------------------+ +6 rows in set (0.00 sec) +``` + +测试其他命令。 + +``` +slc@kata-1:~$ kubectl exec -it cornel23-mysql-0 -- /bin/sh +Defaulted container "mysql" out of: mysql, metrics, vttablet, lorry, config-manager +sh-4.2# ls +anaconda-post.log apecloud bin data dev docker-entrypoint-initdb.d etc home lib lib64 media mnt opt proc root run sbin scripts srv sys tmp usr var +sh-4.2# +``` + +检查运行时进度。 + +``` +root 19681 0.2 0.2 1419688 40612 ? Sl 09:52 0:01 /opt/kata/bin/containerd-shim-kata-v2 -namespace k8s.io -address /run/k3s/containerd/containerd.sock -publish-binary -id 115051aa1546b66657f14cd9a +root 19702 0.0 0.0 2612 1920 ? 
S 09:52 0:00 \_ /opt/kata/libexec/virtiofsd --syslog --cache=auto --shared-dir=/run/kata-containers/shared/sandboxes/115051aa1546b66657f14cd9a19f4223f98370377b +root 19708 1.4 2.4 3155940 380292 ? Sl 09:52 0:10 | \_ /opt/kata/libexec/virtiofsd --syslog --cache=auto --shared-dir=/run/kata-containers/shared/sandboxes/115051aa1546b66657f14cd9a19f4223f98370 +root 19703 13.1 3.8 3436120 591872 ? Sl 09:52 1:31 \_ /opt/kata/bin/cloud-hypervisor --api-socket /run/vc/vm/115051aa1546b66657f14cd9a19f4223f98370377b3145354b8703bbcf2939c5/clh-api.sock +``` + + + + +## 结论 + +通过 KubeBlocks,我们可以轻松地在 Kata Containers 上运行数据。然而,为了更好地支持 Kata Containers,KubeBlocks 仍需原生支持 Kubernetes 的 runtimeClass 机制,以便支持更多运行时环境。 + +此外,我们也能感受到,得益于其与 CRI 的良好兼容性,Kata Containers 在保障安全性的同时,也能提供良好的用户体验。 \ No newline at end of file diff --git a/blogs/zh/mangage-6k-db-instance-with-kubeblocks.mdx b/blogs/zh/mangage-6k-db-instance-with-kubeblocks.mdx new file mode 100644 index 00000000..64c43361 --- /dev/null +++ b/blogs/zh/mangage-6k-db-instance-with-kubeblocks.mdx @@ -0,0 +1,184 @@ +--- +authors: + name: Jinhu Xie +date: 2024-12-20 +description: 本博客分享我们如何利用基于KubeBlocks构建的Sealos,成功管理跨四个可用区的6000多个数据库实例。 +image: /img/blogs/thumbnails/blog-manage-6k-db-instance-with-kb.png +slug: manage-6k-db-instance-with-kubeblocks +tags: +- Kubernetes +- KubeBlocks +- database +- large scale +- database instance +title: '管理6000+自托管数据库无需DBA——看单名工程师如何借助KubeBlocks实现这一壮举' +--- +# 无专职DBA管理6000+自托管数据库:单工程师如何借助KubeBlocks实现这一壮举 + +> 关于Sealos +> +> [Sealos](https://sealos.io/) 是一家为中文应用开发者提供基于Kubernetes的PaaS(平台即服务)解决方案的初创企业。Sealos通过提供开箱即用、按量付费的Kubernetes服务,赋能开发者快速构建和部署应用。此外,Sealos还提供包括函数计算、网关、DBaaS(数据库即服务)、MinIO存储以及应用商店等多样化服务。 + +> 作者:谢金虎,Sealos工程师 + +让我们从一个问题开始:在没有专业DBA的情况下,单名工程师能否管理超过6000个自托管的数据库集群? + +在当今云计算和大规模分布式系统的时代,数据库仍然是众多应用的基石。随着规模的增长,数据库管理的复杂性也随之提升。因此,当面对独自管理数千个数据库实例的任务时,大多数人会说:"这不可能!" + +然而,这个看似不可能的任务已经在Sealos内部成为现实。Sealos为开发者用户提供基于Kubernetes的PaaS服务,以及包括**MySQL**、**PostgreSQL**、**Redis**、**MongoDB**、**Kafka**和**Milvus**在内的核心DBaaS产品,管理着**横跨四个区域的6000多个数据库实例**。 + +在本篇博客中,我将分享作为没有DBA背景的K8s工程师,如何通过KubeBlocks在Sealos中成功管理如此庞大数量的数据库,将不可能变为日常运维的现实。 + +## 大规模管理数据库的挑战 + +传统上,管理如此庞大的数据库集群需要一支庞大的运维团队和一系列复杂的工具。即使是最小的错误也可能导致服务中断、性能下降,或在最坏情况下造成数据丢失。资源管理、扩缩容、备份、监控、告警和访问控制等任务变得越来越复杂,尤其是在托管多种数据库类型(如 MySQL、PostgreSQL、Redis、MongoDB 等)的多租户环境中。 + +随着需要管理的数据库实例数量从几十个增长到数千个,复杂性呈指数级上升——不仅在技术挑战方面,还包括运维和组织需求。 + +大规模管理数据库的挑战包括: + +- **日常部署和维护**:诸如次版本升级、配置调整以及扩缩容等任务会变得越来越频繁且耗时。 +- **及时处理异常**:备份失败或复制延迟等问题需要立即关注。 +- **确保高可用性和灾难恢复**:高可用性依赖于副本、故障转移和备份的协调。然而,当实例数量扩展到数千个时,即使是单个组件故障也可能级联影响整个系统。 +- **配置随时间漂移**:手动操作经常导致主机和实例的实际配置与中央存储库中存储的期望配置之间存在差异。 +- **成本考量**:管理大规模数据库通常需要一支庞大的运维团队,使得人力资源成本成为重要瓶颈。 + +传统的运维方法已不足以应对这些挑战。必须借助新技术和基于平台的工具来重新设计和简化数据库管理流程。 + +## Sealos 如何简化数据库管理? 
+ +Sealos 提供了一个强大且灵活的 DBaaS 平台,供用户可视化地管理和使用各类数据库。它支持统一管理 **MySQL**、**PostgreSQL**、**Redis**、**MongoDB**、**Kafka** 和 **Milvus**。该平台通过资源管理、扩缩容、集成监控和告警系统等工具简化数据库操作,确保高可用性、自动化故障转移以及灵活的备份与恢复功能。适用于开发和生产环境。 + +下表总结了截至 2024 年 12 月,Sealos 在各区域为客户提供的数据库实例分布情况。 + +| 可用区 | 实例数量 | +| :------------: | :------: | +| 新加坡可用区 | 1137 | +| 杭州可用区 | 3387 | +| 广州可用区 | 405 | +| 北京可用区 | 1014 | + +Sealos 以 **K8s 和 KubeBlocks** 为基础,实现自动化数据库管理的核心功能。熟悉 Kubernetes 的工程师可以使用 **KubeBlocks Operator** 高效管理数据库。通过 **KubeBlocks** 管理大规模数据库实例,解决了日常维护、异常处理、高可用性、配置一致性和成本优化等挑战。以下是 KubeBlocks 如何有效解决这些问题: + +### 日常部署与维护 + +**挑战:** + +随着实例数量增长,版本升级、配置变更和扩缩容等日常任务变得繁重。 + +**KubeBlocks 解决方案:** + +- **声明式管理**:使用 Kubernetes 原生的 CRD(自定义资源定义),用户可以定义数据库的期望状态(如版本、副本数、配置)。KubeBlocks 确保实际状态始终与期望状态一致。 +- **自动化工作流**:KubeBlocks 为滚动升级、扩缩容和配置更新等常见任务提供内置自动化。这些工作流确保在数千个实例中执行时,停机时间最短且操作一致。 + +### 及时处理异常 + +**挑战:** + +备份失败、复制延迟或意外崩溃等问题需要立即检测和响应,以防止级联故障。 + +**KubeBlocks 解决方案:** + +- **自愈能力**:KubeBlocks 自动化故障转移和恢复流程,减少人工干预。例如,如果副本节点不健康,KubeBlocks 可以自动从健康节点重新创建。 +- **实时监控与告警**:KubeBlocks 与 Prometheus 和 Grafana 等监控工具集成,提供数据库状态、复制健康和备份流程的实时指标与告警。 +- **集中式异常管理**:所有数据库活动日志和告警集中管理,便于运维人员快速分析和解决问题。 + +### 确保高可用性与灾难恢复 + +**挑战:** + +在大规模系统中协调复制、故障转移和备份非常复杂,单个故障可能引发连锁反应。 + +**KubeBlocks 解决方案:** + +- **内置高可用性**:KubeBlocks 通过自动化副本管理、领导者选举和故障转移机制(适用于 MySQL、PostgreSQL 和 MongoDB 等数据库),简化高可用性(HA)环境搭建。 +- **备份与恢复自动化**:KubeBlocks 与外部存储(如对象存储或 NFS)集成,自动化备份并确保故障时快速恢复。定时备份和按时间点恢复选项优化了灾难恢复流程。 + +### 解决配置漂移问题 + +**挑战:** + +人工操作常导致实际配置与期望配置不一致,可能引发意外行为或停机。 + +**KubeBlocks 解决方案:** + +- **统一配置管理**:KubeBlocks 使用存储在中央仓库的声明式配置文件。这些配置在所有实例间保持一致应用,消除了人为错误。 +- **漂移检测与调和**:KubeBlocks 持续监控数据库实例的实际配置,并与期望状态进行比对。若检测到配置漂移,系统会自动调和差异,确保一致性。 +- **版本控制**:配置文件和变更均受版本控制,运维人员可追踪变更记录,必要时回滚至先前配置。 + +### 成本优化 + +**挑战:** + +管理数千个数据库实例通常需要庞大的运维团队,人力资源成为主要成本驱动因素。 + +**KubeBlocks 解决方案:** + +- **运维效率**:通过自动化日常维护、异常处理和扩缩容操作,KubeBlocks 显著减少人工干预需求,使小型团队即可管理数千个实例。 + +## 工作原理:核心架构与设计 + +至此,您可能想知道:**KubeBlocks 如何使 Sealos 能够提供如此广泛的 DBaaS 服务**?让我们探讨其支持多种数据库的**核心设计原则**,重点关注其灵活的**高可用性架构**和**全面的备份恢复机制**。 + +## KubeBlocks 架构 + +![KubeBlocks 架构](/img/blogs/manage-6k-db-instances-1.png) + +KubeBlocks 的架构围绕模块化组件设计,旨在简化在 Sealos 等 PaaS 平台上提供数据库托管服务的复杂性。它提供用户友好的接口(如 `kbcli`、`kubectl` 和 Argo CD),这些接口与强大的核心 API 相连。该平台支持超过 30 种数据库和中间件插件,具有高度的灵活性。通过内置的 **Cluster**、**Component** 和 **InstanceSet** 等控制器,用户只需管理高层 API,而 Operator 会自动处理底层复杂性。这显著降低了技术门槛,使平台即使对非数据库专家也易于使用。 + +### 高可用机制 + +不同的数据库对高可用性的内置支持程度各不相同。为此,KubeBlocks 支持两种常见的高可用模型:**基于仲裁**和**主从复制**,能够灵活适应不同数据库的特性和需求。 + +1. **基于仲裁的高可用模型**: + + 对于支持分布式一致性的数据库(如 Kafka 和 MongoDB),这些数据库通过仲裁机制确保高可用性和一致性。在仲裁模型中,数据库节点通过选举过程实现分布式一致性,通常需要多数节点(即仲裁)达成共识才能进行写入或选举新的主节点。即使部分节点发生故障,只要维持仲裁,集群仍可正常运行。 + + KubeBlocks 通过健康检查持续监控数据库节点状态(如领导者、跟随者或副本),管理角色检测、角色转换和副本重建。如果节点变得不健康,KubeBlocks 会启动角色转换或副本恢复以维持仲裁和一致性。当故障节点恢复或被替换时,KubeBlocks 会从健康节点同步数据并确保节点完全更新后重新加入集群,自动完成副本重建。 + +2. **主从复制高可用模型**: + + **主从高可用模型**常见于传统数据库,如 **MySQL**、**PostgreSQL** 和 **Redis**。这些数据库依赖主节点处理写请求,同时一个或多个副本节点同步数据以提供读取扩展性并确保高可用性。 + + KubeBlocks 通过自动化关键流程(如角色检测、高可用决策、故障转移、副本重建和角色转换)简化和增强了主从设置。它通过健康检查持续监控数据库节点(如主节点和副本)。在节点故障期间,它会通过将最新的副本提升为新主节点来自动执行故障转移,确保数据一致性(使用 MySQL 二进制日志或 PostgreSQL 流复制等机制)。它会更新集群连接端点(如 Kubernetes 服务或 DNS)以实现无缝应用连接。KubeBlocks 还会在故障后自动重建副本。 + +![高可用机制 1](/img/blogs/manage-6k-db-instances-2.png) +![高可用机制 2](/img/blogs/manage-6k-db-instances-3.png) + +KubeBlocks 还支持第三方高可用解决方案。例如: + +1. **MySQL**:KubeBlocks 支持通过 **Orchestrator** 管理 MySQL 高可用性。 +2. **PostgreSQL**:KubeBlocks 支持通过 **Patroni** 管理 PostgreSQL 高可用性,使用 **Noop** 作为故障转移策略。 +3. 
**Redis**:除了 Redis Cluster,KubeBlocks 还集成了 Redis Sentinel 作为复制集群的高可用解决方案,将 Sentinel 作为独立组件部署在 Redis 复制拓扑中。 + +KubeBlocks 支持可变数量的副本。对于关键业务数据库,增加副本数量可以提升可靠性和可用性,显著降低数据丢失风险。 + +### 备份与恢复机制 + +KubeBlocks 通过将备份文件存储在外部 BackupRepo(如对象存储或 NFS)中,提供强大的备份与恢复能力,确保数据安全可靠。它支持按需备份和定时备份,提供磁盘快照备份以及 MySQL XtraBackup、PostgreSQL pg_basebackup 等数据库专用工具选项。通过支持全量备份和增量备份,KubeBlocks 能够从 BackupRepo 实现时间点恢复(PITR),在灾难发生时将数据恢复到特定时间点。 + +备份流程如下图所示: + +![Backup Process](/img/blogs/manage-6k-db-instances-4.png) + +与 KubeBlocks 集成的 Sealos 平台,通过提供高效的备份与恢复能力,增强了生产环境中的数据保护。它有效支持数据防丢失、灾难恢复和历史数据检索等场景。 + +### 迁移设计 + +开发者经常需要将外部数据库迁移至 Sealos,或在多个 Sealos 区域间转移数据库。KubeBlocks 企业版使用开源数据迁移工具 [Ape-DTS](https://github.com/apecloud/ape-dts) 实现无缝的数据库迁移流程。Ape-DTS 支持跨多种开源数据库的数据同步,非常适合在线实时数据迁移。 + +![Ape-DTS Architecture](/img/blogs/manage-6k-db-instances-5.png) + +迁移工作流: + +1. **源数据库(Source DB)**:提供待迁移的数据。 +2. **提取器(Extractor)**:通过 CDC(变更数据捕获)从源数据库提取全量和增量数据。 +3. **数据管道(Data Pipeline)**: + - **队列(Queue)**:临时存储提取的数据,支持断点续传。 + - **并行处理器(Parallelizer)**:并行处理任务以提高效率。 + - **写入器(Sinker)**:将数据写入目标数据库。 +4. **目标数据库(Target DB)**:作为迁移数据的最终目的地。 + +## 总结 + +KubeBlocks 将数据库管理转变为一种高效、全自动化的流程。它不仅降低了时间和成本,还大幅简化了运维操作。无论您是开发人员、平台工程师还是系统管理员,都能轻松高效地管理大规模数据库集群。 \ No newline at end of file diff --git a/blogs/zh/migrate-redis-in-kuaishou-from-bare-metal-to-k8s.mdx b/blogs/zh/migrate-redis-in-kuaishou-from-bare-metal-to-k8s.mdx new file mode 100644 index 00000000..76779313 --- /dev/null +++ b/blogs/zh/migrate-redis-in-kuaishou-from-bare-metal-to-k8s.mdx @@ -0,0 +1,233 @@ +--- +authors: + name: Xueqiang Wu & Yuxing Liu +date: 2024-09-03 +description: "这是由ApeCloud与快手在KubeCon China 2024上联合呈现的技术演讲。本次演讲主题为快手如何将大规模Redis实例从裸金属迁移至Kubernetes平台,以实现资源利用率的提升" +image: /img/blogs/thumbnails/blog-kubecon-kuaishou.png +slug: migrate-redis-at-kuaishou-from-bare-metal-to-k8s +tags: +- redis +- kuaishou +- migration +- bare metal +- k8s +title: 资源利用率提升——大规模Redis从裸金属迁移至Kubernetes +--- +# 资源利用率提升:大规模Redis从裸金属迁移至Kubernetes实践 + +我是吴雪强,非常荣幸能在KubeCon与大家分享。今天我将与来自快手的刘宇星共同演讲,重点讨论如何通过将大规模Redis实例从裸金属迁移至Kubernetes来显著提升资源利用率。 + +首先来看今天的议程安排。我会先简要介绍KubeBlocks项目,然后从单个Redis集群的角度,重点讲解KubeBlocks为解决Redis等数据库在Kubernetes上运行的三大关键挑战所做的努力。之后,宇星将从大规模部署的角度,分享他在跨多个Kubernetes集群管理众多Redis集群的实践经验。 + + +- Kubernetes -> Kubernetes(技术专名不译) +- Redis -> Redis(数据库名称不译) +- bare metal -> 裸金属(技术术语标准译法) +- resource utilization -> 资源利用率(标准技术术语) +- cluster -> 集群(标准技术术语) +- deployment -> 部署(标准技术术语)) + +## 什么是 KubeBlocks? + +首先,让我概述一下 KubeBlocks。KubeBlocks 由 ApeCloud 创建,这是一家由在数据库开发和运维领域(尤其是阿里云工作期间)拥有丰富经验的团队创立的初创公司。大约两年前,我们决定成立自己的公司,专注于开发简化在 Kubernetes 上运行 Redis 等数据库的产品。去年,我们开源了 KubeBlocks 项目。 + +简而言之,KubeBlocks 是一个 Kubernetes Operator,但它与其他数据库 Operator 不同,因为它旨在支持多种多样的数据库。事实上,KubeBlocks 已经支持超过 35 种不同的数据库。 + +为了实现这一目标,我们将 KubeBlocks 的 API 或自定义资源定义(CRD)分为两大类。第一类是 **"Add-on Provider API"**。这些 API 允许数据库提供商轻松将其数据库集成到 KubeBlocks 生态系统中。第二类是 **"Developer & DevOps API"**,它为用户和运维人员提供了管理数据库的能力——无论是创建新集群、扩缩容、执行备份和恢复等操作。此外,这些声明式 API 使 KubeBlocks 能够轻松与 Kubernetes 生态系统中的其他工具和系统(如 kubectl、Argo CD、Terraform 等)集成。 + +![What is KubeBlocks](/img/blogs/redis-kuaishou-1.png) + +## KubeBlocks 解决的问题:单 Redis 集群视角 + +在了解 KubeBlocks 的基础知识后,让我们深入探讨它如何解决在 Kubernetes 上运行单个 Redis 集群时的具体挑战。 + +### 如何处理数据复制? + +在 Kubernetes 上运行数据库面临诸多挑战,其中最关键之一就是数据复制。正确管理复制对于维护高可用性和数据完整性至关重要,尤其是在大规模部署中。 + +考虑一个典型场景,如下图所示,Redis 以单副本形式运行在 StatefulSet 中。这种配置会带来几个问题: + +![数据复制](/img/blogs/redis-kuaishou-2.png) + +1. 存在单点故障 - 系统极易发生中断 +2. 单个 Redis 实例的吞吐量存在固有上限 +3. 
数据丢失风险极高 + +为解决这些问题,我们可以水平扩展 StatefulSet,将副本数从 1 增加到 3。但这又带来了管理副本间复制关系的新问题。 + +第一个问题是:在 Redis 主从架构中,只有主实例能处理写操作,如何确保服务能找到主实例?第二个问题是:扩容时新增的实例如何发现主实例并正确建立复制关系?第三个问题是:当需要在 Redis 集群中执行次版本升级时,如何设计更新策略以最小化停机时间? + +StatefulSet 虽然有用,但并不能完全解决这些挑战。这正是 KubeBlocks 发挥作用的地方,它提供了一个专为数据库设计的 API,称为 **InstanceSet**。 + +与 StatefulSet 类似,**InstanceSet 为每个 Pod 分配稳定且唯一的网络标识符**。但关键区别在于:**InstanceSet 中的 Pod 不是独立的 - 它们会被分配角色,代表其在复制关系中的作用**。 + +其工作原理是:每当 InstanceSet 创建 Pod 时,会注入一个名为 kbagent 的边车容器,kbagent 会定期探测主数据库容器以检测其当前角色,并更新 Pod 上的标签来反映这个角色。通过这些角色标签,我们可以实现许多有用功能。 + +首先,我们可以更新 Service 的选择器以匹配"Master"角色标签,这样网络流量就会自动流向主 Redis 实例。 + +其次,当扩展 Redis 集群时,新的从实例可以通过角色标签找到主实例并正确建立复制关系。 + +第三,**角色标签让我们能更好地定义更新策略**。例如遵循数据库升级最佳实践,可以先更新从实例,再更新主实例。并且我们可以在升级主实例前执行切换操作。 + +通过这种基于角色的设计,KubeBlocks 不仅简化了复制关系管理,还显著提高了 Redis 集群的整体可用性。 + +![角色](/img/blogs/redis-kuaishou-3.png) + +### 如何实现高可用性? + +谈到可用性,让我们讨论第二个关键主题 - 如何实现高可用性。数据库高可用性涉及多个维度,但这里我们重点关注控制平面。 + +除了基于角色的更新策略外,InstanceSet 还引入了另外两个旨在提高可用性的特性 + +第一种称为"[原地实例更新](https://kubeblocks.io/docs/preview/api_docs/maintenance/in-place-update/overview)"。Kubernetes中Pod的原地更新和持久卷声明(PVCs)的存储卷扩容并非新概念,而KubeBlocks充分利用了这些能力。但KubeBlocks更进一步解决了数据库管理中的独特挑战:配置变更。数据库经常需要更新配置参数,传统上这些变更需要完全重启数据库,这会严重影响Redis集群的可用性。为此,KubeBlocks引入了原地配置更新功能。 + +![原地更新](/img/blogs/redis-kuaishou-4.png) + +我们将数据库参数分为三类:**不可变参数**、**静态参数**和**动态参数**。 + +- 不可变参数一旦设置就无法更改 +- 静态参数可以更改但需要重启数据库 +- 动态参数可以热加载 + +基于此分类,InstanceSet可以在无需完全重启数据库的情况下执行配置更新,显著降低恢复时间目标(RTO)并提升服务运行时间。 + +第二个特性称为"**实例重建**"。许多数据库仍运行在本地持久卷(LocalPVs)上,这带来一个挑战:如果节点故障,运行其上的Pod无法自动重新调度。实例重建功能通过在其他节点上自动重建PV和Pod来解决这个问题,使Redis集群能够恢复并回到正常状态。 + +![实例重建](/img/blogs/redis-kuaishou-5.png) + +### 如何处理超大规模集群? + +讨论完高可用性相关主题后,我们来看最后部分——Operator的P10K问题。 + +在快手的使用场景中,我们遇到了极具挑战性的情况:单个Redis集群可能包含近10,000个Pod。这意味着单个InstanceSet自定义资源(CR)会产生超过10,000个次级对象,这对Controller的协调过程造成巨大影响。 + +为解决这个问题,我们对InstanceSet Controller进行了参数调优和设计变更。 + +首先在调优方面,**我们增加了InstanceSet Controller pod分配的CPU和内存资源,使其能缓存更多对象**。同时提高了Controller与API Server之间的速率限制阈值,即ClientQPS和ClientBurst。我们压缩了InstanceSet CR中一些较大的字段以保持在Kubernetes对象大小限制内。最后,我们将协调goroutine从单个改为多个。 + +在设计方面,首先**我们将InstanceSet协调分为两个阶段——Prepare和Commit**。在Prepare阶段,通过比较InstanceSet CR中的期望状态与次级对象的当前状态,计算需要创建、更新或删除的次级对象。然后在Commit阶段,一次性将所有变更提交到API Server。这有助于将后续协调合并为一次,减少总协调次数。 + +其次,在Prepare阶段**我们采用了函数式编程思想**。将每个协调步骤设计为确定性函数,在Commit阶段确保所有API Server请求的幂等性。这帮助我们解决了由缓存陈旧引起的一些问题。 + +第三,**我们将Prepare阶段中一些较重操作异步化**,进一步提升协调效率。 + +通过这些优化,我们能够很好地处理P10K场景。 + +![Operator P10K](/img/blogs/redis-kuaishou-6.png) + +总结而言,KubeBlocks为在Kubernetes上管理Redis集群提供了强大的功能。**基于InstanceSet的角色化能力,使我们能够高效管理Redis服务器的主从复制关系**。原地实例更新和实例重建特性显著提升了Redis服务器的可用性。最后,针对P10K问题的优化证明了KubeBlocks具备处理单集群海量Pod极端场景的能力。 + +![Summary](/img/blogs/redis-kuaishou-7.png) + +现在,我想邀请Yuxing回到台上,分享他在多个Kubernetes集群中运行多个Redis集群的一些经验。 + +## 快手如何应对多Redis集群与多Kubernetes集群的挑战? + +我将带您了解快手如何利用KubeBlocks高效管理跨多个Kubernetes集群的Redis实例。 + +### Redis在快手的应用 + +在深入细节之前,先了解一些背景。在快手,Redis采用经典的主从架构部署,包含三个核心组件:Server(服务端)、Sentinel(哨兵)和Proxy(代理)。我们的Redis部署与众不同之处在于其庞大的规模——不仅是实例总数,单个集群的规模也很大,有些甚至超过10,000个节点。 + +既然已经实现了如此大规模下的稳定性,您可能会好奇为何还要进行云原生转型。答案在于资源利用率。尽管规模庞大,但我们注意到Redis并未充分利用其资源。在这种规模下,即使微小的优化也能带来显著收益。 + +那么如何提升资源利用率?您可能已有一些想法。我们发现云原生技术为此提供了最佳实践。此外,云容器已成为业务运营与基础设施间的新接口。虽然快手的无状态服务已迁移至云容器,但我们认识到将基础设施统一到云原生方式是大势所趋。这一转变不仅能解耦业务逻辑与基础设施,还能提升敏捷性并降低运维成本。 + +简而言之,我们启动云原生转型是为了优化成本。为此,我们同时调动了Redis团队和云原生团队的力量。 + +![Redis in Kuaishou](/img/blogs/redis-kuaishou-8.png) + +### 为什么选择KubeBlocks? + +今天我们讨论这个主题,意味着我们选择了KubeBlocks进行云原生转型。但为什么是KubeBlocks?让我们探究背后的决策原因。 + +用一个词概括:API。**KubeBlocks提供的API专为有状态服务设计**。 + +什么是有状态服务?它与无状态服务有何区别? 
+ +乍看之下,区别似乎仅在于服务是否维护状态信息。但我们认为**真正的差异在于实例间的不对等关系**。有状态服务中不同实例扮演不同角色、存储不同数据,这使得它们不可随意替换。您不能随意丢弃任何实例。这种不对等关系是动态的,可能在运行时发生变化(如切换期间)。KubeBlocks正是为管理这种复杂性而生,提供**基于角色的管理能力**。 + +此外,KubeBlocks支持多种数据库,**无需为每个数据库单独创建Operator**。它还通过OpsRequest提供**面向流程的API**,简化数据库迁移。如需了解具体实现,可深入查阅[官方文档中的KubeBlocks API](../user_docs/references/api-reference/cluster)。 + +![Why KubeBlocks](/img/blogs/redis-kuaishou-9.png) + +### Redis集群编排定义 + +我们已经讨论了很多关于KubeBlocks API的内容,现在看看KubeBlocks如何定义Redis集群。KubeBlocks中的Redis集群包含所有核心组件:Server、Sentinel和Proxy。 + +为避免组件定义的冗余,KubeBlocks将组件定义与组件版本分离,使得创建新集群时可直接引用。我们以最复杂的Redis Server组件为例。 + +- 首先,Redis Server 通过 ShardSpec 定义,其中包含分片列表。这种方式通过将 Redis Server 集群拆分为多个分片(每个分片包含主从实例)来支持更大规模的数据。 +- 另一个关键点是,同一个分片内的主从实例可能需要不同配置。为此,KubeBlocks 允许通过[实例模板](https://kubeblocks.io/docs/preview/api_docs/instance-template/introduction)在同一个组件内定义多套配置。 + +以下是 Redis Server 组件内部对象的关系结构: + +- **Cluster**:将整个 Redis 集群定义为一个整体 +- **ShardSpec**:指定 Redis Server 的分片列表 +- **Component**:表示独立组件,如 Redis Proxy、Sentinel 和单个服务器分片 +- **InstanceTemplate**:指定同一组件内的不同配置 +- **InstanceSet**:最终自动生成的工作负载,提供前文提到的基于角色的管理能力 + +![集群编排](/img/blogs/redis-kuaishou-10.png) + +### 角色管理 + +在基于角色的管理中,有两个关键方面尤为突出: + +- **建立并维护正确的关系** +- **实现细粒度的基于角色的管理**(薛强已为我们介绍过这部分) + +让我们重点探讨如何维护正确的角色关系。需要考虑的一个关键点是分片内主节点信息的重要性。如果该信息不正确或不可访问,可能导致严重问题。为确保业务稳定性,我们选择将数据平面与控制平面分离。这也是我们不依赖 KubeBlocks 获取主节点信息的原因。 + +![角色管理](/img/blogs/redis-kuaishou-11.png) + +### 部署架构 + +KubeBlocks 已证明在单个 Kubernetes 集群内运行有状态服务是有效的。然而,快手平台的 Redis 实例数量远超单个 Kubernetes 集群的承载能力,因此需要使用多个 Kubernetes 集群来支持业务需求。 + +管理多集群会带来新的挑战。如果直接将这种复杂性暴露给 Redis 团队,会导致以下问题: + +- Redis 团队需要维护所有 Kubernetes 集群的缓冲资源池,造成资源浪费 +- Redis 团队还需在单个 Kubernetes 集群达到容量上限前提前迁移 Redis 集群 + +为简化操作,我们决定隐藏多集群管理的复杂性。幸运的是,我们已通过联邦集群具备所需能力,尽管 KubeBlocks 本身并不原生支持多集群环境。 + +如何解决这个问题?以下是整体架构: + +![整体架构](/img/blogs/redis-kuaishou-12.png) + +我们将 KubeBlocks Operator 拆分为两部分:**Cluster Operator 和 Component Operator 部署在联邦集群中,而 InstanceSet Controller 位于成员集群内**。在这两者之间,我们引入了 **Federated InstanceSet Controller** 组件,负责将 InstanceSet 对象从联邦集群分发到成员集群。那么 Federated InstanceSet Controller 如何运作? 
+ +- 其主要职责是**获取调度建议,决定每个集群应部署多少实例** +- 次要职责是**拆分 InstanceSet 并将其分发到成员集群** + +与 StatefulSet 类似,InstanceSet 的每个实例都有编号名称。为保持一致性,我们重新设计了 InstanceSet 中的 'ordinals' 字段,允许自定义编号范围。 + +通过这种架构,我们实现了 KubeBlocks 跨多个 Kubernetes 集群运行,而无需进行重大改动。 + +![多集群分布](/img/blogs/redis-kuaishou-13.png) + +### 稳定性保障 + +除了满足业务需求外,确保我们的解决方案具备生产环境就绪性至关重要,特别是在稳定性方面。让我们探讨几个关键方面。 + +首先,考虑调度能力。为了确保Redis的高可用性,实例必须尽可能均匀地分布在集群中。然而,我们还需要考虑单台机器故障对Redis集群规模的影响。为了解决这个问题,我们定制了**细粒度分散调度能力**。这允许我们配置每个节点的最大实例数和每个Redis集群的最大节点数。 + +此外,我们还提供了基于CPU、内存和网络带宽的负载均衡调度能力。 + +接下来,讨论运行时控制。虽然Kubernetes带来的自动化是有益的,但它也引入了风险——小的变更可能导致大规模故障。为了降低这些风险,我们对运行中的实例实施了多项控制措施,包括并发控制和强制仅允许原地更新。我们还采取了许多其他措施,但由于时间限制,这里就不一一展开了。 + +![稳定性保障](/img/blogs/redis-kuaishou-14.png) + +## 总结 + +综上所述,正如许多读者可能已经意识到的,KubeBlocks 是一个卓越的项目。与 StatefulSet 相比,**KubeBlocks 引入了一个专为有状态服务设计的新 API,并提供了基于角色的管理功能,这使得云原生转型变得异常简单。** + +虽然 KubeBlocks 的 API 看似支持几乎所有有状态服务,但仍有许多工作有待完成: + +- 如何与现有的数据库 Operator 建立连接并与其能力对齐。 +- 推动有状态服务标准 API 的建设,这是快手非常希望与 KubeBlocks 合作的领域。我们也期待更多开发者加入我们,共同探索这一方向。 + +截至目前,快手已与 KubeBlocks 在多个功能上展开合作,例如**通过 InstanceSet 直接管理 Pod 和 PVC**、**实例模板(原异构 Pod)**以及**联邦集群集成**。 + +![Cooperation](/img/blogs/redis-kuaishou-15.png) \ No newline at end of file diff --git a/blogs/zh/moodle-in-kubeblocks-windows.mdx b/blogs/zh/moodle-in-kubeblocks-windows.mdx new file mode 100644 index 00000000..38e6220f --- /dev/null +++ b/blogs/zh/moodle-in-kubeblocks-windows.mdx @@ -0,0 +1,258 @@ +--- +authors: + name: DWJ-Squirtle + url: https://github.com/DWJ-Squirtle +date: 2024-07-03 +description: 本博客介绍如何在Windows上使用KubeBlocks MySQL Operator部署Moodle。 +image: /img/blogs/thumbnails/blog-moodle.png +slug: moodle-in-kubeblocks +tags: +- Kubernetes operator +- database +- MySQL +- KubeBlocks +- Moodle +- Windows +title: 使用KubeBlocks MySQL Operator在Kubernetes上部署Moddle +--- +# 使用 KubeBlocks MySQL Operator 在 K8s 上部署 Moodle(Windows 版) + +Moodle 是一个免费的在线学习管理系统,允许教育工作者创建自己的私有网站,填充动态课程内容,从而让人们随时随地学习。 + +## 简介 + +本文档将指导您快速入门,在 Windows 上安装并使用 Moodle,并利用 KubeBlocks 提供的 MySQL 数据库。 + +## 必要准备 + +- [Docker](https://docs.docker.com/get-docker/):v20.10.5(runC ≥ v1.0.0-rc93)或更高版本; +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl):用于与 Kubernetes 集群交互; +- [kbcli](https://cn.kubeblocks.io/docs/preview/user-docs/installation/install-with-kbcli/install-kbcli):用于 Playground 与 KubeBlocks 之间的交互。 + +## 准备数据库 + +### 操作步骤 + +### 1. 确保已启用 ApeCloud MySQL 插件 + +```shell +kbcli addon list +> +NAME TYPE STATUS EXTRAS AUTO-INSTALL INSTALLABLE-SELECTOR +... +apecloud-mysql Helm Enabled true +... +``` + +### 2. 创建 MySQL 集群 + +这是一个单机模式演示。如需部署 RaftGroup 集群,请参考:[创建并连接 MySQL 集群 | KubeBlocks](https://kubeblocks.io/docs/release-0.8/user_docs/kubeblocks-for-mysql/cluster-management/create-and-connect-a-mysql-cluster)。 + +```shell +kbcli cluster create mysql +``` + +### 3. 获取集群基本信息 + +执行以下命令获取目标数据库的网络信息,特别注意后续会用到的密码。 + +```shell +kbcli cluster connect --show-example --show-password ${cluster-name} +``` + +![Figure 1](/img/blogs/obtain-cluster-basic-information.png) + +### 4. 服务转发 + +执行 `port-forward` 将 MySQL 服务暴露给主机。 + +:::note + +注意:由于后续需要在本地机器上启动 MySQL 服务,需修改本地端口号以避免冲突。此处我们已将其改为 3307。 + +::: + +```shell +kubectl port-forward service/mycluster-mysql 3307:3306 +> +Forwarding from 127.0.0.1:3307 -> 3306 +Forwarding from [::1]:3307 -> 3306 +``` + +关于连接数据库的详细指南,请参阅:[从任意位置连接数据库 | KubeBlocks](https://kubeblocks.io/docs/release-0.8/user_docs/connect_database/overview-of-database-connection)。 + + + +## 安装 Moodle + +对于 Windows 系统,推荐使用 XAMPP 来安装 Moodle。 + +### 操作步骤 + +### 1. 
下载并安装 XAMPP + +前往官网下载并安装 XAMPP。官网地址如下:[XAMPP Installers and Downloads for Apache Friends](https://www.apachefriends.org/zh_cn/index.html) + +### 2. 打开 XAMPP 并启动 Apache 和 MySQL 服务 + +![Figure 2](/img/blogs/xmapp-start.png) + +### 3. 下载 Moodle + +前往 Moodle 官网下载对应文件。官网地址如下:[Latest release | Moodle downloads](https://download.moodle.org/releases/latest/) + +![Figure 3](/img/blogs/moodle-release.png) + +下载完成后,将压缩包解压到 XAMPP 安装目录下的 `htdocs` 文件夹中的下载目录。如果配置全部为默认,则位于 `C:/xampp/htdocs` + +### 4. 安装 Moodle + +打开浏览器,输入地址 localhost/moodle,然后按照以下步骤安装 Moodle。 + +#### 1. 选择语言 + +![Figure 4](/img/blogs/moodle-choose-language.png) + +#### 2. 确认路径 + +默认配置即可,直接点击 Next。 + +![Figure 5](/img/blogs/moodle-confirm-paths.png) + +#### 3. 选择数据库驱动 + +此处选择 `mysqli`。 + +![FIgure 6](/img/blogs/moodle-choose-database-diver.png) + +#### 4. 数据库设置 + +使用之前的 KubeBlocks 设置。 + +![Figure 7](/img/blogs/moodle-database-setting.png) + +#### 5. 服务器检查 + +**在下载 PHP 扩展时,此步骤可能会出现一些服务器组件检查错误。** + +修复步骤如下: + +1. 使用文本编辑器打开 `php.ini` 文件,该文件位于 `xampp/` 目录下,搜索并取消以下扩展行的注释: + + `extension=zip` + + `extension=gd` + + `extension=intl` + + `extension=sodium` + + `max_input_vars` + +2. 取消 `max_input_vars` 的注释后,将其修改为 `max_input_vars=5000` + +3. 将 `xampp/php/libsodium.dll` 库复制到 `xampp/apache/bin/` 目录下。 + +![Figure 8](/img/blogs/moodle-check-php.png) + +#### 6. 等待下载 + +之后会有一些下载内容,等待即可。 + +![Figure 9](/img/blogs/moodle-installation.png) + +#### 7. Moodle 的基本设置 + +设置名称和邮箱,安装即将完成。 + +![Figure 10](/img/blogs/moodle-setting.png) + +#### 8. 任务完成 + +此步骤中,整个安装已完成,现在可以自由使用 Moodle 了!! + +![Figure 11](/img/blogs/moodle-hello.png) + +更详细的安装步骤,请访问 [Install Moodle - MoodleDocs](https://docs.moodle.org/404/en/Installing_Moodle)。 + +## 准备 Redis 数据库 + +### 操作步骤 + +### 1. 确保 Redis 插件已启用 + +```shell +kbcli addon list +NAME TYPE STATUS EXTRAS AUTO-INSTALL INSTALLABLE-SELECTOR +... +redis Helm Enabled true +... +``` + +### 2. 创建 Redis 集群 + +本示例仅演示 Redis 的单机模式版本。更多信息请参考文档 [创建并连接 Redis 集群 | KubeBlocks](https://kubeblocks.io/docs/release-0.8/user_docs/kubeblocks-for-redis/cluster-management/create-and-connect-a-redis-cluster)。 + +```shell +kbcli cluster create redis --mode standalone +``` + +### 3. 获取基础连接信息 + +执行以下命令获取目标数据库的基础信息。 + +```shell +kbcli cluster connect --show-example --show-password --client=cli +``` + +![Figure 12](/img/blogs/moodle-redis-connection.png) + +### 4. 服务转发 + +执行 `port-forward` 将 Redis 服务暴露给主机。请注意,我的 Redis 实例命名为 'myredis',你需要将其替换为你自己选择的名称。 + +```shell +kubectl port-forward service/myredis-redis-redis 6379:6379 +> +Forwarding from 127.0.0.1:3306 -> 3306 +Forwarding from [::1]:3306 -> 3306 +``` + + + + +## 安装 Redis PHP 驱动 + +### 步骤 + +### 1. 检查驱动适配器版本 + +检查您的 PHP 版本、CPU(64位或x86)以及线程安全值(参考 站点管理 > 服务器 > PHP 信息)以获取正确版本。 + +![Figure 13](/img/blogs/moodle-site-administration.png) + +### 2. 下载兼容的 PHP 扩展文件并启用 + +- 从 [PECL :: Package :: redis (php.net)](https://pecl.php.net/package/redis) 下载对应版本的 PHP-redis 扩展文件 +- 将 DLL 文件 `php_redis.dll` 添加到 `xampp/php/ext` 目录 +- 在 php.ini 中添加 `extension=php_redis.dll` 并重启 web 服务器 + +## 连接到 Redis 服务器 + +### 操作步骤 + +### 1. 服务检查 + +访问 站点管理 > 插件 > 缓存 > 配置。如果服务配置成功,您将在"已安装的缓存存储"下的"Redis"旁看到一个绿色勾选标记,以及一个添加实例的链接。 + +![Figure 15](/img/blogs/moodle-redis-service-check.png) + +### 2. 添加实例 + +点击`添加实例`后,填写基本设置。在本示例中,名称为'redis',服务器地址为127.0.0.1:6379。 + +![Figure 16](/img/blogs/moodle-redis-add-instance.png) + +点击`保存更改`后,返回KubeBlocks时您将看到`正在处理连接`的提示。此时,您可以更流畅地使用Moodle了! 
+ +![Figure 17](/img/blogs/moodle-handling-check.png) \ No newline at end of file diff --git a/blogs/zh/redis-containerization.mdx b/blogs/zh/redis-containerization.mdx new file mode 100644 index 00000000..e9f32113 --- /dev/null +++ b/blogs/zh/redis-containerization.mdx @@ -0,0 +1,91 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/1814084?v=4 + name: Thomas + url: https://github.com/realzyy +date: 2024-05-28 +description: 将Redis这一与容器技术同期诞生的数据库容器化,现在时机成熟了吗? +image: /img/blogs/thumbnails/blog-redis-containerization.png +slug: redis-containerization +tags: +- redis +- containerization +title: Redis容器化——准备好了吗? +--- +# Redis 容器化——准备好了吗? + +在 Kubernetes 主导的时代,数据库容器化对云原生团队来说是一个极具吸引力却又常常令人望而生畏的挑战。 + +像 MySQL 和 PostgreSQL 这样的开源数据库诞生于 PC 服务器时代,通常用于存储关键业务数据。将它们迁移到 Kubernetes 可能需要更多的努力和勇气。然而,对于 Redis 这样一个与容器技术同期诞生、主要用作缓存的数据库来说,容器化是否变得更容易?**许多云原生团队确实这么认为,但实践表明 Redis 并不像看起来那么容易驾驭。** + +## 太简单了,对吧?但是... + +使用 Redis 容器化确实轻而易举。借助官方的 Redis Docker 镜像,只需几秒钟就能拉起 Redis 服务。将应用与 Redis 部署在同一个 Kubernetes 集群中可以极大简化入门流程,但存在两个"小"问题: + +- Redis 服务不具备高可用性 + 一旦 Redis 容器被重新调度,其 IP 地址就会变更,导致应用端的连接池配置失效。为适应容器环境的易变性,Redis 容器的 IP 不能直接暴露给应用,需要增加 VIP(虚拟 IP)或 DNS 域名来提供固定连接地址。 + +- Redis 服务不具备高可靠性 + 若运行 Redis 容器的主机发生宕机,Redis 容器的持久卷可能损坏,导致应用数据丢失。虽然很多开发者将 Redis 用作易失性内存数据库,但也有不少人将其用于持久化键值存储。因此 Redis 容器化必然涉及分布式块存储或本地磁盘同步的解决方案,以确保数据持久性。 + +## 如果不出意外的话,就要出意外了 + +有追求的云原生团队不会满足于玩具级的 Redis 服务。很自然地,他们会研究如何编排多个 Redis 容器的高级方案。在这些高级方案中,Redis 服务由分布在多个宿主机上的多个 Redis 容器(副本)组成,能够应对一个或多个容器故障,从而提供持续可靠的服务。 + +Redis 内核本身并不支持分布式能力,因此需要外部组件来处理 Redis 容器的角色分配和复制配置。在这方面久经考验的 Sentinel 组件,社区也提供了相应的容器编排方案。使用 Bitnami 提供的 Redis Helm Chart,你可以轻松部署一个带有 Sentinel 组件的 Redis 主从集群。通过正确配置副本数、规格参数,并对内核参数稍作调整,Redis 服务的质量就能得到显著提升。如果应用负载相对稳定,这个方案效果不错。但一旦涉及故障或扩缩场景,就会暴露出几个不那么容易解决的问题: + +- Redis 服务能力永久性降级 + + 云原生团队往往没有现成的分布式块存储可用,而本地磁盘资源则相对常见。当 Redis 容器被分配到本地磁盘宿主机时,这些容器实际上就被"固定"在了这些宿主机上。如果发生硬件故障,宿主机若能快速恢复,其上的 Redis 容器也能快速恢复。否则,Redis 容器将一直处于 pending 状态,无法被重新调度到其他健康的宿主机上。虽然 Redis 服务依然可靠可用,但其容量将永久性降级,而 pending 的容器状态对强迫症工程师来说简直是噩梦。 + +- Redis 服务能力的天花板低 + + 在 Redis 使用本地磁盘的场景下,Redis 服务能力的天花板较低。被"固定"在宿主机后,Redis 容器的内存使用上限就受限于该宿主机。宿主机上运行的容器越多,Redis 容器能使用的内存就越少。同样的问题也存在于 Redis 容器可用的存储容量上。由于被"固定"在宿主机,存储容量的上限就是宿主机本地磁盘被其他容器瓜分后的剩余空间。CPU 资源的问题倒不明显,因为 Redis 不需要多核处理,对 CPU 的用量相对不敏感。 + +- Redis 服务的扩缩容问题 + + 业务高峰期时,Redis 服务的扩缩容在所难免。根据面临的容量挑战,扩缩容方式分为垂直扩缩容和水平扩缩容。如果业务整体数据量不变,只是需要缓存的热点数据量增加,那么垂直扩容 Redis 容器的内存即可。但一旦业务整体数据量增加,需要垂直扩容存储容量时,云原生团队就无法通过 Helm 修改 StatefulSet 配置来实现重配,需要人工介入劫持底层 PVC。劫持的代价会在后续需要水平扩容时显现。新增的 Redis 容器会沿用旧的配置,使得原本同质化、自动化的 Redis 服务变成异质化、人工缝合的拼凑品。 + +## 潜藏的挑战 + +高可用性和可靠性问题可以从架构设计和拓扑图中发现,通常云原生团队会在 Day 1 解决。但特定场景下的服务能力挑战更为隐蔽,取决于真实的业务场景和云原生团队的经验,往往要到 Day 2 才会注意到。**一个谨慎的云原生团队应该避免使用原生 Kubernetes 工作负载在生产环境中运行容器化数据库——这就像在海上航行纸船一样危险**。 + +Kubernetes 提供了可以更好地聚合存储、计算和网络资源的自定义资源,通过 API 提供"声明式"的数据库服务。目前,几个知名的 Redis Operator 提供了高级解决方案,帮助云原生团队解决常见的 Day 2 问题,包括: + +- RedisLabs 的 Redis Enterprise Operator +- AppsCode 的 KubeDB +- ApeCloud 的 KubeBlocks +- Spotahome 的 Redis Operator +- OpsTree 的 Redis Operator + +RedisLabs、AppsCode 和 ApeCloud 提供的 Operator 是企业级的,提供更全面的能力。而 Spotahome 和 OpsTree 提供的 Redis Operator 是完全开源的,功能较少但更简单易懂。根据发布说明和变更日志,Spotahome 的最后一次发布是在 2022 年 1 月 19 日,OpsTree 的是在 2022 年 11 月 10 日,这表明对问题的响应时间可能较慢,需要特别注意。 + +无论选择哪个 Redis Operator,云原生团队都应该预见到真实业务场景中的网络环境高度复杂,这可能会挑战 Redis 服务支持的网络解决方案。当跨 Kubernetes 部署的新应用需要读写现有 Redis 集群时,这种挑战经常出现。如果没有精心规划的计划,这可能会阻碍业务部署效率。考虑到业务端各种 SDK 的使用方式,Redis 服务需要支持以下部署模型以满足长期需求: + +- 单节点(客户端仅访问主节点) + - Redis Server 通过 NodePort 暴露主节点地址 + - Redis Server 通过 LoadBalancer 暴露主节点地址 +- 双节点(客户端仅访问主节点) + - Redis Server 通过 NodePort 暴露主节点地址 + - Redis 
Server 通过 LoadBalancer 暴露主节点地址 +- 双节点或多节点(客户端访问 Sentinel 实现读写分离) + - Redis Server 和 Sentinel 组件通过 HostNetwork 暴露 Redis 和 Sentinel 副本地址 + - Redis Server 和 Sentinel 组件通过 NodePort 暴露 Redis 和 Sentinel 副本地址 + - Redis Server 和 Sentinel 组件通过 LoadBalancer 暴露 Redis 和 Sentinel 副本地址 +- 分片 + - Redis Server 通过 HostNetwork 暴露副本地址 +- 分片 + 代理 + - Proxy Server 通过 NodePort 暴露连接地址 + - Proxy Server 通过 LoadBalancer 暴露连接地址 + +等等,为什么 Redis 分片与其他形式不同,只能使用 HostNetwork?这涉及到 Redis 与云厂商之间的各种博弈。简而言之,Redis 希望将分片作为付费功能,但代码是在 BSD 许可下的。为了防止云厂商利用这一点,Redis 故意没有实现 announce-ip 功能,使得原生 Redis 分片无法在云网络环境中运行。然而,云厂商并没有放弃,而是"帮助"Redis 填补了 announce-ip 功能空白,使他们能够以最小的成本继续大量销售。不幸的是,Redis 和云厂商之间的拉锯战意味着容器环境中的 Redis 分片只能使用 HostNetwork 暴露地址,这给云原生团队带来了额外的障碍。**这些商业利益是 Redis 容器化过程中持续关注的问题**。 + +## 我还是想试试 + +**觉得容器化 Redis 没那么容易,公有云全托管服务的溢价似乎很合理?** + +这种感觉没错,但先别放弃。公有云厂商最重要的数据库技术方向之一就是容器化,而容器化挑战的起点就是保障弹性以及支持多种网络方案。在块存储、对象存储、VPC 网络和 4 层负载均衡的支持下,公有云厂商更容易实现数据库容器化,技术方案也往往更精巧(如固定容器 IP、不重启升级 Kubernetes 等)。而大多数云原生团队在没有 SDS(软件定义存储)和 SDN(软件定义网络)支持的情况下,实现数据库容器化面临的挑战更大。 + +幸运的是,大多数云原生团队需要支撑的业务场景没有公有云厂商那么复杂。如果选对方向、收窄需求并逐步积累生产经验,数据库容器化的挑战并不会扑面而来。业界有不少实践分享了容器化 Redis 的经验,有的显著降低了成本,有的让业务团队实现了自助服务。 + +**冲着提升资源利用率和研发效率的好处,容器化 Redis 值得一试,哪怕有点难。** \ No newline at end of file diff --git a/blogs/zh/redis-on-kb.mdx b/blogs/zh/redis-on-kb.mdx new file mode 100644 index 00000000..75c40df8 --- /dev/null +++ b/blogs/zh/redis-on-kb.mdx @@ -0,0 +1,602 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/111858489?v=4 + name: dullboy + url: https://github.com/nayutah +date: 2024-05-28 +description: 我们如何通过KubeBlocks解决在Kubernetes上管理Redis的问题 +image: /img/blogs/thumbnails/blog-redis-on-kb.png +slug: manage-redis-on-k8s +tags: +- containerization +- database performance +- network compatibility +- redis +title: 基于KubeBlocks优化Kubernetes上的Redis集群部署及解决网络兼容性问题 +--- +# 基于KubeBlocks简化Redis集群在Kubernetes的部署及解决网络兼容性问题 + +Redis Cluster是Redis数据库的分布式解决方案,用于将数据分布在多个节点上以提供高可用性和可扩展性。它允许将大量数据分片存储在多个节点上,并自动处理数据分片和迁移。 +Redis Cluster使用哈希槽来管理数据分布。数据被划分为固定数量的哈希槽,每个槽可以分配给不同的节点。每个节点负责处理分配给它的哈希槽中的部分数据。客户端可以直接连接到任意节点,无需中间代理。 +在应用部署中,整体架构通常由后端的Redis Cluster和应用端的智能客户端组成。 +Redis Cluster提供以下特性: + +1. 自动分片和数据迁移:当节点加入或离开集群时,Redis Cluster会自动将数据迁移到正确的节点以保持数据分布均衡。 +2. 高可用性:Redis Cluster采用主从复制机制,每个主节点有多个从节点。当主节点故障时,从节点可以自动接管,提供高可用性。 +3. 负载均衡:Redis Cluster实现了客户端与节点间的自动负载均衡。客户端可以直接连接任意节点,节点会自动转发请求,实现负载均衡。 + +通过将数据分布在多个节点上,并提供自动故障转移和负载均衡机制,Redis Cluster使应用程序能够处理大规模数据集和高并发访问需求。它是一个强大的分布式解决方案,常用于需要高性能和可扩展性的场景,如缓存、会话存储和实时计数。 + +许多Kubeblocks的客户对Redis Cluster有强烈需求,因此我们基于Kubeblocks适配了Redis Cluster。在适配过程中,我们也发现了一些Redis Cluster在Kubernetes容器场景下面临的网络标准兼容性问题。 + +## 问题复现 + +1. 安装 KubeBlocks 0.9.0。 + +``` +slc@slcmac kbcli % ./bin/kbcli kubeblocks list-versions --devel +VERSION RELEASE-NOTES +0.9.0-beta.8 https://github.com/apecloud/kubeblocks/releases/tag/v0.9.0-beta.8 +0.9.0-beta.7 https://github.com/apecloud/kubeblocks/releases/tag/v0.9.0-beta.7 +slc@slcmac kbcli % kbcli kubeblocks install --version="0.9.0-beta.8" +``` + +2. 安装 redis-cluster 插件。 + +虽然 Redis 集群插件默认已安装,但由于网络标准兼容性问题导致了一些故障,我们需要手动安装它。 + +``` +# Disable addon +slc@slcmac addons % kbcli addon disable redis +# Install the latest addon on the branch +slc@slcmac addons % git clone git@github.com:apecloud/kubeblocks-addons.git +slc@slcmac addons % cd kubeblocks-addons/addons/redis +slc@slcmac addons % helm dependency build && cd .. 

slc@slcmac addons % helm install redis ./redis
slc@slcmac addons % helm list
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
redis default 1 2024-04-15 21:29:37.953119 +0800 CST deployed redis-0.9.0 7.0.6
```

为复现该问题,我们在执行 `helm install redis` 命令前修改了插件的配置。

![img](/static/img/redis-helm.png)

3. 创建 Redis 集群。

该集群采用 NodePort 模式创建,包含 3 个主节点和 3 个从节点。

```
slc@slcmac addons % helm install redisc ./redis-cluster --set mode=cluster --set nodePortEnabled=true --set redisCluster.shardCount=3
slc@slcmac addons % kg pods | grep -v job
NAME READY STATUS RESTARTS AGE
redisc-shard-hxx-1 3/3 Running 0 14m
redisc-shard-hxx-0 3/3 Running 0 14m
redisc-shard-xwz-0 3/3 Running 0 14m
redisc-shard-xwz-1 3/3 Running 0 14m
redisc-shard-5g8-0 3/3 Running 0 14m
redisc-shard-5g8-1 3/3 Running 0 14m
```

我们可以清楚地看到创建了 3 组主从 Pod,但节点间的关系尚未建立。

接下来为每个主节点宣告 IP/端口/总线端口(bus-port):

```
redisc-shard-5g8-0
kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2
kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 30039
kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 32461
redisc-shard-hxx-0
kubectl exec -it redisc-shard-hxx-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2
kubectl exec -it redisc-shard-hxx-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 30182
kubectl exec -it redisc-shard-hxx-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 31879
redisc-shard-xwz-0
kubectl exec -it redisc-shard-xwz-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2
kubectl exec -it redisc-shard-xwz-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 31993
kubectl exec -it redisc-shard-xwz-0 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 30105
```

创建哈希槽:

```
kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster ADDSLOTSRANGE 0 5461
kubectl exec -it redisc-shard-hxx-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster ADDSLOTSRANGE 5462 10922
kubectl exec -it redisc-shard-xwz-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster ADDSLOTSRANGE 10923 16383
```

执行 cluster meet,组建集群:

```
# login to one of the primary nodes
slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- /bin/bash
root@redisc-shard-5g8-0:/# redis-cli -a O3605v7HsS
127.0.0.1:6379> cluster nodes
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 0 0 connected 0-5461
# Only one node found, we have to meet other two nodes.
slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster meet 172.18.0.2 30182 31879
OK
slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- redis-cli -a O3605v7HsS cluster meet 172.18.0.2 31993 30105
OK
# Check the topology again.
slc@slcmac redis % kubectl exec -it redisc-shard-5g8-0 -c redis-cluster -- /bin/bash
root@redisc-shard-5g8-0:/# redis-cli -a O3605v7HsS
127.0.0.1:6379> cluster nodes
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 1713324462000 0 connected 0-5461
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713324462989 2 connected 10923-16383
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713324463091 1 connected 5462-10922
```

此时,一个包含 3 个主节点的集群已创建完成。

4. 以 Headless 地址加入从节点

我们将 Pod `redisc-shard-5g8-1` 作为主 Pod `redisc-shard-5g8-0` 的从节点加入集群。
先检查该从 Pod 上的连接情况,可见其尚未与任何主 Pod 建立连接。

```
# Check link
root@redisc-shard-5g8-1:/# netstat -anop | grep redis
tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 127.0.0.1:6379 127.0.0.1:46948 ESTABLISHED 1/redis-server *:63 keepalive (123.22/0/0)
tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
```

从节点 Pod 的 Headless 地址为 `redisc-shard-5g8-1.redisc-shard-5g8-headless:6379`。
完整的 `join` 命令为:

```
slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- /bin/bash
root@redisc-shard-5g8-1:/# redis-cli -a O3605v7HsS --cluster add-node redisc-shard-5g8-1.redisc-shard-5g8-headless:6379 172.18.0.2:30039 --cluster-slave --cluster-master-id ff935854b7626a7e4374598857d5fbe998297799
>>> Adding node redisc-shard-5g8-1.redisc-shard-5g8-headless:6379 to cluster 172.18.0.2:30039
>>> Performing Cluster Check (using node 172.18.0.2:30039)
M: ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039
   slots:[0-5461] (5462 slots) master
M: e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993
   slots:[10923-16383] (5461 slots) master
M: a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182
   slots:[5462-10922] (5461 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node redisc-shard-5g8-1.redisc-shard-5g8-headless:6379 to make it join the cluster.
Waiting for the cluster to join

>>> Configure node as replica of 172.18.0.2:30039.
[OK] New node added correctly.
```

172.18.0.2:30039 是主节点 Pod 对外宣告的 IP/端口。

检查连接:

```
root@redisc-shard-5g8-1:/# netstat -anop | grep redis
tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.237:48424 172.18.0.2:31879 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) // master-2 announced bus port
tcp 0 0 10.42.0.237:36154 172.18.0.2:32461 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) // master-1 announced bus port
tcp 0 0 10.42.0.237:33504 172.18.0.2:30039 ESTABLISHED 1/redis-server *:63 keepalive (285.22/0/0) // master-1 announced port
tcp 0 0 127.0.0.1:6379 127.0.0.1:46948 ESTABLISHED 1/redis-server *:63 keepalive (279.99/0/0) // local redis-cli
tcp 0 0 10.42.0.237:58576 172.18.0.2:30105 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) // master-3 announced bus port
tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
```

从节点 Pod 通过各主节点宣告的总线端口与 3 个主节点 Pod 相连,同时也与其主节点 Pod 保持连接。
接下来在从节点 Pod 上检查集群拓扑结构:

```
root@redisc-shard-5g8-1:/# redis-cli -a O3605v7HsS
127.0.0.1:6379> cluster nodes
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713327060494 0 connected 0-5461
3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 :6379@16379 myself,slave ff935854b7626a7e4374598857d5fbe998297799 0 0 0 connected
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713327060696 2 connected 10923-16383
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713327060605 1 connected 5462-10922
```

检查主 Pod 上的集群拓扑,发现新添加的从 Pod 缺失。

```
root@redisc-shard-5g8-0:/# redis-cli -a O3605v7HsS
127.0.0.1:6379> cluster nodes
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 1713327106000 0 connected 0-5461
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713327107004 2 connected 10923-16383
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713327107106 1 connected 5462-10922
```

在之前的 `add-node` 过程中,`cluster meet` 操作报告成功,但主节点实际上并未识别到新的副本节点。检查 `/data/running.log` 后,发现以下错误信息:

```
root@redisc-shard-5g8-0:/data# grep 16379 running.log
1:M 17 Apr 2024 04:05:37.610 - Connection with Node 30e6d55c687bfc08e4a2fcd2ef586ba5458d801f at 10.42.0.1:16379 failed: Connection refused
**10 times repeated**
30e6d55c687bfc08e4a2fcd2ef586ba5458d801f at 10.42.0.1:16379 failed: Connection refused
```

因此,实际上这个 `cluster meet` 操作是失败的。但为什么呢?

## 故障排查

1. 神秘的 IP 地址问题

默认的 Redis 集群总线端口是 16379(即 6379 + 10000)。如果总线端口未显式宣告,Redis 集群将使用这个默认端口。因此问题现象是:当主节点 Pod 收到 meet 请求时,它会尝试回连另一个 Pod 的默认总线端口(16379),但连接失败。然而从节点 Pod 的 IP(10.42.0.237)与错误信息中提到的 IP(10.42.0.1)并不一致。为什么主节点 Pod 会尝试连接到一个不一致的 IP 地址?
+ + + +``` +slc@slcmac redis % kg pods -A -o wide | grep redisc-shard-5g8-1 +default redisc-shard-5g8-1 3/3 Running 0 72m 10.42.0.237 k3d-k3s-default-server-0 +``` + +在继续调查过程中,发现10.42.0.1实际上是k3d(我们在开发环境中使用的Kubernetes版本)CNI0的地址。 + +``` +slc@slcmac redis % docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +8f8958df3298 moby/buildkit:buildx-stable-1 "buildkitd --allow-i…" 6 weeks ago Up 6 weeks buildx_buildkit_project-v3-builder0 +f8f349b2faab ghcr.io/k3d-io/k3d-proxy:5.4.6 "/bin/sh -c nginx-pr…" 6 months ago Up 3 months 80/tcp, 0.0.0.0:57830->6443/tcp k3d-k3s-default-serverlb +3e291f02144a rancher/k3s:v1.24.4-k3s1 "/bin/k3d-entrypoint…" 6 months ago Up 3 months k3d-k3s-default-server-0 +slc@slcmac redis % docker exec -it 3e291f02144a /bin/sh +/ # ifconfig +cni0 Link encap:Ethernet HWaddr 32:22:34:47:9D:BF + inet addr:10.42.0.1 Bcast:10.42.0.255 Mask:255.255.255.0 + UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1 + RX packets:219424018 errors:0 dropped:0 overruns:0 frame:0 + TX packets:238722923 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:33805804056 (31.4 GiB) TX bytes:199941577234 (186.2 GiB) + +eth0 Link encap:Ethernet HWaddr 02:42:AC:12:00:02 + inet addr:172.18.0.2 Bcast:172.18.255.255 Mask:255.255.0.0 + UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 + RX packets:74602028 errors:0 dropped:0 overruns:0 frame:0 + TX packets:68167266 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:0 + RX bytes:39814942542 (37.0 GiB) TX bytes:17167663962 (15.9 GiB) +slc@slcmac redis % kg node -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +k3d-k3s-default-server-0 Ready control-plane,master 183d v1.24.4+k3s1 172.18.0.2 K3s dev 5.10.104-linuxkit containerd://1.6.6-k3s1 +``` + +换句话说,10.42.* 是 k3d 默认的 Pod CIDR 网段,而 172.18.0.2 是唯一 k3d 节点的物理地址(这就是为什么看到的节点端口地址都是 172.18.0.2)。 + +2. 
一个不太明显的链接。 + +事实证明,对应 gossip 协议的链接(本地 16379 -> 远程 NodePort)在目标端经过了 NAT 转换。通过 tcpdump 抓包,我们成功定位到一个 gossip 会话链接。尽管该会话链接已被 CNI 进行了 NAT 转换,但我们仍能利用 TS Val 和 ECR 信息完整还原它。以下是已建立的主节点 primary-1 和 primary-2 之间还原出的 gossip 链接。 + +主节点 primary-1 redisc-shard-5g8-0 的链接信息: + +``` +root@redisc-shard-5g8-0:/data# netstat -anop | grep redis +tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 127.0.0.1:6379 127.0.0.1:46798 ESTABLISHED 1/redis-server *:63 keepalive (117.47/0/0) +tcp 0 0 10.42.0.236:58412 172.18.0.2:31879 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) // Other part is primary-2 nodeport +tcp 0 0 10.42.0.236:6379 10.42.0.1:45255 ESTABLISHED 1/redis-server *:63 keepalive (118.11/0/0) +tcp 0 0 10.42.0.236:36528 172.18.0.2:30105 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 10.42.0.236:16379 10.42.0.1:16471 ESTABLISHED 1/redis-server *:63 keepalive (1.20/0/0) +tcp 0 0 10.42.0.236:16379 10.42.0.1:30788 ESTABLISHED 1/redis-server *:63 keepalive (0.08/0/0) +tcp 0 0 10.42.0.236:16379 10.42.0.1:20521 ESTABLISHED 1/redis-server *:63 keepalive (1.42/0/0) +tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0) +tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0) +``` + +主节点 primary-2 的连接信息 redisc-shard-hxx-0: + +``` +root@redisc-shard-hxx-0:/# netstat -anop | grep redis +tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 10.42.0.232:16379 10.42.0.1:24780 ESTABLISHED 1/redis-server *:63 keepalive (0.72/0/0) // master-1 被 NAT 之后的地址 +tcp 0 0 10.42.0.232:41974 172.18.0.2:30105 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 10.42.0.232:16379 10.42.0.1:6717 ESTABLISHED 1/redis-server *:63 keepalive (1.34/0/0) +tcp 0 0 10.42.0.232:16379 10.42.0.1:24130 ESTABLISHED 1/redis-server *:63 keepalive (0.33/0/0) +tcp 0 0 10.42.0.232:33306 172.18.0.2:32461 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) +tcp 0 0 127.0.0.1:6379 127.0.0.1:46626 ESTABLISHED 1/redis-server *:63 keepalive (24.56/0/0) +tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0) +tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0) +``` + +两个链接之间的映射关系: + +``` +# On primary-1 redisc-shard-5g8-0, capture packets on NodePort 31879 (primary -2 redisc-shard-hxx-0): +05:40:04.817984 IP redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412 > k3d-k3s-default-server-0.31879: Flags [P.], seq 6976:9336, ack 7081, win 10027, options [nop,nop,TS val 4191410578 ecr 867568717], length 2360 +05:40:04.818428 IP k3d-k3s-default-server-0.31879 > redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412: Flags [.], ack 9336, win 498, options [nop,nop,TS val 867569232 ecr 4191410578], length 0 +05:40:04.819269 IP k3d-k3s-default-server-0.31879 > redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412: Flags [P.], seq 7081:9441, ack 9336, win 501, options [nop,nop,TS val 867569233 ecr 4191410578], length 2360 +05:40:04.819309 IP redisc-shard-5g8-0.redisc-shard-5g8-headless.default.svc.cluster.local.58412 > k3d-k3s-default-server-0.31879: Flags [.], ack 9441, win 10026, options [nop,nop,TS val 4191410580 ecr 867569233], length 0 + +# On primary-2 redisc-shard-hxx-0, capture packets on local Port 24780 (primary-1 redisc-shard-5g8-0): +05:40:04.818178 IP 10.42.0.1.24780 > 
redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379: Flags [P.], seq 32624:34984, ack 32937, win 10027, options [nop,nop,TS val 4191410578 ecr 867568717], length 2360 +05:40:04.818371 IP redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379 > 10.42.0.1.24780: Flags [.], ack 34984, win 498, options [nop,nop,TS val 867569232 ecr 4191410578], length 0 +05:40:04.819239 IP redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379 > 10.42.0.1.24780: Flags [P.], seq 32937:35297, ack 34984, win 501, options [nop,nop,TS val 867569233 ecr 4191410578], length 2360 +05:40:04.819327 IP 10.42.0.1.24780 > redisc-shard-hxx-0.redisc-shard-hxx-headless.default.svc.cluster.local.16379: Flags [.], ack 35297, win 10026, options [nop,nop,TS val 4191410580 ecr 867569233], length 0 +``` + +如我们所见,通信对端的所有 Pod 和 NodePort 流量都被 NAT 转换到了 CNI0 地址 10.42.0.1。 + +3. 真相大白 + +至此,meet 操作失败的原因已经非常清晰。secondary-1 pod 在未宣告自身地址的情况下,尝试使用 pod IP(10.42.0.237)与 primary-1 建立 meet 连接。该 meet 消息在 primary-1 pod 上被 NAT 转换为 10.42.0.1。primary-1 随后尝试使用默认总线端口 16379 和从消息中提取的源 IP 地址(10.42.0.1)重新连接 secondary-1。当尝试连接 10.42.0.1:16379 时,由于这不是实际的 Redis pod,该端口没有 Redis-server 进程监听,因此返回了"connection refused"错误。 + + + +## 问题修复 + +1. Secondary-1 宣告与重新加入 +既然已经找到根本原因,问题就变得更容易解决了。 + +针对这种"加入失败"的场景,我们可以让 secondary-1 显式宣告其 IP/端口/bus端口,然后主动加入集群。这样当 primary-1 尝试重新连接时,就会使用宣告的 IP 来建立连接。 + +``` +slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2 +slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 31309 +slc@slcmac redis % kubectl exec -it redisc-shard-5g8-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 31153 + +# Execute cluster nodes on redisc-shard-5g8-1, we can see the newly announced IP address and port No. are used. +127.0.0.1:6379> cluster nodes +ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713334354116 0 connected 0-5461 +# before announcing :6379@16379 +3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309@31153 myself,slave ff935854b7626a7e4374598857d5fbe998297799 0 0 0 connected +e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713334354325 2 connected 10923-16383 +a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713334354532 1 connected 5462-10922 + +# meet primary-1 again +127.0.0.1:6379> cluster meet 172.18.0.2 30039 32461 +OK +``` + +检查 primary-1 以查看 `meet` 后的差异。 + +``` +root@redisc-shard-5g8-0:/data# redis-cli -a O3605v7HsS +Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 

127.0.0.1:6379> cluster nodes
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 1713334463000 0 connected 0-5461
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713334463613 2 connected 10923-16383
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713334463613 1 connected 5462-10922
127.0.0.1:6379> cluster nodes
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 myself,master - 0 1713334506000 0 connected 0-5461
3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309@31153 slave ff935854b7626a7e4374598857d5fbe998297799 0 1713334506133 0 connected
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713334506133 2 connected 10923-16383
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713334506233 1 connected 5462-10922
```

可以在 primary-1 上找到通往 secondary-1 的 gossip 链接:

```
root@redisc-shard-5g8-0:/data# netstat -anop | grep redis
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 127.0.0.1:6379 127.0.0.1:46798 ESTABLISHED 1/redis-server *:63 keepalive (22.34/0/0)
tcp 0 0 10.42.0.236:58412 172.18.0.2:31879 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.236:6379 10.42.0.1:45255 ESTABLISHED 1/redis-server *:63 keepalive (22.15/0/0)
tcp 0 0 10.42.0.236:43732 172.18.0.2:31153 ESTABLISHED 1/redis-server *:63 off (0.00/0/0) // to slave-1 nodeport
tcp 0 0 10.42.0.236:36528 172.18.0.2:30105 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.236:16379 10.42.0.1:16471 ESTABLISHED 1/redis-server *:63 keepalive (1.17/0/0)
tcp 0 0 10.42.0.236:16379 10.42.0.1:30788 ESTABLISHED 1/redis-server *:63 keepalive (0.97/0/0)
tcp 0 0 10.42.0.236:16379 10.42.0.1:20521 ESTABLISHED 1/redis-server *:63 keepalive (1.48/0/0)
tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
```

我们可以看到从 primary-1/2/3 到 secondary-1 的三个新 gossip 链接。

```
root@redisc-shard-5g8-1:/# netstat -anop | grep redis
tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.237:48424 172.18.0.2:31879 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.237:16379 10.42.0.1:35577 ESTABLISHED 1/redis-server *:63 keepalive (1.11/0/0) // from NAT master
tcp 0 0 10.42.0.237:36154 172.18.0.2:32461 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.237:16379 10.42.0.1:32078 ESTABLISHED 1/redis-server *:63 keepalive (0.15/0/0) // from NAT master
tcp 0 0 10.42.0.237:33504 172.18.0.2:30039 ESTABLISHED 1/redis-server *:63 keepalive (0.00/0/0)
tcp 0 0 127.0.0.1:6379 127.0.0.1:46948 ESTABLISHED 1/redis-server *:63 keepalive (0.00/0/0)
tcp 0 0 10.42.0.237:58576 172.18.0.2:30105 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 10.42.0.237:16379 10.42.0.1:35265 ESTABLISHED 1/redis-server *:63 keepalive (1.22/0/0) // from NAT master
tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
```

这三个链接实际上是主 Pod 通过 secondary-1 的 NodePort 成功连接后,再被 NAT 转换到该 Pod 的 CNI0 地址。

2. Secondary-2 宣告与加入

首先宣告 IP/端口/总线端口:

```
slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2
slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 30662
slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 30960
slc@slcmac redis % kubectl exec -it redisc-shard-hxx-1 -c redis-cluster -- /bin/bash
```

添加节点 secondary-2(此过程包含 `meet` 操作):

```
redis-cli -a O3605v7HsS --cluster add-node 172.18.0.2:30662 172.18.0.2:30182 --cluster-slave --cluster-master-id a54e8fa9474c620154f4c1abc9628116deb3dc7e
```

检查 secondary-2 上的集群拓扑结构。

```
127.0.0.1:6379> cluster nodes
3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309@31153 slave ff935854b7626a7e4374598857d5fbe998297799 0 1713335442641 0 connected
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 master - 0 1713335442328 1 connected 5462-10922
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713335442328 2 connected 10923-16383
4d497f9b4ff459b8c65f50afa6621e122e1d8470 172.18.0.2:30662@30960 myself,slave a54e8fa9474c620154f4c1abc9628116deb3dc7e 0 1713335442000 1 connected
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713335442641 0 connected 0-5461
```

检查 primary-2 上的集群拓扑结构。

```
127.0.0.1:6379> cluster nodes
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713335448690 2 connected 10923-16383
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713335448892 0 connected 0-5461
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 myself,master - 0 1713335448000 1 connected 5462-10922
4d497f9b4ff459b8c65f50afa6621e122e1d8470 172.18.0.2:30662@30960 slave a54e8fa9474c620154f4c1abc9628116deb3dc7e 0 1713335448998 1 connected
3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309@31153 slave ff935854b7626a7e4374598857d5fbe998297799 0 1713335448794 0 connected
```

3. Secondary-3 宣告与加入

宣告 IP/端口/总线端口,随后添加节点:

```
slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 172.18.0.2
slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-port 30110
slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-bus-port 30971
slc@slcmac redis % kubectl exec -it redisc-shard-xwz-1 -c redis-cluster -- /bin/bash
root@redisc-shard-xwz-1:/# redis-cli -a O3605v7HsS --cluster add-node 172.18.0.2:30110 172.18.0.2:31993 --cluster-slave --cluster-master-id e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b
>>> Adding node 172.18.0.2:30110 to cluster 172.18.0.2:31993
>>> Performing Cluster Check (using node 172.18.0.2:31993)
M: e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993
   slots:[10923-16383] (5461 slots) master
M: ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039
   slots:[0-5461] (5462 slots) master
   1 additional replica(s)
S: 3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309
   slots: (0 slots) slave
   replicates ff935854b7626a7e4374598857d5fbe998297799
M: a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182
   slots:[5462-10922] (5461 slots) master
   1 additional replica(s)
S: 4d497f9b4ff459b8c65f50afa6621e122e1d8470 172.18.0.2:30662
   slots: (0 slots) slave
   replicates a54e8fa9474c620154f4c1abc9628116deb3dc7e
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.18.0.2:30110 to make it join the cluster.
Waiting for the cluster to join

>>> Configure node as replica of 172.18.0.2:31993.
[OK] New node added correctly.
```

在任何主节点 Pod 上检查集群拓扑。

```
127.0.0.1:6379> cluster nodes
e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 172.18.0.2:31993@30105 master - 0 1713335724101 2 connected 10923-16383
ff935854b7626a7e4374598857d5fbe998297799 172.18.0.2:30039@32461 master - 0 1713335724101 0 connected 0-5461
a54e8fa9474c620154f4c1abc9628116deb3dc7e 172.18.0.2:30182@31879 myself,master - 0 1713335724000 1 connected 5462-10922
4d497f9b4ff459b8c65f50afa6621e122e1d8470 172.18.0.2:30662@30960 slave a54e8fa9474c620154f4c1abc9628116deb3dc7e 0 1713335724404 1 connected
3a136cd50eb3f2c0dcc3844a0de63d5e44b462d7 172.18.0.2:31309@31153 slave ff935854b7626a7e4374598857d5fbe998297799 0 1713335724510 0 connected
161ff6ea42047be45d986ed8ba4505afd07096d9 172.18.0.2:30110@30971 slave e4d9b914e7ee7c4fd399bdf3dd1c98f7a0a1791b 0 1713335724101 2 connected
```

至此,集群已处于完整的 3 主 3 从配置状态。

## 关于 CNI

1. k3s、Flannel 与 NodePort/Pod

k3s/k3d 默认使用的 CNI 是 Flannel,如上文分析,Flannel 会存在 NAT 映射问题。

2. k3s、Calico 与 NodePort

我们还测试了 k3s 与 Calico 的场景,其中 Calico 使用 vxlan 建立 Pod 网络。我们发现当使用 NodePort 时,Calico 上依然存在 NAT 问题。假设我们使用的 NodePort 是 10.128.0.52:32135,在入站方向上,通过 NodePort(10.128.0.52)访问本地端口 16379 的通信仍会被转换为节点 vxlan.calico 网络设备的地址(192.168.238.0)。

这是其中一个从节点 Pod 的网络连接情况:

```
root@redisc-shard-ffv-1:/# netstat -anop | grep redis
tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:41800 10.128.0.52:32135 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:45578 10.128.0.52:31952 ESTABLISHED 1/redis-server *:63 keepalive (277.76/0/0) // 到远端的 NodePort
tcp 0 0 127.0.0.1:6379 127.0.0.1:45998 ESTABLISHED 1/redis-server *:63 keepalive (185.62/0/0)
tcp 0 0 192.168.32.136:53280 10.128.0.52:32675 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 192.168.238.0:8740 ESTABLISHED 1/redis-server *:63 keepalive (8.79/0/0) // 来自远端的经过 NAT 的 NodePort
tcp 0 0 192.168.32.136:16379 192.168.238.0:9617 ESTABLISHED 1/redis-server *:63 keepalive (1.70/0/0)
tcp 0 0 192.168.32.136:34040 10.128.0.52:31454 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 192.168.238.0:18110 ESTABLISHED 1/redis-server *:63 keepalive (1.82/0/0)
tcp 0 0 192.168.32.136:39006 10.128.0.52:30390 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 192.168.238.0:32651 ESTABLISHED 1/redis-server *:63 keepalive (1.57/0/0)
tcp 0 0 192.168.32.136:54986 10.128.0.52:30459 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 192.168.238.0:43310 ESTABLISHED 1/redis-server *:63 keepalive (1.83/0/0)
tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
```

在节点 10.128.0.52 上存在两个网络设备:

```
ens4: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1460
    inet 10.128.0.52 netmask 255.255.255.255 broadcast 0.0.0.0
    inet6 fe80::4001:aff:fe80:34 prefixlen 64 scopeid 0x20<link>
    ether 42:01:0a:80:00:34 txqueuelen 1000 (Ethernet)
    RX packets 3228477 bytes 3975395572 (3.9 GB)
    RX errors 0 dropped 0 overruns 0 frame 0
    TX packets 3025699 bytes 2382110168 (2.3 GB)
    TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
vxlan.calico: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1410
    inet 192.168.238.0 netmask 255.255.255.255 broadcast 0.0.0.0
    inet6 fe80::64b2:cdff:fe99:7f96 prefixlen 64 scopeid 0x20<link>
    ether 66:b2:cd:99:7f:96 txqueuelen 1000 (Ethernet)
    RX packets 587707 bytes 714235654 (714.2 MB)
    RX errors 0 dropped 0 overruns 0 frame 0
    TX packets 810205 bytes 682665081 (682.6 MB)
    TX errors 0 dropped 31 overruns 0 carrier 0 collisions 0
```

如果 NodePort 使用的是 Pod 所在的节点,在 Calico 中将不会进行 NAT 转换:

```
slc@cluster-1:~$ kubectl exec -it redisc-shard-ffv-1 -c redis-cluster -- redis-cli -a O3605v7HsS config set cluster-announce-ip 10.128.0.54 // Set the announced IP to the local Node IP where the Pod is located.
OK
slc@cluster-1:~$ kubectl exec -it redisc-shard-ffv-1 -c redis-cluster -- /bin/bash
root@redisc-shard-ffv-1:/# netstat -anop | grep redis
tcp 0 0 0.0.0.0:16379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 10.128.0.54:44757 ESTABLISHED 1/redis-server *:63 keepalive (6.92/0/0)
tcp 0 0 192.168.32.136:41800 10.128.0.52:32135 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 10.128.0.54:16772 ESTABLISHED 1/redis-server *:63 keepalive (0.64/0/0)
tcp 0 0 192.168.32.136:45578 10.128.0.52:31952 ESTABLISHED 1/redis-server *:63 keepalive (70.79/0/0)
tcp 0 0 127.0.0.1:6379 127.0.0.1:45998 ESTABLISHED 1/redis-server *:63 keepalive (0.00/0/0)
tcp 0 0 192.168.32.136:53280 10.128.0.52:32675 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 10.128.0.54:16440 ESTABLISHED 1/redis-server *:63 keepalive (8.62/0/0)
tcp 0 0 192.168.32.136:34040 10.128.0.52:31454 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 10.128.0.54:28655 ESTABLISHED 1/redis-server *:63 keepalive (0.14/0/0)
tcp 0 0 192.168.32.136:39006 10.128.0.52:30390 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:54986 10.128.0.52:30459 ESTABLISHED 1/redis-server *:63 off (0.00/0/0)
tcp 0 0 192.168.32.136:16379 10.128.0.54:29959 ESTABLISHED 1/redis-server *:63 keepalive (8.62/0/0)
tcp6 0 0 :::16379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
tcp6 0 0 :::6379 :::* LISTEN 1/redis-server *:63 off (0.00/0/0)
```

因此在 Calico vxlan 方案中,NodePort 是否做 SNAT 与源 Node 地址有关:如果是本 Node 则不需要 SNAT,如果是远端 Node 则需要 SNAT。但由于我们做了显式宣告,Redis 集群执行 meet 时也不会有问题。

3. k3s、Calico 与 Pod

如果仅使用 Pod IP,Redis 集群可以正常 meet,且集群拓扑结构正确。

## 总结

1. 在某些 Kubernetes 环境中,根据 CNI 实现的不同,Pod 和 NodePort 流量可能会被 NAT,而经过 NAT 的 IP 和端口无法被集群中其他角色重新连接,导致 meet 失败。
2. 由于上述机制,在 Kubernetes 中创建 Redis 集群时,要么使用 host 网络,要么使用 NodePort 并显式宣告 IP/port/bus-port。对于纯 Pod 网络且未显式宣告的情况,需要防止 NAT,这取决于 CNI 的具体实现。
3. Redis 集群的内部通信和外部通信共用同一套 IP 地址。宣告 IP 后,宣告的 IP 会覆盖 Pod IP 用于后续通信,导致内部 gossip 协商过程也走宣告网络,这是不必要的浪费。未来的建议是将内部协议链路和外部应用数据链路分离。
4. 即使将 Pod IP 和宣告 IP 分开使用,内部通信走 Pod 网络,外部和客户端数据链路走宣告网络,也无法解决 CNI NAT 转换的问题。由于 Redis 集群重连机制的存在,经过 NAT 后的地址无法直接重连,这需要对 Redis 集群通信协议进行扩展。理想情况是:
   - 内部通信:Pod 网络,需要重连,携带原始 Pod IP 作为源 IP,即使经过 NAT 也能获取到源 IP。
   - 外部通信:宣告网络,可以是 NodePort/LoadBalancer,不需要重连,是否 NAT 无关紧要。
   当然,内部通信也可以走 NodePort 和 LoadBalancer,但前提是携带原始源 IP(宣告 IP 也是一种源 IP),这也是 KubeBlocks 当前的解决方案。
5. 
使用 NodePort 会引入另一个问题。当节点宕机时,集群节点的声明 IP 需要更新,这也不是一个容易的实现,需要 Operator 和 HA 节点的配合。 \ No newline at end of file diff --git a/blogs/zh/run-databases-on-k8s-insights-from-leading-chinese-internet-companies.mdx b/blogs/zh/run-databases-on-k8s-insights-from-leading-chinese-internet-companies.mdx new file mode 100644 index 00000000..cf5963fd --- /dev/null +++ b/blogs/zh/run-databases-on-k8s-insights-from-leading-chinese-internet-companies.mdx @@ -0,0 +1,126 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/1135270?v=4 + name: Wei Cao + url: https://github.com/weicao +date: 2025-07-02 +description: 这篇博客探讨了为何中国领先的互联网企业正日益采用在Kubernetes上运行数据库的实践。 +image: /img/blogs/thumbnails/blog-run-databases-on-k8s-insight.png +slug: run-databases-on-k8s-insight-from-chinese-internet-giants +tags: +- K8s +- databases +- operator +- Internet +- Alibaba +- ByteDance +- Kuaishou +- JD +- Meituan +title: 在Kubernetes上运行数据库——中国头部互联网企业的实践经验 +--- +# 在Kubernetes上运行数据库:来自中国领先互联网公司的洞见 + +![Databases on K8s](/img/blogs/run-databases-on-k8s-insight-database_on_k8s_illustration.png) + +## 引言 + +云计算和云原生技术的快速发展正推动全球企业将核心业务系统向容器化平台迁移。作为容器编排的事实标准,Kubernetes(K8s)已在各行业获得广泛应用。值得注意的是,包括阿里巴巴、字节跳动、快手、京东和美团在内的中国互联网巨头,正在K8s上运行数据库等有状态应用的实践中处于领先地位,并基于此构建了成熟的数据库即服务(DBaaS)平台。这一趋势不仅体现了K8s技术的成熟度,更反映了这些企业在技术选型和架构演进上的战略思路。本文通过剖析中国头部互联网企业在K8s上运行数据库工作负载的实践案例,探讨其背后的技术驱动力、业务诉求、实施挑战以及行业发展趋势。 + +## 战略抉择:自建数据中心 vs 公有云 + +![自建IDC与公有云对比](/img/blogs/run-databases-on-k8s-insight-idc_vs_public_cloud.png) + +中国头部互联网企业从创立之初就积累了海量用户基础和业务规模,其数据量和并发访问模式远超普通企业。在基础设施决策中,这些企业往往倾向于自建数据中心而非完全依赖公有云服务,主要基于以下战略考量: + +1. **总体拥有成本(TCO)优化**:对于服务器和带宽需求巨大的超大规模互联网企业,自建数据中心相比公有云服务通常能提供更优的长期经济效益。虽然公有云提供按需付费的灵活性,但在超大规模场景下,累积成本可能变得难以承受。通过批量采购和运营优化,自建基础设施能实现显著的规模经济效应。 + +2. **性能工程与硬件定制**:关键业务负载需要极致的性能特征——从应对购物节瞬时流量洪峰到视频平台的实时内容分发。自建数据中心支持根据特定工作负载需求进行深度硬件定制和网络架构优化。尽管公有云提供多样化的实例类型,但在底层硬件规格和网络拓扑设计方面灵活性有限。 + +3. **数据主权与合规管控**:随着数据保护法规日益严格,这些企业对数据存储、处理和传输保持严格的内部控制。自建数据中心提供增强的物理安全控制和数据主权保障,既便于满足监管要求,又能最大限度降低数据泄露风险。 + +4. **技术创新与竞争差异化**:这些企业拥有强大的研发能力,追求对基础设施栈的完全控制权以实现深度优化、快速排障和技术创新。自建数据中心为开发专有技术(定制调度系统、存储方案和网络组件)奠定基础,形成独特的技术护城河和竞争优势。 + +5. 
**供应商中立与架构灵活性**:虽然公有云提供便利性,但也带来供应商锁定风险。头部互联网企业通常避免将整个技术栈绑定单一云厂商,通过自建基础设施和多云策略保持架构灵活性,降低供应商依赖。 + +这种自建基础设施战略为Kubernetes创造了极具说服力的应用场景。K8s使这些企业能够在物理基础设施之上构建高效、弹性、自动化的云原生平台,既充分发挥自建数据中心优势,又能获得容器化和云原生技术的运维红利。在K8s上运行数据库正是这一战略思路的自然演进。 + +## 企业案例研究:Kubernetes上的数据库工作负载 + +中国头部互联网企业在Kubernetes上运行数据库工作负载的实践,为云原生转型策略、运维模式以及大规模运行有状态应用的实际挑战提供了宝贵经验。 + +### 阿里巴巴:云原生数据库与K8s深度集成的先行者 + +阿里巴巴作为全球电商和云计算领域的领导者,在K8s和云原生数据库领域的投入与实践始终走在行业前沿。早在2016年,阿里就开始探索数据库容器化并实现混合云架构,成功支撑"双11"大促的海量数据库负载。阿里云容器服务ACK深度整合虚拟化、存储、网络和安全能力,为企业级数据库容器化应用提供高性能、可扩展的全生命周期管理能力。更重要的是,阿里云的云原生数据库PolarDB深度拥抱K8s生态,基于K8s Operator提供自动化部署管理能力。部署在Kubernetes上的PolarDB在公有云场景下,依托其计算存储分离架构实现分钟级弹性扩缩。这些实践使得阿里得以在K8s平台上构建高度自动化的DBaaS服务,大幅提升数据库运维效率和资源利用率。 + +### 字节跳动:基于K8s的自研数据库veDB与超大规模集群管理 + +字节跳动作为全球知名的互联网科技公司,在数据库上K8s的实践中具有独特的技术特色。自2016年起,字节大规模拥抱Kubernetes技术栈,将私有云平台底层编排调度系统全面转向K8s。在数据库领域,字节的突破性实践体现在自研数据库veDB完全基于Kubernetes部署,专注OLTP(在线事务处理)场景。与传统数据库采用物理机或虚拟机部署方式不同,veDB从设计之初就采用云原生架构,通过K8s容器编排能力实现计算存储分离,使数据库实例能根据业务负载弹性扩缩。为支撑超大规模K8s集群中数据库等有状态应用的元数据存储需求,字节自研并开源了基于分布式KV存储引擎的高性能K8s元数据存储系统KubeBrain,可支持超过2万个节点的集群,有效解决etcd在超大规模场景下的性能瓶颈问题。此外,字节还将数据仓库ByteHouse的计算层运行在K8s上,通过容器化实现计算存储分离与性能优化,进一步验证了K8s支撑大规模数据库工作负载的可行性。 + +### 快手:大规模Redis容器化迁移与KubeBlocks实践 + +快手作为短视频和直播领域的领军企业,其将大规模Redis服务从裸金属迁移至K8s的实践具有重要参考价值。快手采用经典的Redis主从架构,包含Server、Sentinel和Proxy三个核心组件,通过K8s StatefulSet控制器实现Redis集群的容器化部署。在迁移过程中,快手面临多分片多实例关系映射、生命周期变更时的数据管理、单分片内多实例间动态拓扑关系维护等技术挑战。为解决这些问题,快手采用分层架构设计,使用StatefulSet管理每个分片下的多个实例,并在此基础上构建分层工作负载,实现整个Redis Server集群多分片的统一管理。快手实践数据显示,Redis容器化带来的性能开销在可接受范围内(不超过10%),而云原生带来的资源利用率提升和运维效率提高则收益显著。值得注意的是,快手同时也是KubeBlocks的重要使用者和贡献者。通过KubeBlocks的InstanceSet工作负载以及Component、Shard、Cluster等抽象概念,快手能够更好地在K8s上构建和管理其DBaaS平台,实现数据库服务的标准化部署、弹性扩缩容和自动化运维。 + +### 京东:电商场景下的MySQL容器化与弹性调度实践 + +京东作为电商巨头,对数据库的可用性和弹性扩缩容能力有着极高要求,特别是面对"618"、"双11"等电商大促时的流量洪峰挑战。京东积极在K8s容器环境中运行MySQL数据库,结合开源数据库分片中间件Vitess,实现了大规模数据库服务的高效管理和在线扩缩容。通过K8s HorizontalPodAutoscaler(HPA)和VerticalPodAutoscaler(VPA)机制,京东能够根据业务流量潮汐变化自动对数据库实例进行水平和垂直扩缩容,确保大促期间百亿甚至万亿级交易量的稳定支撑。此外,京东物流还利用Kubernetes构建混合云平台,通过"阿基米德"调度系统解决全国范围内跨物理机的数千个数据库实例的统一管理和调度难题。京东在K8s上构建了适合数据库容器化的生态体系,包括DNS服务发现、负载均衡和存储管理等,保障数据库数据的持久化和高可用,为其庞大的电商业务提供稳定可靠的数据库服务支撑。 + +### 美团:大规模K8s集群中的数据库性能优化实践 + +美团作为生活服务领域的巨头,业务场景复杂多样,对后端数据库服务的稳定性和性能有着极高要求。在美团全面将云基础设施从OpenStack迁移至Kubernetes的过程中,深入探索了数据库容器化性能优化实践。针对K8s环境下数据库的资源分配与性能优化,美团采用Exclusive CPU Sets方案,通过cpuset机制为数据库容器分配独占CPU核,避免不同Pod间资源争抢导致的性能抖动,保障数据库服务的稳定高性能。美团的K8s集群规模已达10万+节点量级,通过对Kubernetes调度器、Kubelet及资源管理的持续优化改造,成功将数据库等有状态应用纳入K8s管理范畴。在存储方面,美团优化了本地存储与网络存储的使用策略,为数据库容器提供高IOPS低延迟的存储性能保障。同时美团开发了专门的数据库监控告警系统,与K8s原生监控体系深度集成,实现了容器化数据库的全方位性能监控与故障预警,为业务快速发展提供了坚实的技术基础。 + +## 数据库容器化的技术驱动力 + +![技术驱动因素](/img/blogs/run-databases-on-k8s-insight-technical_drivers_illustration.png) + +在Kubernetes上运行数据库的决策源于多个关键性技术因素,这些因素有效解决了大规模基础设施面临的核心挑战: + +1. **业务快速发展和创新需求**:互联网企业业务迭代速度快,新业务新功能层出不穷。K8s能提供快速的应用部署和迭代能力,使DBaaS平台能更好支撑业务快速发展和创新需求,缩短业务上线周期。 + +2. **资源效率与成本优化**:传统基于虚拟机或裸金属的数据库部署常因静态资源分配导致资源碎片化和利用率低下。Kubernetes通过容器化实现细粒度的资源调度管理,显著提升服务器资源利用率。对于管理庞大服务器集群的超大规模互联网企业,即使资源效率的微小提升也能转化为可观的成本节约。 + +3. **运维自动化与效率提升**:Kubernetes提供包括声明式部署、Pod水平自动扩缩、自愈机制和滚动更新在内的完整自动化能力。这些能力使得构建高度自动化的DBaaS平台成为可能,实现数据库的自动供给、弹性扩缩、故障恢复和无中断升级。例如当数据库实例故障时,Kubernetes会自动将其重新调度到健康节点,无需人工干预即可保障服务连续性。 + +4. **标准化与运维一致性**:Kubernetes为应用部署和生命周期管理提供了标准化的API接口和运维模型。在K8s上容器化数据库可以实现跨数据库类型和版本的标准部署模式与运维流程,有效降低运维复杂度。这种标准化对于管理多样化数据库技术栈的企业尤为宝贵。 + +5. **动态扩缩容与流量治理**:互联网业务流量具有显著波动性,特别是在购物节或热点内容分发等峰值场景。Kubernetes的弹性扩缩能力使数据库实例可根据负载需求自动伸缩,在保障服务稳定性和性能特征的同时有效应对流量高峰。 + +6. 
**云原生生态集成**:Kubernetes作为云原生生态的基石,在其上运行数据库可实现与Prometheus监控、Grafana可观测、Fluentd日志收集以及服务网格等云原生工具链的无缝集成。这种集成提供了统一的监控、日志和告警能力,显著提升系统整体可观测性和运维效率。 + +7. **多云与混合部署灵活性**:领先互联网企业通常需要多云或混合云部署策略。Kubernetes作为事实上的跨平台标准,使得DBaaS服务能在不同云环境和本地数据中心间无缝迁移部署,在保持架构灵活性和工作负载可移植性的同时避免供应商锁定。 + +## 面临的挑战 + +尽管在 K8s 上运行数据库具有诸多优势,但也存在一些挑战: + +1. **有状态应用管理复杂性**:数据库作为有状态应用,在 Kubernetes 上面临 StatefulSet 的固有局限性。它缺乏对数据库角色(如主节点、从节点)的感知能力,无法基于角色优化升级等操作,强制严格的 Pod 顺序更新,且不支持单个 StatefulSet 内的异构配置。这些缺陷要求基于 K8s 的 DBaaS 平台必须通过定制方案来有效管理存储、网络和调度。 + +2. **性能开销**:容器化和虚拟化可能带来性能开销,尤其对高 I/O 需求的 DBaaS 服务。优化存储和网络性能至关重要。解决方案包括采用 Cilium 等高性能 CNI,在云环境或 IDC 中使用 NVMe 存储的本地 PV,以确保低延迟和高吞吐量。 + +3. **数据可靠性与高可用性**:在 Kubernetes 上确保数据可靠性和高可用性对数据库等有状态应用是重大挑战。Kubernetes 原生缺乏对数据库专用高级功能的支持,例如故障转移机制、一致性保证和同步复制。通常需要定制工具或 Operator 来处理自动故障转移、数据复制和灾难恢复等场景,以满足企业级 SLA。此外,混沌测试工具对于验证这些机制在故障场景下的有效性至关重要,可确保系统能够应对中断并保持可靠性。 + +4. **安全与隔离**:在共享的 Kubernetes 集群上运行数据库需要强大的安全和隔离机制。保护敏感数据、确保多租户隔离,以及在 Kubernetes 和数据库层级管理访问控制都增加了复杂性。同时,监控镜像漏洞和确保及时更新对于防范潜在攻击至关重要。必须谨慎实施并持续维护网络策略、加密、基于角色的访问控制(RBAC)以及容器镜像的自动化漏洞扫描等解决方案。 + +## KubeBlocks:K8s 上强大的 DBaaS 工具 + +随着数据库上 K8s 的趋势日益明显,对数据库自动化运维工具的需求也变得愈发迫切。KubeBlocks 作为一款专注于在 K8s 上运行和管理数据库、消息队列等有状态应用的开源 K8s Operator,正受到越来越多互联网公司的青睐。它旨在帮助开发者、SRE 和平台工程师在企业中部署和维护专属的 DBaaS 平台,支持在各类公有云和私有云环境中部署。 + +KubeBlocks 的出现极大地简化了 DBaaS 服务的部署、管理、扩缩容和备份等复杂操作,将数据库运维难度降至最低。通过统一的控制平面,实现了对多种数据库类型的支持,使企业能够以一致的方式管理不同类型的数据库实例。 + +目前包括快手、唯品会、360、腾讯、小米、Momenta 在内的众多互联网公司都在积极探索和使用 KubeBlocks 作为 K8s 上构建和运营 DBaaS 平台的重要工具。这表明 KubeBlocks 在解决 K8s 上有状态应用管理复杂性和提升运维效率方面已获得业界的广泛认可。 + +## 未来趋势 + +随着 K8s 生态的不断完善和技术的演进,数据库运行在 K8s 上的趋势将更加明显: + +1. **Operator 模式的普及:** 数据库 Operator 可以自动化完成 DBaaS 服务的部署、管理、扩缩容、备份等复杂操作,大幅降低 K8s 上 DBaaS 平台的运维难度。未来会出现更多成熟的数据库 Operator,推动 K8s 上 DBaaS 平台的普及。 + +2. **云原生数据库的兴起:** 越来越多的数据库开始原生支持 K8s 或采用云原生架构设计,能更好地利用 K8s 特性提供更优的性能和更便捷的 DBaaS 运维体验。 + +3. **Serverless 数据库:** 结合 Serverless 技术,DBaaS 服务将进一步实现按量付费和弹性扩缩容,降低用户使用门槛和成本。 + +4. 
**智能化运维:** 结合 AI 和机器学习技术,实现 DBaaS 服务的智能化运维,包括故障预测、性能优化、资源调度等,进一步提升 DBaaS 服务的可用性和效率。 + +## 结论 + +中国头部互联网企业在K8s上运行数据库是技术发展与业务需求的必然结果。K8s为构建DBaaS平台带来了更高的资源利用率、自动化运维能力、弹性伸缩以及云原生集成优势,从而降低运营成本,提升业务创新能力和市场竞争力。虽然面临有状态应用管理复杂性和性能开销等挑战,但随着K8s生态的成熟和云原生数据库的出现,这些挑战将逐步得到解决。未来,K8s将成为越来越多企业构建和运营DBaaS平台的重要基石,推动数据库技术进入全新的云原生时代。 \ No newline at end of file diff --git a/blogs/zh/run-redis-on-k8s-kuaishou-solution-with-kubeblocks.mdx b/blogs/zh/run-redis-on-k8s-kuaishou-solution-with-kubeblocks.mdx new file mode 100644 index 00000000..9b0889a1 --- /dev/null +++ b/blogs/zh/run-redis-on-k8s-kuaishou-solution-with-kubeblocks.mdx @@ -0,0 +1,197 @@ +--- +authors: + name: Yuxing Liu +date: 2024-11-25 +description: 基于快手在规模化实施云原生Redis的实践经验,本文深入探讨了Kubernetes环境下有状态服务管理的实用解决方案与关键考量。 +image: /img/blogs/thumbnails/blog-redis-kuaishou-cover.jpeg +slug: manage-large-scale-redis-on-k8s-with-kubeblocks +tags: +- Redis +- Kubernetes +- Kuaishou +- Stateful service +- StatefulSet +- Large-scale Redis +- Operator +- Database +title: 使用Operator在Kubernetes上管理大规模Redis集群——快手的实践方案 +--- +# 使用Operator在Kubernetes上管理大规模Redis集群:快手实践 + +> **关于快手** +> +> 快手是中国及全球领先的内容社区和社交平台,致力于成为世界上最以客户为中心的公司。快手以尖端AI技术为支撑的技术基础设施,持续推动创新和产品升级,丰富其服务内容和应用场景,创造卓越的客户价值。通过快手平台上的短视频和直播,用户可以分享生活、发现所需商品和服务并展示才华。通过与内容创作者和企业紧密合作,快手提供覆盖娱乐、在线营销服务、电子商务、本地服务、游戏等广泛领域的技术、产品和服务,满足多样化用户需求。 + +> **关于作者** +> +> 刘宇星是快手的高级软件工程师。宇星曾就职于阿里云和快手的云原生团队,专注于云原生领域,在云原生技术的开源、商业化和规模化方面积累了丰富经验。宇星是CNCF/Dragonfly项目的维护者之一,也是CNCF/Sealer项目的维护者之一。目前,他致力于推动快手有状态业务的云原生转型。 + +作为一款流行的短视频应用,快手高度依赖Redis来为用户提供低延迟响应。在私有云基础设施上运行,如何以最少人工干预实现大规模Redis集群的自动化管理是一个重大挑战。一个颇具前景的解决方案应运而生:使用Operator在Kubernetes上运行Redis。 + +虽然容器化应用和Nginx等无状态服务已成为标准实践,但在Kubernetes上运行数据库和Redis等有状态服务仍存在争议。基于快手将Redis从物理机迁移到云原生解决方案的经验,本文探讨了使用KubeBlocks Operator在Kubernetes上管理有状态服务的解决方案和关键考量。 + +## 背景 + +随着技术的发展,快手的底层基础设施正在向云原生技术栈转型。基础设施团队为应用和PaaS系统提供容器和Kubernetes支持。虽然快手的无状态服务几乎已完全采用Kubernetes,但云原生有状态服务的转型之路仍面临诸多挑战。 + +以Redis为例,这是快手使用最广泛的有状态服务之一,其特点是规模庞大。在这种体量下,即使是很小的成本节约也能为公司带来可观的财务收益。在长期规划中,快手认识到在Kubernetes上运行Redis的巨大潜力,特别是通过提高资源利用率来实现成本优化。本文分享了快手将Redis迁移至Kubernetes的经验,包括解决方案、遇到的挑战以及相应的应对策略。 + +## 快手如何在 Kubernetes 上运行 Redis? + +### Redis 部署架构 + +为了满足灵活的分片管理需求,并支持热点迁移和隔离,快手采用了水平分片、主从高可用的 Redis 架构,由 Server、Sentinel 和 Proxy 三个组件构成。 + +![Figure 2](/img/blogs/blog-redis-kuaishou-2.png) + +### 分析:快手对 Redis Operator 的需求是什么? + +**首先,Redis Pod 管理需要分层处理** + +Redis Pod 管理需要分为两层:第一层管理多个分片,第二层管理单个分片内的多个副本。它需要支持动态调整分片数量和每个分片的副本数量,以适应不同的工作负载和使用场景。 + +这意味着,在 Operator 的实现中,需要用一种工作负载(如 StatefulSet)来管理每个分片内的多个副本。在此基础上,还需要再构建一层(某种 CRD 对象)来实现对整个 Redis 集群中多个分片的管理。 + +**其次,在故障和 Day-2 运维中确保数据一致性和可靠性** + +在分片或副本的生命周期变更过程中,需要确保数据的一致性和可靠性。例如,分片扩缩容需要进行数据重平衡,而分片内实例扩缩容可能需要进行数据备份和恢复。 + +因此,Operator 需要支持分片和副本两个层级的生命周期钩子,能够在不同生命周期阶段执行自定义的数据管理操作。 + +**第三,支持拓扑感知的服务发现和金丝雀发布** + +分片内多个 Redis Pod 之间的拓扑关系可能会因为高可用切换、升级、扩缩容等事件动态变化。服务发现和金丝雀发布等功能都依赖于实时拓扑。 + +为了实现这一点,Operator 需要支持动态拓扑感知,通过引入角色探测和角色标记能力,实现基于动态拓扑的服务发现和金丝雀发布。 + +这些需求超出了任何现有开源 Redis Operator 的能力范围,通常需要开发一个高度复杂的 Kubernetes Operator 才能满足。但对于大多数平台团队来说,从零开始构建一个稳定且 API 设计良好的 Operator 是一项艰巨的任务,因为这需要同时具备 Kubernetes 和数据库专业知识,并经过大量实际场景测试。 + +### KubeBlocks 解决方案进入视野 + +在评估了多个方案后,**KubeBlocks** 作为一个开源的 Kubernetes 数据库 Operator 引起了我们的注意。KubeBlocks 的独特之处在于其可扩展性,它提供了一种 **Addon 机制**,允许你使用它的 API 来描述一个数据库的 Day-1 和 Day-2 特性与行为,从而实现在 Kubernetes 上的全生命周期管理。正如其官网所述,KubeBlocks 的愿景是 "Run any database on Kubernetes"。这种灵活性使我们能够定制 KubeBlocks 的 Redis Addon 来适配我们内部的 Redis 集群部署架构。 + +KubeBlocks 的 API 设计也非常符合我们对 Redis 集群管理的需求: + +**1. 
InstanceSet:比 StatefulSet 更强大的工作负载** + +**InstanceSet** 是 KubeBlocks 内部用来替代 StatefulSet 的一种工作负载,专门用于管理数据库 Pod。与 StatefulSet 类似,InstanceSet 支持管理多个 Pod(称为 Instance)。关键区别在于,InstanceSet 能够追踪每个数据库 Pod 的 **Role**(例如 primary、secondary)。对于不同的数据库(因为 KubeBlocks 支持多种类型),KubeBlocks 允许自定义 Pod 的角色、角色探测方式,以及在金丝雀升级时基于角色的升级顺序。InstanceSet 控制器会在运行时动态探测角色变化,并将角色信息作为标签更新到 Pod 的元数据中,从而实现基于角色的 Service selector。 + +StatefulSet 为每个实例分配一个全局有序且递增的标识符。这种机制提供了稳定的网络和存储身份,集群内的拓扑结构依赖于这些标识符。然而,由于运行时拓扑会动态变化,StatefulSet 提供的固定标识符可能无法满足需求。例如,StatefulSet 标识符不能存在空缺,也不允许删除中间位置的标识符。 + +快手平台团队向 KubeBlocks 社区贡献了多个 PR,包括允许同一 InstanceSet 内的 Pod 拥有不同配置、按指定序号下线 Pod(无需先下线更高序号的 Pod)、控制升级并发度等增强功能。这些改进使得 InstanceSet 更能适应快手在生产环境中管理大规模 Redis 集群的需求。 + +**2. 分层 CRD 和控制器设计:Component 与 Cluster 对象** + +KubeBlocks 采用 **Component**、**Cluster** 多层 CRD 结构来管理数据库集群的复杂拓扑,这一设计与快手 Redis 集群部署架构完美契合: + +- **Component**:代表 Redis 集群中的一组 Pod。例如 Proxy Pod 构成一个 Component,Sentinel Pod 构成另一个 Component,而 Redis-Server Pod 则按分片组织为一个或多个 Component,每个分片对应一个 Component。Component 的数量会随着分片数量动态变化。 + +> ⛱️ **分片(Shard)**:一种特殊的 Component,定义了水平扩展数据库的分片行为。每个分片共享相同配置。以快手 Redis Cluster 为例,每个分片(Component)包含一个主 Pod 和一个副本 Pod。扩容时会新增一个分片(Component),缩容时则移除一个分片,实现分片级别的扩缩容和生命周期管理。 + +- **Cluster**:代表整个 Redis 集群,整合了 Proxy、Server 和 Sentinel 等 Component,同时管理它们的启动拓扑和关联关系。 + +这种分层设计简化了扩缩容操作,增强了生命周期管理能力,并为支持生产环境中复杂的 Redis 部署架构提供了所需的灵活性。 + +通过与 KubeBlocks 社区的紧密协作,我们通过以下方式实现了 Redis 集群的编排: + +![Figure 3](/img/blogs/blog-redis-kuaishou-3.png) + +Redis Cluster 包含三个 Component:`redis-server`、`redis-sentinel` 和 `redis-proxy`。每个 Component 内部使用 **InstanceSet** 而非 **StatefulSet** 来管理 Pod。 + +### 使用 Kubernetes Federation 管理超大规模 Redis 集群 + +在快手,多个应用以多租户模式运行在单个超大规模 Redis 集群中。例如,单个集群可能包含超过 10,000 个 Pod,超出了单个 Kubernetes 集群的承载能力。因此,我们不得不将 Redis 集群部署在多个 Kubernetes 集群上。关键在于,我们需要对 Redis 应用用户隐藏多集群管理的复杂性。 + +#### 联邦 K8s 集群架构 + +幸运的是,快手的 Kubernetes 基础设施团队提供了成熟的 Kubernetes 联邦服务,具备统一调度和统一视图能力: + +- **统一调度**:联邦作为集中式资源调度入口,支持跨多个成员集群的资源调度。 +- **统一视图**:联邦作为统一的资源访问点,可以无缝获取联邦和成员集群中的资源。 + +因此,问题转化为:如何将基于 KubeBlocks 的 Redis 集群管理方案融入快手内部的联邦集群架构?以下是整体架构: + +![Figure 4](/img/blogs/blog-redis-kuaishou-4.png) + +联邦 Kubernetes 集群作为中央控制平面,负责管理多个成员集群。其主要职责包括跨集群编排、资源分发以及 Redis 集群的生命周期管理。具体功能如下: + +- **跨集群实例分发与管理**:根据资源需求,将 Redis 组件(Proxy、Sentinel、Server)分配到不同成员集群。 +- **并发控制**:协调跨集群操作,确保一致性并避免冲突。 +- **状态聚合**:收集并汇总各成员集群中所有组件的状态,提供统一视图。 + +成员 K8s 集群是实际部署和管理 Redis Pod(实例)的独立 Kubernetes 集群。每个成员集群负责运行 Redis 集群的一部分。其职责包括: + +- **实例管理**:通过 InstanceSet 对 Redis Pod(Proxy、Sentinel、Server)进行本地化管理。 + +因此,我们将 KubeBlocks Operator 拆分为两部分并部署在不同 Kubernetes 集群中: + +- **InstanceSet 控制器**部署在成员集群中,负责本地 Pod 管理。 +- **Cluster 控制器**和 **Component 控制器**部署在联邦集群中,处理全局资源编排与协调。 + +KubeBlocks 的分层 CRD 和控制器设计是实现此部署模式的关键。若采用单体式 CRD 和控制器设计,则无法实现联邦 Kubernetes 集群与成员 Kubernetes 集群的分离部署。 + +#### Fed-InstanceSet 控制器 + +由于可能存在多个成员 Kubernetes 集群,需要将联邦 Kubernetes 集群中的 InstanceSet 拆分为多个子 InstanceSet,每个成员集群分配一个。同时,原 InstanceSet 管理的实例(Pod)需要分配到成员集群的新 InstanceSet 中。 + +为此,**快手开发了 Fed-InstanceSet 控制器**来管理联邦集群与成员集群间的交互。其核心职责包括: + +- **调度决策**:根据预定义策略决定各成员集群应部署的实例数量。 +- **InstanceSet 拆分与分发**:将联邦集群的 InstanceSet 拆分并分发至对应成员集群。 + +为实现实例拆分并确保 Redis 实例在成员集群中的全局唯一性和正确顺序,快手向 KubeBlocks 社区提交了 PR,为 InstanceSet 新增 **Ordinals** 字段,用于精确分配实例索引。 + +**Fed-InstanceSet 控制器**利用该字段为每个成员集群分配唯一索引范围,确保跨集群实例的唯一性和顺序正确性。 + +![Figure 5](/img/blogs/blog-redis-kuaishou-5.png) + +## 讨论:有状态服务适合 Kubernetes 吗? 
+ +### 在 Kubernetes 上运行有状态服务的优势与风险 + +我们认为,在 Kubernetes 上运行有状态服务具有显著优势: + +- **提升资源利用率**:通过合并多个小型资源池进行统一调度,并实现应用与 Redis 或 Redis 与其他有状态服务的共置,优化资源使用,显著降低成本。 +- **提高运维效率**:借助 Kubernetes 的声明式 API 和 Operator 模式,以基础设施即代码(IaC)的方式管理 Redis 服务,减少人工干预需求。 +- **降低维护成本**:以往 Redis 运行在物理机上,需要专人管理硬件基础设施。通过将基础设施统一容器化并迁移至 Kubernetes,降低了基础设施相关的维护成本,同时提升了整体管理效率。 + +尽管在 Kubernetes 上运行有状态服务带来巨大收益,但潜在风险仍需谨慎评估,特别是对于数据库和 Redis 这类对重要性和稳定性要求极高的有状态服务。主要挑战包括: + +1. **性能下降风险**:与直接运行在物理机相比,容器化运行进程引入了额外抽象层,尤其是覆盖网络带来的延迟,这引发了服务性能可能下降的担忧。 +2. **稳定性顾虑**:在 Kubernetes 基础设施上构建数据库平台(DBaaS),会让人担忧数据库或 Redis 的稳定性(可用性和可靠性)是否可能受到影响。 +3. **运维复杂度增加**:当出现问题时,是否需要同时具备数据库和 K8s 技术专长的专家才能有效定位和解决问题? + +![图 1](/img/blogs/blog-redis-kuaishou-1.png) + +以下部分将更详细探讨这些风险。 + +### 降低在 Kubernetes 上运行 Redis 的风险 + +#### 性能 + +与传统基于主机的部署相比,在云原生架构中容器化 Redis 引入了额外的抽象层。然而,行业基准测试和快手内部测试表明,性能差异通常控制在 10% 以内,这在大多数用例中往往可以忽略不计。虽然这种差异通常可以接受,但仍建议各组织自行进行性能测试,以确保方案满足其工作负载的特定需求。 + +#### 稳定性 + +将有状态服务迁移至 Kubernetes 后,通过自动化极大提升了运维效率。但这也使得执行流程更加不透明,即便是小的配置变更也可能影响大量实例。为降低意外场景(如 Pod 驱逐、人为错误或 Operator 缺陷)带来的稳定性风险,快手利用 Kubernetes API 服务器中的 **Admission Webhook** 机制拦截并验证变更请求。这种方式允许快手直接拒绝任何未授权的操作。考虑到跨多个可用区(AZs)的多集群 Kubernetes 环境,确保跨集群的变更控制至关重要。为此,快手开发了名为 **kube-shield** 的内部风险缓解系统。 + +此外,值得一提的是,快手通过改进对细粒度调度分布的支持,并引入基于资源利用率的负载均衡特性,进一步提升了可用性和稳定性。 + +#### 运维复杂度 + +将基于主机的系统迁移至基于Kubernetes的环境,同时确保持续维护,需要同时具备Redis和K8s技术的深厚专业知识。仅依赖Redis团队或K8s团队独立提供支持将面临挑战。合理的职责划分不仅能提升效率,还能让各团队充分发挥其专业领域的优势。 + +例如,在快手的云原生Redis解决方案中: + +- **Redis团队**:专注于定义Redis集群对象,并将其运维经验封装为声明式配置 +- **容器云团队**:负责Kubernetes侧的工作,包括开发和维护Operator、处理调度问题以及保障集群生命周期 + +![Figure 6](/img/blogs/blog-redis-kuaishou-6.png) + +## 总结 + +有状态服务的云原生化转型是一段充满挑战的复杂旅程,需要仔细权衡其利弊。但对快手而言,其价值不言而喻。从Redis开始,快手与KubeBlocks社区紧密合作,实现了一个高性价比的云原生解决方案。 + +展望未来,快手计划以此经验为基础,推动更多有状态服务(如数据库和中间件)的云原生化转型,从而在技术和成本效益上获得双重收益。 + +在8月的KubeCon香港峰会上,快手与KubeBlocks团队进行了联合演讲。如果您感兴趣,可以回看[演讲内容](https://kubeblocks.io/blog/migrate-redis-at-kuaishou-from-bare-metal-to-k8s)获取更多洞见。 \ No newline at end of file diff --git a/blogs/zh/take-specified-instances-offline.mdx b/blogs/zh/take-specified-instances-offline.mdx new file mode 100644 index 00000000..318de9e6 --- /dev/null +++ b/blogs/zh/take-specified-instances-offline.mdx @@ -0,0 +1,110 @@ +--- +authors: + image_url: https://avatars.githubusercontent.com/u/28781141?v=4 + name: free6om + url: https://github.com/free6om +date: 2024-09-19 +description: 本博客介绍KubeBlocks如何通过InstanceSet实现指定实例的下线操作。 +image: /img/blogs/thumbnails/blog-specified-instances-offline.png +slug: take-specified-instances-offline +tags: +- statefulset +- kubernetes +- instance +- instanceset +- instance offline +- horizontal scale +- scale in +- scale out +title: 如何将指定实例下线? +--- +# 如何下线指定实例? + +在[上一篇博客](https://kubeblocks.io/blog/instanceset-introduction)中,我们介绍了InstanceSet,以及由此衍生出的一系列用于解决高可用性等数据库需求的功能。在本篇博客中,我们将介绍其中一项功能——下线指定实例。 + +## 为什么需要此特性? 

在 v0.9.0 之前,KubeBlocks 将工作负载生成为 ***StatefulSets***,这是一把双刃剑。虽然 KubeBlocks 可以利用 ***StatefulSets*** 的优势来管理数据库等有状态应用,但也继承了它的局限性。

其中一个局限在水平扩缩容场景中尤为明显:***StatefulSets*** 会按照 *Ordinal* 从大到小的顺序依次卸载 Pod,这可能会影响其中运行的数据库的可用性。

例如,使用名为 `foo-bar` 的 ***StatefulSet*** 管理一个包含一个主节点和两个从节点副本的 PostgreSQL 数据库,且 Pod `foo-bar-2` 被选为主节点。此时,如果我们因读取负载较低决定缩容该数据库集群,根据 ***StatefulSet*** 的规则,我们只能卸载当前作为主节点的 Pod `foo-bar-2`。这种情况下,我们要么直接卸载 `foo-bar-2`(触发故障转移机制从 `foo-bar-0` 和 `foo-bar-1` 中选举新的主节点 Pod),要么使用切换机制先将 `foo-bar-2` 转换为从节点 Pod 再卸载。无论采用哪种方式,都会存在一段不可写入的时期。

同一场景下还存在另一个问题:如果承载 `foo-bar-1` 的节点发生硬件故障导致磁盘损坏,使得数据读写不可访问,根据最佳运维实践,我们需要卸载 `foo-bar-1` 并在健康的节点上重建副本。但基于 ***StatefulSets*** 执行此类运维操作并不容易。

为解决上述局限性,从 v0.9 版本开始,KubeBlocks 用 ***InstanceSet*** 替代了 ***StatefulSets***。***InstanceSet*** 是一个通用工作负载 API,负责管理一组实例。通过 ***InstanceSet***,KubeBlocks 引入了指定实例扩缩容特性以提升可用性。

## 如何下线指定实例?

在 KubeBlocks v0.9 版本中,Cluster API 新增了 `OfflineInstances` 字段,用于指定需要下线的实例。

例如,当前 PostgreSQL 集群的状态如下:

```yaml
apiVersion: apps.kubeblocks.io/v1alpha1
kind: Cluster
metadata:
  name: foo
spec:
  componentSpecs:
    - name: bar
      replicas: 3
...
```

如果需要将此集群的副本数缩减至 2 个并将实例 `foo-bar-1` 下线,可以按如下方式更新集群对象:

```yaml
apiVersion: apps.kubeblocks.io/v1alpha1
kind: Cluster
metadata:
  name: foo
spec:
  componentSpecs:
    - name: bar
      replicas: 2
      offlineInstances: ["foo-bar-1"]
...
```

当 KubeBlocks 处理上述 spec 时,它会将集群缩容至 2 个副本并使实例 `foo-bar-1` 下线。最终,集群中剩余的实例将是:`foo-bar-0` 和 `foo-bar-2`。

## 如何在 OpsRequest 中使用此功能?

OpsRequest API 支持水平扩缩容。从 KubeBlocks v0.9 版本开始,OpsRequest API 新增了 `onlineInstancesToOffline` 字段以支持将指定实例下线。例如:

```yaml
apiVersion: apps.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  generateName: foo-horizontalscaling-
spec:
  clusterRef: foo
  force: false
  horizontalScaling:
    - componentName: bar
      scaleIn:
        onlineInstancesToOffline: ["foo-bar-1"]
  ttlSecondsAfterSucceed: 0
  type: HorizontalScaling
```

在执行过程中,OpsRequest Controller 会将 `onlineInstancesToOffline` 合并到 Cluster 对象对应的 `offlineInstances` 中,最终由 Cluster Controller 处理名为 `foo-bar-1` 实例的下线任务。

通过编辑集群 YAML 或创建 OpsRequest 任务,即可实现指定实例(如 `foo-bar-1`)的下线操作,突破了 StatefulSet 的限制,更好地支持业务需求。更多水平扩缩容的示例,请参阅 [KubeBlocks 文档](https://kubeblocks.io/docs/preview/api_docs/maintenance/scale/horizontal-scale)。

## 注意事项

在 Cluster API 中,需要同时修改 `OfflineInstances` 和 `Replicas` 才能实现指定实例下线的目标。以下是当前 API 允许的一些非常规组合,建议在使用前充分理解,以避免意外后果。

| 变更前 | 变更配置 | 变更后 |
| :----: | :-----------------------: | :---: |
| `replicas=3, offlineInstances=[]` | `replicas=2, offlineInstances=["foo-bar-3"]` | `foo-bar-0, foo-bar-1` |
| `replicas=2, offlineInstances=["foo-bar-1"]` | `replicas=2, offlineInstances=[]` | `foo-bar-0, foo-bar-1` |
| `replicas=2, offlineInstances=["foo-bar-1"]` | `replicas=3, offlineInstances=["foo-bar-1"]` | `foo-bar-0, foo-bar-2, foo-bar-3` |

## 参考文档

- [Kubernetes 社区关于 Pod 的讨论](https://github.com/kubernetes/kubernetes/issues/83224)
- [KubeBlocks 的水平扩缩容特性](https://kubeblocks.io/docs/release-0.9/api_docs/maintenance/scale/horizontal-scale)

diff --git a/blogs/zh/use-kubeblocks-to-build-your-aigc-infra-on-amazon-eks.mdx b/blogs/zh/use-kubeblocks-to-build-your-aigc-infra-on-amazon-eks.mdx new file mode 100644 index 00000000..b3f046a8 --- /dev/null +++ b/blogs/zh/use-kubeblocks-to-build-your-aigc-infra-on-amazon-eks.mdx @@ -0,0 +1,570 @@ +---
+authors:
+  image_url: https://avatars.githubusercontent.com/u/1765402?v=4
+  name: iziang
+  url: https://github.com/iziang
+date: 2023-09-21
+description: 使用KubeBlocks在Amazon EKS上构建您的AIGC基础设施
+image: 
/img/blogs/thumbnails/blog-aigc.png +slug: Use-KubeBlocks-to-build-your-AIGC-infrastructure-on-Amazon-EKS +tags: +- KubeBlocks +- AIGC +- Amazon EKS +title: 使用KubeBlocks在Amazon EKS上构建您的AIGC基础设施 +--- +# 使用 KubeBlocks 在 Amazon EKS 上构建 AIGC 基础设施 + +## 前言 +生成式 AI 引发了广泛关注,也将向量数据库市场推向了风口浪尖。众多向量数据库开始崭露头角并吸引公众目光。 + +根据 IDC 预测,到 2025 年,超过 80% 的业务数据将是非结构化的,以文本、图像、音频、视频或其他格式存储。然而,处理大规模非结构化数据的存储和查询仍面临巨大挑战。 + +在生成式 AI 和深度学习中,通常会将非结构化数据转换为向量进行存储,并利用向量相似性搜索技术实现语义关联检索。嵌入(embedding)的快速存储、索引和搜索是向量数据库的核心功能。 + +那么,什么是嵌入?简而言之,嵌入是由浮点数组成的向量表示。两个向量之间的距离代表它们的相关性。距离越近,相关性越高;距离越远,相关性越低。如果两个嵌入向量相似,则意味着它们代表的原始数据也相似,这与传统的关键字搜索不同。 + +然而,向量数据库的管理十分复杂,因为本质上它是一种有状态工作负载。在生产环境中使用时,它会面临与传统 OLTP 和 OLAP 数据库相同的问题,例如数据安全、高可用性、垂直/水平扩展性、监控与告警、备份与恢复等。由于向量数据库相对较新,大多数用户缺乏必要的知识,这给实现 LLMs + 向量数据库技术栈带来了巨大挑战。**用户更关注 LLMs 和向量数据库为业务带来的价值,而不是在管理上投入过多精力。** + +为了解决这些问题,KubeBlocks 利用 K8s 的声明式 API,以统一的方式抽象各类数据库,并通过 Operator 和一套 API 来管理数据库,极大地减轻了管理负担。此外,**基于 K8s 构建的 KubeBlocks 支持多云,避免了云厂商锁定的风险。** + +EKS 是 AWS 提供的托管 K8s 服务,它提供了一种简单的方式来在 AWS 上运行、扩展和管理 K8s 集群,无需担心节点的部署、升级和维护。EKS 本身也支持多可用区部署以实现高可用性,确保在节点故障或可用区中断时集群仍保持可用。此外,借助 AWS 强大的资源池,您可以在业务高峰和低谷时按需添加或移除节点,保证弹性和可扩展性。 + +本文主要讨论如何基于 Amazon EKS 通过 KubeBlocks 轻松部署和管理向量数据库。 + +## 架构 +Kubernetes 已成为容器编排的事实标准。它利用 ReplicaSet 提供的可扩展性和可用性,以及 Deployment 提供的滚动更新和回滚能力,来管理日益增长的无状态工作负载。然而,管理有状态工作负载对 Kubernetes 提出了重大挑战。尽管 StatefulSet 提供了稳定的持久化存储和唯一的网络标识符,但这些能力对于复杂的有状态工作负载仍显不足。为了应对这些挑战并简化复杂性,KubeBlocks 引入了 ReplicationSet 和 ConsensusSet,它们具有以下功能: + +- 基于角色的更新排序,减少因升级、扩缩容和重启导致的停机时间。 +- 维护数据复制状态,自动修复复制错误或延迟。 + +凭借 Kubernetes 强大的容器编排能力和对数据库引擎的统一抽象,KubeBlocks 具有以下优势: + +- 兼容 AWS、GCP、Azure 等多个云平台。 +- 提供生产级性能、弹性、可扩展性和可观测性。 +- 简化日常运维操作,如升级、扩缩容、监控、备份和恢复。 +- 包含强大直观的命令行工具,帮助您在几分钟内搭建全栈、生产就绪的数据基础设施。 + +上述能力使我们能够以便捷快速的方式在 KubeBlocks 上构建 AIGC 基础设施,例如大语言模型(LLMs)和向量数据库。新数据库也能快速接入,您只需定义 ClusterDefinition 和 ClusterVersion 等少量 CR,并配置操作脚本、参数和监控面板,即可在 KubeBlocks 上创建数据库集群,同时支持参数配置、垂直/水平扩缩容、升级降级、备份恢复等能力。 +![KubeBlocks 架构](/img/blogs/use-kubeblocks-to-build1.png) + +以下说明以 Qdrant 为例,介绍如何通过 KubeBlocks 在 AWS EKS 上搭建向量数据库。 + +Qdrant 是一个开源的向量数据库,专为高效存储和查询高维向量数据而设计。Qdrant 的架构可描述如下: +![Qdrant 架构](/img/blogs/use-kubeblocks-to-build2.png) + +**Qdrant 的关键特性:** + +1. 存储引擎:Qdrant 使用 RocksDB 作为存储引擎。RocksDB 是基于 LSM(Log-Structured Merge)树结构的高性能键值存储引擎,具有卓越的写入和查询性能。 + +2. 索引结构:Qdrant 采用基于 MVP(Most Valuable Point)概念的 HNSW(Hierarchical Navigable Small World)索引结构。该索引结构通过构建多层图结构来组织向量数据,实现快速的近似最近邻搜索。 + +3. 向量编码:Qdrant 支持多种向量编码方法,包括 L2、IP、Cosine 等。这些编码技术用于将高维向量映射到低维空间,以便在索引结构中进行高效的相似性计算和搜索。 + +4. 查询处理:Qdrant 使用多线程和并行计算处理查询请求。其工作原理是将查询向量与索引结构进行比较,并利用近似最近邻算法查找最相似的向量。 + +5. 分布式部署:Qdrant 支持水平扩展和分布式部署。它可以在多个节点上进行数据分片和负载均衡,以提高存储容量和查询吞吐量。 + +总体而言,Qdrant 的架构旨在提供高效的向量存储和查询能力。通过利用存储引擎、索引结构、向量编码和查询处理技术,Qdrant 实现了快速准确的近似最近邻搜索,非常适合涉及高维向量数据处理的各种应用场景。 + +### ClusterDefinition + +该CR定义了Qdrant的ClusterDefinition,包含与引擎密切相关的参数,例如Qdrant服务的访问方式、监控指标的收集方式以及可用性探测方式。 + +```yaml +--- +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterDefinition +metadata: + name: qdrant + labels: + {{- include "qdrant.labels" . 
| nindent 4 }} +spec: + type: qdrant + connectionCredential: + username: root + password: "$(RANDOM_PASSWD)" + endpoint: "$(SVC_FQDN):$(SVC_PORT_tcp-qdrant)" + host: "$(SVC_FQDN)" + port: "$(SVC_PORT_tcp-qdrant)" + componentDefs: + - name: qdrant + workloadType: Stateful + characterType: qdrant + probes: + monitor: + builtIn: false + exporterConfig: + scrapePath: /metrics + scrapePort: 6333 + logConfigs: + scriptSpecs: + - name: qdrant-scripts + templateRef: qdrant-scripts + namespace: {{ .Release.Namespace }} + volumeName: scripts + defaultMode: 0555 + configSpecs: + - name: qdrant-config-template + templateRef: qdrant-config-template + volumeName: qdrant-config + namespace: {{ .Release.Namespace }} + service: + ports: + - name: tcp-qdrant + port: 6333 + targetPort: tcp-qdrant + - name: grpc-qdrant + port: 6334 + targetPort: grpc-qdrant + volumeTypes: + - name: data + type: data + podSpec: + securityContext: + fsGroup: 1001 + initContainers: + - name: qdrant-tools + command: + - /bin/sh + - -c + - | + cp /bin/jq /qdrant/tools/jq + cp /bin/curl /qdrant/tools/curl + imagePullPolicy: {{default .Values.images.pullPolicy "IfNotPresent"}} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /qdrant/tools + name: tools + containers: + - name: qdrant + imagePullPolicy: {{default .Values.images.pullPolicy "IfNotPresent"}} + securityContext: + runAsUser: 0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: / + port: tcp-qdrant + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - | + consensus_status=`/qdrant/tools/curl -s http://localhost:6333/cluster | /qdrant/tools/jq -r .result.consensus_thread_status.consensus_thread_status` + if [ "$consensus_status" != "working" ]; then + echo "consensus stopped" + exit 1 + fi + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 3 + startupProbe: + failureThreshold: 18 + httpGet: + path: / + port: tcp-qdrant + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + lifecycle: + preStop: + exec: + command: ["/qdrant/scripts/pre-stop.sh"] + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /qdrant/config/ + name: qdrant-config + - mountPath: /qdrant/storage + name: data + - mountPath: /qdrant/scripts + name: scripts + - mountPath: /etc/annotations + name: annotations + - mountPath: /qdrant/tools + name: tools + dnsPolicy: ClusterFirst + enableServiceLinks: true + ports: + - name: tcp-qdrant + containerPort: 6333 + - name: grpc-qdrant + containerPort: 6334 + - name: tcp-metrics + containerPort: 9091 + - name: p2p + containerPort: 6335 + command: ["/bin/sh", "-c"] + args: ["/qdrant/scripts/setup.sh"] + env: + - name: QDRANT__TELEMETRY_DISABLED + value: "true" + volumes: + - name: annotations + downwardAPI: + items: + - path: "component-replicas" + fieldRef: + fieldPath: metadata.annotations['apps.kubeblocks.io/component-replicas'] + - emptyDir: {} + name: tools +``` + +### ClusterVersion +该CR(自定义资源)定义了Qdrant的特定版本。若存在多个版本,请确保每个版本都对应一个ClusterVersion。 + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ClusterVersion +metadata: + name: qdrant-{{ default .Chart.AppVersion .Values.clusterVersionOverride }} + labels: + {{- include "qdrant.labels" . 
| nindent 4 }}
+spec:
+  clusterDefinitionRef: qdrant
+  componentVersions:
+  - componentDefRef: qdrant
+    versionsContext:
+      initContainers:
+      - name: qdrant-tools
+        image: {{ .Values.images.registry | default "docker.io" }}/{{ .Values.images.tools.repository }}:{{ default .Chart.AppVersion .Values.images.tools.tag }}
+      containers:
+      - name: qdrant
+        image: {{ .Values.images.registry | default "docker.io" }}/{{ .Values.images.repository }}:{{ default .Chart.AppVersion .Values.images.tag }}
+```
+
+## 演示
+
+### 准备工作
+
+- 准备一个 EKS 集群。
+- 安装 kubectl 和 Helm 客户端。
+
+### 安装 kbcli 和 KubeBlocks
+
+1. 安装 kbcli。
+
+    ```bash
+    curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash
+    ```
+
+2. 安装 KubeBlocks。
+
+    ```bash
+    kbcli kubeblocks install
+    ```
+
+3. 启用 Qdrant 插件。
+
+    ```bash
+    kbcli addon enable qdrant
+    ```
+
+## 创建集群
+
+1. 创建一个单机模式 Qdrant 集群。
+
+    ```bash
+    kbcli cluster create qdrant --cluster-definition=qdrant
+    ```
+
+    如果数据量较大,可以设置 `replicas` 参数来创建一个 RaftGroup 模式的 Qdrant 集群。
+
+    ```bash
+    kbcli cluster create qdrant --cluster-definition=qdrant --set replicas=3
+    ```
+
+2. 查看集群状态,当 STATUS 显示为 Running 时,表示集群已成功创建。
+
+    ```bash
+    # View the cluster list
+    kbcli cluster list
+    >
+    NAME     NAMESPACE   CLUSTER-DEFINITION   VERSION        TERMINATION-POLICY   STATUS    CREATED-TIME
+    qdrant   default     qdrant               qdrant-1.1.0   Delete               Running   Aug 15,2023 23:03 UTC+0800
+    ```
+
+    您也可以查看集群详情。
+
+    ```bash
+    # View the cluster information
+    kbcli cluster describe qdrant
+    >
+    Name: qdrant         Created Time: Aug 15,2023 23:03 UTC+0800
+    NAMESPACE   CLUSTER-DEFINITION   VERSION        STATUS    TERMINATION-POLICY
+    default     qdrant               qdrant-1.1.0   Running   Delete
+
+    Endpoints:
+    COMPONENT   MODE        INTERNAL                                       EXTERNAL
+    qdrant      ReadWrite   qdrant-qdrant.default.svc.cluster.local:6333
+                            qdrant-qdrant.default.svc.cluster.local:6334
+
+    Topology:
+    COMPONENT   INSTANCE          ROLE   STATUS    AZ   NODE                   CREATED-TIME
+    qdrant      qdrant-qdrant-0          Running        x-worker3/172.20.0.3   Aug 15,2023 23:03 UTC+0800
+    qdrant      qdrant-qdrant-1          Running        x-worker2/172.20.0.5   Aug 15,2023 23:03 UTC+0800
+    qdrant      qdrant-qdrant-2          Running        x-worker/172.20.0.2    Aug 15,2023 23:04 UTC+0800
+
+    Resources Allocation:
+    COMPONENT   DEDICATED   CPU(REQUEST/LIMIT)   MEMORY(REQUEST/LIMIT)   STORAGE-SIZE   STORAGE-CLASS
+    qdrant      false       1 / 1                1Gi / 1Gi               data:20Gi      standard
+
+    Images:
+    COMPONENT   TYPE     IMAGE
+    qdrant      qdrant   docker.io/qdrant/qdrant:latest
+
+    Data Protection:
+    AUTO-BACKUP   BACKUP-SCHEDULE   TYPE   BACKUP-TTL   LAST-SCHEDULE   RECOVERABLE-TIME
+    Disabled                               7d
+
+    Show cluster events: kbcli cluster list-events -n default qdrant
+    ```
+
+### 连接到集群
+
+Qdrant 分别通过端口 6333 和 6334 提供 HTTP 和 gRPC 协议供客户端访问。根据客户端所在位置,可以选择不同的方式连接到 Qdrant 集群。
+
+:::note
+
+如果您的集群在 AWS 上,请先安装 AWS 负载均衡控制器。
+
+:::
+
+- 如果您的客户端位于 K8s 集群内部,运行 `kbcli cluster describe qdrant` 获取集群的 ClusterIP 地址或对应的 K8s 集群域名。
+- 如果您的客户端位于 K8s 集群外部但与服务器在同一 VPC 中,运行 `kbcli cluster expose qdrant --enable=true --type=vpc` 获取数据库集群的 VPC 负载均衡器地址。
+- 如果您的客户端位于 VPC 外部,运行 `kbcli cluster expose qdrant --enable=true --type=internet` 为数据库集群开放一个可公开访问的地址。
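+
+拿到访问地址后,可以先用一个简单的 HTTP 请求验证连通性。下面是一个示意用法,假设客户端位于 K8s 集群内部,实际地址以 `kbcli cluster describe` 输出的 Endpoints 为准:
+
+```bash
+# 6333 为 HTTP 端口,6334 为 gRPC 端口
+# /cluster 接口返回集群共识状态,可用于快速确认服务可达
+curl http://qdrant-qdrant.default.svc.cluster.local:6333/cluster
+```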
+
+### 测试
+
+1. 要向 Qdrant 集群插入数据,首先创建一个名为 `test_collection` 的 Collection,向量维度为 4,并使用余弦距离计算相似性。
+
+    ```bash
+    curl -X PUT 'http://localhost:6333/collections/test_collection' \
+      -H 'Content-Type: application/json' \
+      --data-raw '{
+        "vectors": {
+          "size": 4,
+          "distance": "Cosine"
+        }
+      }'
+    ```
+
+    **结果**
+
+    ```json
+    {"result":true,"status":"ok","time":0.173516958}
+    ```
+
+2. 查看已创建 Collection 的信息。
+
+    ```bash
+    curl 'http://localhost:6333/collections/test_collection'
+    ```
+
+    **结果**
+
+    ```json
+    {
+      "result": {
+        "status": "green",
+        "optimizer_status": "ok",
+        "vectors_count": 0,
+        "indexed_vectors_count": 0,
+        "points_count": 0,
+        "segments_count": 2,
+        "config": {
+          "params": {
+            "vectors": {
+              "size": 4,
+              "distance": "Cosine"
+            },
+            "shard_number": 1,
+            "replication_factor": 1,
+            "write_consistency_factor": 1,
+            "on_disk_payload": true
+          },
+          "hnsw_config": {
+            "m": 16,
+            "ef_construct": 100,
+            "full_scan_threshold": 10000,
+            "max_indexing_threads": 0,
+            "on_disk": false
+          },
+          "optimizer_config": {
+            "deleted_threshold": 0.2,
+            "vacuum_min_vector_number": 1000,
+            "default_segment_number": 0,
+            "max_segment_size": null,
+            "memmap_threshold": null,
+            "indexing_threshold": 20000,
+            "flush_interval_sec": 5,
+            "max_optimization_threads": 1
+          },
+          "wal_config": {
+            "wal_capacity_mb": 32,
+            "wal_segments_ahead": 0
+          },
+          "quantization_config": null
+        },
+        "payload_schema": {}
+      },
+      "status": "ok",
+      "time": 1.9708e-05
+    }
+    ```
+
+3. 向 Collection 中插入数据。
+
+    ```bash
+    curl -L -X PUT 'http://localhost:6333/collections/test_collection/points?wait=true' \
+      -H 'Content-Type: application/json' \
+      --data-raw '{
+        "points": [
+          {"id": 1, "vector": [0.05, 0.61, 0.76, 0.74], "payload": {"city": "Berlin"}},
+          {"id": 2, "vector": [0.19, 0.81, 0.75, 0.11], "payload": {"city": ["Berlin", "London"]}},
+          {"id": 3, "vector": [0.36, 0.55, 0.47, 0.94], "payload": {"city": ["Berlin", "Moscow"]}},
+          {"id": 4, "vector": [0.18, 0.01, 0.85, 0.80], "payload": {"city": ["London", "Moscow"]}},
+          {"id": 5, "vector": [0.24, 0.18, 0.22, 0.44], "payload": {"count": [0]}},
+          {"id": 6, "vector": [0.35, 0.08, 0.11, 0.44]}
+        ]
+      }'
+    ```
+
+    **结果**
+
+    ```json
+    {
+      "result": {
+        "operation_id": 0,
+        "status": "completed"
+      },
+      "status": "ok",
+      "time": 0.040477833
+    }
+    ```
+
+4. 搜索之前插入的数据,例如与向量 [0.2,0.1,0.9,0.7] 相似的数据。
+
+    ```bash
+    curl -L -X POST 'http://localhost:6333/collections/test_collection/points/search' \
+      -H 'Content-Type: application/json' \
+      --data-raw '{
+        "vector": [0.2,0.1,0.9,0.7],
+        "limit": 3
+      }'
+    ```
+
+    **结果**
+
+    ```json
+    {
+      "result": [
+        {
+          "id": 4,
+          "version": 0,
+          "score": 0.99248314,
+          "payload": null,
+          "vector": null
+        },
+        {
+          "id": 1,
+          "version": 0,
+          "score": 0.89463294,
+          "payload": null,
+          "vector": null
+        },
+        {
+          "id": 5,
+          "version": 0,
+          "score": 0.8543979,
+          "payload": null,
+          "vector": null
+        }
+      ],
+      "status": "ok",
+      "time": 0.003061
+    }
+    ```
+
+    您还可以添加额外的元数据过滤条件,例如在 city 等于 London 的点中查找与向量 [0.2,0.1,0.9,0.7] 相似的数据。
+
+    ```bash
+    curl -L -X POST 'http://localhost:6333/collections/test_collection/points/search' \
+      -H 'Content-Type: application/json' \
+      --data-raw '{
+        "filter": {
+          "should": [
+            {
+              "key": "city",
+              "match": {
+                "value": "London"
+              }
+            }
+          ]
+        },
+        "vector": [0.2, 0.1, 0.9, 0.7],
+        "limit": 3
+      }'
+    ```
+
+    **结果**
+
+    ```json
+    {
+      "result": [
+        {
+          "id": 4,
+          "version": 0,
+          "score": 0.99248314,
+          "payload": null,
+          "vector": null
+        },
+        {
+          "id": 2,
+          "version": 0,
+          "score": 0.66603535,
+          "payload": null,
+          "vector": null
+        }
+      ],
+      "status": "ok",
+      "time": 0.012462584
+    }
+    ```
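+
+测试完成后,如果不再需要该 Collection,可以将其删除以释放资源。以下为示意步骤,接口行为以 Qdrant 官方 REST API 文档为准:
+
+```bash
+curl -X DELETE 'http://localhost:6333/collections/test_collection'
+```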
+
+## 扩缩容集群
+
+KubeBlocks 同时支持垂直扩缩容和水平扩缩容。如果您最初创建的是单机模式集群,但后续发现容量不足,可以对集群进行垂直或水平扩容。
+
+垂直扩容会增加 CPU 和内存资源。
+
+```bash
+kbcli cluster vscale qdrant --components qdrant --cpu 8 --memory 32Gi
+```
+
+如果垂直扩容已达到单台机器的上限,您可以通过水平扩容为集群添加节点,例如从单机模式扩展到 RaftGroup 模式。
+
+```bash
+kbcli cluster hscale qdrant --replicas 3
+```
+
diff --git a/docs/zh/preview/cli/cli.mdx b/docs/zh/preview/cli/cli.mdx
new file mode 100644
index 00000000..54487f0e
--- /dev/null
+++ b/docs/zh/preview/cli/cli.mdx
@@ -0,0 +1,203 @@
+---
+title: KubeBlocks CLI Overview
+description: KubeBlocks CLI overview
+sidebar_position: 1
+---
+
+## [addon](kbcli_addon.md)
+
+Addon command.
+
+* [kbcli addon describe](kbcli_addon_describe.md) - Describe an addon specification.
+* [kbcli addon disable](kbcli_addon_disable.md) - Disable an addon.
+* [kbcli addon enable](kbcli_addon_enable.md) - Enable an addon.
+* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes
+* [kbcli addon install](kbcli_addon_install.md) - Install KubeBlocks addon
+* [kbcli addon list](kbcli_addon_list.md) - List addons.
+* [kbcli addon purge](kbcli_addon_purge.md) - Purge the sub-resources of specified addon and versions
+* [kbcli addon search](kbcli_addon_search.md) - Search the addon from index
+* [kbcli addon uninstall](kbcli_addon_uninstall.md) - Uninstall an existed addon
+* [kbcli addon upgrade](kbcli_addon_upgrade.md) - Upgrade an existed addon to latest version or a specified version
+
+
+## [backuprepo](kbcli_backuprepo.md)
+
+BackupRepo command.
+
+* [kbcli backuprepo create](kbcli_backuprepo_create.md) - Create a backup repository
+* [kbcli backuprepo delete](kbcli_backuprepo_delete.md) - Delete a backup repository.
+* [kbcli backuprepo describe](kbcli_backuprepo_describe.md) - Describe a backup repository.
+* [kbcli backuprepo list](kbcli_backuprepo_list.md) - List Backup Repositories.
+* [kbcli backuprepo list-storage-provider](kbcli_backuprepo_list-storage-provider.md) - List storage providers.
+* [kbcli backuprepo update](kbcli_backuprepo_update.md) - Update a backup repository.
+
+
+## [cluster](kbcli_cluster.md)
+
+Cluster command.
+
+* [kbcli cluster backup](kbcli_cluster_backup.md) - Create a backup for the cluster.
+* [kbcli cluster cancel-ops](kbcli_cluster_cancel-ops.md) - Cancel the pending/creating/running OpsRequest which type is vscale or hscale. +* [kbcli cluster configure](kbcli_cluster_configure.md) - Configure parameters with the specified components in the cluster. +* [kbcli cluster connect](kbcli_cluster_connect.md) - Connect to a cluster or instance. +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - +* [kbcli cluster delete](kbcli_cluster_delete.md) - Delete clusters. +* [kbcli cluster delete-backup](kbcli_cluster_delete-backup.md) - Delete a backup. +* [kbcli cluster delete-ops](kbcli_cluster_delete-ops.md) - Delete an OpsRequest. +* [kbcli cluster describe](kbcli_cluster_describe.md) - Show details of a specific cluster. +* [kbcli cluster describe-backup](kbcli_cluster_describe-backup.md) - Describe a backup. +* [kbcli cluster describe-backup-policy](kbcli_cluster_describe-backup-policy.md) - Describe backup policy +* [kbcli cluster describe-config](kbcli_cluster_describe-config.md) - Show details of a specific reconfiguring. +* [kbcli cluster describe-ops](kbcli_cluster_describe-ops.md) - Show details of a specific OpsRequest. +* [kbcli cluster describe-restore](kbcli_cluster_describe-restore.md) - Describe a restore +* [kbcli cluster edit-backup-policy](kbcli_cluster_edit-backup-policy.md) - Edit backup policy +* [kbcli cluster edit-config](kbcli_cluster_edit-config.md) - Edit the config file of the component. +* [kbcli cluster explain-config](kbcli_cluster_explain-config.md) - List the constraint for supported configuration params. +* [kbcli cluster expose](kbcli_cluster_expose.md) - Expose a cluster with a new endpoint, the new endpoint can be found by executing 'kbcli cluster describe NAME'. +* [kbcli cluster label](kbcli_cluster_label.md) - Update the labels on cluster +* [kbcli cluster list](kbcli_cluster_list.md) - List clusters. +* [kbcli cluster list-backup-policies](kbcli_cluster_list-backup-policies.md) - List backups policies. +* [kbcli cluster list-backups](kbcli_cluster_list-backups.md) - List backups. +* [kbcli cluster list-components](kbcli_cluster_list-components.md) - List cluster components. +* [kbcli cluster list-events](kbcli_cluster_list-events.md) - List cluster events. +* [kbcli cluster list-instances](kbcli_cluster_list-instances.md) - List cluster instances. +* [kbcli cluster list-logs](kbcli_cluster_list-logs.md) - List supported log files in cluster. +* [kbcli cluster list-ops](kbcli_cluster_list-ops.md) - List all opsRequests. +* [kbcli cluster list-restores](kbcli_cluster_list-restores.md) - List restores. +* [kbcli cluster logs](kbcli_cluster_logs.md) - Access cluster log file. +* [kbcli cluster promote](kbcli_cluster_promote.md) - Promote a non-primary or non-leader instance as the new primary or leader of the cluster +* [kbcli cluster rebuild-instance](kbcli_cluster_rebuild-instance.md) - Rebuild the specified instances in the cluster. +* [kbcli cluster register](kbcli_cluster_register.md) - Pull the cluster chart to the local cache and register the type to 'create' sub-command +* [kbcli cluster restart](kbcli_cluster_restart.md) - Restart the specified components in the cluster. +* [kbcli cluster restore](kbcli_cluster_restore.md) - Restore a new cluster from backup. +* [kbcli cluster scale-in](kbcli_cluster_scale-in.md) - scale in replicas of the specified components in the cluster. 
+* [kbcli cluster scale-out](kbcli_cluster_scale-out.md) - scale out replicas of the specified components in the cluster. +* [kbcli cluster start](kbcli_cluster_start.md) - Start the cluster if cluster is stopped. +* [kbcli cluster stop](kbcli_cluster_stop.md) - Stop the cluster and release all the pods of the cluster. +* [kbcli cluster update](kbcli_cluster_update.md) - Update the cluster settings, such as enable or disable monitor or log. +* [kbcli cluster upgrade](kbcli_cluster_upgrade.md) - Upgrade the service version(only support to upgrade minor version). +* [kbcli cluster upgrade-to-v1](kbcli_cluster_upgrade-to-v1.md) - upgrade cluster to v1 api version. +* [kbcli cluster volume-expand](kbcli_cluster_volume-expand.md) - Expand volume with the specified components and volumeClaimTemplates in the cluster. +* [kbcli cluster vscale](kbcli_cluster_vscale.md) - Vertically scale the specified components in the cluster. + + +## [clusterdefinition](kbcli_clusterdefinition.md) + +ClusterDefinition command. + +* [kbcli clusterdefinition describe](kbcli_clusterdefinition_describe.md) - Describe ClusterDefinition. +* [kbcli clusterdefinition list](kbcli_clusterdefinition_list.md) - List ClusterDefinitions. + + +## [componentdefinition](kbcli_componentdefinition.md) + +ComponentDefinition command. + +* [kbcli componentdefinition describe](kbcli_componentdefinition_describe.md) - Describe ComponentDefinition. +* [kbcli componentdefinition list](kbcli_componentdefinition_list.md) - List ComponentDefinition. + + +## [componentversion](kbcli_componentversion.md) + +ComponentVersions command. + +* [kbcli componentversion describe](kbcli_componentversion_describe.md) - Describe ComponentVersion. +* [kbcli componentversion list](kbcli_componentversion_list.md) - List ComponentVersion. + + +## [dataprotection](kbcli_dataprotection.md) + +Data protection command. + +* [kbcli dataprotection backup](kbcli_dataprotection_backup.md) - Create a backup for the cluster. +* [kbcli dataprotection delete-backup](kbcli_dataprotection_delete-backup.md) - Delete a backup. +* [kbcli dataprotection describe-backup](kbcli_dataprotection_describe-backup.md) - Describe a backup +* [kbcli dataprotection describe-backup-policy](kbcli_dataprotection_describe-backup-policy.md) - Describe a backup policy +* [kbcli dataprotection describe-restore](kbcli_dataprotection_describe-restore.md) - Describe a restore +* [kbcli dataprotection edit-backup-policy](kbcli_dataprotection_edit-backup-policy.md) - Edit backup policy +* [kbcli dataprotection list-action-sets](kbcli_dataprotection_list-action-sets.md) - List actionsets +* [kbcli dataprotection list-backup-policies](kbcli_dataprotection_list-backup-policies.md) - List backup policies +* [kbcli dataprotection list-backup-policy-templates](kbcli_dataprotection_list-backup-policy-templates.md) - List backup policy templates +* [kbcli dataprotection list-backups](kbcli_dataprotection_list-backups.md) - List backups. +* [kbcli dataprotection list-restores](kbcli_dataprotection_list-restores.md) - List restores. +* [kbcli dataprotection restore](kbcli_dataprotection_restore.md) - Restore a new cluster from backup + + +## [kubeblocks](kbcli_kubeblocks.md) + +KubeBlocks operation commands. + +* [kbcli kubeblocks compare](kbcli_kubeblocks_compare.md) - List the changes between two different version KubeBlocks. +* [kbcli kubeblocks config](kbcli_kubeblocks_config.md) - KubeBlocks config. +* [kbcli kubeblocks describe-config](kbcli_kubeblocks_describe-config.md) - Describe KubeBlocks config. 
+* [kbcli kubeblocks install](kbcli_kubeblocks_install.md) - Install KubeBlocks. +* [kbcli kubeblocks list-versions](kbcli_kubeblocks_list-versions.md) - List KubeBlocks versions. +* [kbcli kubeblocks preflight](kbcli_kubeblocks_preflight.md) - Run and retrieve preflight checks for KubeBlocks. +* [kbcli kubeblocks status](kbcli_kubeblocks_status.md) - Show list of resource KubeBlocks uses or owns. +* [kbcli kubeblocks uninstall](kbcli_kubeblocks_uninstall.md) - Uninstall KubeBlocks. +* [kbcli kubeblocks upgrade](kbcli_kubeblocks_upgrade.md) - Upgrade KubeBlocks. + + +## [ops-definition](kbcli_ops-definition.md) + +ops-definitions command. + +* [kbcli ops-definition describe](kbcli_ops-definition_describe.md) - Describe OpsDefinition. +* [kbcli ops-definition list](kbcli_ops-definition_list.md) - List OpsDefinition. + + +## [options](kbcli_options.md) + +Print the list of flags inherited by all commands. + + + +## [playground](kbcli_playground.md) + +Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +* [kbcli playground destroy](kbcli_playground_destroy.md) - Destroy the playground KubeBlocks and kubernetes cluster. +* [kbcli playground init](kbcli_playground_init.md) - Bootstrap a kubernetes cluster and install KubeBlocks for playground. + + +## [plugin](kbcli_plugin.md) + +Provides utilities for interacting with plugins. + + Plugins provide extended functionality that is not part of the major command-line distribution. + +* [kbcli plugin describe](kbcli_plugin_describe.md) - Describe a plugin +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes +* [kbcli plugin install](kbcli_plugin_install.md) - Install kbcli or kubectl plugins +* [kbcli plugin list](kbcli_plugin_list.md) - List all visible plugin executables on a user's PATH +* [kbcli plugin search](kbcli_plugin_search.md) - Search kbcli or kubectl plugins +* [kbcli plugin uninstall](kbcli_plugin_uninstall.md) - Uninstall kbcli or kubectl plugins +* [kbcli plugin upgrade](kbcli_plugin_upgrade.md) - Upgrade kbcli or kubectl plugins + + +## [report](kbcli_report.md) + +Report kubeblocks or cluster info. + +* [kbcli report cluster](kbcli_report_cluster.md) - Report Cluster information +* [kbcli report kubeblocks](kbcli_report_kubeblocks.md) - Report KubeBlocks information, including deployments, events, logs, etc. + + +## [trace](kbcli_trace.md) + +trace management command + +* [kbcli trace create](kbcli_trace_create.md) - create a trace. +* [kbcli trace delete](kbcli_trace_delete.md) - Delete a trace. +* [kbcli trace list](kbcli_trace_list.md) - list all traces. +* [kbcli trace update](kbcli_trace_update.md) - update a trace. +* [kbcli trace watch](kbcli_trace_watch.md) - watch a trace. + + +## [version](kbcli_version.md) + +Print the version information, include kubernetes, KubeBlocks and kbcli version. + + + diff --git a/docs/zh/preview/cli/kbcli.mdx b/docs/zh/preview/cli/kbcli.mdx new file mode 100644 index 00000000..de027710 --- /dev/null +++ b/docs/zh/preview/cli/kbcli.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli +--- + +KubeBlocks CLI. 
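+
+A typical first session looks like the following (the addon and cluster names are illustrative; see the sub-command pages below for details):
+
+```
+kbcli kubeblocks install          # install KubeBlocks into the current Kubernetes cluster
+kbcli addon enable qdrant         # enable a database addon
+kbcli cluster create qdrant --cluster-definition=qdrant
+kbcli cluster list                # check that the new cluster is Running
+```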
+ +### Synopsis + +``` + +============================================= + __ __ _______ ______ __ ______ +| \ / \ \ / \| \ | \ +| ▓▓ / ▓▓ ▓▓▓▓▓▓▓\ ▓▓▓▓▓▓\ ▓▓ \▓▓▓▓▓▓ +| ▓▓/ ▓▓| ▓▓__/ ▓▓ ▓▓ \▓▓ ▓▓ | ▓▓ +| ▓▓ ▓▓ | ▓▓ ▓▓ ▓▓ | ▓▓ | ▓▓ +| ▓▓▓▓▓\ | ▓▓▓▓▓▓▓\ ▓▓ __| ▓▓ | ▓▓ +| ▓▓ \▓▓\| ▓▓__/ ▓▓ ▓▓__/ \ ▓▓_____ _| ▓▓_ +| ▓▓ \▓▓\ ▓▓ ▓▓\▓▓ ▓▓ ▓▓ \ ▓▓ \ + \▓▓ \▓▓\▓▓▓▓▓▓▓ \▓▓▓▓▓▓ \▓▓▓▓▓▓▓▓\▓▓▓▓▓▓ + +============================================= +A Command Line Interface for KubeBlocks +``` + +``` +kbcli [flags] +``` + +### Options + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + -h, --help help for kbcli + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. +* [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli clusterdefinition](kbcli_clusterdefinition.md) - ClusterDefinition command. +* [kbcli componentdefinition](kbcli_componentdefinition.md) - ComponentDefinition command. +* [kbcli componentversion](kbcli_componentversion.md) - ComponentVersions command. +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. +* [kbcli ops-definition](kbcli_ops-definition.md) - ops-definitions command. +* [kbcli options](kbcli_options.md) - Print the list of flags inherited by all commands. +* [kbcli playground](kbcli_playground.md) - Bootstrap or destroy a playground KubeBlocks in local host or cloud. +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. +* [kbcli report](kbcli_report.md) - Report kubeblocks or cluster info. +* [kbcli trace](kbcli_trace.md) - trace management command +* [kbcli version](kbcli_version.md) - Print the version information, include kubernetes, KubeBlocks and kbcli version. 
+ +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon.mdx b/docs/zh/preview/cli/kbcli_addon.mdx new file mode 100644 index 00000000..4948d30e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon.mdx @@ -0,0 +1,52 @@ +--- +title: kbcli addon +--- + +Addon command. + +### Options + +``` + -h, --help help for addon +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli addon describe](kbcli_addon_describe.md) - Describe an addon specification. +* [kbcli addon disable](kbcli_addon_disable.md) - Disable an addon. +* [kbcli addon enable](kbcli_addon_enable.md) - Enable an addon. +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes +* [kbcli addon install](kbcli_addon_install.md) - Install KubeBlocks addon +* [kbcli addon list](kbcli_addon_list.md) - List addons. +* [kbcli addon purge](kbcli_addon_purge.md) - Purge the sub-resources of specified addon and versions +* [kbcli addon search](kbcli_addon_search.md) - Search the addon from index +* [kbcli addon uninstall](kbcli_addon_uninstall.md) - Uninstall an existed addon +* [kbcli addon upgrade](kbcli_addon_upgrade.md) - Upgrade an existed addon to latest version or a specified version + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_describe.mdx b/docs/zh/preview/cli/kbcli_addon_describe.mdx new file mode 100644 index 00000000..c3b1991e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_describe.mdx @@ -0,0 +1,46 @@ +--- +title: kbcli addon describe +--- + +Describe an addon specification. 
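+
+For example, `kbcli addon describe prometheus` prints the specification of the "prometheus" addon (the addon name is illustrative; any name returned by `kbcli addon list` works).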
+ +``` +kbcli addon describe ADDON_NAME [flags] +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_disable.mdx b/docs/zh/preview/cli/kbcli_addon_disable.mdx new file mode 100644 index 00000000..d90978e1 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_disable.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli addon disable +--- + +Disable an addon. + +``` +kbcli addon disable ADDON_NAME [flags] +``` + +### Examples + +``` + # Disable "prometheus" addon + kbcli addon disable prometheus + + # Disable addons in batch + kbcli addon disable prometheus csi-s3 +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --auto-approve Skip interactive approval before disabling addon + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + --edit Edit the API resource + -h, --help help for disable + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. 
The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_enable.mdx b/docs/zh/preview/cli/kbcli_addon_enable.mdx new file mode 100644 index 00000000..3bfcae27 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_enable.mdx @@ -0,0 +1,94 @@ +--- +title: kbcli addon enable +--- + +Enable an addon. 
+ +``` +kbcli addon enable ADDON_NAME [flags] +``` + +### Examples + +``` + # Enabled "prometheus" addon + kbcli addon enable prometheus + + # Enabled "prometheus" addon with custom resources settings + kbcli addon enable prometheus --memory 512Mi/4Gi --storage 8Gi --replicas 2 + + # Enabled "prometheus" addon and its extra alertmanager component with custom resources settings + kbcli addon enable prometheus --memory 512Mi/4Gi --storage 8Gi --replicas 2 \ + --memory alertmanager:16Mi/256Mi --storage alertmanager:1Gi --replicas alertmanager:2 + + # Enabled "prometheus" addon with tolerations + kbcli addon enable prometheus \ + --tolerations '[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' \ + --tolerations 'alertmanager:[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' + + # Enabled "prometheus" addon with helm like custom settings + kbcli addon enable prometheus --set prometheus.alertmanager.image.tag=v0.24.0 + + # Force enabled "csi-s3" addon + kbcli addon enable csi-s3 --force + + # Enable addons in batch + kbcli addon enable prometheus csi-s3 +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --cpu stringArray Sets addon CPU resource values (--cpu [extraName:]/) (can specify multiple if has extra items)) + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + --edit Edit the API resource + --force ignoring the installable restrictions and forcefully enabling. + -h, --help help for enable + --memory stringArray Sets addon memory resource values (--memory [extraName:]/) (can specify multiple if has extra items)) + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --replicas stringArray Sets addon component replica count (--replicas [extraName:]) (can specify multiple if has extra items)) + --set stringArray set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2), it's only being processed if addon's type is helm. + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --storage stringArray Sets addon storage size (--storage [extraName:]) (can specify multiple if has extra items)). + Additional notes: + 1. Specify '0' value will remove storage values settings and explicitly disable 'persistentVolumeEnabled' attribute. + 2. For Helm type Addon, that resizing storage will fail if modified value is a storage request size + that belongs to StatefulSet's volume claim template, to resolve 'Failed' Addon status possible action is disable and + re-enable the addon (More info on how-to resize a PVC: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources). + + --storage-class stringArray Sets addon storage class name (--storage-class [extraName:]) (can specify multiple if has extra items)) + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. 
+ --tolerations stringArray Sets addon pod tolerations (--tolerations [extraName:]) (can specify multiple if has extra items)) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_index.mdx b/docs/zh/preview/cli/kbcli_addon_index.mdx new file mode 100644 index 00000000..c8a6b459 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_index.mdx @@ -0,0 +1,50 @@ +--- +title: kbcli addon index +--- + +Manage custom addon indexes + +### Synopsis + +Manage which repositories are used to discover and install addon from. + +### Options + +``` + -h, --help help for index +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. +* [kbcli addon index add](kbcli_addon_index_add.md) - Add a new addon index +* [kbcli addon index delete](kbcli_addon_index_delete.md) - Delete an addon index +* [kbcli addon index list](kbcli_addon_index_list.md) - List addon indexes +* [kbcli addon index update](kbcli_addon_index_update.md) - update the specified index(es) + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_index_add.mdx b/docs/zh/preview/cli/kbcli_addon_index_add.mdx new file mode 100644 index 00000000..50eb3a05 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_index_add.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli addon index add +--- + +Add a new addon index + +### Synopsis + +Configure a new index to install KubeBlocks addon from. + +``` +kbcli addon index add [flags] +``` + +### Examples + +``` +kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git +``` + +### Options + +``` + -h, --help help for add +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_index_delete.mdx b/docs/zh/preview/cli/kbcli_addon_index_delete.mdx new file mode 100644 index 00000000..80e34e0b --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_index_delete.mdx @@ -0,0 +1,50 @@ +--- +title: kbcli addon index delete +--- + +Delete an addon index + +### Synopsis + +Delete a configured addon index. + +``` +kbcli addon index delete [flags] +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_index_list.mdx b/docs/zh/preview/cli/kbcli_addon_index_list.mdx new file mode 100644 index 00000000..04ac17ea --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_index_list.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli addon index list +--- + +List addon indexes + +### Synopsis + +Print a list of addon indexes. + +This command prints a list of addon indexes. It shows the name and the remote URL for +each addon index in table format. + +``` +kbcli addon index list [flags] +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_index_update.mdx b/docs/zh/preview/cli/kbcli_addon_index_update.mdx new file mode 100644 index 00000000..9faf021d --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_index_update.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli addon index update +--- + +update the specified index(es) + +### Synopsis + +Update existed index repository from index origin URL + +``` +kbcli addon index update [flags] +``` + +### Examples + +``` +kbcli addon index update KubeBlocks +``` + +### Options + +``` + --all Upgrade all addon index + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon index](kbcli_addon_index.md) - Manage custom addon indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_install.mdx b/docs/zh/preview/cli/kbcli_addon_install.mdx new file mode 100644 index 00000000..727dc89c --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_install.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli addon install +--- + +Install KubeBlocks addon + +``` +kbcli addon install [flags] +``` + +### Examples + +``` + # install an addon from default index + kbcli addon install apecloud-mysql + + # install an addon from default index and skip KubeBlocks version compatibility check + kbcli addon install apecloud-mysql --force + + # install an addon from a specified index + kbcli addon install apecloud-mysql --index my-index + + # install an addon with a specified version default index + kbcli addon install apecloud-mysql --version 0.7.0 + + # install an addon with a specified version and cluster chart of different version. + kbcli addon install apecloud-mysql --version 0.7.0 --cluster-chart-version 0.7.1 + + # install an addon with a specified version and local path. + kbcli addon install apecloud-mysql --version 0.7.0 --path /path/to/local/chart +``` + +### Options + +``` + --cluster-chart-repo string specify the repo of cluster chart, use the url of 'kubeblocks-addons' by default (default "https://jihulab.com/api/v4/projects/150246/packages/helm/stable") + --cluster-chart-version string specify the cluster chart version, use the same version as the addon by default + --force force install the addon and ignore the version check + -h, --help help for install + --index string specify the addon index, use 'kubeblocks' by default (default "kubeblocks") + --path string specify the local path contains addon CRs and needs to be specified when operating offline + --version string specify the addon version to install, run 'kbcli addon search ' to get the available versions +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_addon_list.mdx b/docs/zh/preview/cli/kbcli_addon_list.mdx new file mode 100644 index 00000000..2404752a --- /dev/null +++ b/docs/zh/preview/cli/kbcli_addon_list.mdx @@ -0,0 +1,51 @@ +--- +title: kbcli addon list +--- + +List addons. + +``` +kbcli addon list [flags] +``` + +### Options + +``` + --engines List engine addons only + -h, --help help for list + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) + --status stringArray Filter addons by status +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli addon](kbcli_addon.md) - Addon command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_addon_purge.mdx b/docs/zh/preview/cli/kbcli_addon_purge.mdx
new file mode 100644
index 00000000..0d61ee16
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_addon_purge.mdx
@@ -0,0 +1,63 @@
+---
+title: kbcli addon purge
+---
+
+Purge the sub-resources of the specified addon and versions
+
+```
+kbcli addon purge [flags]
+```
+
+### Examples
+
+```
+  # Purge specific versions of redis addon resources
+  kbcli addon purge redis --versions=0.9.1,0.9.2
+
+  # Purge all unused and outdated resources of the redis addon
+  kbcli addon purge redis --all
+
+  # Print the resources that would be purged; no resource is actually purged
+  kbcli addon purge redis --dry-run
```
+
+### Options
+
+```
+ --all If set to true, all resources will be purged, including those that are unused and not the newest version.
+ --auto-approve Skip interactive approval before deleting
+ --dry-run If set to true, only print the resources that would be purged, and no resource is actually purged.
+ -h, --help help for purge
+ --versions strings Specify the versions of resources to purge.
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.
(default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli addon](kbcli_addon.md) - Addon command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_addon_search.mdx b/docs/zh/preview/cli/kbcli_addon_search.mdx
new file mode 100644
index 00000000..10995adf
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_addon_search.mdx
@@ -0,0 +1,60 @@
+---
+title: kbcli addon search
+---
+
+Search for an addon from the index
+
+```
+kbcli addon search [ADDON_NAME] [flags]
+```
+
+### Examples
+
+```
+  # search the addons of all indexes
+  kbcli addon search
+
+  # search the addons from a specified local path
+  kbcli addon search --path /path/to/local/chart
+
+  # search different versions and indexes of an addon
+  kbcli addon search apecloud-mysql
+```
+
+### Options
+
+```
+ -h, --help help for search
+ --path string the local directory that contains addon CRs
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli addon](kbcli_addon.md) - Addon command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
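+
+### Usage note
+
+A minimal follow-up sketch: pick a version reported by `kbcli addon search` and pass it to `kbcli addon install`; the addon name and version below are illustrative:
+
+```
+# list the available versions and indexes of an addon, then install a specific version
+kbcli addon search apecloud-mysql
+kbcli addon install apecloud-mysql --version 0.7.0
+```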
+
diff --git a/docs/zh/preview/cli/kbcli_addon_uninstall.mdx b/docs/zh/preview/cli/kbcli_addon_uninstall.mdx
new file mode 100644
index 00000000..b2e169c3
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_addon_uninstall.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli addon uninstall
+---
+
+Uninstall an existing addon
+
+```
+kbcli addon uninstall [flags]
+```
+
+### Examples
+
+```
+  # uninstall an addon
+  kbcli addon uninstall apecloud-mysql
+
+  # uninstall more than one addon
+  kbcli addon uninstall apecloud-mysql postgresql
+```
+
+### Options
+
+```
+ --auto-approve Skip interactive approval before uninstalling the addon
+ -h, --help help for uninstall
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli addon](kbcli_addon.md) - Addon command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
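+
+### Usage note
+
+A minimal pre-check sketch: confirm the addon is installed with `kbcli addon list` before removing it; the addon name below is illustrative:
+
+```
+# verify the addon appears in the installed list, then uninstall without the interactive prompt
+kbcli addon list
+kbcli addon uninstall apecloud-mysql --auto-approve
+```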
+
diff --git a/docs/zh/preview/cli/kbcli_addon_upgrade.mdx b/docs/zh/preview/cli/kbcli_addon_upgrade.mdx
new file mode 100644
index 00000000..fb4202e2
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_addon_upgrade.mdx
@@ -0,0 +1,79 @@
+---
+title: kbcli addon upgrade
+---
+
+Upgrade an existing addon to the latest version or a specified version
+
+```
+kbcli addon upgrade [flags]
+```
+
+### Examples
+
+```
+  # upgrade an addon from the default index to the latest version
+  kbcli addon upgrade apecloud-mysql
+
+  # upgrade an addon from the default index to the latest version and skip the KubeBlocks version compatibility check
+  kbcli addon upgrade apecloud-mysql --force
+
+  # upgrade an addon to the latest version from a specified index
+  kbcli addon upgrade apecloud-mysql --index my-index
+
+  # upgrade an addon with a specified version from the default index
+  kbcli addon upgrade apecloud-mysql --version 0.7.0
+
+  # upgrade an addon with a specified version, the default index and a different version of the cluster chart
+  kbcli addon upgrade apecloud-mysql --version 0.7.0 --cluster-chart-version 0.7.1
+
+  # non-inplace upgrade an addon with a specified version
+  kbcli addon upgrade apecloud-mysql --inplace=false --version 0.7.0
+
+  # non-inplace upgrade an addon with a specified addon name
+  kbcli addon upgrade apecloud-mysql --inplace=false --name apecloud-mysql-0.7.0
+```
+
+### Options
+
+```
+ --cluster-chart-repo string specify the repo of the cluster chart, use the url of 'kubeblocks-addons' by default (default "https://jihulab.com/api/v4/projects/150246/packages/helm/stable")
+ --cluster-chart-version string specify the cluster chart version, use the same version as the addon by default
+ --force force upgrade the addon and ignore the version check
+ -h, --help help for upgrade
+ --index string specify the addon index, use 'kubeblocks' by default (default "kubeblocks")
+ --inplace when inplace is false, the existing addon is retained and the new version is installed separately; otherwise the upgrade is in-place. (default true)
+ --name string the new addon name that must be set by the user when inplace is false; it is also used as the resourceNamePrefix of an addon with multiple versions.
+ --path string specify the local path that contains addon CRs; required when operating offline
+ --version string specify the addon version
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli addon](kbcli_addon.md) - Addon command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_backuprepo.mdx b/docs/zh/preview/cli/kbcli_backuprepo.mdx new file mode 100644 index 00000000..b58385b6 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_backuprepo.mdx @@ -0,0 +1,48 @@ +--- +title: kbcli backuprepo +--- + +BackupRepo command. + +### Options + +``` + -h, --help help for backuprepo +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli backuprepo create](kbcli_backuprepo_create.md) - Create a backup repository +* [kbcli backuprepo delete](kbcli_backuprepo_delete.md) - Delete a backup repository. +* [kbcli backuprepo describe](kbcli_backuprepo_describe.md) - Describe a backup repository. +* [kbcli backuprepo list](kbcli_backuprepo_list.md) - List Backup Repositories. +* [kbcli backuprepo list-storage-provider](kbcli_backuprepo_list-storage-provider.md) - List storage providers. 
+* [kbcli backuprepo update](kbcli_backuprepo_update.md) - Update a backup repository.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_backuprepo_create.mdx b/docs/zh/preview/cli/kbcli_backuprepo_create.mdx
new file mode 100644
index 00000000..7856317c
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_backuprepo_create.mdx
@@ -0,0 +1,90 @@
+---
+title: kbcli backuprepo create
+---
+
+Create a backup repository
+
+```
+kbcli backuprepo create [NAME] [flags]
+```
+
+### Examples
+
+```
+  # Create a default backup repository using S3 as the backend
+  kbcli backuprepo create \
+  --provider s3 \
+  --region us-west-1 \
+  --bucket test-kb-backup \
+  --access-key-id \
+  --secret-access-key \
+  --default
+
+  # Create a non-default backup repository with a specified name
+  kbcli backuprepo create my-backup-repo \
+  --provider s3 \
+  --region us-west-1 \
+  --bucket test-kb-backup \
+  --access-key-id \
+  --secret-access-key
+
+  # Create a backup repository with a sub-path to isolate different repositories
+  kbcli backuprepo create my-backup-repo \
+  --provider s3 \
+  --region us-west-1 \
+  --bucket test-kb-backup \
+  --access-key-id \
+  --secret-access-key \
+  --path-prefix dev/team1
+
+  # Create a backup repository with an FTP backend
+  kbcli backuprepo create \
+  --provider ftp \
+  --ftp-host= \
+  --ftp-port=21 \
+  --ftp-user= \
+  --ftp-password=
+```
+
+### Options
+
+```
+ --access-method string Specify the access method for the backup repository, "Tool" is preferred if not specified. Options: ["Mount" "Tool"]
+ --default Specify whether to set the created backup repository as default
+ -h, --help help for create
+ --path-prefix string Specify the prefix of the path for storing backup files.
+ --provider string Specify the storage provider
+ --pv-reclaim-policy string Specify the reclaim policy for PVs created by this backup repository, the value can be "Retain" or "Delete". This option only takes effect when --access-method="Mount". (default "Retain")
+ --volume-capacity string Specify the capacity of the newly created PVC. This option only takes effect when --access-method="Mount". (default "100Gi")
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request.
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_backuprepo_delete.mdx b/docs/zh/preview/cli/kbcli_backuprepo_delete.mdx new file mode 100644 index 00000000..3af401d4 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_backuprepo_delete.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli backuprepo delete +--- + +Delete a backup repository. + +``` +kbcli backuprepo delete [flags] +``` + +### Examples + +``` + # Delete a backuprepo + kbcli backuprepo delete my-backuprepo +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_backuprepo_describe.mdx b/docs/zh/preview/cli/kbcli_backuprepo_describe.mdx new file mode 100644 index 00000000..5a176b05 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_backuprepo_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli backuprepo describe +--- + +Describe a backup repository. 
+
+```
+kbcli backuprepo describe [flags]
+```
+
+### Examples
+
+```
+  # Describe a backuprepo
+  kbcli backuprepo describe my-backuprepo
+```
+
+### Options
+
+```
+ -h, --help help for describe
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_backuprepo_list-storage-provider.mdx b/docs/zh/preview/cli/kbcli_backuprepo_list-storage-provider.mdx
new file mode 100644
index 00000000..af3e70e2
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_backuprepo_list-storage-provider.mdx
@@ -0,0 +1,56 @@
+---
+title: kbcli backuprepo list-storage-provider
+---
+
+List storage providers.
+
+```
+kbcli backuprepo list-storage-provider [flags]
+```
+
+### Examples
+
+```
+  # List all storage providers
+  kbcli backuprepo list-sp
+```
+
+### Options
+
+```
+ -h, --help help for list-storage-provider
+ -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+ -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+ --show-labels When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_backuprepo_list.mdx b/docs/zh/preview/cli/kbcli_backuprepo_list.mdx
new file mode 100644
index 00000000..1367467e
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_backuprepo_list.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli backuprepo list
+---
+
+List Backup Repositories.
+
+```
+kbcli backuprepo list [flags]
+```
+
+### Examples
+
+```
+  # List all backup repositories
+  kbcli backuprepo list
+```
+
+### Options
+
+```
+ -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+ -h, --help help for list
+ -n, --namespace string specify the namespace
+ -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+ -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+ --show-labels When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_backuprepo_update.mdx b/docs/zh/preview/cli/kbcli_backuprepo_update.mdx
new file mode 100644
index 00000000..9d68291c
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_backuprepo_update.mdx
@@ -0,0 +1,60 @@
+---
+title: kbcli backuprepo update
+---
+
+Update a backup repository.
+
+```
+kbcli backuprepo update BACKUP_REPO_NAME [flags]
+```
+
+### Examples
+
+```
+  # Update the credential of an S3-based backuprepo
+  kbcli backuprepo update my-backuprepo --access-key-id= --secret-access-key=
+
+  # Set the backuprepo as default
+  kbcli backuprepo update my-backuprepo --default
+
+  # Unset the default backuprepo
+  kbcli backuprepo update my-backuprepo --default=false
+```
+
+### Options
+
+```
+ --default Specify whether to set the created backup repo as default
+ -h, --help help for update
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli backuprepo](kbcli_backuprepo.md) - BackupRepo command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster.mdx b/docs/zh/preview/cli/kbcli_cluster.mdx
new file mode 100644
index 00000000..881191d8
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster.mdx
@@ -0,0 +1,86 @@
+---
+title: kbcli cluster
+---
+
+Cluster command.
+
+### Options
+
+```
+ -h, --help help for cluster
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+
+* [kbcli cluster backup](kbcli_cluster_backup.md) - Create a backup for the cluster.
+* [kbcli cluster cancel-ops](kbcli_cluster_cancel-ops.md) - Cancel a pending/creating/running OpsRequest whose type is vscale or hscale.
+* [kbcli cluster configure](kbcli_cluster_configure.md) - Configure parameters for the specified components in the cluster.
+* [kbcli cluster connect](kbcli_cluster_connect.md) - Connect to a cluster or instance.
+* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster.
+* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - 
+* [kbcli cluster delete](kbcli_cluster_delete.md) - Delete clusters.
+* [kbcli cluster delete-backup](kbcli_cluster_delete-backup.md) - Delete a backup.
+* [kbcli cluster delete-ops](kbcli_cluster_delete-ops.md) - Delete an OpsRequest.
+* [kbcli cluster describe](kbcli_cluster_describe.md) - Show details of a specific cluster.
+* [kbcli cluster describe-backup](kbcli_cluster_describe-backup.md) - Describe a backup.
+* [kbcli cluster describe-backup-policy](kbcli_cluster_describe-backup-policy.md) - Describe backup policy
+* [kbcli cluster describe-config](kbcli_cluster_describe-config.md) - Show details of a specific reconfiguring.
+* [kbcli cluster describe-ops](kbcli_cluster_describe-ops.md) - Show details of a specific OpsRequest.
+* [kbcli cluster describe-restore](kbcli_cluster_describe-restore.md) - Describe a restore
+* [kbcli cluster edit-backup-policy](kbcli_cluster_edit-backup-policy.md) - Edit backup policy
+* [kbcli cluster edit-config](kbcli_cluster_edit-config.md) - Edit the config file of the component.
+* [kbcli cluster explain-config](kbcli_cluster_explain-config.md) - List the constraints for supported configuration params.
+* [kbcli cluster expose](kbcli_cluster_expose.md) - Expose a cluster with a new endpoint; the new endpoint can be found by executing 'kbcli cluster describe NAME'.
+* [kbcli cluster label](kbcli_cluster_label.md) - Update the labels on a cluster
+* [kbcli cluster list](kbcli_cluster_list.md) - List clusters.
+* [kbcli cluster list-backup-policies](kbcli_cluster_list-backup-policies.md) - List backup policies.
+* [kbcli cluster list-backups](kbcli_cluster_list-backups.md) - List backups.
+* [kbcli cluster list-components](kbcli_cluster_list-components.md) - List cluster components.
+* [kbcli cluster list-events](kbcli_cluster_list-events.md) - List cluster events.
+* [kbcli cluster list-instances](kbcli_cluster_list-instances.md) - List cluster instances.
+* [kbcli cluster list-logs](kbcli_cluster_list-logs.md) - List supported log files in cluster.
+* [kbcli cluster list-ops](kbcli_cluster_list-ops.md) - List all opsRequests.
+* [kbcli cluster list-restores](kbcli_cluster_list-restores.md) - List restores.
+* [kbcli cluster logs](kbcli_cluster_logs.md) - Access cluster log file.
+* [kbcli cluster promote](kbcli_cluster_promote.md) - Promote a non-primary or non-leader instance as the new primary or leader of the cluster
+* [kbcli cluster rebuild-instance](kbcli_cluster_rebuild-instance.md) - Rebuild the specified instances in the cluster.
+* [kbcli cluster register](kbcli_cluster_register.md) - Pull the cluster chart to the local cache and register the type to 'create' sub-command
+* [kbcli cluster restart](kbcli_cluster_restart.md) - Restart the specified components in the cluster.
+* [kbcli cluster restore](kbcli_cluster_restore.md) - Restore a new cluster from backup.
+* [kbcli cluster scale-in](kbcli_cluster_scale-in.md) - Scale in replicas of the specified components in the cluster.
+* [kbcli cluster scale-out](kbcli_cluster_scale-out.md) - Scale out replicas of the specified components in the cluster.
+* [kbcli cluster start](kbcli_cluster_start.md) - Start the cluster if it is stopped.
+* [kbcli cluster stop](kbcli_cluster_stop.md) - Stop the cluster and release all the pods of the cluster.
+* [kbcli cluster update](kbcli_cluster_update.md) - Update the cluster settings, such as enabling or disabling monitoring or logs.
+* [kbcli cluster upgrade](kbcli_cluster_upgrade.md) - Upgrade the service version (only minor version upgrades are supported).
+* [kbcli cluster upgrade-to-v1](kbcli_cluster_upgrade-to-v1.md) - Upgrade cluster to the v1 API version.
+* [kbcli cluster volume-expand](kbcli_cluster_volume-expand.md) - Expand volume with the specified components and volumeClaimTemplates in the cluster.
+* [kbcli cluster vscale](kbcli_cluster_vscale.md) - Vertically scale the specified components in the cluster.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_backup.mdx b/docs/zh/preview/cli/kbcli_cluster_backup.mdx
new file mode 100644
index 00000000..d17c45b1
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_backup.mdx
@@ -0,0 +1,71 @@
+---
+title: kbcli cluster backup
+---
+
+Create a backup for the cluster.
+
+```
+kbcli cluster backup NAME [flags]
+```
+
+### Examples
+
+```
+  # Create a backup for the cluster, use the default backup policy and volume snapshot backup method
+  kbcli cluster backup mycluster
+
+  # create a backup with a specified method, run "kbcli cluster describe-backup-policy mycluster" to show supported backup methods
+  kbcli cluster backup mycluster --method volume-snapshot
+
+  # create a backup with a specified backup policy, run "kbcli cluster list-backup-policies mycluster" to show the backup policies supported by the cluster
+  kbcli cluster backup mycluster --method volume-snapshot --policy
+
+  # create a backup from a parent backup
+  kbcli cluster backup mycluster --parent-backup parent-backup-name
+```
+
+### Options
+
+```
+ --deletion-policy string Deletion policy for the backup, determines whether the backup content in the backup repo will be deleted after the backup is deleted, supported values: [Delete, Retain] (default "Delete")
+ --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persisted. (default "none")
+ --edit Edit the API resource before creating
+ -h, --help help for backup
+ --method string Backup method (required). Backup methods are defined in the backup policy; if the policy has only one backup method, it is used as the default, and if it has multiple, the method whose volume snapshot is true is used as the default
+ --name string Backup name
+ -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+ --parent-backup string Parent backup name, used for incremental backup
+ --policy string Backup policy name, if not specified, use the cluster default backup policy
+ --retention-period string Retention period for the backup, supported values: [1y, 1mo, 1d, 1h, 1m] or combine them [1y1mo1d1h1m]; if not specified, the backup will not be deleted automatically and you need to delete it manually.
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_cancel-ops.mdx b/docs/zh/preview/cli/kbcli_cluster_cancel-ops.mdx
new file mode 100644
index 00000000..68439eca
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_cancel-ops.mdx
@@ -0,0 +1,54 @@
+---
+title: kbcli cluster cancel-ops
+---
+
+Cancel a pending/creating/running OpsRequest whose type is vscale or hscale.
+
+```
+kbcli cluster cancel-ops NAME [flags]
+```
+
+### Examples
+
+```
+  # cancel an opsRequest that is not yet completed.
+  kbcli cluster cancel-ops
+```
+
+### Options
+
+```
+ --auto-approve Skip interactive approval before cancelling the opsRequest
+ -h, --help help for cancel-ops
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_configure.mdx b/docs/zh/preview/cli/kbcli_cluster_configure.mdx
new file mode 100644
index 00000000..0f39e450
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_configure.mdx
@@ -0,0 +1,71 @@
+---
+title: kbcli cluster configure
+---
+
+Configure parameters for the specified components in the cluster.
+
+```
+kbcli cluster configure NAME --set key=value[,key=value] [--components=component1-name,component2-name] [--config-spec=config-spec-name] [--config-file=config-file] [flags]
+```
+
+### Examples
+
+```
+  # update component params
+  kbcli cluster configure mycluster --components=mysql --config-spec=mysql-3node-tpl --config-file=my.cnf --set=max_connections=1000,general_log=OFF
+
+  # if there is only one component, one config spec and one config file, the lookup performed by configure is simplified. e.g:
+  # update mysql max_connections, cluster name is mycluster
+  kbcli cluster configure mycluster --set max_connections=2000
+```
+
+### Options
+
+```
+ --auto-approve Skip interactive approval before reconfiguring the cluster
+ --components strings Component names for this operation
+ --config-file string Specify the name of the configuration file to be updated (e.g. for mysql: --config-file=my.cnf). For available templates and configs, refer to: 'kbcli cluster describe-config'.
+ --config-spec string Specify the name of the configuration template to be updated (e.g. for apecloud-mysql: --config-spec=mysql-3node-tpl). For available templates and configs, refer to: 'kbcli cluster describe-config'.
+ --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persisted. (default "none")
+ --edit Edit the API resource before creating
+ --force Skip the pre-checks of the opsRequest and run it forcibly
+ --force-restart Boolean flag to restart the component. Defaults to false.
+ -h, --help help for configure
+ --local-file string Specify the local configuration file to be updated.
+ --name string OpsRequest name. If not specified, it will be randomly generated
+ -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+ --replace Boolean flag to enable replacing the config file. Defaults to false.
+ --set strings Specify the list of parameters to be updated. For more details, refer to 'kbcli cluster describe-config'.
+ --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_connect.mdx b/docs/zh/preview/cli/kbcli_cluster_connect.mdx
new file mode 100644
index 00000000..914cc36f
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_connect.mdx
@@ -0,0 +1,65 @@
+---
+title: kbcli cluster connect
+---
+
+Connect to a cluster or instance.
+
+```
+kbcli cluster connect (NAME | -i INSTANCE-NAME) [flags]
+```
+
+### Examples
+
+```
+  # connect to a specified cluster
+  kbcli cluster connect mycluster
+
+  # connect to a specified instance
+  kbcli cluster connect -i mycluster-instance-0
+
+  # connect to a specified component
+  kbcli cluster connect mycluster --component mycomponent
+
+  # show a cli connection example, supported clients: [cli, java, python, rust, php, node.js, go, .net, django] and more.
+  kbcli cluster connect mycluster --client=cli
+```
+
+### Options
+
+```
+ --client string Which client connection example should be output.
+ --component string The component to connect to. If not specified and there are no cluster-scope services, the first one is picked.
+ -h, --help help for connect
+ -i, --instance string The instance name to connect to.
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_convert-to-v1.mdx b/docs/zh/preview/cli/kbcli_cluster_convert-to-v1.mdx
new file mode 100644
index 00000000..ef4dbaf0
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_convert-to-v1.mdx
@@ -0,0 +1,58 @@
+---
+title: kbcli cluster convert-to-v1
+---
+
+Convert cluster API version.
+
+```
+kbcli cluster convert-to-v1 [NAME] [flags]
+```
+
+### Examples
+
+```
+  # convert a v1alpha1 cluster
+  kbcli cluster convert-to-v1 mycluster
+
+  # convert a v1alpha1 cluster with --dry-run
+  kbcli cluster convert-to-v1 mycluster --dry-run
+```
+
+### Options
+
+```
+ --dry-run dry run mode
+ -h, --help help for convert-to-v1
+ --no-diff only print the new cluster yaml
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_create.mdx b/docs/zh/preview/cli/kbcli_cluster_create.mdx
new file mode 100644
index 00000000..6c380562
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_create.mdx
@@ -0,0 +1,68 @@
+---
+title: kbcli cluster create
+---
+
+Create a cluster.
+
+```
+kbcli cluster create [ClusterType] [flags]
+```
+
+### Examples
+
+```
+  # Create a postgresql cluster
+  kbcli cluster create postgresql my-cluster
+
+  # Get the cluster yaml by dry-run
+  kbcli cluster create postgresql my-cluster --dry-run
+
+  # Edit cluster yaml before creation.
+  kbcli cluster create mycluster --edit
+```
+
+### Options
+
+```
+ -h, --help help for create
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+* [kbcli cluster create apecloud-mysql](kbcli_cluster_create_apecloud-mysql.md) - Create an apecloud-mysql cluster.
+* [kbcli cluster create etcd](kbcli_cluster_create_etcd.md) - Create a etcd cluster. +* [kbcli cluster create kafka](kbcli_cluster_create_kafka.md) - Create a kafka cluster. +* [kbcli cluster create mongodb](kbcli_cluster_create_mongodb.md) - Create a mongodb cluster. +* [kbcli cluster create mysql](kbcli_cluster_create_mysql.md) - Create a mysql cluster. +* [kbcli cluster create postgresql](kbcli_cluster_create_postgresql.md) - Create a postgresql cluster. +* [kbcli cluster create qdrant](kbcli_cluster_create_qdrant.md) - Create a qdrant cluster. +* [kbcli cluster create rabbitmq](kbcli_cluster_create_rabbitmq.md) - Create a rabbitmq cluster. +* [kbcli cluster create redis](kbcli_cluster_create_redis.md) - Create a redis cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_apecloud-mysql.mdx b/docs/zh/preview/cli/kbcli_cluster_create_apecloud-mysql.mdx new file mode 100644 index 00000000..e654ed26 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_apecloud-mysql.mdx @@ -0,0 +1,86 @@ +--- +title: kbcli cluster create apecloud-mysql +--- + +Create a apecloud-mysql cluster. + +``` +kbcli cluster create apecloud-mysql NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create apecloud-mysql + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create apecloud-mysql --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --etcd.local.etcdctl-api string (default "3") + --etcd.local.replicas int Value range [1, 3]. (default 3) + --etcd.local.resources.storage string (default "20Gi") + --etcd.local.service-version string (default "3.5.6") + --etcd.mode string Legal values [serviceRef, local]. (default "local") + --etcd.service-ref.cluster.component string (default "etcd") + --etcd.service-ref.cluster.credential string + --etcd.service-ref.cluster.name string + --etcd.service-ref.cluster.port string (default "client") + --etcd.service-ref.cluster.service string (default "headless") + --etcd.service-ref.namespace string (default "default") + --etcd.service-ref.service-descriptor string + -h, --help help for apecloud-mysql + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --mode string Cluster topology mode. Legal values [standalone, raftGroup]. (default "standalone") + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --proxy-enabled Enable proxy or not. + --replicas int The number of replicas, for standalone mode, the replicas is 1, for raftGroup mode, the default replicas is 3. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. 
(default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string MySQL Service Version. (default "8.0.30") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_etcd.mdx b/docs/zh/preview/cli/kbcli_cluster_create_etcd.mdx new file mode 100644 index 00000000..4ea3d7e6 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_etcd.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster create etcd +--- + +Create a etcd cluster. + +``` +kbcli cluster create etcd NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create etcd + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create etcd --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --client-service.node-port int Optional, if clientService type is NodePort, by default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767). + --client-service.port int The port on which the service will listen. (default 2379) + --client-service.role string Role of the service within the cluster. 
(default "leader") + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for etcd + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, the default replicas is 3. Value range [1, 5]. (default 3) + --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tls-enable Enable TLS for etcd cluster + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
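+
+The options above can be combined beyond the basic examples. As an illustrative sketch only (the cluster name and all flag values here are assumptions, not recommendations), the following creates a TLS-enabled, three-replica etcd cluster whose client service is pinned to a fixed NodePort from the default range:
+
+```
+kbcli cluster create etcd my-etcd \
+  --replicas 3 \
+  --tls-enable \
+  --client-service.node-port 30379 \
+  --cpu 1 --memory 2 --storage 20
+```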
+ diff --git a/docs/zh/preview/cli/kbcli_cluster_create_kafka.mdx b/docs/zh/preview/cli/kbcli_cluster_create_kafka.mdx new file mode 100644 index 00000000..49980c8e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_kafka.mdx @@ -0,0 +1,89 @@ +--- +title: kbcli cluster create kafka +--- + +Create a kafka cluster. + +``` +kbcli cluster create kafka NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create kafka + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create kafka --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --broker-heap string Kafka broker's jvm heap setting. (default "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64") + --broker-replicas int The number of Kafka broker replicas for separated mode. Value range [1, 100]. (default 1) + --controller-heap string Kafka controller's jvm heap setting for separated mode (default "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64") + --controller-replicas int The number of Kafka controller replicas for separated mode. Legal values [1, 3, 5]. (default 1) + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --fixed-pod-ip-enabled advertised.listeners Whether to enable fixed Pod IP mode in Kafka's advertised.listeners + -h, --help help for kafka + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --meta-storage float Metadata Storage size, the unit is Gi. Value range [1, 10000]. (default 5) + --meta-storage-class string The StorageClass for Kafka Metadata Storage. + --mode string Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently. Legal values [combined, separated]. (default "combined") + --monitor-enable Enable monitor for Kafka. (default true) + --monitor.limit.cpu float (default 0.5) + --monitor.limit.memory float (default 1) + --monitor.replicas int Number of replicas for the monitor component. Value range [1]. (default 1) + --monitor.request.cpu float (default 0.1) + --monitor.request.memory float (default 0.2) + --node-labels stringToString Node label selector (default []) + --node-port-enabled advertised.listeners Whether to enable NodePort mode in Kafka's advertised.listeners + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of Kafka broker replicas for combined mode. Legal values [1, 3, 5]. (default 1) + --sasl-enable Enable authentication using SASL/PLAIN for Kafka. + --storage float Data Storage size, the unit is Gi. Value range [1, 10000]. (default 10) + --storage-class string The StorageClass for Kafka Data Storage. + --storage-enable Enable storage for Kafka. + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. 
(default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string Cluster version. (default "3.3.2") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_mongodb.mdx b/docs/zh/preview/cli/kbcli_cluster_create_mongodb.mdx new file mode 100644 index 00000000..96b9b695 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_mongodb.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster create mongodb +--- + +Create a mongodb cluster. + +``` +kbcli cluster create mongodb NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create mongodb + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create mongodb --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for mongodb + --hostnetwork string Legal values [enabled, disabled]. (default "enabled") + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --mode string Cluster topology mode. 
Legal values [standalone, replicaset]. (default "standalone") + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replicaset mode, the default replicas is 3. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string Cluster version. Legal values [8.0.8, 8.0.6, 8.0.4, 7.0.19, 7.0.16, 7.0.12, 6.0.22, 6.0.20, 6.0.16, 5.0.30, 5.0.28, 4.4.29, 4.2.24, 4.0.28]. (default "6.0.16") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_mysql.mdx b/docs/zh/preview/cli/kbcli_cluster_create_mysql.mdx new file mode 100644 index 00000000..9cbe561e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_mysql.mdx @@ -0,0 +1,78 @@ +--- +title: kbcli cluster create mysql +--- + +Create a mysql cluster. 
+
+```
+kbcli cluster create mysql NAME [flags]
+```
+
+### Examples
+
+```
+  # Create a cluster with the default values
+  kbcli cluster create mysql
+
+  # Create a cluster with the specified cpu, memory and storage
+  kbcli cluster create mysql --cpu 1 --memory 2 --storage 10
+```
+
+### Options
+
+```
+      --cpu float                                                 CPU cores. Value range [0.5, 64]. (default 1)
+      --disable-exporter                                          Enable or disable monitor. (default true)
+      --dry-run string[="unchanged"]                              Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit                                                      Edit the API resource before creating
+  -h, --help                                                      help for mysql
+      --memory float                                              Memory, the unit is Gi. Value range [0.5, 1000]. (default 1)
+      --node-labels stringToString                                Node label selector (default [])
+      --orchestrator.cluster-service-selector.cluster-name string   Orchestrator cluster name for service selector
+      --orchestrator.cluster-service-selector.namespace string     Orchestrator cluster namespace for service selector
+      --orchestrator.service-reference.endpoint string            Endpoint name of the service reference, format: :
+  -o, --output format                                             Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --pod-anti-affinity string                                  Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred")
+      --proxysql.cpu float                                        (default 1)
+      --proxysql.memory float                                     Memory, the unit is Gi. (default 1)
+      --proxysql.replicas int                                     (default 1)
+      --replicas int                                              The number of replicas. Value range [1, 5]. (default 1)
+      --storage float                                             Storage size, the unit is Gi. Value range [1, 10000]. (default 20)
+      --tenancy string                                            Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode")
+      --termination-policy string                                 The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete")
+      --tolerations strings                                       Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"'
+      --topology string                                           Topology type of the server. Note that under the orc/orc-proxysql topology, it is necessary to specify the Orchestrator cluster information. You should choose either orchestrator.cluster-service-selector or orchestrator.service-reference. This means that depending on your setup, you will configure one of these options to properly integrate with the Orchestrator service for managing your MySQL cluster. Legal values [semisync, semisync-proxysql, mgr, mgr-proxysql, orc, orc-proxysql]. (default "semisync")
+      --topology-keys stringArray                                 Topology keys for affinity
+      --version string                                            MySQL version. Legal values [8.0.39, 8.0.38, 8.0.37, 8.0.36, 8.4.2, 8.4.1, 8.4.0, 5.7.44]. (default "8.0.39")
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_oriol.mdx b/docs/zh/preview/cli/kbcli_cluster_create_oriol.mdx new file mode 100644 index 00000000..dfe0d62f --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_oriol.mdx @@ -0,0 +1,79 @@ +--- +title: kbcli cluster create oriol +--- + +Create a oriol cluster. + +``` +kbcli cluster create oriol NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create oriol + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create oriol --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --availability-policy string The availability policy of cluster. Legal values [none, node, zone]. (default "node") + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --etcd.cluster string The patroni dependency etcd cluster name (default "etcd") + --etcd.namespace string The patroni dependency etcd cluster namespace (default "default") + -h, --help help for oriol + --host-network-accessible Specify whether the cluster can be accessed from within the VPC. + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --mode string Legal values [standalone, replication]. (default "standalone") + --monitor-enabled Enable or disable monitor. + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. 
Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --publicly-accessible Specify whether the cluster can be accessed from the public internet. + --rbac-enabled Specify whether rbac resources will be created by client, otherwise KubeBlocks server will try to create rbac resources. + --replicas int Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string The tenancy of cluster. Legal values [SharedNode, DedicatedNode]. (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Halt, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string (default "orioledb-beta1") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_postgresql.mdx b/docs/zh/preview/cli/kbcli_cluster_create_postgresql.mdx new file mode 100644 index 00000000..8a17025e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_postgresql.mdx @@ -0,0 +1,72 @@ +--- +title: kbcli cluster create postgresql +--- + +Create a postgresql cluster. 
+ +``` +kbcli cluster create postgresql NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create postgresql + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create postgresql --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for postgresql + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5) + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replication mode, the default replicas is 2. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string service version. (default "15.7.0") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_qdrant.mdx b/docs/zh/preview/cli/kbcli_cluster_create_qdrant.mdx new file mode 100644 index 00000000..be0d2f7b --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_qdrant.mdx @@ -0,0 +1,72 @@ +--- +title: kbcli cluster create qdrant +--- + +Create a qdrant cluster. + +``` +kbcli cluster create qdrant NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create qdrant + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create qdrant --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 1) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for qdrant + --memory float Memory, the unit is Gi. Value range [0.5, 1000]. (default 2) + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas. Value range [1, 16]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string The version of Qdrant. (default "1.10.0") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_rabbitmq.mdx b/docs/zh/preview/cli/kbcli_cluster_create_rabbitmq.mdx new file mode 100644 index 00000000..5becf0bb --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_rabbitmq.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster create rabbitmq +--- + +Create a rabbitmq cluster. + +``` +kbcli cluster create rabbitmq NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create rabbitmq + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create rabbitmq --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.1, 64]. (default 0.5) + --disable-exporter Enable or disable monitor. (default true) + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for rabbitmq + --memory float Memory, the unit is Gi. Value range [0.1, 1000]. (default 0.5) + --mode string Cluster topology mode. Legal values [singlenode, clustermode]. (default "singlenode") + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --replicas int The number of replicas, for standalone mode, the replicas is 1, for replicaset mode, the default replicas is 3. Value range [1, 5]. (default 1) + --storage float Storage size, the unit is Gi. Value range [1, 10000]. 
(default 20) + --storage-class-name string Storage class name of the data volume + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete") + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity + --version string Cluster version. Legal values [4.0.9, 3.13.7, 3.13.2, 3.12.14, 3.11.28, 3.10.25, 3.9.29, 3.8.14]. (default "3.13.7") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_redis.mdx b/docs/zh/preview/cli/kbcli_cluster_create_redis.mdx new file mode 100644 index 00000000..5c6e1817 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_redis.mdx @@ -0,0 +1,92 @@ +--- +title: kbcli cluster create redis +--- + +Create a redis cluster. + +``` +kbcli cluster create redis NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create redis + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create redis --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --cpu float CPU cores. Value range [0.5, 64]. (default 0.5) + --custom-secret-name string the secret must contain keys named 'username' and 'password' + --custom-secret-namespace string the secret must contain keys named 'username' and 'password' + --disable-exporter Enable or disable monitor. 
(default true)
+      --dry-run string[="unchanged"]                     Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit                                             Edit the API resource before creating
+  -h, --help                                             help for redis
+      --memory float                                     Memory, the unit is Gi. Value range [0.5, 1000]. (default 0.5)
+      --mode string                                      Cluster topology mode. Legal values [standalone, replication, cluster, replication-twemproxy]. (default "replication")
+      --node-labels stringToString                       Node label selector (default [])
+      --node-port-enabled                                Whether NodePort service is enabled, default is true
+  -o, --output format                                    Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --pod-anti-affinity string                         Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred")
+      --redis-cluster.custom-secret-name string          The secret must contain keys named 'username' and 'password'
+      --redis-cluster.custom-secret-namespace string     The secret must contain keys named 'username' and 'password'
+      --redis-cluster.shard-count float                  The number of shards in the redis cluster. Value range [3, 2048]. (default 3)
+      --replicas int                                     The number of replicas, for standalone mode, the replicas is 1, for replication mode, the default replicas is 2. Value range [1, 5]. (default 1)
+      --sentinel.cpu float                               Sentinel component cpu cores. Value range [0.1, 8]. (default 0.2)
+      --sentinel.custom-master-name string               Name of the master node monitored by Sentinel. If empty, a default value will be used.
+      --sentinel.custom-secret-name string               The secret must contain keys named 'username' and 'password'
+      --sentinel.custom-secret-namespace string          The secret must contain keys named 'username' and 'password'
+      --sentinel.enabled                                 Whether to enable the sentinel component, default is true (default true)
+      --sentinel.memory float                            Sentinel component memory, the unit is Gi. Value range [0.1, 4]. (default 0.2)
+      --sentinel.replicas float                          Sentinel component replicas. Value range [1, 5]. (default 3)
+      --sentinel.storage float                           Sentinel component storage size, the unit is Gi. Value range [1, 1024]. (default 20)
+      --sentinel.storage-class-name string               Sentinel component storage class name
+      --storage float                                    Storage size, the unit is Gi. Value range [1, 10000]. (default 20)
+      --storage-class-name string                        Storage class name of the data volume
+      --tenancy string                                   Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode")
+      --termination-policy string                        The termination policy of cluster. Legal values [DoNotTerminate, Delete, WipeOut]. (default "Delete")
+      --tolerations strings                              Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"'
+      --topology-keys stringArray                        Topology keys for affinity
+      --twemproxy.cpu float                              Twemproxy component cpu cores. Value range [0.1, 8]. (default 0.2)
+      --twemproxy.enabled                                Whether to enable the twemproxy component, default is false
+      --twemproxy.memory float                           Twemproxy component memory, the unit is Gi. Value range [0.1, 4]. (default 0.2)
+      --twemproxy.replicas float                         Twemproxy component replicas. Value range [1, 5]. (default 3)
+      --version string                                   Cluster version. (default "7.2.7")
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_create_tidb.mdx b/docs/zh/preview/cli/kbcli_cluster_create_tidb.mdx new file mode 100644 index 00000000..b6d711e0 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_create_tidb.mdx @@ -0,0 +1,75 @@ +--- +title: kbcli cluster create tidb +--- + +Create a tidb cluster. + +``` +kbcli cluster create tidb NAME [flags] +``` + +### Examples + +``` + # Create a cluster with the default values + kbcli cluster create tidb + + # Create a cluster with the specified cpu, memory and storage + kbcli cluster create tidb --cpu 1 --memory 2 --storage 10 +``` + +### Options + +``` + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + -h, --help help for tidb + --node-labels stringToString Node label selector (default []) + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --pd.cpu float CPU cores. Value range [2, 64]. (default 2) + --pd.memory float Memory, the unit is Gi. Value range [4, 1000]. (default 4) + --pd.replicas int The number of replicas Value range [1, 5]. (default 3) + --pd.storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --pod-anti-affinity string Pod anti-affinity type, one of: (Preferred, Required) (default "Preferred") + --tenancy string Tenancy options, one of: (SharedNode, DedicatedNode) (default "SharedNode") + --termination-policy string The termination policy of cluster. Legal values [DoNotTerminate, Halt, Delete, WipeOut]. 
(default "Delete") + --tidb.cpu float CPU cores. Value range [2, 64]. (default 2) + --tidb.replicas int The number of replicas Value range [1, 5]. (default 2) + --tikv.cpu float CPU cores. Value range [2, 64]. (default 2) + --tikv.memory float Memory, the unit is Gi. Value range [4, 1000]. (default 4) + --tikv.replicas int The number of replicas Value range [1, 5]. (default 3) + --tikv.storage float Storage size, the unit is Gi. Value range [1, 10000]. (default 20) + --tolerations strings Tolerations for cluster, such as "key=value:effect,key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"' + --topology-keys stringArray Topology keys for affinity +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster create](kbcli_cluster_create.md) - Create a cluster. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
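+
+The per-component flags above allow each tier of the cluster to be sized independently. As an illustrative sketch only (the cluster name and resource values are assumptions, not sizing advice), they can be combined like this:
+
+```
+kbcli cluster create tidb my-tidb \
+  --pd.replicas 3 --pd.cpu 2 --pd.memory 4 --pd.storage 20 \
+  --tikv.replicas 3 --tikv.cpu 4 --tikv.memory 8 --tikv.storage 100 \
+  --tidb.replicas 2 --tidb.cpu 2
+```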
+ diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops.mdx new file mode 100644 index 00000000..460432d1 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops.mdx @@ -0,0 +1,68 @@ +--- +title: kbcli cluster custom-ops +--- + + + +``` +kbcli cluster custom-ops OpsDef --cluster [flags] +``` + +### Examples + +``` + # custom ops cli format + kbcli cluster custom-ops --cluster + + # example for kafka topic + kbcli cluster custom-ops kafka-topic --cluster mycluster --type create --topic test --partition 3 --replicas 3 + + # example for kafka acl + kbcli cluster custom-ops kafka-user-acl --cluster mycluster --type add --operations "Read,Writer,Delete,Alter,Describe" --allowUsers client --topic "*" + + # example for kafka quota + kbcli cluster custom-ops kafka-quota --cluster mycluster --user client --producerByteRate 1024 --consumerByteRate 2048 +``` + +### Options + +``` + -h, --help help for custom-ops +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. 
+* [kbcli cluster custom-ops kafka-quota](kbcli_cluster_custom-ops_kafka-quota.md) - Create a custom ops with opsDef kafka-quota
+* [kbcli cluster custom-ops kafka-topic](kbcli_cluster_custom-ops_kafka-topic.md) - Create a custom ops with opsDef kafka-topic
+* [kbcli cluster custom-ops kafka-user-acl](kbcli_cluster_custom-ops_kafka-user-acl.md) - Create a custom ops with opsDef kafka-user-acl
+* [kbcli cluster custom-ops mogdb-switchover](kbcli_cluster_custom-ops_mogdb-switchover.md) - Create a custom ops with opsDef mogdb-switchover
+* [kbcli cluster custom-ops post-rebuild-for-clickhouse](kbcli_cluster_custom-ops_post-rebuild-for-clickhouse.md) - Create a custom ops with opsDef post-rebuild-for-clickhouse
+* [kbcli cluster custom-ops post-scale-out-shard-for-clickhouse](kbcli_cluster_custom-ops_post-scale-out-shard-for-clickhouse.md) - Create a custom ops with opsDef post-scale-out-shard-for-clickhouse
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx
new file mode 100644
index 00000000..12cdc739
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-quota.mdx
+---
+title: kbcli cluster custom-ops kafka-quota
+---
+
+Create a custom ops with opsDef kafka-quota
+
+```
+kbcli cluster custom-ops kafka-quota [flags]
+```
+
+### Examples
+
+```
+  # Create a kafka-quota ops
+  kbcli cluster custom-ops kafka-quota --component
+```
+
+### Options
+
+```
+      --auto-approve                   Skip interactive approval before promoting the instance
+      --client string                  Client ID.
+      --component string               Specify the component name of the cluster. If not specified, the first component that references the defined componentDefinition is used.
+      --consumer-byte-rate int         The maximum amount of data that can be consumed per second, measured in bytes/sec
+      --controller-mutation-rate int   Partition mutation quota to control the rate at which mutations are accepted for user requests.
+      --delete-quotas stringArray
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit                           Edit the API resource before creating
+      --force                          Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help                           help for kafka-quota
+      --name string                    OpsRequest name. If not specified, it will be randomly generated
+  -o, --output format                  Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --producer-byte-rate int         The maximum amount of data that can be produced per second, measured in bytes/sec
+      --request-percentage int         Request percentage.
+      --ttlSecondsAfterSucceed int     Time to live after the OpsRequest succeeds
+      --user string                    User name
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx new file mode 100644 index 00000000..7e1a8c10 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-topic.mdx @@ -0,0 +1,74 @@ +--- +title: kbcli cluster custom-ops kafka-topic +--- + +Create a custom ops with opsDef kafka-topic + +``` +kbcli cluster custom-ops kafka-topic [flags] +``` + +### Examples + +``` + # Create a kafka-topic ops + kbcli cluster custom-ops kafka-topic --component --topic= --type= +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --config string A topic configuration override for the topic being created or altered. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for kafka-topic + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --partitions int The number of partitions for the topic being created or altered (WARNING: + If partitions are increased for a topic that has a key, the partition logic or ordering + of the messages will be affected). If not supplied for create, defaults to the cluster default. + Value range [1, 10000]. + --replicas int The replication factor for each partition in the topic being + created. If not supplied, defaults to the cluster default. + Value range [1, 10]. 
+ --topic string The topic to create, alter or delete. It also accepts a regular + expression, except for --create option. Put topic name in double quotes and + use the '\' prefix to escape regular expression symbols; e.g. "test\.topic". + + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed + --type string operation type, supports value: [create, alter, delete]. Legal values [create, alter, delete]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx new file mode 100644 index 00000000..cc0074e4 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops_kafka-user-acl.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster custom-ops kafka-user-acl +--- + +Create a custom ops with opsDef kafka-user-acl + +``` +kbcli cluster custom-ops kafka-user-acl [flags] +``` + +### Examples + +``` + # Create a kafka-user-acl ops + kbcli cluster custom-ops kafka-user-acl --component --operations= --type= +``` + +### Options + +``` + --allow-hosts stringArray + --allow-users stringArray + --auto-approve Skip interactive approval before promote the instance + --cluster string Indicates to the script that the user is trying to interact with acls on the singular cluster resource. + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --consumer indicate to add or remove the acl of consumer. 
+      --deny-hosts stringArray
+      --deny-users stringArray
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit                           Edit the API resource before creating
+      --force                          skip the pre-checks of the opsRequest to run the opsRequest forcibly
+      --group string                   consumer-group.
+  -h, --help                           help for kafka-user-acl
+      --name string                    OpsRequest name. if not specified, it will be randomly generated
+      --operations stringArray
+  -o, --output format                  Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --pattern-type string            Resource pattern type.
+      --producer                       indicate to add or remove the acl of producer.
+      --topic string                   topic name.
+      --transactional-id string        The transactionalId to which ACLs should be added or removed. A value of * indicates the ACLs should apply to all transactionalIds.
+      --ttlSecondsAfterSucceed int     Time to live after the OpsRequest succeed
+      --type string                    operation type (e.g. add or remove)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - 
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
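+
+For reference, a fuller invocation might look like the sketch below. The cluster name `mycluster`, user `client`, and topic `test` are hypothetical values, and `--type add` mirrors the ACL example on the parent `custom-ops` page:
+
+```
+  # grant a user read and write access to a single topic
+  kbcli cluster custom-ops kafka-user-acl --cluster mycluster --type add --operations "Read,Write" --allow-users client --topic test
+```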
+ diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops_mogdb-switchover.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops_mogdb-switchover.mdx new file mode 100644 index 00000000..58bcce4b --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops_mogdb-switchover.mdx @@ -0,0 +1,65 @@ +--- +title: kbcli cluster custom-ops mogdb-switchover +--- + +Create a custom ops with opsDef mogdb-switchover + +``` +kbcli cluster custom-ops mogdb-switchover [flags] +``` + +### Examples + +``` + # Create a mogdb-switchover ops + kbcli cluster custom-ops mogdb-switchover --component +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --candidate string candidate instance name(pod Name). if candidate is not empty, will promote it to primary. + otherwise promote a randomly selected pod to primary. + + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for mogdb-switchover + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --primary string old primary instance name(pod Name). + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - 
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops_post-rebuild-for-clickhouse.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops_post-rebuild-for-clickhouse.mdx
new file mode 100644
index 00000000..1ebde8df
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops_post-rebuild-for-clickhouse.mdx
@@ -0,0 +1,63 @@
+---
+title: kbcli cluster custom-ops post-rebuild-for-clickhouse
+---
+
+Create a custom ops with opsDef post-rebuild-for-clickhouse
+
+```
+kbcli cluster custom-ops post-rebuild-for-clickhouse [flags]
+```
+
+### Examples
+
+```
+  # Create a post-rebuild-for-clickhouse ops
+  kbcli cluster custom-ops post-rebuild-for-clickhouse --component
+```
+
+### Options
+
+```
+      --auto-approve                   Skip interactive approval before promote the instance
+      --component string               Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition.
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit                           Edit the API resource before creating
+      --force                          skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help                           help for post-rebuild-for-clickhouse
+      --instance-name string           specify the instance name which has been rebuilt.
+
+      --name string                    OpsRequest name. if not specified, it will be randomly generated
+  -o, --output format                  Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --ttlSecondsAfterSucceed int     Time to live after the OpsRequest succeed
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_custom-ops_post-scale-out-shard-for-clickhouse.mdx b/docs/zh/preview/cli/kbcli_cluster_custom-ops_post-scale-out-shard-for-clickhouse.mdx new file mode 100644 index 00000000..6afb5029 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_custom-ops_post-scale-out-shard-for-clickhouse.mdx @@ -0,0 +1,61 @@ +--- +title: kbcli cluster custom-ops post-scale-out-shard-for-clickhouse +--- + +Create a custom ops with opsDef post-scale-out-shard-for-clickhouse + +``` +kbcli cluster custom-ops post-scale-out-shard-for-clickhouse [flags] +``` + +### Examples + +``` + # Create a post-scale-out-shard-for-clickhouse ops + kbcli cluster custom-ops post-scale-out-shard-for-clickhouse --component +``` + +### Options + +``` + --auto-approve Skip interactive approval before promote the instance + --component string Specify the component name of the cluster. if not specified, using the first component which referenced the defined componentDefinition. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for post-scale-out-shard-for-clickhouse + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster custom-ops](kbcli_cluster_custom-ops.md) - + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_delete-backup.mdx b/docs/zh/preview/cli/kbcli_cluster_delete-backup.mdx new file mode 100644 index 00000000..94099819 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_delete-backup.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli cluster delete-backup +--- + +Delete a backup. + +``` +kbcli cluster delete-backup [flags] +``` + +### Examples + +``` + # delete a backup named backup-name + kbcli cluster delete-backup cluster-name --name backup-name +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + --auto-approve Skip interactive approval before deleting + --force If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation. + --grace-period int Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1) + -h, --help help for delete-backup + --name strings Backup names + --now If true, resources are signaled for immediate shutdown (same as --grace-period=1). + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_delete-ops.mdx b/docs/zh/preview/cli/kbcli_cluster_delete-ops.mdx
new file mode 100644
index 00000000..aefc6560
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_delete-ops.mdx
@@ -0,0 +1,63 @@
+---
+title: kbcli cluster delete-ops
+---
+
+Delete an OpsRequest.
+
+```
+kbcli cluster delete-ops [flags]
+```
+
+### Examples
+
+```
+  # delete all ops that belong to the specified cluster
+  kbcli cluster delete-ops mycluster
+
+  # delete the specified ops belonging to the specified cluster
+  kbcli cluster delete-ops --name=mysql-restart-82zxv
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+      --auto-approve       Skip interactive approval before deleting
+      --force              If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.
+      --grace-period int   Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1)
+  -h, --help               help for delete-ops
+      --name strings       OpsRequest names
+      --now                If true, resources are signaled for immediate shutdown (same as --grace-period=1).
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h).
A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_delete.mdx b/docs/zh/preview/cli/kbcli_cluster_delete.mdx
new file mode 100644
index 00000000..435a7a89
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_delete.mdx
@@ -0,0 +1,65 @@
+---
+title: kbcli cluster delete
+---
+
+Delete clusters.
+
+```
+kbcli cluster delete NAME [flags]
+```
+
+### Examples
+
+```
+  # delete a cluster named mycluster
+  kbcli cluster delete mycluster
+
+  # delete a cluster by label selector
+  kbcli cluster delete --selector clusterdefinition.kubeblocks.io/name=apecloud-mysql
+
+  # forcibly delete a cluster named mycluster
+  kbcli cluster delete mycluster --force
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+      --auto-approve       Skip interactive approval before deleting
+      --force              If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation.
+      --grace-period int   Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1)
+  -h, --help               help for delete
+      --now                If true, resources are signaled for immediate shutdown (same as --grace-period=1).
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request.
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_describe-backup-policy.mdx b/docs/zh/preview/cli/kbcli_cluster_describe-backup-policy.mdx new file mode 100644 index 00000000..cd242872 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_describe-backup-policy.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli cluster describe-backup-policy +--- + +Describe backup policy + +``` +kbcli cluster describe-backup-policy [flags] +``` + +### Examples + +``` + # describe the default backup policy of the cluster + kbcli cluster describe-backup-policy cluster-name + + # describe the backup policy of the cluster with specified name + kbcli cluster describe-backup-policy cluster-name --name backup-policy-name +``` + +### Options + +``` + -h, --help help for describe-backup-policy + --names strings Backup policy names +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
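+
+As a sketch of typical usage (the policy name `mycluster-backup-policy` is hypothetical; the first command prints the real names):
+
+```
+  # show the default backup policy, then inspect a specific one by name
+  kbcli cluster describe-backup-policy mycluster
+  kbcli cluster describe-backup-policy mycluster --names mycluster-backup-policy
+```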
+ diff --git a/docs/zh/preview/cli/kbcli_cluster_describe-backup.mdx b/docs/zh/preview/cli/kbcli_cluster_describe-backup.mdx new file mode 100644 index 00000000..898aebd5 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_describe-backup.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli cluster describe-backup +--- + +Describe a backup. + +``` +kbcli cluster describe-backup BACKUP-NAME [flags] +``` + +### Examples + +``` + # describe backups of the cluster + kbcli cluster describe-backup + + # describe a backup + kbcli cluster describe-backup --names +``` + +### Options + +``` + -h, --help help for describe-backup + --names strings Backup names +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_describe-config.mdx b/docs/zh/preview/cli/kbcli_cluster_describe-config.mdx new file mode 100644 index 00000000..8c972748 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_describe-config.mdx @@ -0,0 +1,65 @@ +--- +title: kbcli cluster describe-config +--- + +Show details of a specific reconfiguring. + +``` +kbcli cluster describe-config [flags] +``` + +### Examples + +``` + # describe a cluster, e.g. cluster name is mycluster + kbcli cluster describe-config mycluster + + # describe a component, e.g. cluster name is mycluster, component name is mysql + kbcli cluster describe-config mycluster --component=mysql + + # describe all configuration files. + kbcli cluster describe-config mycluster --component=mysql --show-detail + + # describe a content of configuration file. 
+ kbcli cluster describe-config mycluster --component=mysql --config-file=my.cnf --show-detail +``` + +### Options + +``` + --components strings Specify the name of Component to describe (e.g. for apecloud-mysql: --component=mysql). If the cluster has only one component, unset the parameter." + --config-file strings Specify the name of the configuration file to be describe (e.g. for mysql: --config-file=my.cnf). If unset, all files. + --config-specs strings Specify the name of the configuration template to describe. (e.g. for apecloud-mysql: --config-specs=mysql-3node-tpl) + -h, --help help for describe-config +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_describe-ops.mdx b/docs/zh/preview/cli/kbcli_cluster_describe-ops.mdx new file mode 100644 index 00000000..295a08f0 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_describe-ops.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster describe-ops +--- + +Show details of a specific OpsRequest. + +``` +kbcli cluster describe-ops [flags] +``` + +### Examples + +``` + # describe a specified OpsRequest + kbcli cluster describe-ops mysql-restart-82zxv +``` + +### Options + +``` + -h, --help help for describe-ops +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_describe-restore.mdx b/docs/zh/preview/cli/kbcli_cluster_describe-restore.mdx new file mode 100644 index 00000000..047a8577 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_describe-restore.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster describe-restore +--- + +Describe a restore + +``` +kbcli cluster describe-restore NAME [flags] +``` + +### Examples + +``` + # describe a restore + kbcli cluster describe-restore +``` + +### Options + +``` + -h, --help help for describe-restore +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_describe.mdx b/docs/zh/preview/cli/kbcli_cluster_describe.mdx new file mode 100644 index 00000000..d3879cb7 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster describe +--- + +Show details of a specific cluster. + +``` +kbcli cluster describe NAME [flags] +``` + +### Examples + +``` + # describe a specified cluster + kbcli cluster describe mycluster +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_diff-config.mdx b/docs/zh/preview/cli/kbcli_cluster_diff-config.mdx new file mode 100644 index 00000000..efd0217f --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_diff-config.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster diff-config +--- + +Show the difference in parameters between the two submitted OpsRequest. 
+ +``` +kbcli cluster diff-config [flags] +``` + +### Examples + +``` + # compare config files + kbcli cluster diff-config opsrequest1 opsrequest2 +``` + +### Options + +``` + -h, --help help for diff-config +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_edit-backup-policy.mdx b/docs/zh/preview/cli/kbcli_cluster_edit-backup-policy.mdx new file mode 100644 index 00000000..eb3ca54e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_edit-backup-policy.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli cluster edit-backup-policy +--- + +Edit backup policy + +``` +kbcli cluster edit-backup-policy +``` + +### Examples + +``` + # edit backup policy + kbcli cluster edit-backup-policy +``` + +### Options + +``` + -h, --help help for edit-backup-policy +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_edit-config.mdx b/docs/zh/preview/cli/kbcli_cluster_edit-config.mdx new file mode 100644 index 00000000..1c60a7f7 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_edit-config.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster edit-config +--- + +Edit the config file of the component. + +``` +kbcli cluster edit-config NAME [--components=component-name] [--config-spec=config-spec-name] [--config-file=config-file] [flags] +``` + +### Examples + +``` + # update mysql max_connections, cluster name is mycluster + kbcli cluster edit-config mycluster +``` + +### Options + +``` + --components strings Component names to this operations + --config-file string Specify the name of the configuration file to be updated (e.g. for mysql: --config-file=my.cnf). For available templates and configs, refer to: 'kbcli cluster describe-config'. + --config-spec string Specify the name of the configuration template to be updated (e.g. for apecloud-mysql: --config-spec=mysql-3node-tpl). For available templates and configs, refer to: 'kbcli cluster describe-config'. + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --enable-delete Boolean flag to enable delete configuration. Default with false. + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + --force-restart Boolean flag to restart component. Default with false. + -h, --help help for edit-config + --local-file string Specify the local configuration file to be updated. + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. 
Allowed values: JSON and YAML (default yaml) + --replace Boolean flag to enable replacing config file. Default with false. + --set strings Specify parameters list to be updated. For more details, refer to 'kbcli cluster describe-config'. + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_explain-config.mdx b/docs/zh/preview/cli/kbcli_cluster_explain-config.mdx new file mode 100644 index 00000000..eb8ae49e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_explain-config.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli cluster explain-config +--- + +List the constraint for supported configuration params. + +``` +kbcli cluster explain-config [flags] +``` + +### Examples + +``` + # explain a cluster, e.g. cluster name is mycluster + kbcli cluster explain-config mycluster + + # explain a specified configure template, e.g. cluster name is mycluster + kbcli cluster explain-config mycluster --component=mysql --config-specs=mysql-3node-tpl + + # explain a specified configure template, e.g. cluster name is mycluster + kbcli cluster explain-config mycluster --component=mysql --config-specs=mysql-3node-tpl --trunc-document=false --trunc-enum=false + + # explain a specified parameters, e.g. cluster name is mycluster + kbcli cluster explain-config mycluster --param=sql_mode +``` + +### Options + +``` + --components strings Specify the name of Component to describe (e.g. for apecloud-mysql: --component=mysql). If the cluster has only one component, unset the parameter." 
+ --config-specs strings Specify the name of the configuration template to describe. (e.g. for apecloud-mysql: --config-specs=mysql-3node-tpl)
+ -h, --help help for explain-config
+ --param string Specify the name of the parameter to query. It clearly displays the details of the parameter.
+ --trunc-document If the document length of the parameter is greater than 100, it will be truncated.
+ --trunc-enum If the value list length of the parameter is greater than 20, it will be truncated. (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_expose.mdx b/docs/zh/preview/cli/kbcli_cluster_expose.mdx
new file mode 100644
index 00000000..ac5cc1e6
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_expose.mdx
@@ -0,0 +1,83 @@
+---
+title: kbcli cluster expose
+---
+
+Expose a cluster with a new endpoint. The new endpoint can be found by executing 'kbcli cluster describe NAME'.
+
+```
+kbcli cluster expose NAME --enable=[true|false] --type=[intranet|internet] [flags]
+```
+
+### Examples
+
+```
+ # Expose a cluster to intranet
+ kbcli cluster expose mycluster --type intranet --enable=true
+
+ # Expose a cluster to public internet
+ kbcli cluster expose mycluster --type internet --enable=true
+
+ # Stop exposing a cluster
+ kbcli cluster expose mycluster --type intranet --enable=false
+```
+
+### Options
+
+```
+ --auto-approve Skip interactive approval before exposing the cluster
+ --components strings Component names for this operation
+ --dry-run string[="unchanged"] Must be "client", or "server". 
If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persisted. (default "none")
+ --edit Edit the API resource before creating
+ --enable string Enable or disable the expose; values can be true or false
+ --force Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+ -h, --help help for expose
+ --name string OpsRequest name. If not specified, it will be randomly generated
+ -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+ --role-selector roleSelector The Component's exposed Services may target replicas based on their roles using roleSelector; this flag must be set when the specified component has roles
+ --sub-type string Expose sub type, currently supported types are 'NodePort', 'LoadBalancer', only available if type is intranet (default "LoadBalancer")
+ --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeeds
+ --type string Expose type, currently supported types are 'intranet', 'internet'
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
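+
+### More examples
+
+The combinations below are a small additional sketch built only from the flags documented on this page; `mycluster` is a placeholder cluster name, so substitute your own.
+
+```
+ # Expose a cluster to intranet through a NodePort Service instead of the default LoadBalancer sub-type
+ kbcli cluster expose mycluster --type intranet --sub-type NodePort --enable=true
+
+ # Look up the new endpoint once the expose OpsRequest succeeds
+ kbcli cluster describe mycluster
+```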
+ diff --git a/docs/zh/preview/cli/kbcli_cluster_label.mdx b/docs/zh/preview/cli/kbcli_cluster_label.mdx new file mode 100644 index 00000000..928139be --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_label.mdx @@ -0,0 +1,73 @@ +--- +title: kbcli cluster label +--- + +Update the labels on cluster + +``` +kbcli cluster label NAME [flags] +``` + +### Examples + +``` + # list label for clusters with specified name + kbcli cluster label mycluster --list + + # add label 'env' and value 'dev' for clusters with specified name + kbcli cluster label mycluster env=dev + + # add label 'env' and value 'dev' for all clusters + kbcli cluster label env=dev --all + + # add label 'env' and value 'dev' for the clusters that match the selector + kbcli cluster label env=dev -l type=mysql + + # update cluster with the label 'env' with value 'test', overwriting any existing value + kbcli cluster label mycluster --overwrite env=test + + # delete label env for clusters with specified name + kbcli cluster label mycluster env- +``` + +### Options + +``` + --all Select all cluster + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + -h, --help help for label + --list If true, display the labels of the clusters + --overwrite If true, allow labels to be overwritten, otherwise reject label updates that overwrite existing labels. + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_list-backup-policies.mdx b/docs/zh/preview/cli/kbcli_cluster_list-backup-policies.mdx
new file mode 100644
index 00000000..8708d98c
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_list-backup-policies.mdx
@@ -0,0 +1,73 @@
+---
+title: kbcli cluster list-backup-policies
+---
+
+List backup policies.
+
+```
+kbcli cluster list-backup-policies [flags]
+```
+
+### Examples
+
+```
+ # list all backup policies
+ kbcli cluster list-backup-policies
+
+ # using the short command to list backup policies of the specified cluster
+ kbcli cluster list-bp mycluster
+```
+
+### Options
+
+```
+ -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+ -h, --help help for list-backup-policies
+ --names strings The backup policy name to get the details.
+ -n, --namespace string Specify the namespace
+ -o, --output format Prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+ -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+ --show-labels When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
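+
+### More examples
+
+A small additional sketch using only the flags documented on this page; the label `app.kubernetes.io/instance=mycluster` is illustrative, not prescriptive.
+
+```
+ # list backup policies across all namespaces, in YAML format
+ kbcli cluster list-backup-policies -A -o yaml
+
+ # list backup policies that match a label selector
+ kbcli cluster list-backup-policies -l app.kubernetes.io/instance=mycluster
+```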
+ diff --git a/docs/zh/preview/cli/kbcli_cluster_list-backups.mdx b/docs/zh/preview/cli/kbcli_cluster_list-backups.mdx new file mode 100644 index 00000000..6931bb56 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-backups.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster list-backups +--- + +List backups. + +``` +kbcli cluster list-backups [flags] +``` + +### Examples + +``` + # list all backups + kbcli cluster list-backups + + # list all backups of the cluster + kbcli cluster list-backups + + # list the specified backups + kbcli cluster list-backups --names b1,b2 +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-backups + --names strings The backup name to get the details. + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list-components.mdx b/docs/zh/preview/cli/kbcli_cluster_list-components.mdx new file mode 100644 index 00000000..0c321d5a --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-components.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster list-components +--- + +List cluster components. 
+ +``` +kbcli cluster list-components [flags] +``` + +### Examples + +``` + # list all components of all clusters in current namespace + kbcli cluster list-components + + # list all components of a specified cluster + kbcli cluster list-components mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-components + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list-events.mdx b/docs/zh/preview/cli/kbcli_cluster_list-events.mdx new file mode 100644 index 00000000..0f8e3a93 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-events.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster list-events +--- + +List cluster events. + +``` +kbcli cluster list-events [flags] +``` + +### Examples + +``` + # list all events of all clusters in current namespace + kbcli cluster list-events + + # list all events of a specified cluster + kbcli cluster list-events mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-events + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). 
Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list-instances.mdx b/docs/zh/preview/cli/kbcli_cluster_list-instances.mdx new file mode 100644 index 00000000..f54c2081 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-instances.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli cluster list-instances +--- + +List cluster instances. + +``` +kbcli cluster list-instances [flags] +``` + +### Examples + +``` + # list all instances of all clusters in current namespace + kbcli cluster list-instances + + # list all instances of a specified cluster + kbcli cluster list-instances mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-instances + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list-logs.mdx b/docs/zh/preview/cli/kbcli_cluster_list-logs.mdx new file mode 100644 index 00000000..83d62d82 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-logs.mdx @@ -0,0 +1,61 @@ +--- +title: kbcli cluster list-logs +--- + +List supported log files in cluster. + +``` +kbcli cluster list-logs NAME [flags] +``` + +### Examples + +``` + # Display supported log files in cluster mycluster with all instance + kbcli cluster list-logs mycluster + + # Display supported log files in cluster mycluster with specify component my-component + kbcli cluster list-logs mycluster --component my-component + + # Display supported log files in cluster mycluster with specify instance my-instance-0 + kbcli cluster list-logs mycluster --instance my-instance-0 +``` + +### Options + +``` + --component string Component name. + -h, --help help for list-logs + -i, --instance string Instance name. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list-ops.mdx b/docs/zh/preview/cli/kbcli_cluster_list-ops.mdx new file mode 100644 index 00000000..c8498d16 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-ops.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli cluster list-ops +--- + +List all opsRequests. + +``` +kbcli cluster list-ops [flags] +``` + +### Examples + +``` + # list all opsRequests + kbcli cluster list-ops + + # list all opsRequests of specified cluster + kbcli cluster list-ops mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-ops + --name string The OpsRequest name to get the details. + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) + --status strings Options include all, pending, creating, running, canceling, failed. by default, outputs the pending/creating/running/canceling/failed OpsRequest. (default [pending,creating,running,canceling,failed]) + --type strings The OpsRequest type +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list-restores.mdx b/docs/zh/preview/cli/kbcli_cluster_list-restores.mdx new file mode 100644 index 00000000..5d93e28b --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list-restores.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster list-restores +--- + +List restores. + +``` +kbcli cluster list-restores [flags] +``` + +### Examples + +``` + # list all restores + kbcli cluster list-restores + + # list all restores of the cluster + kbcli cluster list-restores + + # list the specified restores + kbcli cluster list-restores --names r1,r2 +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list-restores + --names strings List restores in the specified cluster + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 
1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_list.mdx b/docs/zh/preview/cli/kbcli_cluster_list.mdx new file mode 100644 index 00000000..53f15f0e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_list.mdx @@ -0,0 +1,70 @@ +--- +title: kbcli cluster list +--- + +List clusters. + +``` +kbcli cluster list [NAME] [flags] +``` + +### Examples + +``` + # list all clusters + kbcli cluster list + + # list a single cluster with specified name + kbcli cluster list mycluster + + # list a single cluster in YAML output format + kbcli cluster list mycluster -o yaml + + # list a single cluster in JSON output format + kbcli cluster list mycluster -o json + + # list a single cluster in wide output format + kbcli cluster list mycluster -o wide +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + -h, --help help for list + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) + --status string Filter objects by given status. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_logs.mdx b/docs/zh/preview/cli/kbcli_cluster_logs.mdx new file mode 100644 index 00000000..a445b8b6 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_logs.mdx @@ -0,0 +1,92 @@ +--- +title: kbcli cluster logs +--- + +Access cluster log file. + +``` +kbcli cluster logs NAME [flags] +``` + +### Examples + +``` + # Return snapshot logs from cluster mycluster with default primary instance (stdout) + kbcli cluster logs mycluster + + # Display only the most recent 20 lines from cluster mycluster with default primary instance (stdout) + kbcli cluster logs mycluster --tail=20 + + # Display stdout info of specific instance my-instance-0 (cluster name comes from annotation app.kubernetes.io/instance) + kbcli cluster logs --instance my-instance-0 + + # Return snapshot logs from cluster mycluster with specific instance my-instance-0 (stdout) + kbcli cluster logs mycluster --instance my-instance-0 + + # Return snapshot logs from cluster mycluster with specific instance my-instance-0 and specific container + # my-container (stdout) + kbcli cluster logs mycluster --instance my-instance-0 -c my-container + + # Return slow logs from cluster mycluster with default primary instance + kbcli cluster logs mycluster --file-type=slow + + # Begin streaming the slow logs from cluster mycluster with default primary instance + kbcli cluster logs -f mycluster --file-type=slow + + # Return the specific file logs from cluster mycluster with specific instance my-instance-0 + kbcli cluster logs mycluster --instance my-instance-0 --file-path=/var/log/yum.log + + # Return the specific file logs from cluster mycluster with specific instance my-instance-0 and specific + # container my-container + kbcli cluster logs mycluster --instance my-instance-0 -c my-container --file-path=/var/log/yum.log +``` + +### Options + +``` + -c, --container string Container name. + --file-path string Log-file path. File path has a priority over file-type. When file-path and file-type are unset, output stdout/stderr of target container. + --file-type string Log-file type. List them with list-logs cmd. When file-path and file-type are unset, output stdout/stderr of target container. + -f, --follow Specify if the logs should be streamed. + -h, --help help for logs + --ignore-errors If watching / following pod logs, allow for any errors that occur to be non-fatal. Only take effect for stdout&stderr. + -i, --instance string Instance name. + --limit-bytes int Maximum bytes of logs to return. + --prefix Prefix each log line with the log source (pod name and container name). Only take effect for stdout&stderr. + -p, --previous If true, print the logs for the previous instance of the container in a pod if it exists. Only take effect for stdout&stderr. + --since duration Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used. Only take effect for stdout&stderr. + --since-time string Only return logs after a specific date (RFC3339). 
Defaults to all logs. Only one of since-time / since may be used. Only take effect for stdout&stderr.
+ --tail int Lines of recent log file to display. Defaults to -1 for showing all log lines. (default -1)
+ --timestamps Include timestamps on each line in the log output. Only take effect for stdout&stderr.
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_promote.mdx b/docs/zh/preview/cli/kbcli_cluster_promote.mdx
new file mode 100644
index 00000000..ad6f60d2
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_promote.mdx
@@ -0,0 +1,62 @@
+---
+title: kbcli cluster promote
+---
+
+Promote a non-primary or non-leader instance as the new primary or leader of the cluster.
+
+```
+kbcli cluster promote NAME [--instance <instance-name>] [flags]
+```
+
+### Examples
+
+```
+ # Promote the instance mycluster-mysql-1 as the new primary or leader.
+ kbcli cluster promote mycluster --candidate mycluster-mysql-1
+```
+
+### Options
+
+```
+ --auto-approve Skip interactive approval before promoting the instance
+ --candidate string Specify the instance name as the new primary or leader of the cluster. You can get the instance name by running "kbcli cluster list-instances"
+ --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persisted. 
(default "none")
+ --edit Edit the API resource before creating
+ --force Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+ -h, --help help for promote
+ --instance string Specify the instance name that will transfer its role to the candidate pod. If not set, the current primary or leader of the cluster will be used.
+ --name string OpsRequest name. If not specified, it will be randomly generated
+ -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+ --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_rebuild-instance.mdx b/docs/zh/preview/cli/kbcli_cluster_rebuild-instance.mdx
new file mode 100644
index 00000000..fd1a83b6
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_rebuild-instance.mdx
@@ -0,0 +1,75 @@
+---
+title: kbcli cluster rebuild-instance
+---
+
+Rebuild the specified instances in the cluster.
+
+```
+kbcli cluster rebuild-instance NAME [flags]
+```
+
+### Examples
+
+```
+ # rebuild instance by creating new instances and removing the specified instances after the new instances are ready.
+ kbcli cluster rebuild-instance mycluster --instances pod1,pod2
+
+ # rebuild instance to a new node.
+ kbcli cluster rebuild-instance mycluster --instances pod1 --node nodeName
+
+ # rebuild instance with the same pod name.
+ kbcli cluster rebuild-instance mycluster --instances pod1 --in-place
+
+ # rebuild instance from backup and with the same pod name
+ kbcli cluster rebuild-instance mycluster --instances pod1,pod2 --backup <backupName> --in-place
+```
+
+### Options
+
+```
+ --auto-approve Skip interactive approval before rebuilding the instances.
+ --backup string Instances will be rebuilt from the specified backup.
+ --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persisted. (default "none")
+ --edit Edit the API resource before creating
+ --force Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+ -h, --help help for rebuild-instance
+ --in-place Rebuild the instance with the same pod name. If not set, a new instance will be created by horizontalScaling and the old instance will be removed after the new instance is ready
+ --instances strings Instances that need to be rebuilt.
+ --name string OpsRequest name. If not specified, it will be randomly generated
+ --node strings Specify the target node to rebuild the instance on; otherwise the instance will be rebuilt on a random node. format: insName1=nodeName,insName2=nodeName
+ -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+ --restore-env stringArray Provide the necessary env for the 'Restore' operation from the backup. format: key1=value, key2=value
+ --source-backup-target string To rebuild a sharding component instance from a backup, you can specify the name of the source backup target
+ --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+ --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache")
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -s, --server string The address and port of the Kubernetes API server
+ --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_register.mdx b/docs/zh/preview/cli/kbcli_cluster_register.mdx new file mode 100644 index 00000000..4f04216d --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_register.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster register +--- + +Pull the cluster chart to the local cache and register the type to 'create' sub-command + +``` +kbcli cluster register [NAME] [flags] +``` + +### Examples + +``` + # Pull a cluster type to local and register it to "kbcli cluster create" sub-cmd from specified URL + kbcli cluster register orioledb --source https://github.com/apecloud/helm-charts/releases/download/orioledb-cluster-0.6.0-beta.44/orioledb-cluster-0.6.0-beta.44.tgz + + # Register a cluster type from a local path file + kbcli cluster register neon --source pkg/cli/cluster/charts/neon-cluster.tgz + + # Register a cluster type from a Helm repository, specifying the version and engine. + kbcli cluster register mysql --engine mysql --version 0.9.0 --repo https://jihulab.com/api/v4/projects/150246/packages/helm/stable +``` + +### Options + +``` + --alias string Set the cluster type alias + --engine string Specify the cluster chart name in helm repo + -h, --help help for register + --repo string Specify the url of helm repo which contains cluster charts (default "https://jihulab.com/api/v4/projects/150246/packages/helm/stable") + -S, --source string Specify the cluster type chart source, support a URL or a local file path + --version string Specify the version of cluster chart to register +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli cluster](kbcli_cluster.md) - Cluster command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_cluster_restart.mdx b/docs/zh/preview/cli/kbcli_cluster_restart.mdx new file mode 100644 index 00000000..1bbbd728 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_cluster_restart.mdx @@ -0,0 +1,64 @@ +--- +title: kbcli cluster restart +--- + +Restart the specified components in the cluster. + +``` +kbcli cluster restart NAME [flags] +``` + +### Examples + +``` + # restart all components + kbcli cluster restart mycluster + + # specified component to restart, separate with commas for multiple components + kbcli cluster restart mycluster --components=mysql +``` + +### Options + +``` + --auto-approve Skip interactive approval before restarting the cluster + --components strings Component names to this operations + --dry-run string[="unchanged"] Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none") + --edit Edit the API resource before creating + --force skip the pre-checks of the opsRequest to run the opsRequest forcibly + -h, --help help for restart + --name string OpsRequest name. if not specified, it will be randomly generated + -o, --output format Prints the output in the specified format. Allowed values: JSON and YAML (default yaml) + --ttlSecondsAfterSucceed int Time to live after the OpsRequest succeed +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_restore.mdx b/docs/zh/preview/cli/kbcli_cluster_restore.mdx
new file mode 100644
index 00000000..1d701860
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_restore.mdx
@@ -0,0 +1,63 @@
+---
+title: kbcli cluster restore
+---
+
+Restore a new cluster from backup.
+
+```
+kbcli cluster restore [flags]
+```
+
+### Examples
+
+```
+  # restore a new cluster from a backup
+  kbcli cluster restore new-cluster-name --backup backup-name
+```
+
+### Options
+
+```
+      --backup string   Backup name
+      --backup-namespace string   Backup namespace
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+  -h, --help   help for restore
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --restore-after-cluster-running   Do the postReady phase when the cluster is Running rather than when the component is Running.
+      --restore-key string   Specify the key to restore in the KV database; supports multiple keys split by comma with wildcard pattern matching
+      --restore-key-ignore-errors   Whether or not to ignore errors when restoring the KV database by keys
+      --restore-to-time string   Point-in-time recovery (PITR)
+      --volume-restore-policy string   The volume claim restore policy, supported values: [Serial, Parallel] (default "Parallel")
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_scale-in.mdx b/docs/zh/preview/cli/kbcli_cluster_scale-in.mdx
new file mode 100644
index 00000000..b6d37547
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_scale-in.mdx
@@ -0,0 +1,69 @@
+---
+title: kbcli cluster scale-in
+---
+
+Scale in replicas of the specified components in the cluster.
+
+```
+kbcli cluster scale-in Replicas [flags]
+```
+
+### Examples
+
+```
+  # scale in 2 replicas
+  kbcli cluster scale-in mycluster --components=mysql --replicas=2
+
+  # offline specified instances
+  kbcli cluster scale-in mycluster --components=mysql --offline-instances pod1
+
+  # scale in 2 replicas, one of them is specified by "--offline-instances".
+  kbcli cluster scale-in mycluster --components=mysql --replicas=2 --offline-instances pod1
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before horizontally scaling the cluster
+      --components strings   Component names for this operation
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for scale-in
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+      --offline-instances strings   Offline the specified instances
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --replicas string   Replicas with the specified components
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_scale-out.mdx b/docs/zh/preview/cli/kbcli_cluster_scale-out.mdx
new file mode 100644
index 00000000..e737c8d6
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_scale-out.mdx
@@ -0,0 +1,69 @@
+---
+title: kbcli cluster scale-out
+---
+
+Scale out replicas of the specified components in the cluster.
+
+```
+kbcli cluster scale-out Replicas [flags]
+```
+
+### Examples
+
+```
+  # scale out 2 replicas
+  kbcli cluster scale-out mycluster --components=mysql --replicas=2
+
+  # bring the offline instances specified in compSpec.offlineInstances back online.
+  kbcli cluster scale-out mycluster --components=mysql --online-instances pod1
+
+  # scale out 2 replicas, one of which is an instance that has already been taken offline.
+  kbcli cluster scale-out mycluster --components=mysql --replicas=2 --online-instances pod1
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before horizontally scaling the cluster
+      --components strings   Component names for this operation
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for scale-out
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+      --online-instances strings   Online the specified instances which have been offline
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --replicas string   Replica changes with the specified components
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_start.mdx b/docs/zh/preview/cli/kbcli_cluster_start.mdx
new file mode 100644
index 00000000..dbc15d91
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_start.mdx
@@ -0,0 +1,63 @@
+---
+title: kbcli cluster start
+---
+
+Start the cluster if the cluster is stopped.
+
+```
+kbcli cluster start NAME [flags]
+```
+
+### Examples
+
+```
+  # start the cluster when the cluster is stopped
+  kbcli cluster start mycluster
+
+  # start a component of the cluster when the cluster is stopped
+  kbcli cluster start mycluster --components=mysql
+```
+
+### Options
+
+```
+      --components strings   Component names for this operation
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for start
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_stop.mdx b/docs/zh/preview/cli/kbcli_cluster_stop.mdx
new file mode 100644
index 00000000..2acbb525
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_stop.mdx
@@ -0,0 +1,64 @@
+---
+title: kbcli cluster stop
+---
+
+Stop the cluster and release all the pods of the cluster.
+
+```
+kbcli cluster stop NAME [flags]
+```
+
+### Examples
+
+```
+  # stop the cluster and release all the pods of the cluster
+  kbcli cluster stop mycluster
+
+  # stop a component of the cluster and release all the pods of the component
+  kbcli cluster stop mycluster --components=mysql
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before stopping the cluster
+      --components strings   Component names for this operation
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for stop
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_update.mdx b/docs/zh/preview/cli/kbcli_cluster_update.mdx
new file mode 100644
index 00000000..62077f10
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_update.mdx
@@ -0,0 +1,103 @@
+---
+title: kbcli cluster update
+---
+
+Update the cluster settings, such as enabling or disabling the monitor or log.
+
+```
+kbcli cluster update NAME [flags]
+```
+
+### Examples
+
+```
+  # update cluster mycluster termination policy to Delete
+  kbcli cluster update mycluster --termination-policy=Delete
+
+  # enable cluster monitor
+  kbcli cluster update mycluster --monitor=true
+
+  # update cluster tolerations
+  kbcli cluster update mycluster --tolerations='"key=engineType,value=mongo,operator=Equal,effect=NoSchedule","key=diskType,value=ssd,operator=Equal,effect=NoSchedule"'
+
+  # edit cluster
+  kbcli cluster update mycluster --edit
+
+  # enable cluster monitor and edit
+  # kbcli cluster update mycluster --monitor=true --edit
+
+  # enable cluster auto backup
+  kbcli cluster update mycluster --backup-enabled=true
+
+  # update cluster backup retention period
+  kbcli cluster update mycluster --backup-retention-period=1d
+
+  # update cluster backup method
+  kbcli cluster update mycluster --backup-method=snapshot
+
+  # update cluster backup cron expression
+  kbcli cluster update mycluster --backup-cron-expression="0 0 * * *"
+
+  # update cluster backup starting deadline minutes
+  kbcli cluster update mycluster --backup-starting-deadline-minutes=10
+
+  # update cluster backup repo name
+  kbcli cluster update mycluster --backup-repo-name=repo1
+
+  # update cluster backup pitr enabled
+  kbcli cluster update mycluster --pitr-enabled=true
+```
+
+### Options
+
+```
+      --allow-missing-template-keys   If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true)
+      --backup-cron-expression string   The cron expression for the schedule; the timezone is UTC. See https://en.wikipedia.org/wiki/Cron.
+      --backup-enabled   Specify whether automated backup is enabled
+      --backup-method string   The backup method; view it by "kbcli cd describe ". If not specified, the default backup method will be to take snapshots of the volume
+      --backup-repo-name string   The backup repository name
+      --backup-retention-period string   A time string ending with the 'd'|'D'|'h'|'H' character to describe how long the Backup should be retained (default "1d")
+      --backup-starting-deadline-minutes int   The deadline in minutes for starting the backup job if it misses its scheduled time for any reason
+      --disable-exporter   Enable or disable monitoring (default true)
+      --dry-run string[="unchanged"]   Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit a server-side request without persisting the resource. (default "none")
+      --edit   Edit the API resource
+  -h, --help   help for update
+  -o, --output string   Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file).
+      --pitr-enabled   Specify whether point-in-time recovery is enabled
+      --runtime-class-name string   Specifies runtimeClassName for all Pods managed by this Cluster.
+      --show-managed-fields   If true, keep the managedFields when printing objects in JSON or YAML format.
+      --template string   Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
+      --termination-policy string   Termination policy, one of: (DoNotTerminate, Delete, WipeOut) (default "Delete")
+      --tolerations strings   Tolerations for cluster, such as "key=value:effect, key:effect", for example '"engineType=mongo:NoSchedule", "diskType:NoSchedule"'
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_upgrade-to-v1.mdx b/docs/zh/preview/cli/kbcli_cluster_upgrade-to-v1.mdx
new file mode 100644
index 00000000..1d91106f
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_upgrade-to-v1.mdx
@@ -0,0 +1,58 @@
+---
+title: kbcli cluster upgrade-to-v1
+---
+
+Upgrade cluster to the v1 API version.
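+
+Before running the upgrade, it can help to confirm which API version the cluster object currently uses. A minimal sketch (assuming kubectl access to the same cluster; the cluster name mycluster is illustrative):
+
+```
+# Print the apiVersion of the Cluster object; a v1alpha1 result indicates an upgrade candidate
+kubectl get cluster mycluster -o jsonpath='{.apiVersion}'
+```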
+
+```
+kbcli cluster upgrade-to-v1 [NAME] [flags]
+```
+
+### Examples
+
+```
+  # upgrade a v1alpha1 cluster to a v1 cluster
+  kbcli cluster upgrade-to-v1 mycluster
+
+  # upgrade a v1alpha1 cluster with --dry-run
+  kbcli cluster upgrade-to-v1 mycluster --dry-run
+```
+
+### Options
+
+```
+      --dry-run   Dry run mode
+  -h, --help   help for upgrade-to-v1
+      --no-diff   Only print the new cluster YAML
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_upgrade.mdx b/docs/zh/preview/cli/kbcli_cluster_upgrade.mdx
new file mode 100644
index 00000000..72860af6
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_upgrade.mdx
@@ -0,0 +1,69 @@
+---
+title: kbcli cluster upgrade
+---
+
+Upgrade the service version (only minor version upgrades are supported).
+
+```
+kbcli cluster upgrade NAME [flags]
+```
+
+### Examples
+
+```
+  # upgrade the component to the target version
+  kbcli cluster upgrade mycluster --service-version=8.0.30 --components my-comp
+
+  # upgrade the component with a new component definition
+  kbcli cluster upgrade mycluster --component-def=8.0.30 --components my-comp
+
+  # upgrade the component with a new component definition and a specified service version
+  kbcli cluster upgrade mycluster --component-def=8.0.30 --service-version=8.0.30 --components my-comp
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before upgrading the cluster
+      --component-def string   Refers to the ComponentDefinition (default "nil")
+      --components strings   Component names for this operation
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for upgrade
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --service-version string   Refers to the serviceVersion that is provided by ComponentDefinition and ComponentVersion (default "nil")
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_volume-expand.mdx b/docs/zh/preview/cli/kbcli_cluster_volume-expand.mdx
new file mode 100644
index 00000000..43d86e0c
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_volume-expand.mdx
@@ -0,0 +1,63 @@
+---
+title: kbcli cluster volume-expand
+---
+
+Expand volume with the specified components and volumeClaimTemplates in the cluster.
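+
+Before expanding, you may want to check the current size of the PVCs backing the cluster. A minimal sketch (assuming kubectl access; the cluster name mycluster and the app.kubernetes.io/instance label are illustrative assumptions):
+
+```
+# List the cluster's PVCs together with their current capacity
+kubectl get pvc -l app.kubernetes.io/instance=mycluster
+```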
+
+```
+kbcli cluster volume-expand NAME [flags]
+```
+
+### Examples
+
+```
+  # expand the volume of the specified components, separate with commas for multiple components
+  kbcli cluster volume-expand mycluster --components=mysql --volume-claim-templates=data --storage=10Gi
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before expanding the cluster volume
+      --components strings   Component names for this operation
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for volume-expand
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --storage string   Volume storage size (required)
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+  -t, --volume-claim-templates strings   VolumeClaimTemplate names in components (required)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_cluster_vscale.mdx b/docs/zh/preview/cli/kbcli_cluster_vscale.mdx
new file mode 100644
index 00000000..758d721a
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_cluster_vscale.mdx
@@ -0,0 +1,67 @@
+---
+title: kbcli cluster vscale
+---
+
+Vertically scale the specified components in the cluster.
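+
+To see what a component currently requests before scaling it, you can inspect the Cluster spec. A minimal sketch (assuming kubectl access; the cluster name mycluster and the jsonpath layout are illustrative assumptions):
+
+```
+# Print the resource requests/limits declared for each component
+kubectl get cluster mycluster -o jsonpath='{range .spec.componentSpecs[*]}{.name}{": "}{.resources}{"\n"}{end}'
+```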
+
+```
+kbcli cluster vscale NAME [flags]
+```
+
+### Examples
+
+```
+  # scale the computing resources of specified components, separate with commas for multiple components
+  kbcli cluster vscale mycluster --components=mysql --cpu=500m --memory=500Mi
+
+  # scale the computing resources of a specified instance template, separate with commas for multiple components
+  kbcli cluster vscale mycluster --components=mysql --cpu=500m --memory=500Mi --instance-tpl default
+```
+
+### Options
+
+```
+      --auto-approve   Skip interactive approval before vertically scaling the cluster
+      --components strings   Component names for this operation
+      --cpu string   Request and limit size of component CPU
+      --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
+      --edit   Edit the API resource before creating
+      --force   Skip the pre-checks of the opsRequest to run the opsRequest forcibly
+  -h, --help   help for vscale
+      --instance-tpl strings   Vertically scale the specified instance template in the specified component
+      --memory string   Request and limit size of component memory
+      --name string   OpsRequest name. If not specified, it will be randomly generated
+  -o, --output format   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
+      --ttlSecondsAfterSucceed int   Time to live after the OpsRequest succeeds
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli cluster](kbcli_cluster.md) - Cluster command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_clusterdefinition.mdx b/docs/zh/preview/cli/kbcli_clusterdefinition.mdx
new file mode 100644
index 00000000..b5a28772
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_clusterdefinition.mdx
@@ -0,0 +1,44 @@
+---
+title: kbcli clusterdefinition
+---
+
+ClusterDefinition command.
+
+### Options
+
+```
+  -h, --help   help for clusterdefinition
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli clusterdefinition describe](kbcli_clusterdefinition_describe.md) - Describe ClusterDefinition.
+* [kbcli clusterdefinition list](kbcli_clusterdefinition_list.md) - List ClusterDefinitions.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_clusterdefinition_describe.mdx b/docs/zh/preview/cli/kbcli_clusterdefinition_describe.mdx
new file mode 100644
index 00000000..12da1191
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_clusterdefinition_describe.mdx
@@ -0,0 +1,53 @@
+---
+title: kbcli clusterdefinition describe
+---
+
+Describe ClusterDefinition.
+
+```
+kbcli clusterdefinition describe [flags]
+```
+
+### Examples
+
+```
+  # describe a specified cluster definition
+  kbcli clusterdefinition describe myclusterdef
+```
+
+### Options
+
+```
+  -h, --help   help for describe
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli clusterdefinition](kbcli_clusterdefinition.md) - ClusterDefinition command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_clusterdefinition_list.mdx b/docs/zh/preview/cli/kbcli_clusterdefinition_list.mdx
new file mode 100644
index 00000000..6a4459db
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_clusterdefinition_list.mdx
@@ -0,0 +1,56 @@
+---
+title: kbcli clusterdefinition list
+---
+
+List ClusterDefinitions.
+
+```
+kbcli clusterdefinition list [flags]
+```
+
+### Examples
+
+```
+  # list all ClusterDefinitions
+  kbcli clusterdefinition list
+```
+
+### Options
+
+```
+  -h, --help   help for list
+  -o, --output format   Prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string   Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels   When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli clusterdefinition](kbcli_clusterdefinition.md) - ClusterDefinition command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_componentdefinition.mdx b/docs/zh/preview/cli/kbcli_componentdefinition.mdx
new file mode 100644
index 00000000..66476b89
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_componentdefinition.mdx
@@ -0,0 +1,44 @@
+---
+title: kbcli componentdefinition
+---
+
+ComponentDefinition command.
+
+### Options
+
+```
+  -h, --help   help for componentdefinition
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli componentdefinition describe](kbcli_componentdefinition_describe.md) - Describe ComponentDefinition.
+* [kbcli componentdefinition list](kbcli_componentdefinition_list.md) - List ComponentDefinitions.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_componentdefinition_describe.mdx b/docs/zh/preview/cli/kbcli_componentdefinition_describe.mdx
new file mode 100644
index 00000000..7c191f54
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_componentdefinition_describe.mdx
@@ -0,0 +1,53 @@
+---
+title: kbcli componentdefinition describe
+---
+
+Describe ComponentDefinition.
+
+```
+kbcli componentdefinition describe [flags]
+```
+
+### Examples
+
+```
+  # describe a specified component definition
+  kbcli componentdefinition describe mycomponentdef
+```
+
+### Options
+
+```
+  -h, --help   help for describe
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli componentdefinition](kbcli_componentdefinition.md) - ComponentDefinition command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_componentdefinition_list.mdx b/docs/zh/preview/cli/kbcli_componentdefinition_list.mdx
new file mode 100644
index 00000000..9fa47063
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_componentdefinition_list.mdx
@@ -0,0 +1,59 @@
+---
+title: kbcli componentdefinition list
+---
+
+List ComponentDefinitions.
+
+```
+kbcli componentdefinition list [flags]
+```
+
+### Examples
+
+```
+  # list all ComponentDefinitions
+  kbcli componentdefinition list
+
+  # list all ComponentDefinitions by alias
+  kbcli cmpd list
+```
+
+### Options
+
+```
+  -h, --help   help for list
+  -o, --output format   Prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string   Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels   When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli componentdefinition](kbcli_componentdefinition.md) - ComponentDefinition command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_componentversion.mdx b/docs/zh/preview/cli/kbcli_componentversion.mdx
new file mode 100644
index 00000000..0e9c3152
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_componentversion.mdx
@@ -0,0 +1,44 @@
+---
+title: kbcli componentversion
+---
+
+ComponentVersion command.
+
+### Options
+
+```
+  -h, --help   help for componentversion
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli componentversion describe](kbcli_componentversion_describe.md) - Describe ComponentVersion.
+* [kbcli componentversion list](kbcli_componentversion_list.md) - List ComponentVersions.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_componentversion_describe.mdx b/docs/zh/preview/cli/kbcli_componentversion_describe.mdx
new file mode 100644
index 00000000..2e65ea6f
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_componentversion_describe.mdx
@@ -0,0 +1,53 @@
+---
+title: kbcli componentversion describe
+---
+
+Describe ComponentVersion.
+
+```
+kbcli componentversion describe [flags]
+```
+
+### Examples
+
+```
+  # describe a specified componentversion
+  kbcli componentversion describe mycomponentversion
+```
+
+### Options
+
+```
+  -h, --help   help for describe
+```
+
+### Options inherited from parent commands
+
+```
+      --as string   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray   Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string   UID to impersonate for the operation.
+      --cache-dir string   Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string   Path to a client certificate file for TLS
+      --client-key string   Path to a client key file for TLS
+      --cluster string   The name of the kubeconfig cluster to use
+      --context string   The name of the kubeconfig context to use
+      --disable-compression   If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string   Path to the kubeconfig file to use for CLI requests.
+      --match-server-version   Require server version to match client version
+  -n, --namespace string   If present, the namespace scope for this CLI request
+      --request-timeout string   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string   The address and port of the Kubernetes API server
+      --tls-server-name string   Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string   Bearer token for authentication to the API server
+      --user string   The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli componentversion](kbcli_componentversion.md) - ComponentVersion command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+ diff --git a/docs/zh/preview/cli/kbcli_componentversion_list.mdx b/docs/zh/preview/cli/kbcli_componentversion_list.mdx new file mode 100644 index 00000000..6bf3dec0 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_componentversion_list.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli componentversion list +--- + +List ComponentVersion. + +``` +kbcli componentversion list [flags] +``` + +### Examples + +``` + # list all ComponentVersions + kbcli componentversion list + + # list all ComponentVersions by alias + kbcli cmpv list +``` + +### Options + +``` + -h, --help help for list + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli componentversion](kbcli_componentversion.md) - ComponentVersions command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection.mdx b/docs/zh/preview/cli/kbcli_dataprotection.mdx new file mode 100644 index 00000000..7d23e182 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection.mdx @@ -0,0 +1,54 @@ +--- +title: kbcli dataprotection +--- + +Data protection command. + +### Options + +``` + -h, --help help for dataprotection +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli dataprotection backup](kbcli_dataprotection_backup.md) - Create a backup for the cluster. +* [kbcli dataprotection delete-backup](kbcli_dataprotection_delete-backup.md) - Delete a backup. +* [kbcli dataprotection describe-backup](kbcli_dataprotection_describe-backup.md) - Describe a backup +* [kbcli dataprotection describe-backup-policy](kbcli_dataprotection_describe-backup-policy.md) - Describe a backup policy +* [kbcli dataprotection describe-restore](kbcli_dataprotection_describe-restore.md) - Describe a restore +* [kbcli dataprotection edit-backup-policy](kbcli_dataprotection_edit-backup-policy.md) - Edit backup policy +* [kbcli dataprotection list-action-sets](kbcli_dataprotection_list-action-sets.md) - List actionsets +* [kbcli dataprotection list-backup-policies](kbcli_dataprotection_list-backup-policies.md) - List backup policies +* [kbcli dataprotection list-backup-policy-templates](kbcli_dataprotection_list-backup-policy-templates.md) - List backup policy templates +* [kbcli dataprotection list-backups](kbcli_dataprotection_list-backups.md) - List backups. +* [kbcli dataprotection list-restores](kbcli_dataprotection_list-restores.md) - List restores. +* [kbcli dataprotection restore](kbcli_dataprotection_restore.md) - Restore a new cluster from backup + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_backup.mdx b/docs/zh/preview/cli/kbcli_dataprotection_backup.mdx new file mode 100644 index 00000000..5608cb7d --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_backup.mdx @@ -0,0 +1,67 @@ +--- +title: kbcli dataprotection backup +--- + +Create a backup for the cluster. 
+
+```
+kbcli dataprotection backup NAME [flags]
+```
+
+### Examples
+
+```
+  # Create a backup for the cluster, using the default backup policy and the volume snapshot backup method
+  kbcli dp backup mybackup --cluster mycluster
+
+  # Create a backup with a specified method; run "kbcli cluster desc-backup-policy mycluster" to show supported backup methods
+  kbcli dp backup mybackup --cluster mycluster --method mymethod
+
+  # Create a backup with a specified backup policy; run "kbcli cluster list-backup-policies mycluster" to show the backup policies the cluster supports
+  kbcli dp backup mybackup --cluster mycluster --policy mypolicy
+
+  # Create a backup from a parent backup
+  kbcli dp backup mybackup --cluster mycluster --parent-backup myparentbackup
+```
+
+### Options
+
+```
+      --cluster string            Cluster name
+      --deletion-policy string    Deletion policy for the backup, which determines whether the backup content in the backup repo is deleted after the backup is deleted; supported values: [Delete, Retain] (default "Delete")
+  -h, --help                      help for backup
+      --method string             Backup method, defined in the backup policy (required); if the policy defines only one method, it is used as the default; if it defines multiple methods, the method with volume snapshot enabled is used as the default
+      --parent-backup string      Parent backup name, used for incremental backup
+      --policy string             Backup policy name; if not specified, the cluster's default backup policy is used
+      --retention-period string   Retention period for the backup, supported values: [1y, 1mo, 1d, 1h, 1m] or combinations such as [1y1mo1d1h1m]; if not specified, the backup is not deleted automatically and you need to delete it manually.
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command.
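+
+As a combined, illustrative sketch of the options above (the resource names are placeholders; the values `1mo` and `Retain` come straight from the flag descriptions):
+
+```
+  # keep the backup for one month and retain repo content when the backup object is deleted
+  kbcli dp backup mybackup --cluster mycluster --method mymethod --retention-period 1mo --deletion-policy Retain
+```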
+ +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_delete-backup.mdx b/docs/zh/preview/cli/kbcli_dataprotection_delete-backup.mdx new file mode 100644 index 00000000..4e694c84 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_delete-backup.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli dataprotection delete-backup +--- + +Delete a backup. + +``` +kbcli dataprotection delete-backup [flags] +``` + +### Examples + +``` + # delete a backup + kbcli dp delete-backup mybackup +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + --auto-approve Skip interactive approval before deleting + --cluster string The cluster name. + --force If true, immediately remove resources from API and bypass graceful deletion. Note that immediate deletion of some resources may result in inconsistency or data loss and requires confirmation. + --grace-period int Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. Set to 1 for immediate shutdown. Can only be set to 0 when --force is true (force deletion). (default -1) + -h, --help help for delete-backup + --now If true, resources are signaled for immediate shutdown (same as --grace-period=1). + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
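+
+A hedged sketch of non-interactive deletion, using only the flags documented above (`mybackup` is a placeholder):
+
+```
+  # skip the interactive confirmation
+  kbcli dp delete-backup mybackup --auto-approve
+
+  # force immediate removal; per the --force description, this may cause inconsistency or data loss
+  kbcli dp delete-backup mybackup --force --grace-period 0 --auto-approve
+```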
+ diff --git a/docs/zh/preview/cli/kbcli_dataprotection_describe-backup-policy.mdx b/docs/zh/preview/cli/kbcli_dataprotection_describe-backup-policy.mdx new file mode 100644 index 00000000..9cc91abd --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_describe-backup-policy.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli dataprotection describe-backup-policy +--- + +Describe a backup policy + +``` +kbcli dataprotection describe-backup-policy [flags] +``` + +### Examples + +``` + # describe the default backup policy of the cluster + kbcli dp describe-backup-policy cluster-name + + # describe the backup policy of the cluster with specified name + kbcli dp describe-backup-policy cluster-name --name backup-policy-name +``` + +### Options + +``` + -h, --help help for describe-backup-policy +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_describe-backup.mdx b/docs/zh/preview/cli/kbcli_dataprotection_describe-backup.mdx new file mode 100644 index 00000000..5fb493ac --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_describe-backup.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli dataprotection describe-backup +--- + +Describe a backup + +``` +kbcli dataprotection describe-backup NAME [flags] +``` + +### Examples + +``` + # describe a backup + kbcli dp describe-backup mybackup +``` + +### Options + +``` + -h, --help help for describe-backup +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_describe-restore.mdx b/docs/zh/preview/cli/kbcli_dataprotection_describe-restore.mdx new file mode 100644 index 00000000..474db84a --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_describe-restore.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli dataprotection describe-restore +--- + +Describe a restore + +``` +kbcli dataprotection describe-restore NAME [flags] +``` + +### Examples + +``` + # describe a restore + kbcli dp describe-restore +``` + +### Options + +``` + -h, --help help for describe-restore +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_edit-backup-policy.mdx b/docs/zh/preview/cli/kbcli_dataprotection_edit-backup-policy.mdx new file mode 100644 index 00000000..6a77bf01 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_edit-backup-policy.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli dataprotection edit-backup-policy +--- + +Edit backup policy + +``` +kbcli dataprotection edit-backup-policy +``` + +### Examples + +``` + # edit backup policy + kbcli dp edit-backup-policy +``` + +### Options + +``` + -h, --help help for edit-backup-policy +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
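+
+A hedged workflow sketch: locate the policy first, then open it for editing. Passing the policy name as an argument is an assumption inferred from the sibling list/describe commands; this page's usage line does not show it:
+
+```
+  # list policies, then edit one (the policy name is hypothetical)
+  kbcli dp list-backup-policies --cluster mycluster
+  kbcli dp edit-backup-policy mycluster-backup-policy
+```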
+ diff --git a/docs/zh/preview/cli/kbcli_dataprotection_list-action-sets.mdx b/docs/zh/preview/cli/kbcli_dataprotection_list-action-sets.mdx new file mode 100644 index 00000000..d0e6d556 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_list-action-sets.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli dataprotection list-action-sets +--- + +List actionsets + +``` +kbcli dataprotection list-action-sets [flags] +``` + +### Examples + +``` + # list all action sets + kbcli dp list-as +``` + +### Options + +``` + -h, --help help for list-action-sets + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
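+
+An illustrative variation using the output flags documented above:
+
+```
+  # wide table with the labels column shown
+  kbcli dp list-as -o wide --show-labels
+```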
+ diff --git a/docs/zh/preview/cli/kbcli_dataprotection_list-backup-policies.mdx b/docs/zh/preview/cli/kbcli_dataprotection_list-backup-policies.mdx new file mode 100644 index 00000000..ab169b4e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_list-backup-policies.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli dataprotection list-backup-policies +--- + +List backup policies + +``` +kbcli dataprotection list-backup-policies [flags] +``` + +### Examples + +``` + # list all backup policies + kbcli dp list-backup-policies + + # using short cmd to list backup policy of the specified cluster + kbcli dp list-bp mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + --cluster string The cluster name + -h, --help help for list-backup-policies + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
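+
+An illustrative combination of the documented flags (`-A` for all namespaces, `-o` for output format):
+
+```
+  # list backup policies across all namespaces as YAML
+  kbcli dp list-backup-policies -A -o yaml
+```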
+
diff --git a/docs/zh/preview/cli/kbcli_dataprotection_list-backup-policy-templates.mdx b/docs/zh/preview/cli/kbcli_dataprotection_list-backup-policy-templates.mdx
new file mode 100644
index 00000000..96d3bda7
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_dataprotection_list-backup-policy-templates.mdx
@@ -0,0 +1,56 @@
+---
+title: kbcli dataprotection list-backup-policy-templates
+---
+
+List backup policy templates
+
+```
+kbcli dataprotection list-backup-policy-templates [flags]
+```
+
+### Examples
+
+```
+  # list all backup policy templates
+  kbcli dp list-bpt
+```
+
+### Options
+
+```
+  -h, --help              help for list-backup-policy-templates
+  -o, --output format     prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string   Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels       When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_dataprotection_list-backups.mdx b/docs/zh/preview/cli/kbcli_dataprotection_list-backups.mdx
new file mode 100644
index 00000000..5081f50e
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_dataprotection_list-backups.mdx
@@ -0,0 +1,60 @@
+---
+title: kbcli dataprotection list-backups
+---
+
+List backups.
+ +``` +kbcli dataprotection list-backups [flags] +``` + +### Examples + +``` + # list all backups + kbcli dp list-backups + + # list all backups of specified cluster + kbcli dp list-backups --cluster mycluster +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. + --cluster string List backups in the specified cluster + -h, --help help for list-backups + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_list-restores.mdx b/docs/zh/preview/cli/kbcli_dataprotection_list-restores.mdx new file mode 100644 index 00000000..989702d6 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_list-restores.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli dataprotection list-restores +--- + +List restores. + +``` +kbcli dataprotection list-restores [flags] +``` + +### Examples + +``` + # list all restores + kbcli dp list-restores +``` + +### Options + +``` + -A, --all-namespaces If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. 
+ --cluster string List restores in the specified cluster + -h, --help help for list-restores + -n, --namespace string specified the namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) + -l, --selector string Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. + --show-labels When printing, show all labels as the last column (default hide labels column) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_dataprotection_restore.mdx b/docs/zh/preview/cli/kbcli_dataprotection_restore.mdx new file mode 100644 index 00000000..596e49b7 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_dataprotection_restore.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli dataprotection restore +--- + +Restore a new cluster from backup + +``` +kbcli dataprotection restore [flags] +``` + +### Examples + +``` + # restore a new cluster from a backup + kbcli dp restore mybackup --cluster cluster-name +``` + +### Options + +``` + --cluster string The cluster to restore + -h, --help help for restore + --restore-key string specify the key to restore in kv database, support multiple keys split by comma with wildcard pattern matching + --restore-key-ignore-errors whether or not to ignore errors when restore kv database by keys + --restore-to-time string point in time recovery(PITR) + --volume-restore-policy string the volume claim restore policy, supported values: [Serial, Parallel] (default "Parallel") +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. 
User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli dataprotection](kbcli_dataprotection.md) - Data protection command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_kubeblocks.mdx b/docs/zh/preview/cli/kbcli_kubeblocks.mdx new file mode 100644 index 00000000..c3089942 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_kubeblocks.mdx @@ -0,0 +1,51 @@ +--- +title: kbcli kubeblocks +--- + +KubeBlocks operation commands. + +### Options + +``` + -h, --help help for kubeblocks +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli kubeblocks compare](kbcli_kubeblocks_compare.md) - List the changes between two different version KubeBlocks. +* [kbcli kubeblocks config](kbcli_kubeblocks_config.md) - KubeBlocks config. +* [kbcli kubeblocks describe-config](kbcli_kubeblocks_describe-config.md) - Describe KubeBlocks config. +* [kbcli kubeblocks install](kbcli_kubeblocks_install.md) - Install KubeBlocks. +* [kbcli kubeblocks list-versions](kbcli_kubeblocks_list-versions.md) - List KubeBlocks versions. +* [kbcli kubeblocks preflight](kbcli_kubeblocks_preflight.md) - Run and retrieve preflight checks for KubeBlocks. +* [kbcli kubeblocks status](kbcli_kubeblocks_status.md) - Show list of resource KubeBlocks uses or owns. +* [kbcli kubeblocks uninstall](kbcli_kubeblocks_uninstall.md) - Uninstall KubeBlocks. +* [kbcli kubeblocks upgrade](kbcli_kubeblocks_upgrade.md) - Upgrade KubeBlocks. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_compare.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_compare.mdx new file mode 100644 index 00000000..cdc4078b --- /dev/null +++ b/docs/zh/preview/cli/kbcli_kubeblocks_compare.mdx @@ -0,0 +1,57 @@ +--- +title: kbcli kubeblocks compare +--- + +List the changes between two different version KubeBlocks. + +``` +kbcli kubeblocks compare version [OTHER-VERSION] [flags] +``` + +### Examples + +``` + # compare installed KubeBlocks with specified version + kbcli kubeblocks compare 0.4.0 + + # compare two specified KubeBlocks version + kbcli kubeblocks compare 0.4.0 0.5.0 +``` + +### Options + +``` + --details show the different details between two kubeblocks version + -h, --help help for compare +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_config.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_config.mdx new file mode 100644 index 00000000..9c601c63 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_kubeblocks_config.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli kubeblocks config +--- + +KubeBlocks config. + +``` +kbcli kubeblocks config [flags] +``` + +### Examples + +``` + # Enable the snapshot-controller and volume snapshot, to support snapshot backup. + kbcli kubeblocks config --set snapshot-controller.enabled=true +``` + +### Options + +``` + -h, --help help for config + -n, --namespace string KubeBlocks namespace + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2) + --set-json stringArray Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2) + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + -f, --values strings Specify values in a YAML file or a URL (can specify multiple) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_describe-config.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_describe-config.mdx new file mode 100644 index 00000000..1e51a58e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_kubeblocks_describe-config.mdx @@ -0,0 +1,60 @@ +--- +title: kbcli kubeblocks describe-config +--- + +Describe KubeBlocks config. + +``` +kbcli kubeblocks describe-config [flags] +``` + +### Examples + +``` + # Describe the KubeBlocks config. + kbcli kubeblocks describe-config + # Describe all the KubeBlocks configs + kbcli kubeblocks describe-config --all + # Describe the desired KubeBlocks configs by filter conditions + kbcli kubeblocks describe-config --filter=addonController,affinity +``` + +### Options + +``` + -A, --all show all kubeblocks configs value + --filter string filter the desired kubeblocks configs, multiple filtered strings are comma separated + -h, --help help for describe-config + -n, --namespace string KubeBlocks namespace + -o, --output format prints the output in the specified format. Allowed values: table, json, yaml, wide (default table) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
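+
+An illustrative combination of the documented flags, reusing the filter keys from the example above:
+
+```
+  # render only the filtered config keys as JSON
+  kbcli kubeblocks describe-config --filter=addonController,affinity -o json
+```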
+
diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_install.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_install.mdx
new file mode 100644
index 00000000..75a861ae
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_kubeblocks_install.mdx
@@ -0,0 +1,81 @@
+---
+title: kbcli kubeblocks install
+---
+
+Install KubeBlocks.
+
+```
+kbcli kubeblocks install [flags]
+```
+
+### Examples
+
+```
+  # Install KubeBlocks, the default version is the same as the kbcli version, the default namespace is kb-system
+  kbcli kubeblocks install
+
+  # Install KubeBlocks with a specified version
+  kbcli kubeblocks install --version=0.4.0
+
+  # Install KubeBlocks, ignoring preflight checks
+  kbcli kubeblocks install --force
+
+  # Install KubeBlocks in a specified namespace; if the namespace is not present, it will be created
+  kbcli kubeblocks install --namespace=my-namespace --create-namespace
+
+  # Install KubeBlocks with other settings, for example, set replicaCount to 3
+  kbcli kubeblocks install --set replicaCount=3
+```
+
+### Options
+
+```
+      --check                        Check the Kubernetes environment before installation (default true)
+      --create-namespace             Create the namespace if not present
+      --force                        If present, just print failed items and continue with the following steps
+  -h, --help                         help for install
+  -n, --namespace string             KubeBlocks namespace (default "kb-system")
+      --node-labels stringToString   Node label selector (default [])
+      --pod-anti-affinity string     Pod anti-affinity type, one of: (Preferred, Required)
+      --set stringArray              Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
+      --set-file stringArray         Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)
+      --set-json stringArray         Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2)
+      --set-string stringArray       Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
+      --timeout duration             Time to wait for installing KubeBlocks, such as --timeout=10m (default 30m0s)
+      --tolerations strings          Tolerations for KubeBlocks, such as '"dev=true:NoSchedule,large=true:NoSchedule"'
+      --topology-keys stringArray    Topology keys for affinity
+  -f, --values strings               Specify values in a YAML file or a URL (can specify multiple)
+      --version string               KubeBlocks version
+      --wait                         Wait for KubeBlocks to be ready, including all the auto installed add-ons. It will wait for a --timeout period (default true)
+      --wait-addons                  Wait for auto installed add-ons. It will wait for a --timeout period (default true)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_list-versions.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_list-versions.mdx
new file mode 100644
index 00000000..875259da
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_kubeblocks_list-versions.mdx
@@ -0,0 +1,58 @@
+---
+title: kbcli kubeblocks list-versions
+---
+
+List KubeBlocks versions.
+
+```
+kbcli kubeblocks list-versions [flags]
+```
+
+### Examples
+
+```
+  # list KubeBlocks release versions
+  kbcli kubeblocks list-versions
+
+  # list KubeBlocks versions including development versions, such as alpha, beta and release candidate
+  kbcli kubeblocks list-versions --devel
+```
+
+### Options
+
+```
+      --devel       Use development versions (alpha, beta, and release candidate releases), too. Equivalent to version '>0.0.0-0'.
+  -h, --help        help for list-versions
+      --limit int   Maximum rows of versions to return, 0 means no limit (default 10)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_preflight.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_preflight.mdx
new file mode 100644
index 00000000..93abf5bc
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_kubeblocks_preflight.mdx
@@ -0,0 +1,72 @@
+---
+title: kbcli kubeblocks preflight
+---
+
+Run and retrieve preflight checks for KubeBlocks.
+
+```
+kbcli kubeblocks preflight [flags]
+```
+
+### Examples
+
+```
+  # Run preflight provider checks against the default rules automatically
+  kbcli kubeblocks preflight
+
+  # Run preflight provider checks and output more verbose info
+  kbcli kubeblocks preflight --verbose
+
+  # Run preflight checks against the customized rules of preflight-check.yaml
+  kbcli kubeblocks preflight preflight-check.yaml
+
+  # Run preflight checks and display AnalyzeResults with interactive mode
+  kbcli kubeblocks preflight preflight-check.yaml --interactive=true
+```
+
+### Options
+
+```
+      --collect-without-permissions   always run preflight checks even if some permissions that preflight requires are missing (default true)
+      --collector-image string        the full name of the collector image to use
+      --collector-pullpolicy string   the pull policy of the collector image
+      --debug                         enable debug logging
+      --format string                 output format, one of json, yaml. only used when interactive is set to false, default format is yaml (default "yaml")
+  -h, --help                          help for preflight
+  -n, --namespace string              If present, the namespace scope for this CLI request
+  -o, --output string                 specify the output file path for the preflight checks
+      --selector string               selector (label query) to filter remote collection nodes on.
+      --since string                  force pod logs collectors to return logs newer than a relative duration like 5s, 2m, or 3h.
+      --since-time string             force pod logs collectors to return logs after a specific date (RFC3339)
+      --verbose                       print more verbose logs, default value is false
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_status.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_status.mdx
new file mode 100644
index 00000000..dce44bb0
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_kubeblocks_status.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli kubeblocks status
+---
+
+Show the list of resources KubeBlocks uses or owns.
+
+```
+kbcli kubeblocks status [flags]
+```
+
+### Examples
+
+```
+  # list workloads owned by KubeBlocks
+  kbcli kubeblocks status
+
+  # list all resources owned by KubeBlocks, such as workloads, cluster definitions, backup templates.
+  kbcli kubeblocks status --all
+```
+
+### Options
+
+```
+  -A, --all    Show all resources, including configurations, storage, etc.
+  -h, --help   help for status
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_uninstall.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_uninstall.mdx new file mode 100644 index 00000000..b9f33392 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_kubeblocks_uninstall.mdx @@ -0,0 +1,59 @@ +--- +title: kbcli kubeblocks uninstall +--- + +Uninstall KubeBlocks. + +``` +kbcli kubeblocks uninstall [flags] +``` + +### Examples + +``` + # uninstall KubeBlocks + kbcli kubeblocks uninstall +``` + +### Options + +``` + --auto-approve Skip interactive approval before uninstalling KubeBlocks + -h, --help help for uninstall + -n, --namespace string KubeBlocks namespace + --remove-namespace Remove default created "kb-system" namespace or not + --remove-pvcs Remove PersistentVolumeClaim or not + --remove-pvs Remove PersistentVolume or not + --timeout duration Time to wait for uninstalling KubeBlocks, such as --timeout=5m (default 10m0s) + --wait Wait for KubeBlocks to be uninstalled, including all the add-ons. It will wait for a --timeout period (default true) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_kubeblocks_upgrade.mdx b/docs/zh/preview/cli/kbcli_kubeblocks_upgrade.mdx
new file mode 100644
index 00000000..f31d2edf
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_kubeblocks_upgrade.mdx
@@ -0,0 +1,66 @@
+---
+title: kbcli kubeblocks upgrade
+---
+
+Upgrade KubeBlocks.
+
+```
+kbcli kubeblocks upgrade [flags]
+```
+
+### Examples
+
+```
+  # Upgrade KubeBlocks to a specified version
+  kbcli kubeblocks upgrade --version=0.4.0
+
+  # Upgrade KubeBlocks with other settings, for example, set replicaCount to 3
+  kbcli kubeblocks upgrade --set replicaCount=3
+```
+
+### Options
+
+```
+      --auto-approve             Skip interactive approval before upgrading KubeBlocks
+      --check                    Check kubernetes environment before upgrade (default true)
+  -h, --help                     help for upgrade
+  -n, --namespace string         KubeBlocks namespace
+      --set stringArray          Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
+      --set-file stringArray     Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2)
+      --set-json stringArray     Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2)
+      --set-string stringArray   Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)
+      --timeout duration         Time to wait for upgrading KubeBlocks, such as --timeout=10m (default 30m0s)
+  -f, --values strings           Specify values in a YAML file or a URL (can specify multiple)
+      --version string           Set KubeBlocks version
+      --wait                     Wait for KubeBlocks to be ready. It will wait for a --timeout period (default true)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.
(default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli kubeblocks](kbcli_kubeblocks.md) - KubeBlocks operation commands. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_ops-definition.mdx b/docs/zh/preview/cli/kbcli_ops-definition.mdx new file mode 100644 index 00000000..a7b92725 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_ops-definition.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli ops-definition +--- + +ops-definitions command. + +### Options + +``` + -h, --help help for ops-definition +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli ops-definition describe](kbcli_ops-definition_describe.md) - Describe OpsDefinition. +* [kbcli ops-definition list](kbcli_ops-definition_list.md) - List OpsDefinition. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_ops-definition_describe.mdx b/docs/zh/preview/cli/kbcli_ops-definition_describe.mdx new file mode 100644 index 00000000..f61dcc33 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_ops-definition_describe.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli ops-definition describe +--- + +Describe OpsDefinition. 
+
+```
+kbcli ops-definition describe [flags]
+```
+
+### Examples
+
+```
+  # describe a specified ops-definition
+  kbcli ops-definition describe my-ops-definition
+```
+
+### Options
+
+```
+  -h, --help   help for describe
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli ops-definition](kbcli_ops-definition.md) - ops-definitions command.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_ops-definition_list.mdx b/docs/zh/preview/cli/kbcli_ops-definition_list.mdx
new file mode 100644
index 00000000..831bad01
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_ops-definition_list.mdx
@@ -0,0 +1,60 @@
+---
+title: kbcli ops-definition list
+---
+
+List OpsDefinition.
+
+```
+kbcli ops-definition list [flags]
+```
+
+### Examples
+
+```
+  # list all ops-definitions
+  kbcli ops-definition list
+
+  # list all ops-definitions by alias
+  kbcli ops-def list
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+  -h, --help               help for list
+  -n, --namespace string   specify the namespace
+  -o, --output format      prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!=' (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels        When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli ops-definition](kbcli_ops-definition.md) - ops-definitions command. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_options.mdx b/docs/zh/preview/cli/kbcli_options.mdx new file mode 100644 index 00000000..38a4a122 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_options.mdx @@ -0,0 +1,54 @@ +--- +title: kbcli options +--- + +Print the list of flags inherited by all commands. + +``` +kbcli options [flags] +``` + +### Examples + +``` + + # Print flags inherited by all commands + kbcli options +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_playground.mdx b/docs/zh/preview/cli/kbcli_playground.mdx new file mode 100644 index 00000000..c6414a79 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_playground.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli playground +--- + +Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +### Options + +``` + -h, --help help for playground +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli playground destroy](kbcli_playground_destroy.md) - Destroy the playground KubeBlocks and kubernetes cluster. +* [kbcli playground init](kbcli_playground_init.md) - Bootstrap a kubernetes cluster and install KubeBlocks for playground. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_playground_destroy.mdx b/docs/zh/preview/cli/kbcli_playground_destroy.mdx new file mode 100644 index 00000000..9f9f78d1 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_playground_destroy.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli playground destroy +--- + +Destroy the playground KubeBlocks and kubernetes cluster. 
+ +``` +kbcli playground destroy [flags] +``` + +### Examples + +``` + # destroy playground cluster + kbcli playground destroy +``` + +### Options + +``` + --auto-approve Skip interactive approval before destroying the playground + -h, --help help for destroy + --purge Purge all resources before destroying kubernetes cluster, delete all clusters created by KubeBlocks and uninstall KubeBlocks. (default true) + --timeout duration Time to wait for destroying KubeBlocks, such as --timeout=10m (default 10m0s) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli playground](kbcli_playground.md) - Bootstrap or destroy a playground KubeBlocks in local host or cloud. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_playground_init.mdx b/docs/zh/preview/cli/kbcli_playground_init.mdx new file mode 100644 index 00000000..9e2924b2 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_playground_init.mdx @@ -0,0 +1,87 @@ +--- +title: kbcli playground init +--- + +Bootstrap a kubernetes cluster and install KubeBlocks for playground. + +### Synopsis + +Bootstrap a kubernetes cluster and install KubeBlocks for playground. + + If no cloud provider is specified, a k3d cluster named kb-playground will be created on local host, otherwise a kubernetes cluster will be created on the specified cloud. Then KubeBlocks will be installed on the created kubernetes cluster, and an apecloud-mysql cluster named mycluster will be created. 
+ +``` +kbcli playground init [flags] +``` + +### Examples + +``` + # create a k3d cluster on local host and install KubeBlocks + kbcli playground init + + # create an AWS EKS cluster and install KubeBlocks, the region is required + kbcli playground init --cloud-provider aws --region us-west-1 + + # after init, run the following commands to experience KubeBlocks quickly + # list database cluster and check its status + kbcli cluster list + + # get cluster information + kbcli cluster describe mycluster + + # connect to database + kbcli exec -it mycluster-mysql-0 bash + mysql -h 127.1 -u root -p$MYSQL_ROOT_PASSWORD + + # view the Grafana + kbcli dashboard open kubeblocks-grafana + + # destroy playground + kbcli playground destroy +``` + +### Options + +``` + --auto-approve Skip interactive approval during the initialization of playground + --cloud-provider string Cloud provider type, one of [local aws] (default "local") + --cluster-type string Specify the cluster type to create, use 'kbcli cluster create --help' to get the available cluster type. (default "mysql") + -h, --help help for init + --k3d-proxy-image string Specify k3d proxy image if you want to init playground locally (default "docker.io/apecloud/k3d-proxy:5.4.4") + --k3s-image string Specify k3s image that you want to use for the nodes if you want to init playground locally (default "rancher/k3s:v1.23.8-k3s1") + --region string The region to create kubernetes cluster + --timeout duration Time to wait for init playground, such as --timeout=10m (default 10m0s) + --version string KubeBlocks version +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli playground](kbcli_playground.md) - Bootstrap or destroy a playground KubeBlocks in local host or cloud. 
+ +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin.mdx b/docs/zh/preview/cli/kbcli_plugin.mdx new file mode 100644 index 00000000..9d132e7e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin.mdx @@ -0,0 +1,55 @@ +--- +title: kbcli plugin +--- + +Provides utilities for interacting with plugins. + +### Synopsis + +Provides utilities for interacting with plugins. + + Plugins provide extended functionality that is not part of the major command-line distribution. + +### Options + +``` + -h, --help help for plugin +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli plugin describe](kbcli_plugin_describe.md) - Describe a plugin +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes +* [kbcli plugin install](kbcli_plugin_install.md) - Install kbcli or kubectl plugins +* [kbcli plugin list](kbcli_plugin_list.md) - List all visible plugin executables on a user's PATH +* [kbcli plugin search](kbcli_plugin_search.md) - Search kbcli or kubectl plugins +* [kbcli plugin uninstall](kbcli_plugin_uninstall.md) - Uninstall kbcli or kubectl plugins +* [kbcli plugin upgrade](kbcli_plugin_upgrade.md) - Upgrade kbcli or kubectl plugins + +#### Go Back to [CLI Overview](cli.md) Homepage. 
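+
+As a usage sketch built only from the subcommands listed above (the index URL is taken from the `kbcli plugin index add` example later in this section; `myplugin` is a placeholder, not a real plugin name), a typical discover-and-install flow looks like:
+
+```
+  # Register an index, search it, install a plugin, and confirm it is visible
+  kbcli plugin index add default https://github.com/apecloud/block-index.git
+  kbcli plugin search myplugin
+  kbcli plugin install default/myplugin
+  kbcli plugin list
+```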
+ diff --git a/docs/zh/preview/cli/kbcli_plugin_describe.mdx b/docs/zh/preview/cli/kbcli_plugin_describe.mdx new file mode 100644 index 00000000..8d57c2fd --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_describe.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli plugin describe +--- + +Describe a plugin + +``` +kbcli plugin describe [flags] +``` + +### Examples + +``` + # Describe a plugin + kbcli plugin describe [PLUGIN] + + # Describe a plugin with index + kbcli plugin describe [INDEX/PLUGIN] +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin_index.mdx b/docs/zh/preview/cli/kbcli_plugin_index.mdx new file mode 100644 index 00000000..3a404811 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_index.mdx @@ -0,0 +1,50 @@ +--- +title: kbcli plugin index +--- + +Manage custom plugin indexes + +### Synopsis + +Manage which repositories are used to discover plugins and install plugins from + +### Options + +``` + -h, --help help for index +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. +* [kbcli plugin index add](kbcli_plugin_index_add.md) - Add a new index +* [kbcli plugin index delete](kbcli_plugin_index_delete.md) - Remove a configured index +* [kbcli plugin index list](kbcli_plugin_index_list.md) - List configured indexes +* [kbcli plugin index update](kbcli_plugin_index_update.md) - update all configured indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin_index_add.mdx b/docs/zh/preview/cli/kbcli_plugin_index_add.mdx new file mode 100644 index 00000000..66ae11dc --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_index_add.mdx @@ -0,0 +1,55 @@ +--- +title: kbcli plugin index add +--- + +Add a new index + +``` +kbcli plugin index add [flags] +``` + +### Examples + +``` + # Add a new plugin index + kbcli plugin index add default https://github.com/apecloud/block-index.git + + kbcli plugin index add krew https://github.com/kubernetes-sigs/krew-index.git +``` + +### Options + +``` + -h, --help help for add +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin_index_delete.mdx b/docs/zh/preview/cli/kbcli_plugin_index_delete.mdx new file mode 100644 index 00000000..47c79055 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_index_delete.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin index delete +--- + +Remove a configured index + +``` +kbcli plugin index delete [flags] +``` + +### Examples + +``` + # Delete a plugin index + kbcli plugin index delete myIndex +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. 
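+
+Taken together, the index subcommands form a small lifecycle. A sketch using only commands documented on the neighboring pages (`myIndex` reuses the placeholder from the delete example; `kbcli plugin index update`, which has no example of its own, refreshes every configured index):
+
+```
+  # Add an index, list what is configured, refresh all indexes, then remove it
+  kbcli plugin index add myIndex https://github.com/apecloud/block-index.git
+  kbcli plugin index list
+  kbcli plugin index update
+  kbcli plugin index delete myIndex
+```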
+ diff --git a/docs/zh/preview/cli/kbcli_plugin_index_list.mdx b/docs/zh/preview/cli/kbcli_plugin_index_list.mdx new file mode 100644 index 00000000..1931e4d0 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_index_list.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin index list +--- + +List configured indexes + +``` +kbcli plugin index list [flags] +``` + +### Examples + +``` + # List all configured plugin indexes + kbcli plugin index list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin_index_update.mdx b/docs/zh/preview/cli/kbcli_plugin_index_update.mdx new file mode 100644 index 00000000..0ab0caaf --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_index_update.mdx @@ -0,0 +1,46 @@ +--- +title: kbcli plugin index update +--- + +update all configured indexes + +``` +kbcli plugin index update [flags] +``` + +### Options + +``` + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin index](kbcli_plugin_index.md) - Manage custom plugin indexes + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin_install.mdx b/docs/zh/preview/cli/kbcli_plugin_install.mdx new file mode 100644 index 00000000..54933733 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_install.mdx @@ -0,0 +1,56 @@ +--- +title: kbcli plugin install +--- + +Install kbcli or kubectl plugins + +``` +kbcli plugin install [flags] +``` + +### Examples + +``` + # install a kbcli or kubectl plugin by name + kbcli plugin install [PLUGIN] + + # install a kbcli or kubectl plugin by name and index + kbcli plugin install [INDEX/PLUGIN] +``` + +### Options + +``` + -h, --help help for install +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_plugin_list.mdx b/docs/zh/preview/cli/kbcli_plugin_list.mdx
new file mode 100644
index 00000000..945ec144
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_plugin_list.mdx
@@ -0,0 +1,53 @@
+---
+title: kbcli plugin list
+---
+
+List all visible plugin executables on a user's PATH
+
+```
+kbcli plugin list
+```
+
+### Examples
+
+```
+  # List all available plugin files on a user's PATH.
+  kbcli plugin list
+```
+
+### Options
+
+```
+  -h, --help   help for list
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+ diff --git a/docs/zh/preview/cli/kbcli_plugin_search.mdx b/docs/zh/preview/cli/kbcli_plugin_search.mdx new file mode 100644 index 00000000..d294f743 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_search.mdx @@ -0,0 +1,58 @@ +--- +title: kbcli plugin search +--- + +Search kbcli or kubectl plugins + +### Synopsis + +Search kbcli or kubectl plugins by keywords + +``` +kbcli plugin search [flags] +``` + +### Examples + +``` + # search a kbcli or kubectl plugin with keywords + kbcli plugin search keyword1 keyword2 +``` + +### Options + +``` + -h, --help help for search + --limit int Limit the number of plugin descriptions to output (default 50) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_plugin_uninstall.mdx b/docs/zh/preview/cli/kbcli_plugin_uninstall.mdx new file mode 100644 index 00000000..f96801fa --- /dev/null +++ b/docs/zh/preview/cli/kbcli_plugin_uninstall.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli plugin uninstall +--- + +Uninstall kbcli or kubectl plugins + +``` +kbcli plugin uninstall [flags] +``` + +### Examples + +``` + # uninstall a kbcli or kubectl plugin by name + kbcli plugin uninstall [PLUGIN] +``` + +### Options + +``` + -h, --help help for uninstall +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_plugin_upgrade.mdx b/docs/zh/preview/cli/kbcli_plugin_upgrade.mdx
new file mode 100644
index 00000000..3ee5e590
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_plugin_upgrade.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli plugin upgrade
+---
+
+Upgrade kbcli or kubectl plugins
+
+```
+kbcli plugin upgrade [flags]
+```
+
+### Examples
+
+```
+  # upgrade installed plugins with specified name
+  kbcli plugin upgrade myplugin
+
+  # upgrade all installed plugins
+  kbcli plugin upgrade --all
+```
+
+### Options
+
+```
+      --all    Upgrade all installed plugins
+  -h, --help   help for upgrade
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+ --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli plugin](kbcli_plugin.md) - Provides utilities for interacting with plugins. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_report.mdx b/docs/zh/preview/cli/kbcli_report.mdx new file mode 100644 index 00000000..ca61eab7 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_report.mdx @@ -0,0 +1,44 @@ +--- +title: kbcli report +--- + +Report kubeblocks or cluster info. + +### Options + +``` + -h, --help help for report +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli report cluster](kbcli_report_cluster.md) - Report Cluster information +* [kbcli report kubeblocks](kbcli_report_kubeblocks.md) - Report KubeBlocks information, including deployments, events, logs, etc. + +#### Go Back to [CLI Overview](cli.md) Homepage. 
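+
+For example (an illustrative sketch; see the subcommand pages below for the full option lists):
+
+```
+  # report KubeBlocks status
+  kbcli report kubeblocks
+
+  # report information about a cluster named mycluster
+  kbcli report cluster mycluster
+```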
+
diff --git a/docs/zh/preview/cli/kbcli_report_cluster.mdx b/docs/zh/preview/cli/kbcli_report_cluster.mdx
new file mode 100644
index 00000000..773d6e1e
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_report_cluster.mdx
@@ -0,0 +1,79 @@
+---
+title: kbcli report cluster
+---
+
+Report Cluster information
+
+```
+kbcli report cluster NAME [-f file] [--with-logs] [--mask] [flags]
+```
+
+### Examples
+
+```
+  # report KubeBlocks status
+  kbcli report cluster mycluster
+
+  # report KubeBlocks cluster information to file
+  kbcli report cluster mycluster -f filename
+
+  # report KubeBlocks cluster information with logs
+  kbcli report cluster mycluster --with-logs
+
+  # report KubeBlocks cluster information with logs and mask sensitive info
+  kbcli report cluster mycluster --with-logs --mask
+
+  # report KubeBlocks cluster information with logs since 1 hour ago
+  kbcli report cluster mycluster --with-logs --since 1h
+
+  # report KubeBlocks cluster information with logs since given time
+  kbcli report cluster mycluster --with-logs --since-time 2023-05-23T00:00:00Z
+
+  # report KubeBlocks cluster information with logs for all containers
+  kbcli report cluster mycluster --with-logs --all-containers
+```
+
+### Options
+
+```
+      --all-containers      Get all containers' logs in the pod(s). By default, only the main container (the first container) will have logs recorded.
+  -f, --file string         zip file for output
+  -h, --help                help for cluster
+      --mask                mask sensitive info for secrets and configmaps (default true)
+  -n, --namespace string    KubeBlocks namespace
+  -o, --output string       Output format. One of: json|yaml. (default "json")
+      --since duration      Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.
+      --since-time string   Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.
+      --with-logs           include pod logs
+      --with-secrets        include secrets
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli report](kbcli_report.md) - Report kubeblocks or cluster info.
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_report_kubeblocks.mdx b/docs/zh/preview/cli/kbcli_report_kubeblocks.mdx
new file mode 100644
index 00000000..c49e0d73
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_report_kubeblocks.mdx
@@ -0,0 +1,70 @@
+---
+title: kbcli report kubeblocks
+---
+
+Report KubeBlocks information, including deployments, events, logs, etc.
+
+```
+kbcli report kubeblocks [-f file] [--with-logs] [--mask] [flags]
+```
+
+### Examples
+
+```
+  # report KubeBlocks status
+  kbcli report kubeblocks
+
+  # report KubeBlocks information to file
+  kbcli report kubeblocks -f filename
+
+  # report KubeBlocks information with logs
+  kbcli report kubeblocks --with-logs
+
+  # report KubeBlocks information with logs and mask sensitive info
+  kbcli report kubeblocks --with-logs --mask
+```
+
+### Options
+
+```
+      --all-containers      Get all containers' logs in the pod(s). By default, only the main container (the first container) will have logs recorded.
+  -f, --file string         zip file for output
+  -h, --help                help for kubeblocks
+      --mask                mask sensitive info for secrets and configmaps (default true)
+  -n, --namespace string    KubeBlocks namespace
+  -o, --output string       Output format. One of: json|yaml. (default "json")
+      --since duration      Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.
+      --since-time string   Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.
+      --with-logs           include pod logs
+      --with-secrets        include secrets
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation.
If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli report](kbcli_report.md) - Report kubeblocks or cluster info. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_trace.mdx b/docs/zh/preview/cli/kbcli_trace.mdx new file mode 100644 index 00000000..8d4ad716 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_trace.mdx @@ -0,0 +1,47 @@ +--- +title: kbcli trace +--- + +trace management command + +### Options + +``` + -h, --help help for trace +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + + +* [kbcli trace create](kbcli_trace_create.md) - create a trace. +* [kbcli trace delete](kbcli_trace_delete.md) - Delete a trace. +* [kbcli trace list](kbcli_trace_list.md) - list all traces. +* [kbcli trace update](kbcli_trace_update.md) - update a trace. +* [kbcli trace watch](kbcli_trace_watch.md) - watch a trace. + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_trace_create.mdx b/docs/zh/preview/cli/kbcli_trace_create.mdx new file mode 100644 index 00000000..04ebcd28 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_trace_create.mdx @@ -0,0 +1,63 @@ +--- +title: kbcli trace create +--- + +create a trace. 
+
+```
+kbcli trace create trace-name [flags]
+```
+
+### Examples
+
+```
+  # create a trace for the cluster with the same name 'pg-cluster'
+  kbcli trace create pg-cluster
+
+  # create a trace for the cluster named 'pg-cluster'
+  kbcli trace create pg-cluster-trace --cluster-name pg-cluster
+
+  # create a trace with custom locale, stateEvaluationExpression
+  kbcli trace create pg-cluster-trace --locale zh_cn --cel-state-evaluation-expression "has(object.status.phase) && object.status.phase == \"Running\""
+```
+
+### Options
+
+```
+      --cel-state-evaluation-expression string   Specify CEL state evaluation expression.
+      --cluster-name string                      Specify target cluster name.
+      --depth int                                Specify object tree depth to display.
+  -h, --help                                     help for create
+      --locale string                            Specify locale.
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli trace](kbcli_trace.md) - trace management command
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_trace_delete.mdx b/docs/zh/preview/cli/kbcli_trace_delete.mdx
new file mode 100644
index 00000000..262c1701
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_trace_delete.mdx
@@ -0,0 +1,53 @@
+---
+title: kbcli trace delete
+---
+
+Delete a trace.
+
+```
+kbcli trace delete trace-name [flags]
+```
+
+### Examples
+
+```
+  # Delete a trace
+  kbcli trace delete pg-cluster
+```
+
+### Options
+
+```
+  -h, --help   help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli trace](kbcli_trace.md) - trace management command
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_trace_list.mdx b/docs/zh/preview/cli/kbcli_trace_list.mdx
new file mode 100644
index 00000000..b1c42c02
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_trace_list.mdx
@@ -0,0 +1,57 @@
+---
+title: kbcli trace list
+---
+
+list all traces.
+
+```
+kbcli trace list [flags]
+```
+
+### Examples
+
+```
+  # list all traces
+  kbcli trace list
+```
+
+### Options
+
+```
+  -A, --all-namespaces     If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
+  -h, --help               help for list
+  -n, --namespace string   specify the namespace
+  -o, --output format      prints the output in the specified format. Allowed values: table, json, yaml, wide (default table)
+  -l, --selector string    Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.
+      --show-labels        When printing, show all labels as the last column (default hide labels column)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+ --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_trace_update.mdx b/docs/zh/preview/cli/kbcli_trace_update.mdx new file mode 100644 index 00000000..53ba1c71 --- /dev/null +++ b/docs/zh/preview/cli/kbcli_trace_update.mdx @@ -0,0 +1,62 @@ +--- +title: kbcli trace update +--- + +update a trace. + +``` +kbcli trace update trace-name [flags] +``` + +### Examples + +``` + # update a trace with custom locale, stateEvaluationExpression + kbcli trace update pg-cluster-trace --locale zh_cn --cel-state-evaluation-expression "has(object.status.phase) && object.status.phase == \"Running\"" +``` + +### Options + +``` + --allow-missing-template-keys If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. (default true) + --cel-state-evaluation-expression string Specify CEL state evaluation expression. + --depth int Specify object tree depth to display. (default -1) + --dry-run string[="unchanged"] Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource. (default "none") + --edit Edit the API resource + -h, --help help for update + --locale string Specify locale. + -o, --output string Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). + --show-managed-fields If true, keep the managedFields when printing objects in JSON or YAML format. + --template string Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use +``` + +### SEE ALSO + +* [kbcli trace](kbcli_trace.md) - trace management command + +#### Go Back to [CLI Overview](cli.md) Homepage. + diff --git a/docs/zh/preview/cli/kbcli_trace_watch.mdx b/docs/zh/preview/cli/kbcli_trace_watch.mdx new file mode 100644 index 00000000..e1d35f1e --- /dev/null +++ b/docs/zh/preview/cli/kbcli_trace_watch.mdx @@ -0,0 +1,53 @@ +--- +title: kbcli trace watch +--- + +watch a trace. + +``` +kbcli trace watch trace-name [flags] +``` + +### Examples + +``` + # watch a trace + kbcli trace watch pg-cluster-trace +``` + +### Options + +``` + -h, --help help for watch +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. + --cache-dir string Default cache directory (default "$HOME/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --match-server-version Require server version to match client version + -n, --namespace string If present, the namespace scope for this CLI request + --request-timeout string The length of time to wait before giving up on a single server request. 
Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+* [kbcli trace](kbcli_trace.md) - trace management command
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
+
diff --git a/docs/zh/preview/cli/kbcli_version.mdx b/docs/zh/preview/cli/kbcli_version.mdx
new file mode 100644
index 00000000..29e1e7f7
--- /dev/null
+++ b/docs/zh/preview/cli/kbcli_version.mdx
@@ -0,0 +1,47 @@
+---
+title: kbcli version
+---
+
+Print the version information, including the Kubernetes, KubeBlocks and kbcli versions.
+
+```
+kbcli version [flags]
+```
+
+### Options
+
+```
+  -h, --help      help for version
+      --verbose   print detailed kbcli information
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+      --as-uid string                  UID to impersonate for the operation.
+      --cache-dir string               Default cache directory (default "$HOME/.kube/cache")
+      --certificate-authority string   Path to a cert file for the certificate authority
+      --client-certificate string      Path to a client certificate file for TLS
+      --client-key string              Path to a client key file for TLS
+      --cluster string                 The name of the kubeconfig cluster to use
+      --context string                 The name of the kubeconfig context to use
+      --disable-compression            If true, opt-out of response compression for all requests to the server
+      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
+      --match-server-version           Require server version to match client version
+  -n, --namespace string               If present, the namespace scope for this CLI request
+      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string                  The address and port of the Kubernetes API server
+      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used
+      --token string                   Bearer token for authentication to the API server
+      --user string                    The name of the kubeconfig user to use
+```
+
+### SEE ALSO
+
+
+#### Go Back to [CLI Overview](cli.md) Homepage.
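+
+For example, a typical invocation might look like this (an illustrative sketch; the actual output depends on the versions installed in your environment):
+
+```
+kbcli version --verbose
+```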
+ diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/01-overview.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/01-overview.mdx new file mode 100644 index 00000000..2fb7ed74 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/01-overview.mdx @@ -0,0 +1,72 @@ +--- +description: 了解KubeBlocks Elasticsearch插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本。 +keywords: +- Elasticsearch +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks Elasticsearch 插件概述 +--- +# KubeBlocks Elasticsearch 插件概述 + +## 概述 + +Elasticsearch 是一个分布式、RESTful 风格的搜索引擎,专为生产级工作负载的速度和相关性优化。每个 Elasticsearch 集群由一个或多个节点组成,每个节点承担特定角色。 + +### 节点角色 + +| 角色 | 描述 | +|------|-------------| +| **master** | 管理集群状态并协调操作 | +| **data** | 存储数据并处理数据相关操作 | +| **data_content** | 存储文档数据 | +| **data_hot** | 处理近期频繁访问的数据 | +| **data_warm** | 存储访问频率较低的数据 | +| **data_cold** | 处理极少访问的数据 | +| **data_frozen** | 管理归档数据 | +| **ingest** | 在索引前处理文档 | +| **ml** | 运行机器学习任务 | +| **remote_cluster_client** | 连接远程集群 | +| **transform** | 处理数据转换 | + +[查看 Elasticsearch 节点角色文档](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) + +## 核心特性 + +### 生命周期管理 + +KubeBlocks 通过全面的生命周期管理简化 Elasticsearch 操作: + +| 特性 | 描述 | +|------------------------------|-----------------------------------------------------------------------------| +| **水平扩展** | 增减副本来调整容量 | +| **垂直扩展** | 调整 Elasticsearch 实例的 CPU/内存资源 | +| **存储卷扩容** | 动态增加存储容量而无需停机 | +| **重启操作** | 以最小中断进行受控集群重启 | +| **启动/停止** | 临时暂停/恢复集群操作 | +| **自定义服务** | 暴露专门的数据库端点 | +| **副本管理** | 安全地停用或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 放置和资源分配 | +| **监控** | 集成的 Prometheus 指标收集 | +| **日志** | 通过 Loki Stack 实现集中式日志 | + +### 支持版本 + +KubeBlocks Elasticsearch 插件支持以下 Elasticsearch 版本: + +| 主版本 | 支持的次版本 | +|---------------|--------------------------------| +| 7.x | 7.7.1,7.8.1,7.10.1 | +| 8.x | 8.1.3, 8.8.2 | + + +可通过以下命令查看支持的版本列表: +``` +kubectl get cmpv elasticsearch +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/02-quickstart.mdx new file mode 100644 index 00000000..38a8ff85 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/02-quickstart.mdx @@ -0,0 +1,329 @@ +--- +description: 使用KubeBlocks部署和管理Elasticsearch副本集集群的完整指南,涵盖安装、配置及运维最佳实践。 +keywords: +- Kubernetes +- Elasticsearch +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: Elasticsearch 快速入门 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Elasticsearch 快速入门 + +本指南提供了使用 **KubeBlocks Elasticsearch 插件** 部署和管理 Elasticsearch 副本集集群的完整流程,涵盖以下内容: +- 系统先决条件与插件安装 +- 集群创建与配置 +- 运维管理(包括启停操作) +- 连接方式与集群监控 + + + +## 前提条件 + +### 系统要求 + +开始前请确保您的环境满足以下要求: + +- 可用的 Kubernetes 集群(推荐 v1.21+ 版本) +- 已安装配置 `kubectl` v1.21+ 并具备集群访问权限 +- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/)) +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +### 验证 Elasticsearch 插件 + +Elasticsearch 插件默认随 KubeBlocks 安装。检查其状态: + +```bash +helm list -n kb-system | grep elasticsearch +``` + +
+示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-elasticsearch kb-system 1 2025-05-21 deployed elasticsearch-1.0.0 +``` +
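+
+若已安装 `kbcli`,也可以列出插件确认其状态(示例写法,假设 kbcli 已配置完成):
+
+```bash
+kbcli addon list | grep elasticsearch
+```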
+
+若插件未启用,请选择安装方式:
+
+
+
+  ```bash
+  # 添加 Helm 仓库
+  helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts
+  # 中国大陆用户若 GitHub 访问困难,可使用以下镜像仓库:
+  #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable
+
+  # 更新 Helm 仓库
+  helm repo update
+  # 查询可用插件版本
+  helm search repo kubeblocks/elasticsearch --versions
+  # 安装指定版本(将 <VERSION> 替换为目标版本)
+  helm upgrade -i kb-addon-elasticsearch kubeblocks-addons/elasticsearch --version <VERSION> -n kb-system
+  ```
+
+
+
+  ```bash
+  # 添加索引(kubeblocks 索引默认已添加)
+  kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git
+  # 更新指定索引
+  kbcli addon index update kubeblocks
+  # 更新所有索引
+  kbcli addon index update --all
+  ```
+
+  插件搜索与安装:
+
+  ```bash
+  # 搜索插件
+  kbcli addon search elasticsearch
+  # 安装指定版本插件(将 <VERSION> 替换为目标版本)
+  kbcli addon install elasticsearch --version <VERSION>
+  ```
+  **示例输出(`kbcli addon search`):**
+  ```bash
+  ADDON           VERSION   INDEX
+  elasticsearch   0.9.0     kubeblocks
+  elasticsearch   0.9.1     kubeblocks
+  elasticsearch   1.0.0     kubeblocks
+  ```
+  插件启用/停用:
+
+  ```bash
+  # 启用插件
+  kbcli addon enable elasticsearch
+  # 停用插件
+  kbcli addon disable elasticsearch
+  ```
+
+
+
+:::note
+**版本兼容性**
+
+请始终确保 Elasticsearch 插件版本与 KubeBlocks 主版本相匹配,以避免兼容性问题。
+
+:::
+
+## 部署 Elasticsearch 集群
+
+出于开发和测试目的,您可以部署一个单节点集群,其中单个节点承担所有角色。
+
+```bash
+kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/elasticsearch/cluster-single-node.yaml
+```
+
+该操作将创建:
+- 一个包含1个组件的 Elasticsearch 集群,其中单个副本承担所有角色
+- 默认资源分配(1核CPU,2Gi内存)
+- 20Gi持久化存储
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: es-singlenode
+  namespace: demo
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: mdit
+      componentDef: elasticsearch-8
+      serviceVersion: 8.8.2
+      replicas: 1
+      configs:
+        - name: es-cm
+          variables:
+            mode: "single-node"
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "1"
+          memory: "2Gi"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+```
+
+更多API字段和描述,请参阅[API参考文档](../user_docs/references/api-reference/cluster)。
+
+## 验证集群状态
+
+部署包含 1 个副本的 Elasticsearch 集群后,可通过以下方式确认部署成功:
+1. 集群阶段显示为`Running`
+2. 所有Pod均正常运行
+
+可通过以下任一方法检查状态:
+
+
+
+  ```bash
+  kubectl get cluster es-singlenode -n demo -w
+  NAME            CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
+  es-singlenode                        Delete               Running   49s
+
+  kubectl get pods -l app.kubernetes.io/instance=es-singlenode -n demo
+  NAME                   READY   STATUS    RESTARTS   AGE
+  es-singlenode-mdit-0   3/3     Running   0          58s
+  ```
+
+
+
+  安装`kbcli`后,可查看完整的集群信息:
+
+  ```bash
+  kbcli cluster describe es-singlenode -n demo
+
+  Name: es-singlenode	 Created Time: May 19,2025 20:34 UTC+0800
+  NAMESPACE   CLUSTER-DEFINITION   TOPOLOGY   STATUS    TERMINATION-POLICY
+  demo                                        Running   Delete
+
+  Endpoints:
+  COMPONENT   INTERNAL                                              EXTERNAL
+  mdit        es-singlenode-mdit-http.demo.svc.cluster.local:9200
+
+  Topology:
+  COMPONENT   SERVICE-VERSION   INSTANCE               ROLE   STATUS    AZ   NODE                             CREATED-TIME
+  mdit        8.8.2             es-singlenode-mdit-0          Running        kbv10-control-plane/172.19.0.2   May 19,2025 20:34 UTC+0800
+
+  Resources Allocation:
+  COMPONENT   INSTANCE-TEMPLATE   CPU(REQUEST/LIMIT)   MEMORY(REQUEST/LIMIT)   STORAGE-SIZE   STORAGE-CLASS
+  mdit                            1 / 1                2Gi / 2Gi               data:20Gi      standard
+
+  Images:
+  COMPONENT   COMPONENT-DEFINITION    IMAGE
+  mdit        elasticsearch-8-1.0.0   docker.io/library/elasticsearch:8.8.2
+                                      docker.io/prometheuscommunity/elasticsearch-exporter:v1.7.0
+                                      docker.io/apecloud/curl-jq:0.1.0
+
+  Data Protection:
+  BACKUP-REPO   AUTO-BACKUP   BACKUP-SCHEDULE   BACKUP-METHOD   BACKUP-RETENTION   RECOVERABLE-TIME
+
+  Show cluster events: kbcli cluster list-events -n demo es-singlenode
+  ```
+
+
+
+## 停止 Elasticsearch 集群
+
+停止集群会暂时暂停运行,同时保留所有数据和配置:
+
+**关键影响:**
+- 计算资源(Pod)将被释放
+- 持久化存储(PVC)保持完整
+- 服务定义得以保留
+- 集群配置不会丢失
+- 运行成本降低
+
+
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: elasticsearch-stop
+    namespace: demo
+  spec:
+    clusterName: es-singlenode
+    type: Stop
+  ```
+
+
+
+  也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群:
+
+  ```bash
+  kubectl patch cluster es-singlenode -n demo --type='json' -p='[
+  {
+    "op": "add",
+    "path": "/spec/componentSpecs/0/stop",
+    "value": true
+  }
+  ]'
+  ```
+
+
+
+## 启动 Elasticsearch 集群
+
+启动已停止的集群将恢复运行,所有数据和配置保持不变。
+
+**关键影响:**
+- 计算资源(Pod)会被重新创建
+- 服务将再次可用
+- 集群恢复到之前的状态
+
+
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: elasticsearch-start
+    namespace: demo
+  spec:
+    clusterName: es-singlenode
+    type: Start
+  ```
+
+
+
+  通过将 `spec.componentSpecs.stop` 设置为 false 来重启集群:
+
+  ```bash
+  kubectl patch cluster es-singlenode -n demo --type='json' -p='[
+  {
+    "op": "remove",
+    "path": "/spec/componentSpecs/0/stop"
+  }
+  ]'
+  ```
+
+
+
+## 删除 Elasticsearch 集群
+
+请根据数据保留需求谨慎选择删除策略:
+
+| 策略类型 | 删除的资源范围 | 数据清除情况 | 适用场景 |
+|-----------------|-------------------|-------------------|-----------------------|
+| DoNotTerminate | 不删除任何资源 | 保留所有数据 | 关键生产环境集群 |
+| Delete | 删除所有资源 | 清除PVC存储卷数据 | 非关键环境 |
+| WipeOut | 删除所有资源 | 彻底清除所有数据* | 仅限测试环境 |
+
+*包含外部存储中的快照和备份数据
+
+**删除前检查清单:**
+1. 确认无应用正在使用该集群
+2. 确保存在必要的备份
+3. 验证terminationPolicy配置正确
+4. 检查是否存在依赖资源
+
+测试环境完整清理操作:
+
+```bash
+kubectl patch cluster es-singlenode -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo
+kubectl delete cluster es-singlenode -n demo
+```
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx
new file mode 100644
index 00000000..9348da26
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/01-stop-start-restart.mdx
@@ -0,0 +1,320 @@
+---
+description: 了解如何在KubeBlocks中管理Elasticsearch集群状态,包括停止、启动和重启操作,以优化资源使用。
+keywords:
+- KubeBlocks
+- Elasticsearch
+- Cluster Management
+- Stop
+- Start
+- Restart
+sidebar_label: 生命周期管理
+sidebar_position: 1
+title: Elasticsearch 集群生命周期管理(停止、启动、重启)
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Elasticsearch 集群生命周期管理
+
+本指南演示如何在 **KubeBlocks** 中管理 Elasticsearch 集群的运行状态,包括:
+
+- 停止集群以节省资源
+- 启动已停止的集群
+- 重启集群组件
+
+这些操作有助于优化 Kubernetes 环境中的资源使用并降低运营成本。
+
+KubeBlocks 中的生命周期管理操作:
+
+| 操作 | 效果 | 使用场景 |
+|-----------|--------|----------|
+| 停止 | 暂停集群运行,保留存储 | 节约成本、维护场景 |
+| 启动 | 恢复集群运行 | 暂停后恢复服务 |
+| 重启 | 重建组件 Pod | 配置变更、故障排查 |
+
+## 前提条件
+
+import Prerequisites from '../_tpl/_prerequisites.mdx'
+
+<Prerequisites />
+
+## 部署 Elasticsearch 集群
+
+import CreateCluster from '../_tpl/_create-cluster.mdx'
+
+<CreateCluster />
+
+## 验证部署
+
+import VerifyCluster from '../_tpl/_verify-cluster.mdx'
+
+<VerifyCluster />
+
+## 集群生命周期操作
+
+### 停止集群
+
+在 KubeBlocks 中停止 Elasticsearch 集群将执行以下操作:
+
+1. 终止所有运行中的 Pod
+2. 保留持久化存储(PVC)
+3. 维持集群配置
+
+此操作适用于以下场景:
+- 临时节省成本
+- 维护窗口期
+- 开发环境暂停
+
+
+
+  选项1:使用 OpsRequest API
+
+  创建停止操作请求:
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: es-multinode-stop-ops
+    namespace: demo
+  spec:
+    clusterName: es-multinode
+    type: Stop
+  ```
+
+
+
+  选项2:使用 Cluster API Patch
+
+  通过修改 stop 字段直接调整集群规格:
+
+  ```bash
+  kubectl patch cluster es-multinode -n demo --type='json' -p='[
+  {
+    "op": "add",
+    "path": "/spec/componentSpecs/0/stop",
+    "value": true
+  },
+  {
+    "op": "add",
+    "path": "/spec/componentSpecs/1/stop",
+    "value": true
+  }
+  ]'
+  ```
+
+
+
+### 验证集群停止
+
+确认停止操作成功:
+
+1. 检查集群状态转换:
+   ```bash
+   kubectl get cluster es-multinode -n demo -w
+   ```
+   示例输出:
+   ```bash
+   NAME           CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS     AGE
+   es-multinode                        Delete               Stopping   8m6s
+   es-multinode                        Delete               Stopped    9m41s
+   ```
+
+2. 确认无运行中的 Pod:
+   ```bash
+   kubectl get pods -l app.kubernetes.io/instance=es-multinode -n demo
+   ```
+   示例输出:
+   ```bash
+   No resources found in demo namespace.
+   ```
+
+3. 验证持久卷仍然存在:
+   ```bash
+   kubectl get pvc -l app.kubernetes.io/instance=es-multinode -n demo
+   ```
+   示例输出:
+   ```bash
+   NAME                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
+   data-es-multinode-dit-0      Bound    pvc-aa8136e5-a69a-4117-bb4c-8978978bb77f   20Gi       RWO            standard                               8m25s
+   data-es-multinode-dit-1      Bound    pvc-408fe4d5-b3a9-4984-b6e5-48ec133307eb   20Gi       RWO            standard                               8m25s
+   data-es-multinode-dit-2      Bound    pvc-cf6c3c7c-bb5f-4fa6-8dff-33e0862f8ef9   20Gi       RWO            standard                               8m25s
+   data-es-multinode-master-0   Bound    pvc-5793e794-8c91-4bba-b6e8-52c414ec0ade   20Gi       RWO            standard                               8m25s
+   data-es-multinode-master-1   Bound    pvc-044dae8d-82ee-41f3-867d-c8f27ec08fbe   20Gi       RWO            standard                               8m25s
+   data-es-multinode-master-2   Bound    pvc-2af7cedb-2f5f-4846-be43-ff6da8109880   20Gi       RWO            standard                               8m25s
+   ```
+
+### 启动集群
+
+启动已停止的 Elasticsearch 集群将:
+1. 重新创建所有 Pod
+2. 重新挂载持久化存储
+3. 恢复服务端点
+
+预期行为:
+- 集群恢复到之前状态
+- 不会发生数据丢失
+- 服务自动恢复
+
+
+
+  发起启动操作请求:
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: es-multinode-start-ops
+    namespace: demo
+  spec:
+    # 指定此操作目标集群资源的名称
+    clusterName: es-multinode
+    type: Start
+  ```
+
+
+
+  修改集群规格以恢复运行:
+  1. 设置 stop: false,或
+  2. 完全移除 stop 字段
+
+  ```bash
+  kubectl patch cluster es-multinode -n demo --type='json' -p='[
+  {
+    "op": "remove",
+    "path": "/spec/componentSpecs/0/stop"
+  },
+  {
+    "op": "remove",
+    "path": "/spec/componentSpecs/1/stop"
+  }
+  ]'
+  ```
+
+
+
+### 验证集群启动
+
+确认启动操作成功:
+
+1. 检查集群状态转换:
+   ```bash
+   kubectl get cluster es-multinode -n demo -w
+   ```
+   示例输出:
+   ```bash
+   NAME           CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS     AGE
+   es-multinode                        Delete               Updating   24m
+   es-multinode                        Delete               Running    24m
+   es-multinode                        Delete               Running    24m
+   ```
+
+2. 验证 Pod 重建:
+   ```bash
+   kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode
+   ```
+   示例输出:
+   ```bash
+   NAME                    READY   STATUS    RESTARTS   AGE
+   es-multinode-dit-0      3/3     Running   0          24m
+   es-multinode-dit-1      3/3     Running   0          24m
+   es-multinode-dit-2      3/3     Running   0          24m
+   es-multinode-master-0   3/3     Running   0          24m
+   es-multinode-master-1   3/3     Running   0          24m
+   es-multinode-master-2   3/3     Running   0          24m
+   ```
+
+### 重启集群
+
+重启操作提供以下特性:
+- 无需完全停止集群即可重建 Pod
+- 组件级粒度控制
+- 最小化服务中断
+
+适用场景:
+- 需要重启的配置变更
+- 资源刷新
+- 故障排查
+
+**检查组件**
+
+Elasticsearch 集群包含两个组件。获取组件列表:
+```bash
+kubectl get cluster -n demo es-multinode -oyaml | yq '.spec.componentSpecs[].name'
+```
+
+预期输出:
+```text
+dit
+master
+```
+
+**通过 OpsRequest API 重启 dit 组件**
+
+列出需要重启的特定组件:
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: es-multinode-restart-ops
+  namespace: demo
+spec:
+  clusterName: es-multinode
+  type: Restart
+  restart:
+  - componentName: dit
+```
+
+**验证重启完成**
+
+确认组件重启成功:
+
+1. 跟踪 OpsRequest 进度:
+   ```bash
+   kubectl get opsrequest es-multinode-restart-ops -n demo -w
+   ```
+   示例输出:
+   ```bash
+   NAME                       TYPE      CLUSTER        STATUS    PROGRESS   AGE
+   es-multinode-restart-ops   Restart   es-multinode   Running   0/3        8s
+   es-multinode-restart-ops   Restart   es-multinode   Running   1/3        59s
+   es-multinode-restart-ops   Restart   es-multinode   Running   2/3        117s
+   es-multinode-restart-ops   Restart   es-multinode   Running   3/3        2m55s
+   es-multinode-restart-ops   Restart   es-multinode   Running   3/3        2m55s
+   es-multinode-restart-ops   Restart   es-multinode   Succeed   3/3        2m55s
+   ```
+
+2. 检查 Pod 状态:
+   ```bash
+   kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode
+   ```
+   注意:重启后 Pod 将显示新的创建时间戳。只有属于组件 `dit` 的 Pod 会被重启。
+
+操作完成后,集群将返回 Running 状态。
+
+## 总结
+在本指南中,您学习了如何:
+1. **停止 Elasticsearch 集群**:暂停服务运行同时保留持久化存储。
+2. **启动已停止的集群**:将集群重新上线恢复服务。
+3. **重启特定集群组件**:在不停止整个集群的情况下,通过重建对应Pod实现组件更新。
+
+通过管理 Elasticsearch 集群的生命周期,您可以优化资源利用率、降低成本,并在 Kubernetes 环境中保持运维灵活性。KubeBlocks 提供了无缝执行这些操作的能力,在确保高可用性的同时将服务中断降至最低。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx
new file mode 100644
index 00000000..9c1869ee
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/02-vertical-scaling.mdx
@@ -0,0 +1,179 @@
+---
+description: 了解如何在KubeBlocks管理的Elasticsearch集群中执行垂直扩展,以优化资源利用率并提升性能。
+keywords:
+- KubeBlocks
+- Elasticsearch
+- Vertical Scaling
+- Kubernetes
+- Resources
+sidebar_label: 垂直扩展
+sidebar_position: 2
+title: Elasticsearch 集群中的垂直扩展
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# 使用KubeBlocks对Elasticsearch集群进行垂直扩缩容
+
+本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的Elasticsearch集群进行垂直扩缩容,同时保持副本数量不变。
+
+垂直扩缩容会修改Elasticsearch实例的计算资源(CPU和内存),同时保持副本数量不变。主要特点:
+
+- **无中断性**:配置得当时,可在扩缩容期间保持可用性
+- **精细化**:可独立调整CPU、内存或两者
+- **可逆性**:可根据需要向上或向下扩缩容
+
+KubeBlocks通过遵循受控的、角色感知的更新策略确保扩缩操作期间影响最小:
+
+**角色感知副本(主/从副本)**
+- 从副本优先更新 - 非领导者Pod先升级以最小化中断
+- 主副本最后更新 - 仅当所有从副本健康后,主Pod才会重启
+- 集群状态在所有副本稳定后从"更新中"转为"运行中"
+
+**无角色副本(基于序号的扩缩容)**
+如果副本没有定义角色,更新将遵循Kubernetes Pod序号顺序:
+- 最高序号优先(例如pod-2 → pod-1 → pod-0)以确保确定性滚动更新
+
+## 前提条件
+
+import Prerequisites from '../_tpl/_prerequisites.mdx'
+
+<Prerequisites />
+
+## 部署Elasticsearch集群
+
+import CreateCluster from '../_tpl/_create-cluster.mdx'
+
+<CreateCluster />
+
+## 验证部署
+
+import VerifyCluster from '../_tpl/_verify-cluster.mdx'
+
+<VerifyCluster />
+
+## 垂直扩缩容
+
+**预期工作流程**:
+
+1. Pod按序号从高到低顺序更新(例如pod-2 → pod-1 → pod-0)
+2. 集群状态从"更新中"转为"运行中"
+
+
+
+  选项1:使用VerticalScaling OpsRequest
+
+  应用以下YAML为 dit 组件调整资源:
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: es-multinode-vscale-ops
+    namespace: demo
+  spec:
+    clusterName: es-multinode
+    type: VerticalScaling
+    verticalScaling:
+    - componentName: dit
+      requests:
+        cpu: '1'
+        memory: 1Gi
+      limits:
+        cpu: '1'
+        memory: 1Gi
+  ```
+
+  可以使用以下命令查看扩缩容操作进度:
+
+  ```bash
+  kubectl -n demo get ops es-multinode-vscale-ops -w
+  ```
+
+  预期结果:
+
+  ```bash
+  NAME                      TYPE              CLUSTER        STATUS    PROGRESS   AGE
+  es-multinode-vscale-ops   VerticalScaling   es-multinode   Running   0/3        57s
+  es-multinode-vscale-ops   VerticalScaling   es-multinode   Running   1/3        60s
+  es-multinode-vscale-ops   VerticalScaling   es-multinode   Running   2/3        118s
+  es-multinode-vscale-ops   VerticalScaling   es-multinode   Running   3/3        2m51s
+  es-multinode-vscale-ops   VerticalScaling   es-multinode   Running   3/3        2m51s
+  es-multinode-vscale-ops   VerticalScaling   es-multinode   Succeed   3/3        2m51s
+  ```
+
+
+
+  选项2:直接更新Cluster API
+
+  也可以直接更新`spec.componentSpecs.resources`字段来调整资源进行垂直扩缩容。
+
+  ```yaml
+  apiVersion: apps.kubeblocks.io/v1
+  kind: Cluster
+  spec:
+    componentSpecs:
+    - name: dit
+      replicas: 3
+      resources:
+        requests:
+          cpu: "1"       # 根据需要更新资源
+          memory: "1Gi"  # 根据需要更新资源
+        limits:
+          cpu: "1"       # 根据需要更新资源
+          memory: "1Gi"  # 根据需要更新资源
+    ...
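+    # (示意)等价地,也可以用 kubectl patch 一次性更新 resources 字段,
+    # 此处假设目标组件位于 componentSpecs 的索引 0:
+    # kubectl patch cluster es-multinode -n demo --type=json \
+    #   -p='[{"op":"replace","path":"/spec/componentSpecs/0/resources","value":{"requests":{"cpu":"1","memory":"1Gi"},"limits":{"cpu":"1","memory":"1Gi"}}}]'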
+ ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 在维护窗口或低流量时段进行扩缩容 +- 确认Kubernetes集群有足够资源 +- 开始前检查是否有正在进行的操作 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证QoS + +**扩缩容后:** +- 监控资源利用率和应用性能 +- 根据需要调整Elasticsearch参数 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe es-multinode -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +dit 1 / 1 1Gi / 1Gi data:20Gi +``` + +## KubeBlocks垂直扩缩容的关键优势 +- 无缝扩缩容:按特定顺序重建Pod以确保最小中断 +- 动态资源调整:根据工作负载需求轻松调整CPU和内存 +- 灵活性:可选择OpsRequest动态扩缩容或直接API更新精确控制 +- 提高可用性:扩缩过程中集群保持运行状态,维持高可用性 + +## 清理 +删除所有创建的资源,包括Elasticsearch集群及其命名空间: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中,您学会了如何: +1. 部署由KubeBlocks管理的Elasticsearch集群 +2. 通过增加或减少elasticsearch组件的资源进行垂直扩缩容 +3. 使用OpsRequest和直接Cluster API更新两种方式调整资源分配 + +垂直扩缩容是优化资源利用率和适应工作负载变化的强大工具,可确保您的Elasticsearch集群保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..af56de7e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,237 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的Elasticsearch集群执行水平扩缩容(扩容与缩容)。 +keywords: +- KubeBlocks +- Elasticsearch +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks实现Elasticsearch集群的水平扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现Elasticsearch集群水平扩缩容 + +本指南将介绍如何对KubeBlocks管理的Elasticsearch集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用**OpsRequest**和直接修改**Cluster API**两种方式实现这一目标。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Elasticsearch集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## 扩容(增加副本) + +**预期工作流程**: + +1. 新Pod被创建,状态从`Pending`转为`Running` +2. 集群状态从`Updating`变为`Running` + + + + + + 选项1:使用水平扩容OpsRequest + + 通过为elasticsearch组件增加1个副本来扩容集群: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-scale-out-ops + namespace: demo + spec: + clusterName: es-multinode + type: HorizontalScaling + horizontalScaling: + - componentName: dit + # 指定组件扩容的副本变化 + scaleOut: + # 指定组件的副本变化量 + # 当前组件增加1个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops es-multinode-scale-out-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-scale-out-ops HorizontalScaling es-multinode Running 0/1 9s + es-multinode-scale-out-ops HorizontalScaling es-multinode Running 1/1 16s + es-multinode-scale-out-ops HorizontalScaling es-multinode Succeed 1/1 16s + ``` + + + + + 选项2:直接更新Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: dit + replicas: 4 # 增加副本数实现扩容 + ... 
+  ```
+
+  或者使用命令修补集群CR:
+
+  ```bash
+  kubectl patch cluster es-multinode -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]'
+  ```
+
+
+
+### 验证扩容结果
+
+操作完成后,您将看到新 Pod 被创建,Elasticsearch 集群状态从`Updating`变为`Running`,新建的 Pod 会按该组件配置的 `data,ingest,transform` 角色加入集群。
+
+```bash
+kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit
+```
+
+示例输出:
+```bash
+NAME                 READY   STATUS    RESTARTS   AGE
+es-multinode-dit-0   3/3     Running   0          4m28s
+es-multinode-dit-1   3/3     Running   0          5m27s
+es-multinode-dit-2   3/3     Running   0          6m25s
+es-multinode-dit-3   3/3     Running   0          1m25s
+```
+
+## 缩容(减少副本)
+
+**预期工作流程**:
+
+1. 移除序号最大的副本
+2. Pod被优雅终止
+3. 集群状态从`Updating`变为`Running`
+
+
+
+
+
+  选项1:使用水平缩容OpsRequest
+
+  通过减少1个副本来缩容Elasticsearch集群:
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: es-multinode-scale-in-ops
+    namespace: demo
+  spec:
+    clusterName: es-multinode
+    type: HorizontalScaling
+    horizontalScaling:
+    - componentName: dit
+      # 指定组件缩容的副本变化
+      scaleIn:
+        # 指定组件的副本变化量
+        # 当前组件减少1个副本
+        replicaChanges: 1
+  ```
+
+  监控操作进度:
+  ```bash
+  kubectl get ops es-multinode-scale-in-ops -n demo -w
+  ```
+
+  预期结果:
+  ```bash
+  NAME                        TYPE                CLUSTER        STATUS    PROGRESS   AGE
+  es-multinode-scale-in-ops   HorizontalScaling   es-multinode   Running   0/1        8s
+  es-multinode-scale-in-ops   HorizontalScaling   es-multinode   Running   1/1        24s
+  es-multinode-scale-in-ops   HorizontalScaling   es-multinode   Succeed   1/1        24s
+  ```
+
+
+
+
+  选项2:直接更新Cluster API
+
+  您也可以直接修改Cluster资源中的`replicas`字段:
+
+  ```yaml
+  apiVersion: apps.kubeblocks.io/v1
+  kind: Cluster
+  spec:
+    componentSpecs:
+      - name: dit
+        replicas: 3 # 减少副本数实现缩容
+  ```
+
+  或者使用命令修补集群CR:
+
+  ```bash
+  kubectl patch cluster es-multinode -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]'
+  ```
+
+
+
+
+### 验证缩容结果
+
+```bash
+kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit
+```
+
+示例输出(三个Pod):
+```bash
+NAME                 READY   STATUS    RESTARTS   AGE
+es-multinode-dit-0   3/3     Running   0          8m20s
+es-multinode-dit-1   3/3     Running   0          9m19s
+es-multinode-dit-2   3/3     Running   0          10m
+```
+
+## 最佳实践
+
+执行水平扩缩容时:
+- 尽可能选择低流量时段操作
+- 扩缩容过程中监控集群健康状态
+- 扩容前确保有足够的资源
+- 考虑新副本的存储需求
+
+## 清理资源
+删除Elasticsearch集群及其命名空间以移除所有创建的资源:
+```bash
+kubectl delete cluster es-multinode -n demo
+kubectl delete ns demo
+```
+
+## 总结
+在本指南中您学会了:
+- 执行扩容操作为Elasticsearch集群增加副本
+- 执行缩容操作从Elasticsearch集群移除副本
+- 使用OpsRequest和直接Cluster API更新两种方式进行水平扩缩容
+
+KubeBlocks能确保在最小化影响数据库操作的前提下实现无缝扩缩容。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..e47f7540
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,252 @@
+---
+description: 了解如何在KubeBlocks管理的Elasticsearch集群中无停机扩展持久卷声明(PVC)。
+keywords:
+- KubeBlocks
+- Elasticsearch
+- Volume Expansion
+- Kubernetes
+- PVC
+sidebar_label: 存储卷扩容
+sidebar_position: 4
+title: 在 Elasticsearch 集群中扩展存储卷
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# 扩展 Elasticsearch 集群存储卷
+
+本指南介绍如何在 **KubeBlocks** 管理的 Elasticsearch 集群中扩展持久卷声明(PVC)。存储卷扩展功能允许动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持时,此操作可在不中断服务的情况下执行。
+
+存储卷扩展允许您在创建持久卷声明(PVC)后增加其容量。该功能在 Kubernetes v1.11 中引入,并在 Kubernetes v1.24 中正式发布(GA)。
+
+## 前提条件
+
+import Prerequisites from 
'../_tpl/_prerequisites.mdx' + + + +### 检查存储类是否支持卷扩展 + +列出所有可用存储类,并通过检查 `ALLOWVOLUMEEXPANSION` 字段验证是否支持卷扩展: +```bash +kubectl get storageclass +``` + +示例输出: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +确保您使用的存储类将 `ALLOWVOLUMEEXPANSION` 设置为 true。若为 false,则表示该存储类不支持卷扩展。 + +## 使用 StorageClass 部署 Elasticsearch 集群 + +KubeBlocks 采用声明式方式管理 Elasticsearch 集群。以下是部署包含 3 个副本的 Elasticsearch 集群的示例配置。 + +应用以下 YAML 配置部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: es-multinode + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: dit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # 使用 key `roles` 指定该组件承担的角色 + roles: data,ingest,transform + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩展的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # 使用 key `roles` 指定该组件承担的角色 + roles: master + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩展的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**关键字段说明** +- `storageClassName`: 指定支持卷扩展的 `StorageClass` 名称。若未设置,将使用标注为 `default` 的 StorageClass。 + +:::note +**ALLOWVOLUMEEXPANSION** + +创建集群时,请确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`)。 + +::: + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 扩展存储卷 + +:::note +1. 确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`)。 +2. 新容量必须大于当前容量。 +3. 
根据存储提供商的不同,卷扩展可能需要额外配置。 +::: + +您可以通过以下两种方式之一扩展存储卷: + + + + + 选项 1:使用 VolumeExpansion OpsRequest + + 应用以下 YAML 为 elasticsearch 组件增加存储卷容量: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-expand-volume-ops + namespace: demo + spec: + clusterName: es-multinode + type: VolumeExpansion + volumeExpansion: + - componentName: dit + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 通过以下命令监控扩展进度: + + ```bash + kubectl describe ops es-multinode-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 完成后,PVC 容量将更新。 + + :::note + 若使用的存储类不支持卷扩展,此 OpsRequest 将快速失败并显示类似信息: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 选项 2:直接更新 Cluster API + + 您也可以直接更新 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段至目标容量。 + + ```yaml + componentSpecs: + - name: dit + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量,并确保大于当前容量 + storage: 30Gi + ``` + KubeBlocks 将根据新配置自动更新 PVC 容量。 + + + +## 验证 + +验证更新后的集群配置: +```bash +kbcli cluster describe es-multinode -n demo +``` +预期输出: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +dit 1 / 1 1Gi / 1Gi data:30Gi +``` +数据 PVC 的存储卷容量已更新至指定值(本例中为 30Gi)。 + +确认 PVC 扩容完成: +```bash +kubectl get pvc -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit -n demo +``` +预期输出: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS +data-es-multinode-dit-0 Bound pvc-uuid 30Gi RWO +data-es-multinode-dit-1 Bound pvc-uuid 30Gi RWO +data-es-multinode-dit-2 Bound pvc-uuid 30Gi RWO +``` + +## 清理 +删除所有创建的资源,包括 Elasticsearch 集群及其命名空间: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +``` + +## 总结 + +在本指南中您学会了: +1. 验证存储类对卷扩展的兼容性。 +2. 通过以下方式执行卷扩展: + - 使用 OpsRequest 进行动态更新。 + - 通过 Cluster API 进行手动更新。 +3. 
验证更新后的 PVC 容量并确保扩容操作完成。 + +借助存储卷扩展功能,您可以高效扩展 Elasticsearch 集群的存储容量而无需中断服务,确保数据库能够随着应用需求增长而扩展。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..e8924839 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,316 @@ +--- +description: 了解如何通过负载均衡器及其他服务类型,在KubeBlocks中配置和管理Elasticsearch服务,实现内外部访问。 +keywords: +- KubeBlocks +- Elasticsearch +- LoadBalancer +- External Service +- Expose +- Kubernetes +sidebar_label: 管理Elasticsearch服务 +sidebar_position: 5 +title: 使用KubeBlocks中的声明式集群API创建和销毁Elasticsearch服务 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用 KubeBlocks 声明式集群 API 管理 Elasticsearch 服务 + +本指南提供了逐步操作说明,指导如何对外部和内部暴露由 KubeBlocks 管理的 Elasticsearch 服务。您将学习如何通过云服务提供商的负载均衡器服务配置外部访问、管理内部服务,以及在不再需要时正确关闭外部暴露功能。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 Elasticsearch 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + + +## 查看网络服务 +列出为 Elasticsearch 集群创建的服务: +```bash +kubectl get service -l app.kubernetes.io/instance=es-multinode -n demo +``` + +示例服务列表: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +es-multinode-dit-http ClusterIP 10.96.224.72 9200/TCP 56m +es-multinode-master-http ClusterIP 10.96.153.35 9200/TCP 56m +``` + +## 对外暴露 Elasticsearch 服务 + +外部服务地址允许公网访问 Elasticsearch,而内部服务地址将访问限制在用户的 VPC 内。 + +### 服务类型对比 + +| 类型 | 使用场景 | 云服务成本 | 安全性 | +|---------------|--------------------|------------|------------| +| ClusterIP | 内部服务通信 | 免费 | 最高 | +| NodePort | 开发测试环境 | 低 | 中等 | +| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 | + + + + + + 选项一:使用 OpsRequest + + 要通过 LoadBalancer 对外暴露 Elasticsearch 服务,创建 OpsRequest 资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: es-multinode + expose: + - componentName: master + services: + - name: internet + # 决定服务暴露方式,默认为 'ClusterIP' + # 可选值:'ClusterIP'、'NodePort' 和 'LoadBalancer' + serviceType: LoadBalancer + ports: + - name: es-http + port: 9200 + protocol: TCP + targetPort: es-http + # 当 ServiceType 为 LoadBalancer 时,包含云服务商相关参数 + # 以下是 AWS EKS 的配置示例 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + switch: Enable + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops es-multinode-expose-enable-ops -n demo + ``` + + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-expose-enable-ops Expose es-multinode Succeed 1/1 31s + ``` + + + + + + 选项二:使用 Cluster API + + 或者,在 Cluster 资源的 `spec.services` 部分添加 LoadBalancer 服务配置: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: es-multinode + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: elasticsearch + # 暴露外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + componentSelector: master + name: master-internet + serviceName: master-internet + spec: + ports: + - name: es-http + nodePort: 32751 + port: 9200 + protocol: TCP + targetPort: 
es-http
+        type: LoadBalancer
+    componentSpecs:
+      ...
+  ```
+  上述 YAML 配置在 services 部分新增了一个外部服务。该 LoadBalancer 服务包含了 AWS 网络负载均衡器(NLB)的注解。
+
+  :::note
+  云服务商注解说明
+
+  使用 LoadBalancer 服务时,必须添加对应云服务商的特定注解。以下是不同云服务商的常用注解:
+
+  - AWS
+  ```yaml
+  service.beta.kubernetes.io/aws-load-balancer-type: nlb  # 使用网络负载均衡器
+  service.beta.kubernetes.io/aws-load-balancer-internal: "true"  # 设为 "false" 则创建面向互联网的负载均衡器
+  ```
+
+  - Azure
+  ```yaml
+  service.beta.kubernetes.io/azure-load-balancer-internal: "true"  # 设为 "false" 则创建面向互联网的负载均衡器
+  ```
+
+  - GCP
+  ```yaml
+  networking.gke.io/load-balancer-type: "Internal"  # 限制负载均衡器仅限内部 VPC 访问。默认不指定时为面向互联网。
+  cloud.google.com/l4-rbs: "enabled"  # 面向互联网负载均衡器的优化配置
+  ```
+
+  - 阿里云
+  ```yaml
+  service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet"  # 设为 "intranet" 则创建内部负载均衡器
+  ```
+  :::
+
+
+  :::note
+  `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制负载均衡器是内部还是面向互联网。注意该注解在服务创建后不能动态修改。
+  ```yaml
+  service.beta.kubernetes.io/aws-load-balancer-internal: "false"  # 设为 "true" 则使用内部 VPC IP
+  ```
+  如果在服务创建后将该注解从 "false" 改为 "true",虽然 Service 对象中的注解会更新,但负载均衡器仍会保留其公网 IP。
+
+  正确修改该行为的步骤:
+  - 首先删除现有的 LoadBalancer 服务
+  - 使用更新后的注解重新创建服务(`service.beta.kubernetes.io/aws-load-balancer-internal`: "true")
+  - 等待新的负载均衡器分配正确的内部或外部 IP
+  :::
+
+
+  使用以下命令等待集群状态变为 Running:
+  ```bash
+  kubectl get cluster es-multinode -n demo -w
+  ```
+
+  ```text
+  NAME           CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
+  es-multinode                        Delete               Running   18m
+  ```
+
+
+
+
+### 验证暴露的服务
+检查服务详情以确认 LoadBalancer 服务已创建:
+
+```bash
+kubectl get service -l app.kubernetes.io/instance=es-multinode -n demo
+```
+
+示例输出:
+```bash
+NAME                           TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
+es-multinode-dit-http          ClusterIP      10.96.224.72                 9200/TCP         59m
+es-multinode-master-http       ClusterIP      10.96.153.35                 9200/TCP         59m
+es-multinode-master-internet   LoadBalancer   10.96.38.72                  9200:30998/TCP   19s
+```
+
+## 禁用外部暴露
+
+
+
+
+
+  方法一:使用 OpsRequest
+
+  要禁用外部访问,创建一个 OpsRequest:
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: es-multinode-expose-disable-ops
+    namespace: demo
+  spec:
+    clusterName: es-multinode
+    expose:
+    - componentName: master
+      services:
+      - name: internet
+        serviceType: LoadBalancer
+      switch: Disable
+    preConditionDeadlineSeconds: 0
+    type: Expose
+  ```
+
+  等待 OpsRequest 完成:
+  ```bash
+  kubectl get ops es-multinode-expose-disable-ops -n demo
+  ```
+  示例输出:
+  ```bash
+  NAME                              TYPE     CLUSTER        STATUS    PROGRESS   AGE
+  es-multinode-expose-disable-ops   Expose   es-multinode   Succeed   1/1        16s
+  ```
+
+
+
+
+
+  方法二:使用 Cluster API
+
+  或者,从 Cluster 资源中移除 `spec.services` 字段:
+  ```bash
+  kubectl patch cluster es-multinode -n demo --type=json -p='[
+    {
+      "op": "remove",
+      "path": "/spec/services"
+    }
+  ]'
+  ```
+
+  监控集群状态直到变为 Running:
+  ```bash
+  kubectl get cluster es-multinode -n demo -w
+  ```
+
+  ```bash
+  NAME           CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
+  es-multinode                        Delete               Running   26m
+  ```
+
+
+
+### 验证服务移除
+
+确保 'es-multinode-master-internet' 服务已被移除:
+
+```bash
+kubectl get service -l app.kubernetes.io/instance=es-multinode -n demo
+```
+
+预期结果:'es-multinode-master-internet' 服务应被移除。
+
+## 清理资源
+要删除所有已创建的资源,请执行以下命令删除Elasticsearch集群及其所在的命名空间:
+```bash
+kubectl delete cluster es-multinode -n demo
+kubectl delete ns demo
+```
+
+## 概述
+本指南演示了如何通过KubeBlocks实现以下操作:
+- 对外或对内暴露Elasticsearch服务
+- 使用云服务商特定注解配置LoadBalancer类型的服务
+- 通过OpsRequest或直接更新Cluster API来管理外部访问,实现服务的启用或禁用
+
+KubeBlocks为Kubernetes环境中的Elasticsearch服务管理提供了灵活且简化的解决方案。
+
diff --git 
a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..ba58c4a3 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,135 @@ +--- +description: 了解如何对由KubeBlocks管理的Elasticsearch集群中特定Pod执行下线(停用)操作。 +keywords: +- KubeBlocks +- Elasticsearch +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 停用 Elasticsearch 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的Elasticsearch集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 下线 KubeBlocks 管理的 Elasticsearch 集群中的特定 Pod + +本文档介绍如何在 KubeBlocks 管理的 Elasticsearch 集群中下线(停用)特定 Pod。通过精确控制 Pod 下线,可以在保持服务可用性的同时实现资源管理,适用于工作负载重平衡、节点维护或故障处理等场景。 + +## 为什么选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法指定下线特定 Pod。StatefulSet 会严格保证 Pod 的顺序和身份标识,缩容操作总是移除序号最大的 Pod(例如从 3 个副本缩容时,会优先移除 `Pod-2`)。这种限制使得无法精确控制需要下线的 Pod,给维护工作、负载分配和故障处理带来不便。 + +KubeBlocks 突破了这一限制,允许管理员直接指定需要下线的 Pod。这种细粒度控制既能保障高可用性,又能实现更优的资源管理,且不会影响整个集群运行。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Elasticsearch 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 下线指定 Pod + +**预期工作流程**: +1. `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转为 `Running` + +要下线特定 Pod(例如 'es-multinode-dit-1'),可通过以下两种方式实现: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 标记需要下线的 Pod: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: es-multinode-decommission-ops + namespace: demo + spec: + clusterName: es-multinode + type: HorizontalScaling + horizontalScaling: + - componentName: dit + scaleIn: + onlineInstancesToOffline: + - 'es-multinode-dit-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 查看下线操作执行状态: + + ```bash + kubectl get ops es-multinode-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + es-multinode-decommission-ops HorizontalScaling es-multinode Running 0/1 8s + es-multinode-decommission-ops HorizontalScaling es-multinode Running 1/1 31s + es-multinode-decommission-ops HorizontalScaling es-multinode Succeed 1/1 31s + ``` + + + + + + 方法二:使用 Cluster API + + 也可以直接修改 Cluster 资源来下线 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: dit + replicas: 2 # 下线后期望的副本数 + offlineInstances: + - es-multinode-dit-1 # <----- 指定需要下线的 Pod + ... 
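+        # 补充说明:此处的实例名必须是该组件当前实际存在的 Pod 名称,
+        # 且 replicas 应等于下线后期望保留的副本数(本例由 3 减为 2)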
+ ``` + + + + +### 验证下线结果 + +应用配置更新后,检查集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=es-multinode,apps.kubeblocks.io/component-name=dit +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +es-multinode-dit-0 2/2 Running 0 24m +es-multinode-dit-2 2/2 Running 0 2m1s +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向下线特定 Pod +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保障服务可用性的同时,提供了更精细的集群管理能力。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..cbd3373e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,199 @@ +--- +description: 了解如何通过Prometheus Operator为KubeBlocks中的Elasticsearch集群设置可观测性。配置监控并通过Grafana可视化指标。 +keywords: +- KubeBlocks +- Elasticsearch +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: Elasticsearch 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 Elasticsearch 集群可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 Elasticsearch + +本指南演示如何在 KubeBlocks 中为 Elasticsearch 集群配置全面的监控方案,包含以下组件: + +1. Prometheus Operator 用于指标采集 +2. 内置 Elasticsearch Exporter 用于指标暴露 +3. Grafana 用于可视化展示 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 安装监控套件 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 Elasticsearch 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 配置指标采集 + +### 1. 验证 Exporter 端点 + +```bash +kubectl -n demo exec -it pods/es-multinode-dit-0 -- \ + curl -s http://127.0.0.1:9114/metrics | head -n 50 + +kubectl -n demo exec -it pods/es-multinode-master-0 -- \ + curl -s http://127.0.0.1:9114/metrics | head -n 50 +``` + +### 2. 
创建 PodMonitor
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: es-multinode-pod-monitor
+  namespace: demo
+  labels:               # 需与 `prometheus.spec.podMonitorSelector` 中的标签匹配
+    release: prometheus
+spec:
+  jobLabel: app.kubernetes.io/managed-by
+  podMetricsEndpoints:
+    - path: /metrics
+      port: metrics
+      scheme: http
+  namespaceSelector:
+    matchNames:
+      - demo
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: es-multinode
+```
+**PodMonitor 配置指南**
+
+| 参数 | 必填 | 说明 |
+|-----------|----------|-------------|
+| `port` | 是 | 必须与 exporter 端口名称('metrics')匹配 |
+| `namespaceSelector` | 是 | 目标 Elasticsearch 所在的命名空间 |
+| `labels` | 是 | 必须与 Prometheus 的 podMonitorSelector 匹配 |
+| `path` | 否 | 指标端点路径(默认: /metrics) |
+| `interval` | 否 | 采集间隔(默认: 30s) |
+
+
+## 验证监控配置
+
+### 1. 检查 Prometheus 目标
+端口转发并访问 Prometheus UI:
+
+```bash
+kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090
+```
+浏览器访问:
+http://localhost:9090/targets
+
+检查是否存在与 PodMonitor 对应的采集任务(任务名应为'demo/es-multinode-pod-monitor')。
+
+预期状态:
+- 目标状态应为 UP
+- 目标上应能看到所采集 Pod 的标识标签(如'namespace'、'pod')
+
+### 2. 测试指标采集
+验证指标是否被正确采集:
+```bash
+curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=elasticsearch_clusterinfo_up{job="kubeblocks"}' | jq
+```
+
+示例输出:
+```json
+{
+  "status": "success",
+  "data": {
+    "resultType": "vector",
+    "result": [
+      {
+        "metric": {
+          "__name__": "elasticsearch_clusterinfo_up",
+          "container": "exporter",
+          "endpoint": "metrics",
+          "instance": "10.244.0.49:9114",
+          "job": "kubeblocks",
+          "namespace": "demo",
+          "pod": "es-multinode-master-2",
+          "url": "http://localhost:9200"
+        },
+        "value": [
+          1747666760.443,
+          "1"
+        ]
+      },
+... // 更多省略行
+```
+## Grafana 可视化
+
+### 1. 访问 Grafana
+端口转发并登录:
+
+```bash
+kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80
+```
+浏览器访问 `http://localhost:3000`,使用默认凭证登录:
+- 用户名: 'admin'
+- 密码: 'prom-operator' (默认值)
+
+### 2. 导入仪表板
+导入 KubeBlocks Elasticsearch 仪表板:
+
+1. 在 Grafana 中导航至 "+" → "Import"
+2. 从 [Elasticsearch Dashboard](https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/addons/elasticsearch/dashboards/elasticsearch.json) 导入仪表板
+
+![elasticsearch-monitoring-grafana-dashboard.png](/img/docs/en/elasticsearch-monitoring-grafana-dashboard.png)
+图 1. 
Elasticsearch 监控仪表板 + + +## 清理资源 +执行以下命令删除所有创建的资源: +```bash +kubectl delete cluster es-multinode -n demo +kubectl delete ns demo +kubectl delete podmonitor es-multinode-pod-monitor -n demo +``` + +## 总结 +本教程演示了如何在 KubeBlocks 中使用 Prometheus Operator 为 Elasticsearch 集群建立可观测性方案。通过配置 `PodMonitor`,我们实现了 Prometheus 从 Elasticsearch exporter 采集指标的功能,最终在 Grafana 中实现了指标可视化。这套方案为监控 Elasticsearch 数据库的健康状态和性能表现提供了有力支持。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml new file mode 100644 index 00000000..02550e32 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 监控 +position: 8 diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/_category_.yml b/docs/zh/preview/kubeblocks-for-elasticsearch/_category_.yml new file mode 100644 index 00000000..5df0b8e4 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for Elasticsearch 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_category_.yml new file mode 100644 index 00000000..cd891c2b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 (tpl) +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..cb766d69 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_create-cluster.mdx @@ -0,0 +1,65 @@ +KubeBlocks 采用声明式方式管理 Elasticsearch 集群。 +以下是一个部署多角色副本 Elasticsearch 集群的配置示例: + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: es-multinode + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: dit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # 使用 `roles` 键指定该组件承担的角色 + roles: data,ingest,transform + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + configs: + - name: es-cm + variables: + # 使用 `roles` 键指定该组件承担的角色 + roles: master + replicas: 3 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..3d50a8f2 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +在继续操作之前,请确保满足以下条件: +- 环境准备: + - Kubernetes 集群已启动并正常运行。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装指南进行操作。 +- 
命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..96b1e2a3 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-elasticsearch/_tpl/_verify-cluster.mdx @@ -0,0 +1,36 @@ +监控集群状态直至其转为运行中(Running)状态: +```bash +kubectl get cluster es-multinode -n demo -w +``` + +预期输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +es-multinode Delete Creating 10s +es-multinode Delete Updating 41s +es-multinode Delete Running 42s +``` + +检查Pod状态及其角色: +```bash +kubectl get pods -l app.kubernetes.io/instance=es-multinode -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +es-multinode-dit-0 3/3 Running 0 6m21s +es-multinode-dit-1 3/3 Running 0 6m21s +es-multinode-dit-2 3/3 Running 0 6m21s +es-multinode-master-0 3/3 Running 0 6m21s +es-multinode-master-1 3/3 Running 0 6m21s +es-multinode-master-2 3/3 Running 0 6m21s +``` + +当集群状态显示为Running时,您的Elasticsearch集群即可投入使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/01-overview.mdx b/docs/zh/preview/kubeblocks-for-kafka/01-overview.mdx new file mode 100644 index 00000000..c780d8eb --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/01-overview.mdx @@ -0,0 +1,49 @@ +--- +description: 了解KubeBlocks Kafka插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本信息。 +keywords: +- Kafka +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks Kafka 插件概述 +--- +# KubeBlocks Kafka 插件概述 + +Apache Kafka 是一个分布式流处理平台,专为构建实时数据管道而设计,可用作消息代理或替代大数据应用中的日志聚合解决方案。 + +- **Broker** 是 Kafka 服务器,负责存储数据并处理生产者和消费者的请求。Kafka 集群由多个 broker 组成,每个 broker 通过唯一 ID 标识。Broker 协同工作以实现数据在集群中的分发和复制。 +- **KRaft** 于 2022 年 10 月在 Kafka 3.3.1 版本中引入,作为 Zookeeper 的替代方案。部分 broker 被指定为控制器(controller),这些控制器提供原先由 Zookeeper 实现的共识服务。 + +## 核心特性 + +### 生命周期管理 + +KubeBlocks 通过全面的生命周期管理简化 Kafka 运维: + +| 特性 | 描述 | +|------------------------------|-----------------------------------------------------------------------------| +| **水平扩展** | 增减副本数量以调整容量 | +| **垂直扩展** | 调整 Kafka 实例的 CPU/内存资源 | +| **存储卷扩容** | 动态增加存储容量且无需停机 | +| **重启操作** | 以最小影响实现受控的集群重启 | +| **启动/停止** | 临时暂停/恢复集群运行 | +| **自定义服务** | 暴露专用的数据库访问端点 | +| **副本管理** | 安全地移除或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 部署位置和资源分配 | +| **监控** | 集成 Prometheus 指标采集 | +| **日志** | 通过 Loki Stack 实现集中式日志管理 | + +### 支持版本 + +KubeBlocks Kafka 插件支持以下 Kafka 版本: + +| 主版本 | 支持的次版本 | +|---------------|--------------------------------| +| 3.x | 3.3.2 | +| 2.x | 2.7.0 | \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-kafka/02-quickstart.mdx new file mode 100644 index 00000000..10d526d0 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/02-quickstart.mdx @@ -0,0 +1,470 @@ +--- +description: 使用KubeBlocks部署和管理Kafka副本集集群的完整指南,涵盖安装、配置及运维最佳实践。 +keywords: +- Kubernetes +- Kafka +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: Kafka 快速入门 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Kafka 快速入门 + +本指南提供了使用 **KubeBlocks Kafka 插件** 部署和管理 Kafka 副本集集群的完整流程,内容包括: +- 系统前提条件与插件安装 +- 集群创建与配置 +- 操作管理(包括启动/停止流程) +- 连接方法与集群监控 + +## 前提条件 + +### 系统要求 + +开始前请确保您的环境满足以下要求: + +- 可正常运行的 
Kubernetes 集群(推荐 v1.21+ 版本) +- 已安装并配置好集群访问权限的 `kubectl` v1.21+ 工具 +- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/)) +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +### 验证 Kafka 插件 + +Kafka 插件默认包含在 KubeBlocks 中。检查其状态: + +```bash +helm list -n kb-system | grep kafka +``` + +
+示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-kafka kb-system 1 2025-05-21 deployed kafka-1.0.0 +``` +
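+
+作为可选的进一步校验,也可以确认插件注册的 ClusterDefinition 已存在(下文创建集群时的 `clusterDef: kafka` 即引用它):
+
+```bash
+# 插件启用后,应能查询到名为 kafka 的 ClusterDefinition
+kubectl get clusterdefinition kafka
+```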
+
+
+如果插件未启用,请选择以下安装方式:
+
+
+
+
+  ```bash
+  # 添加 Helm 仓库
+  helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts
+  # 中国大陆用户若 GitHub 访问困难或缓慢,可使用以下备用仓库:
+  #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable
+
+  # 更新 Helm 仓库
+  helm repo update
+  # 搜索可用插件版本
+  helm search repo kubeblocks-addons/kafka --versions
+  # 安装指定版本(将 替换为您选择的版本号)
+  helm upgrade -i kb-addon-kafka kubeblocks-addons/kafka --version -n kb-system
+  ```
+
+
+
+
+  ```bash
+  # 添加索引(kubeblocks 默认已添加)
+  kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git
+  # 更新索引
+  kbcli addon index update kubeblocks
+  # 更新所有索引
+  kbcli addon index update --all
+  ```
+
+  搜索并安装插件:
+
+  ```bash
+  # 搜索插件
+  kbcli addon search kafka
+  # 安装指定版本插件(将 替换为您选择的版本号)
+  kbcli addon install kafka --version
+  ```
+  **示例输出:**
+  ```bash
+  ADDON   VERSION   INDEX
+  kafka   0.9.0     kubeblocks
+  kafka   0.9.1     kubeblocks
+  kafka   1.0.0     kubeblocks
+  ```
+  启用或禁用插件:
+
+  ```bash
+  # 启用插件
+  kbcli addon enable kafka
+  # 禁用插件
+  kbcli addon disable kafka
+  ```
+
+
+
+:::note
+**版本兼容性**
+
+请始终确保 Kafka 插件版本与您的 KubeBlocks 主版本相匹配,以避免兼容性问题。
+
+:::
+
+## 部署 Kafka 集群
+
+使用默认配置部署基础 Kafka 集群:
+
+```bash
+kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/kafka/cluster-separated.yaml
+```
+
+该操作将创建:
+- 一个包含 3 个组件的 Kafka 集群:1 个副本的 Kafka 控制器、1 个副本的 Kafka 代理和 1 个副本的 Kafka 导出器
+- 默认资源分配(0.5 CPU,0.5Gi 内存)
+- 20Gi 持久化存储
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: kafka-separated-cluster
+  namespace: demo
+spec:
+  # 指定删除集群时的行为策略
+  # 有效选项:[DoNotTerminate, Delete, WipeOut](KB 0.9 起弃用 `Halt`)
+  # - `DoNotTerminate`:阻止删除集群,确保所有资源保持完整
+  # - `Delete`:在 `Halt` 策略基础上同时移除 PVC,实现包含持久化数据的彻底清理
+  # - `WipeOut`:激进策略,删除包括外部存储中的卷快照和备份在内的所有集群资源,将导致数据完全删除,应谨慎使用(主要用于非生产环境以避免不可逆数据丢失)
+  terminationPolicy: Delete
+  # 指定创建集群时使用的 ClusterDefinition 名称
+  # 注意:请勿修改此字段
+  # 值必须为 `kafka` 才能创建 Kafka 集群
+  clusterDef: kafka
+  # 指定创建集群时使用的 ClusterTopology 类型
+  # - combined:Kafka 控制器(KRaft)与代理合并为单一组件
+  # - combined_monitor:合并模式并包含监控组件
+  # - separated:KRaft 与代理分离为独立组件
+  # - separated_monitor:分离模式并包含监控组件
+  # 有效选项:[combined,combined_monitor,separated,separated_monitor]
+  topology: separated_monitor
+  # 定义组成集群的各个组件的详细配置列表
+  componentSpecs:
+    - name: kafka-broker
+      replicas: 1
+      resources:
+        limits:
+          cpu: "0.5"
+          memory: "0.5Gi"
+        requests:
+          cpu: "0.5"
+          memory: "0.5Gi"
+      env:
+        - name: KB_KAFKA_BROKER_HEAP # 用于设置代理堆内存的 ENV
+          value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64"
+        - name: KB_KAFKA_CONTROLLER_HEAP # 用于设置控制器堆内存的 ENV
+          value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64"
+        # 是否启用直接 Pod IP 访问模式
+        # - 设为 'true' 时,Kafka 客户端将直接通过 Pod IP 连接代理
+        # - 设为 'false' 时,客户端将通过 Headless Service 的 FQDN 连接代理
+        - name: KB_BROKER_DIRECT_POD_ACCESS
+          value: "true"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            storageClassName: ""
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+        - name: metadata
+          spec:
+            storageClassName: ""
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 1Gi
+    - name: kafka-controller
+      replicas: 1
+      resources:
+        limits:
+          cpu: "0.5"
+          memory: "0.5Gi"
+        requests:
+          cpu: "0.5"
+          memory: "0.5Gi"
+      volumeClaimTemplates:
+        - name: metadata
+          spec:
+            storageClassName: ""
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 1Gi
+    - name: kafka-exporter
+      replicas: 1
+      resources:
+        limits:
+          cpu: "0.5"
+          memory: "1Gi"
+        requests:
+          cpu: "0.1"
+          memory: "0.2Gi"
+```
+
+更多 API 字段说明,请参阅 [API 
参考文档](../user_docs/references/api-reference/cluster)。 + +## 验证集群状态 + +当部署一个包含3个副本的Kafka集群时,可通过以下方式确认部署成功: + +1. 集群状态为`Running` +2. 所有Pod均正常运行 + +可通过以下任一方法检查状态: + + + +```bash +kubectl get cluster kafka-separated-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +kafka-separated-cluster kafka Delete Running 2m48s + +kubectl get pods -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 2m33s +kafka-separated-cluster-kafka-controller-0 2/2 Running 0 2m58s +kafka-separated-cluster-kafka-exporter-0 1/1 Running 0 2m9s +``` + + + + +若已安装`kbcli`,可查看完整的集群信息: + +```bash +kbcli cluster describe kafka-separated-cluster -n demo + +名称: kafka-separated-cluster 创建时间: 2025年5月19日 16:56 UTC+0800 +命名空间 集群定义 拓扑结构 状态 终止策略 +demo kafka separated_monitor Running Delete + +访问端点: +组件 内部地址 外部地址 +kafka-broker kafka-separated-cluster-kafka-broker-advertised-listener-0.demo.svc.cluster.local:9092 <无> + +拓扑结构: +组件 服务版本 实例名称 角色 状态 可用区 节点 创建时间 +kafka-broker 3.3.2 kafka-separated-cluster-kafka-broker-0 <无> Running zone-x x.y.z 2025年5月19日 16:57 UTC+0800 +kafka-controller 3.3.2 kafka-separated-cluster-kafka-controller-0 <无> Running zone-x x.y.z 2025年5月19日 16:56 UTC+0800 +kafka-exporter 1.6.0 kafka-separated-cluster-kafka-exporter-0 <无> Running zone-x x.y.z 2025年5月19日 16:57 UTC+0800 + +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +kafka-controller 500m / 500m 512Mi / 512Mi 元数据:1Gi <无> +kafka-broker 500m / 500m 512Mi / 512Mi 数据:20Gi + 元数据:1Gi +kafka-exporter 100m / 500m 200GB / 1Gi <无> <无> + +镜像信息: +组件 组件定义 镜像 +kafka-controller kafka-controller-1.0.0 docker.io/bitnami/kafka:3.3.2-debian-11-r54 + docker.io/bitnami/jmx-exporter:0.18.0-debian-11-r20 +kafka-broker kafka-broker-1.0.0 docker.io/bitnami/kafka:3.3.2-debian-11-r54 + docker.io/bitnami/jmx-exporter:0.18.0-debian-11-r20 +kafka-exporter kafka-exporter-1.0.0 docker.io/bitnami/kafka-exporter:1.6.0-debian-11-r67 + +查看集群事件: kbcli cluster list-events -n demo kafka-separated-cluster +``` + + + + +## 访问 Kafka 集群 + +**步骤 1. 获取 Kafka 服务的地址** +```bash +kubectl get svc -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +预期输出: +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kafka-separated-cluster-kafka-broker-advertised-listener-0 ClusterIP 10.96.131.175 9092/TCP 5m8s +``` +服务名称为 `kafka-separated-cluster-kafka-broker-advertised-listener-0`,位于命名空间 `demo` 中。 + +**步骤 2. 通过端口号连接 Kafka 集群** + +1. 启动客户端 Pod。 + + ```bash + kubectl run kafka-producer --restart='Never' --image docker.io/bitnami/kafka:3.3.2-debian-11-r54 --command -- sleep infinity + kubectl run kafka-consumer --restart='Never' --image docker.io/bitnami/kafka:3.3.2-debian-11-r54 --command -- sleep infinity + ``` + +2. 登录到 kafka-producer。 + + ```bash + kubectl exec -ti kafka-producer -- bash + ``` + +3. 创建主题。 + + ```bash + kafka-topics.sh --create --topic quickstart-events --bootstrap-server kafka-separated-cluster-kafka-broker-advertised-listener-0.demo:9092 + ``` + +4. 创建生产者。 + + ```bash + kafka-console-producer.sh --topic quickstart-events --bootstrap-server kafka-separated-cluster-kafka-broker-advertised-listener-0.demo:9092 + ``` + +5. 输入:"Hello, KubeBlocks" 并按回车键。 + +6. 开启新的终端会话并登录到 kafka-consumer。 + + ```bash + kubectl exec -ti kafka-consumer -- bash + ``` + +7. 
创建消费者并指定消费主题,从起始位置消费消息。 + + ```bash + kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server kafka-separated-cluster-kafka-broker-advertised-listener-0.demo:9092 + ``` + + 此时您将看到输出 'Hello, KubeBlocks'。 + +## 停止 Kafka 集群 + +停止集群会暂时暂停运行,同时保留所有数据和配置: + +**关键影响:** +- 计算资源(Pod)会被释放 +- 持久化存储(PVC)保持完整 +- 服务定义得以保留 +- 集群配置不会丢失 +- 运行成本降低 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/kafka/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-stop + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: Stop + ``` + + + + 也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/2/stop", + "value": true + } + ]' + ``` + + + +## 启动 Kafka 集群 + +重启已停止的集群将恢复运行,所有数据和配置保持不变。 + +**关键影响:** +- 计算资源(Pod)会被重新创建 +- 服务将再次可用 +- 集群恢复到之前的状态 + + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-start + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: Start + ``` + + + + 通过将 `spec.componentSpecs.stop` 设置为 false 来重启集群: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/2/stop" + } + ]' + ``` + + + +## 删除 Kafka 集群 + +请根据数据保留需求谨慎选择删除策略: + +| 策略 | 删除的资源 | 数据清除情况 | 适用场景 | +|-----------------|---------------------|--------------------|------------------------| +| DoNotTerminate | 无 | 保留所有数据 | 关键生产环境集群 | +| Delete | 所有Kubernetes资源 | 删除PVC存储卷 | 非关键环境 | +| WipeOut | 所有资源 | 彻底清除所有数据* | 仅限测试环境 | + +*包含外部存储中的快照和备份数据 + +**删除前检查清单:** +1. 确认没有应用正在使用该集群 +2. 确保已存在必要的备份 +3. 验证terminationPolicy设置正确 +4. 
检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster kafka-separated-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster kafka-separated-cluster -n demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..ced49b54 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,322 @@ +--- +description: 了解如何在KubeBlocks中管理Kafka集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- Kafka +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: Kafka 集群生命周期管理(停止、启动、重启) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Kafka 集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 Kafka 集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运营成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|--------------------------| +| 停止 | 暂停集群,保留存储 | 节省成本、维护期间 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重新创建组件 Pod | 配置变更、故障排查 | + + + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 Kafka 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 Kafka 集群将执行以下操作: + +1. 终止所有运行中的 Pod +2. 保留持久化存储(PVC) +3. 保持集群配置不变 + +此操作适用于以下场景: +- 临时节省成本 +- 维护窗口期 +- 开发环境暂停 + + + + + + 选项1:使用 OpsRequest API + + 创建停止操作请求: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-stop-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: Stop + ``` + + + + + 选项2:使用 使用 Cluster API Patch + + 通过修改 stop 字段直接调整集群配置: + + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/2/stop", + "value": true + } + ]' + ``` + + + + + +### 验证集群停止 + +确认停止操作成功执行: + +1. 检查集群状态转换: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Stopping 16m3s + kafka-separated-cluster kafka Delete Stopped 16m55s + ``` + +2. 验证无运行中的 Pod: + ```bash + kubectl get pods -l app.kubernetes.io/instance=kafka-separated-cluster -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +3. 确认持久卷仍然存在: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=kafka-separated-cluster -n demo + ``` + 示例输出: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-kafka-separated-cluster-kafka-broker-0 Bound pvc-ddd54e0f-414a-49ed-8e17-41e9f5082af1 20Gi RWO standard 14m + metadata-kafka-separated-cluster-kafka-broker-0 Bound pvc-d63b7d80-cac5-41b9-b694-6a298921003b 1Gi RWO standard 14m + metadata-kafka-separated-cluster-kafka-controller-0 Bound pvc-e6263eb1-405a-4090-b2bb-f92cca0ba36d 1Gi RWO standard 14m + ``` + +### 启动集群 + +启动已停止的 Kafka 集群将: +1. 重新创建所有 Pod +2. 重新挂载持久化存储 +3. 
恢复服务端点
+
+预期行为:
+- 集群恢复到之前状态
+- 不会发生数据丢失
+- 服务自动恢复
+
+
+
+
+  发起启动操作请求:
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: kafka-separated-cluster-start-ops
+    namespace: demo
+  spec:
+    # 指定此操作目标集群资源的名称
+    clusterName: kafka-separated-cluster
+    type: Start
+  ```
+
+
+
+
+
+  修改集群配置以恢复运行:
+  1. 设置 stop: false,或
+  2. 完全移除 stop 字段
+
+  ```bash
+  kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[
+  {
+    "op": "remove",
+    "path": "/spec/componentSpecs/0/stop"
+  },
+  {
+    "op": "remove",
+    "path": "/spec/componentSpecs/1/stop"
+  },
+  {
+    "op": "remove",
+    "path": "/spec/componentSpecs/2/stop"
+  }
+  ]'
+  ```
+
+
+
+
+
+### 验证集群启动
+
+确认启动操作成功执行:
+
+1. 检查集群状态转换:
+   ```bash
+   kubectl get cluster kafka-separated-cluster -n demo -w
+   ```
+   示例输出:
+   ```bash
+   NAME                      CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS     AGE
+   kafka-separated-cluster   kafka                Delete               Updating   24m
+   kafka-separated-cluster   kafka                Delete               Running    24m
+   kafka-separated-cluster   kafka                Delete               Running    24m
+   ```
+
+2. 验证 Pod 重建情况:
+   ```bash
+   kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster
+   ```
+   示例输出:
+   ```bash
+   NAME                                         READY   STATUS    RESTARTS   AGE
+   kafka-separated-cluster-kafka-broker-0       2/2     Running   0          2m4s
+   kafka-separated-cluster-kafka-controller-0   2/2     Running   0          104s
+   kafka-separated-cluster-kafka-exporter-0     1/1     Running   0          84s
+   ```
+
+### 重启集群
+
+重启操作提供以下特性:
+- 无需完全停止集群即可重建 Pod
+- 组件级细粒度控制
+- 最小化服务中断
+
+适用场景:
+- 需要重启的配置变更
+- 资源刷新
+- 故障排查
+
+**检查组件**
+
+该 Kafka 集群包含三个组件。获取组件列表:
+```bash
+kubectl get cluster -n demo kafka-separated-cluster -oyaml | yq '.spec.componentSpecs[].name'
+```
+
+预期输出:
+```text
+kafka-controller
+kafka-broker
+kafka-exporter
+```
+
+**通过 OpsRequest API 重启代理**
+
+列出需要重启的特定组件:
+
+```yaml
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: kafka-separated-cluster-restart-ops
+  namespace: demo
+spec:
+  clusterName: kafka-separated-cluster
+  type: Restart
+  restart:
+  - componentName: kafka-broker
+```
+
+**验证重启完成**
+
+确认组件重启成功:
+
+1. 跟踪 OpsRequest 进度:
+   ```bash
+   kubectl get opsrequest kafka-separated-cluster-restart-ops -n demo -w
+   ```
+   示例输出:
+   ```bash
+   NAME                                  TYPE      CLUSTER                   STATUS    PROGRESS   AGE
+   kafka-separated-cluster-restart-ops   Restart   kafka-separated-cluster   Running   0/1        8s
+   kafka-separated-cluster-restart-ops   Restart   kafka-separated-cluster   Running   1/1        22s
+   kafka-separated-cluster-restart-ops   Restart   kafka-separated-cluster   Running   1/1        23s
+   kafka-separated-cluster-restart-ops   Restart   kafka-separated-cluster   Succeed   1/1        23s
+   ```
+
+2. 检查 Pod 状态:
+   ```bash
+   kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster
+   ```
+   注意:重启后 Pod 将显示新的创建时间戳。只有属于组件 `kafka-broker` 的 Pod 会被重启。
+
+操作完成后,集群将恢复至 Running 状态。
+
+## 概述
+在本指南中,您学习了如何:
+1. **停止 Kafka 集群**:暂停集群运行同时保留持久化存储
+2. **启动已停止的集群**:将集群重新恢复在线状态
+3. 
**重启特定集群组件**:在不停止整个集群的情况下重建目标组件的 Pod + +通过管理 Kafka 集群的生命周期,您可以优化资源利用率、降低成本,并在 Kubernetes 环境中保持运维灵活性。KubeBlocks 提供了无缝执行这些操作的能力,在确保高可用性的同时将业务中断降至最低。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..5bb8f427 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,178 @@ +--- +description: 了解如何在KubeBlocks管理的Kafka集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- Kafka +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: Kafka 集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks对Kafka集群进行垂直扩缩容 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的Kafka集群进行垂直扩缩容,同时保持副本数量不变。 + +垂直扩缩容会修改Kafka实例的计算资源(CPU和内存)但保持副本数不变。主要特点: + +- **无中断性**:正确配置时,可在扩缩容期间保持可用性 +- **精细化**:可独立调整CPU、内存或两者 +- **可逆性**:根据需要可随时扩容或缩容 + +KubeBlocks通过遵循受控的、角色感知的更新策略确保扩缩操作影响最小化: + +**角色感知副本(主/从副本)** +- 从副本优先更新 - 先升级非领导者Pod以最小化影响 +- 主副本最后更新 - 仅当所有从副本健康后才重启主Pod +- 集群状态在所有副本稳定后从"更新中"转为"运行中" + +**无角色副本(基于序号的扩缩容)** +若副本未定义角色,则按Kubernetes Pod序号顺序更新: +- 从最高序号开始(如pod-2 → pod-1 → pod-0)以确保确定性滚动更新 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Kafka集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 垂直扩缩容 + +**预期工作流程**: + +1. Pod按序号从高到低顺序更新(如pod-2 → pod-1 → pod-0) +1. 集群状态从"更新中"转为"运行中" + + + + 选项1:使用VerticalScaling OpsRequest + + 应用以下YAML为kafka-broker组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-vscale-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: VerticalScaling + verticalScaling: + - componentName: kafka-broker + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + 可通过以下命令查看扩缩容进度: + + ```bash + kubectl -n demo get ops kafka-separated-cluster-vscale-ops -w + ``` + + 预期结果: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Running 0/1 12s + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Running 1/1 13s + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Running 1/1 13s + kafka-separated-cluster-vscale-ops VerticalScaling kafka-separated-cluster Succeed 1/1 13s + ``` + + + + + + 选项2:直接更新Cluster API + + 也可通过更新`spec.componentSpecs.resources`字段直接调整资源: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: kafka-broker + replicas: 1 + resources: + requests: + cpu: "1" # 按需调整资源 + memory: "1Gi" # 按需调整资源 + limits: + cpu: "1" # 按需调整资源 + memory: "1Gi" # 按需调整资源 + ... 
+ ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 选择维护窗口或低流量时段进行扩缩容 +- 确认Kubernetes集群有足够资源 +- 开始前检查是否有其他操作正在进行 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证QoS + +**扩缩后:** +- 监控资源利用率和应用性能 +- 根据需要调整Kafka参数 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe kafka-separated-cluster -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +kafka-broker 1 / 1 1Gi / 1Gi data:20Gi +``` + +## KubeBlocks垂直扩缩容的核心优势 +- 无缝扩缩容:按特定顺序重建Pod确保最小影响 +- 动态资源调整:根据工作负载需求灵活调整CPU和内存 +- 灵活性:可选择OpsRequest动态扩缩或直接API更新精确控制 +- 高可用性:扩缩过程中集群保持可操作状态 + +## 清理 +删除Kafka集群及其命名空间以释放所有资源: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +本指南中您学会了如何: +1. 部署由KubeBlocks管理的Kafka集群 +2. 通过增减kafka组件资源进行垂直扩缩容 +3. 使用OpsRequest和直接Cluster API两种方式调整资源分配 + +垂直扩缩容是优化资源利用率和适应工作负载变化的强大工具,可确保您的Kafka集群保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-kafka/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..3e7b4726 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,230 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的Kafka集群执行水平扩缩容(横向扩展与收缩)。 +keywords: +- KubeBlocks +- Kafka +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用 KubeBlocks 实现 Kafka 集群的水平扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现Kafka集群水平扩缩容 + +本指南将介绍如何对KubeBlocks管理的Kafka集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用**OpsRequest**和直接修改**Cluster API**两种方式实现这一目标。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Kafka集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## 扩容(增加副本数) + +**预期工作流程**: + +1. 新Pod被创建,状态从`Pending`转为`Running` +2. 集群状态从`Updating`变为`Running` + + + + + + 选项1:使用水平扩容OpsRequest + + 为kafka组件增加1个副本实现扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-scale-out-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: kafka-broker + # 指定组件扩容的副本数变更 + scaleOut: + # 指定组件的副本数变更 + # 当前组件增加1个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops kafka-separated-cluster-scale-out-ops -n demo -w + ``` + + 预期输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-scale-out-ops HorizontalScaling kafka-separated-cluster Running 0/1 9s + kafka-separated-cluster-scale-out-ops HorizontalScaling kafka-separated-cluster Running 1/1 16s + kafka-separated-cluster-scale-out-ops HorizontalScaling kafka-separated-cluster Succeed 1/1 16s + ``` + + + + + 选项2:直接更新Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: kafka-broker + replicas: 2 # 增加副本数实现扩容 + ... 
+  ```
+
+  或者使用命令修补集群CR:
+
+  ```bash
+  kubectl patch cluster kafka-separated-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/1/replicas", "value": 2}]'
+  ```
+
+
+
+### 验证扩容结果
+
+操作完成后,您将看到新 Pod 被创建,Kafka 集群状态从`Updating`变为`Running`。
+
+```bash
+kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker
+```
+
+示例输出:
+```bash
+NAME                                     READY   STATUS    RESTARTS   AGE
+kafka-separated-cluster-kafka-broker-0   2/2     Running   0          3m7s
+kafka-separated-cluster-kafka-broker-1   2/2     Running   0          28s
+```
+
+## 缩容(减少副本数)
+
+**预期工作流程**:
+
+1. 选择序号最大的副本进行移除
+2. Pod被优雅终止
+3. 集群状态从`Updating`变为`Running`
+
+
+
+
+
+  选项1:使用水平缩容OpsRequest
+
+  为kafka组件减少1个副本实现缩容:
+
+  ```yaml
+  apiVersion: operations.kubeblocks.io/v1alpha1
+  kind: OpsRequest
+  metadata:
+    name: kafka-separated-cluster-scale-in-ops
+    namespace: demo
+  spec:
+    clusterName: kafka-separated-cluster
+    type: HorizontalScaling
+    horizontalScaling:
+    - componentName: kafka-broker
+      # 指定组件缩容的副本数变更
+      scaleIn:
+        # 指定组件的副本数变更
+        # 当前组件减少1个副本
+        replicaChanges: 1
+  ```
+
+  监控操作进度:
+  ```bash
+  kubectl get ops kafka-separated-cluster-scale-in-ops -n demo -w
+  ```
+
+  预期输出:
+  ```bash
+  NAME                                   TYPE                CLUSTER                   STATUS    PROGRESS   AGE
+  kafka-separated-cluster-scale-in-ops   HorizontalScaling   kafka-separated-cluster   Running   0/1        8s
+  kafka-separated-cluster-scale-in-ops   HorizontalScaling   kafka-separated-cluster   Running   1/1        24s
+  kafka-separated-cluster-scale-in-ops   HorizontalScaling   kafka-separated-cluster   Succeed   1/1        24s
+  ```
+
+
+
+
+  选项2:直接更新Cluster API
+
+  您也可以直接修改Cluster资源中的`replicas`字段:
+
+  ```yaml
+  apiVersion: apps.kubeblocks.io/v1
+  kind: Cluster
+  spec:
+    componentSpecs:
+      - name: kafka-broker
+        replicas: 1 # 减少副本数实现缩容
+  ```
+
+  或者使用命令修补集群CR:
+
+  ```bash
+  kubectl patch cluster kafka-separated-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/1/replicas", "value": 1}]'
+  ```
+
+
+
+
+### 验证缩容结果
+
+```bash
+kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker
+```
+
+示例输出(仅剩1个Pod):
+```bash
+NAME                                     READY   STATUS    RESTARTS   AGE
+kafka-separated-cluster-kafka-broker-0   2/2     Running   0          5m7s
+```
+
+## 最佳实践
+
+执行水平扩缩容时建议:
+- 尽可能选择业务低峰期进行操作
+- 扩缩容过程中监控集群健康状态
+- 扩容前确保有足够的资源配额
+- 考虑新增副本的存储需求
+
+## 清理资源
+删除Kafka集群及其命名空间以释放所有资源:
+```bash
+kubectl delete cluster kafka-separated-cluster -n demo
+kubectl delete ns demo
+```
+
+## 总结
+通过本指南您学会了如何:
+- 执行扩容操作为Kafka集群增加副本
+- 执行缩容操作为Kafka集群减少副本
+- 使用OpsRequest和直接修改Cluster API两种方式进行水平扩缩容
+
+KubeBlocks能确保在最小化影响数据库服务的前提下实现无缝扩缩容。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..f0f14899
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,259 @@
+---
+description: 了解如何在KubeBlocks管理的Kafka集群中无停机扩展持久卷声明(PVC)。
+keywords:
+- KubeBlocks
+- Kafka
+- Volume Expansion
+- Kubernetes
+- PVC
+sidebar_label: 存储卷扩容
+sidebar_position: 4
+title: Kafka 集群存储卷扩容
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# 扩展 Kafka 集群存储卷
+
+本指南介绍如何在 **KubeBlocks** 管理的 Kafka 集群中扩展持久卷声明(PVC)。存储卷扩展支持动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持此功能时,该操作可在不中断服务的情况下执行。
+
+存储卷扩展允许您在创建持久卷声明(PVC)后增加其容量。该功能在 Kubernetes v1.11 中引入,并在 Kubernetes v1.24 版本正式发布(GA)。
+
+## 前提条件
+
+import Prerequisites from '../_tpl/_prerequisites.mdx'
+
+
+
+### 
检查存储类是否支持卷扩展 + +列出所有可用存储类,通过检查 `ALLOWVOLUMEEXPANSION` 字段确认是否支持卷扩展: +```bash +kubectl get storageclass +``` + +示例输出: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +请确保您使用的存储类 `ALLOWVOLUMEEXPANSION` 设置为 true。若为 false,则表示该存储类不支持卷扩展。 + +## 使用 StorageClass 部署 Kafka 集群 + +KubeBlocks 采用声明式方式管理 Kafka 集群。以下是一个部署 3 副本 Kafka 集群的配置示例。 + +应用以下 YAML 配置部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: kafka-separated-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: kafka + topology: separated_monitor + componentSpecs: + - name: kafka-broker + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + env: + - name: KB_KAFKA_BROKER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_KAFKA_CONTROLLER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_BROKER_DIRECT_POD_ACCESS + value: "true" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-controller + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-exporter + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "1Gi" + requests: + cpu: "0.1" + memory: "0.2Gi" +``` + +**关键字段说明** +- `storageClassName`: 指定支持卷扩展的 `StorageClass` 名称。若未设置,将使用标记为 `default` 的 StorageClass。 + +:::note +**ALLOWVOLUMEEXPANSION** + +创建集群时请确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`)。 + +::: + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 扩展存储卷 + +:::note +1. 确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`) +2. 新容量必须大于当前容量 +3. 
根据存储提供商不同,卷扩展可能需要额外配置 +::: + +您可以通过以下两种方式扩展存储卷: + + + + + 方法一:使用 VolumeExpansion OpsRequest + + 应用以下 YAML 为 kafka 组件增加存储容量: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: kafka-broker + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 通过以下命令监控扩展进度: + + ```bash + kubectl describe ops kafka-separated-cluster-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 完成后,PVC 容量将更新。 + + :::note + 如果使用的存储类不支持卷扩展,此 OpsRequest 会快速失败并提示: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 方法二:直接更新 Cluster API + + 您也可以直接更新 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段至目标容量。 + + ```yaml + componentSpecs: + - name: kafka-broker + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量,确保大于当前容量 + storage: 30Gi + ``` + KubeBlocks 将根据新配置自动更新 PVC 容量。 + + + +## 验证 + +检查更新后的集群配置: +```bash +kbcli cluster describe kafka-separated-cluster -n demo +``` +预期输出: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +kafka-broker 500m / 500m 512Mi / 512Mi data:30Gi +``` +数据 PVC 的存储容量已更新至指定值(本例中为 30Gi)。 + +确认 PVC 扩容完成: +```bash +kubectl get pvc -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker -n demo +``` +预期输出: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS +data-kafka-separated-cluster-kafka-broker-0 Bound pvc-uuid 30Gi RWO +data-kafka-separated-cluster-kafka-broker-1 Bound pvc-uuid 30Gi RWO +``` + +## 清理 +删除所有创建的资源,包括 Kafka 集群及其命名空间: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +在本指南中您学习了如何: +1. 验证存储类对卷扩展的兼容性 +2. 通过以下方式执行卷扩展: + - 使用 OpsRequest 进行动态更新 + - 通过 Cluster API 进行手动更新 +3. 
验证更新后的 PVC 容量并确保扩容操作完成 + +通过存储卷扩展,您可以高效扩展 Kafka 集群的存储容量而无需中断服务,确保数据库能够随着应用需求增长而扩展。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..195cb745 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,322 @@ +--- +description: 了解如何在KubeBlocks中通过负载均衡器及其他服务类型配置和管理Kafka服务,实现内外部访问。 +keywords: +- KubeBlocks +- Kafka +- LoadBalancer +- External Service +- Expose +- Kubernetes +sidebar_label: 管理Kafka服务 +sidebar_position: 5 +title: 使用KubeBlocks声明式集群API创建和销毁Kafka服务 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用 KubeBlocks 声明式集群 API 管理 Kafka 服务 + +本指南提供了逐步操作说明,指导如何通过 KubeBlocks 管理 Kafka 服务的内外部暴露。您将学习到: +- 使用云服务商 LoadBalancer 服务配置外部访问 +- 管理内部服务 +- 在不需要时正确关闭外部暴露功能 + + + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 Kafka 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + + +## 查看网络服务 +列出为 Kafka 集群创建的服务: +```bash +kubectl get service -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +示例服务列表: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kafka-separated-cluster-kafka-broker-advertised-listener-0 ClusterIP 10.96.101.247 9092/TCP 19m +``` + +说明: +- 该命令会筛选出标签 `app.kubernetes.io/instance=kafka-separated-cluster` 且位于 `demo` 命名空间下的所有服务 +- 示例输出显示了一个 Kafka broker 的广告监听器服务,类型为 ClusterIP,内部端口为 9092 +- 服务名称遵循 `<集群名称>-<组件类型>-<功能描述>-<序号>` 的命名规范 + +## 暴露 Kafka 服务 + +外部服务地址允许公网访问 Kafka,而内部服务地址将访问限制在用户的 VPC 内。 + +### 服务类型对比 + +| 类型 | 使用场景 | 云服务成本 | 安全性 | +|---------------|--------------------|------------|-------------| +| ClusterIP | 内部服务通信 | 免费 | 最高 | +| NodePort | 开发测试环境 | 低 | 中等 | +| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 | + + + + + + 选项一:使用 OpsRequest + + 要通过 LoadBalancer 对外暴露 Kafka 服务,创建 OpsRequest 资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: kafka-separated-cluster + expose: + - componentName: kafka-broker + services: + - name: internet + # 决定服务暴露方式,默认为 'ClusterIP' + # 有效选项为 'ClusterIP'、'NodePort' 和 'LoadBalancer' + serviceType: LoadBalancer + ports: + - name: kafka-client + port: 9092 + targetPort: kafka-client + # 当 ServiceType 为 LoadBalancer 时包含云服务商相关参数 + # 以下是 AWS EKS 的示例配置 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + switch: Enable + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops kafka-separated-cluster-expose-enable-ops -n demo + ``` + + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-expose-enable-ops Expose kafka-separated-cluster Succeed 1/1 31s + ``` + + + + + + 选项二:使用 Cluster API + + 或者,在 Cluster 资源的 `spec.services` 部分添加 LoadBalancer 服务配置: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: kafka-separated-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: kafka + # 暴露外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + componentSelector: kafka-broker + name: 
kafka-internet + serviceName: kafka-internet + spec: # 定义 K8s 服务行为 + ipFamilyPolicy: PreferDualStack + ports: + - name: kafka-client + # 暴露端口 + port: 9092 # 15672 端口用于 kafka 管理控制台 + protocol: TCP + targetPort: kafka-client + type: LoadBalancer + componentSpecs: + ... + ``` + 上述 YAML 配置在 services 部分新增了一个外部服务。该 LoadBalancer 服务包含了 AWS 网络负载均衡器 (NLB) 的注解。 + + :::note + 云服务商注解说明 + + 使用 LoadBalancer 服务时,必须添加对应云服务商的特定注解。以下是常用云服务商的注解示例: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 设为 "false" 表示面向互联网的负载均衡器 + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 设为 "false" 表示面向互联网的负载均衡器 + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # 限制负载均衡器仅限内部 VPC 访问。未指定时默认为面向互联网。 + cloud.google.com/l4-rbs: "enabled" # 面向互联网负载均衡器的优化配置 + ``` + + - 阿里云 + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 设为 "intranet" 表示内部负载均衡器 + ``` + ::: + + + :::note + `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制负载均衡器是内部还是面向互联网。注意该注解在服务创建后无法动态修改。 + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP + ``` + 如果在服务创建后将此注解从 "false" 改为 "true",注解可能在服务对象中会更新,但负载均衡器仍会保留其公网 IP。 + + 正确修改该行为的步骤: + - 首先删除现有的负载均衡器服务 + - 使用更新后的注解重新创建服务(`service.beta.kubernetes.io/aws-load-balancer-internal`: "true") + - 等待新负载均衡器分配正确的内部或外部 IP + ::: + + + 使用以下命令等待集群状态变为 Running: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Running 18m + ``` + + + + +### 验证暴露的服务 +检查服务详情以确认 LoadBalancer 服务已创建: + +```bash +kubectl get service -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +示例输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kafka-separated-cluster-kafka-broker-advertised-listener-0 ClusterIP 10.96.101.247 9092/TCP 24m +kafka-separated-cluster-kafka-broker-internet LoadBalancer 10.96.180.189 9092:31243/TCP 59s +``` + +## 禁用外部访问 + + + + + + 方法一:使用 OpsRequest + + 要禁用外部访问,创建一个 OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + expose: + - componentName: kafka-broker + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops kafka-separated-cluster-expose-disable-ops -n demo + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-expose-disable-ops Expose kafka-separated-cluster Succeed 1/1 16s + ``` + + + + + + 方法二:使用 Cluster API + + 或者,从 Cluster 资源中移除 `spec.services` 字段: + ```bash + kubectl patch cluster kafka-separated-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + 监控集群状态直到变为 Running: + ```bash + kubectl get cluster kafka-separated-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + kafka-separated-cluster kafka Delete Running 26m + ``` + + + +### 验证服务移除 + +确保 'kafka-separated-cluster-kafka-internet' 服务已被移除: + +```bash +kubectl get service -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +预期结果:'kafka-separated-cluster-kafka-internet' 服务应被移除。 + +## 清理资源 +要删除所有已创建的资源,请执行以下命令删除Kafka集群及其命名空间: +```bash 
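# 注意:本示例集群的 terminationPolicy 为 Delete,删除集群时会一并移除 PVC;
# 如需保留数据,请先完成备份再执行以下命令。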
+kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +``` + +## 概述 +本指南演示了如何: +- 使用 KubeBlocks 对外部或内部暴露 Kafka 服务 +- 通过云服务商特定注解配置 LoadBalancer 类型的服务 +- 通过 OpsRequest 或直接更新 Cluster API 来启用/禁用服务,从而管理外部访问 + +KubeBlocks 为 Kubernetes 环境中的 Kafka 服务管理提供了灵活且简化的解决方案。 diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..3fdc33e9 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,161 @@ +--- +description: 了解如何对由KubeBlocks管理的Kafka集群中特定Pod执行下线(停用)操作。 +keywords: +- KubeBlocks +- Kafka +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 下线 Kafka 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的Kafka集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 下线 KubeBlocks 管理的 Kafka 集群中的特定 Pod + +本文档介绍如何在 KubeBlocks 管理的 Kafka 集群中下线(停用)特定 Pod。通过精确控制 Pod 下线,可以在保持服务可用性的同时实现资源管理,适用于工作负载重平衡、节点维护或故障处理等场景。 + +## 为什么选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法指定下线特定 Pod。StatefulSet 会严格保证 Pod 的顺序和身份标识,缩容操作总是优先移除序号最大的 Pod(例如从 3 个副本缩容时,会先移除 `Pod-2`)。这种限制使得无法精确控制下线目标,给维护工作、负载分配和故障处理带来不便。 + +KubeBlocks 突破了这一限制,允许管理员直接指定需要下线的 Pod。这种细粒度控制能力在保障高可用性的同时,实现了更优的资源管理。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Kafka 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 下线指定 Pod + +**预期工作流程**: +1. `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转变为 `Running` + +在下线组件中的特定 Pod 前,请确保该组件拥有多个副本。如果不符合条件,请先进行扩容操作。 + +例如,可以通过以下命令修改集群 CR,将 querynode 组件的副本数设置为 3: + +```bash +kubectl patch cluster kafka-separated-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/1/replicas", + "value": 3 + } +]' +``` + +等待所有 Pod 进入运行状态: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker +``` +预期输出: +``` +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 18m +kafka-separated-cluster-kafka-broker-1 2/2 Running 0 3m33m +kafka-separated-cluster-kafka-broker-2 2/2 Running 0 2m1s +``` + +如需下线特定 Pod(例如 'kafka-separated-cluster-kafka-broker-1'),可采用以下任一方法: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 标记需要下线的 Pod: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: kafka-separated-cluster-decommission-ops + namespace: demo + spec: + clusterName: kafka-separated-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: kafka-broker + scaleIn: + onlineInstancesToOffline: + - 'kafka-separated-cluster-kafka-broker-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 查看下线操作执行状态: + + ```bash + kubectl get ops kafka-separated-cluster-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + kafka-separated-cluster-decommission-ops HorizontalScaling kafka-separated-cluster Running 0/1 8s + kafka-separated-cluster-decommission-ops HorizontalScaling kafka-separated-cluster Running 1/1 31s + kafka-separated-cluster-decommission-ops HorizontalScaling kafka-separated-cluster Succeed 1/1 31s + ``` + + + + + + 方法二:使用 Cluster API + + 也可以直接更新 Cluster 资源来下线指定 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + 
componentSpecs: + - name: kafka-broker + replicas: 2 # 下线后的预期副本数 + offlineInstances: + - kafka-separated-cluster-kafka-broker-1 # <----- 指定需要下线的 Pod + ... + ``` + + + + +### 验证下线结果 + +应用更新配置后,检查集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster,apps.kubeblocks.io/component-name=kafka-broker +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 24m +kafka-separated-cluster-kafka-broker-2 2/2 Running 0 2m1s +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向 Pod 下线 +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保障服务可用性的同时,提供了更精细的集群管理能力。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-kafka/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..0b325f06 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,249 @@ +--- +description: 了解如何在KubeBlocks中通过Prometheus Operator为Kafka集群配置可观测性。设置监控并通过Grafana实现指标可视化。 +keywords: +- KubeBlocks +- Kafka +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: Kafka 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 Kafka 集群的可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 Kafka + +本指南演示如何在 KubeBlocks 中为 Kafka 集群配置全面的监控方案: + +1. 使用 Prometheus Operator 收集指标 +2. 通过内置 Kafka Exporter 暴露指标 +3. 使用 Grafana 进行可视化展示 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 安装监控组件栈 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 Kafka 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 配置指标收集 + +### 1. 获取 Exporter 详情 + +```bash +kubectl get po -n demo kafka-separated-cluster-kafka-broker-0 -oyaml | yq '.spec.containers[] | select(.name=="jmx-exporter") | .ports' +``` + +
+示例输出: + +```text +- containerPort: 5556 + name: metrics + protocol: TCP +``` + +
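如需在本地直接查看 JMX 指标,也可以先做端口转发再访问。下面是一个示意命令,Pod 名与端口取自上文输出:

```bash
kubectl port-forward -n demo pod/kafka-separated-cluster-kafka-broker-0 5556:5556
# 在另一个终端中访问指标端点:
curl -s http://127.0.0.1:5556/metrics | head -n 20
```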
同样地,查看 kafka-exporter 组件暴露的指标端口:

```bash
kubectl get po -n demo kafka-separated-cluster-kafka-exporter-0 -oyaml | yq '.spec.containers[] | select(.name=="kafka-exporter") | .ports'
```

+示例输出: + +```text +- containerPort: 9308 + name: metrics + protocol: TCP +``` +
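如果想一次性核对各 Pod 暴露的 metrics 端口,也可以用如下命令汇总(示意命令,沿用上文的实例标签,jsonpath 表达式为本文补充):

```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=kafka-separated-cluster \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].ports[?(@.name=="metrics")].containerPort}{"\n"}{end}'
```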
+ +### 2. 验证 Exporter 端点 + +检查 jmx-exporter: +```bash +kubectl -n demo exec -it pods/kafka-separated-cluster-kafka-broker-0 -- \ + curl -s http://127.0.0.1:5556/metrics | head -n 50 +``` + +检查 kafka-exporter: + +```bash +kubectl -n demo exec -it pods/kafka-separated-cluster-kafka-broker-0 -- \ + curl -s http://kafka-separated-cluster-kafka-exporter-0.kafka-separated-cluster-kafka-exporter-headless.demo.svc:9308/metrics | head -n 50 +``` + +### 2. 创建 PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: kafka-jmx-pod-monitor + namespace: demo + labels: # 需匹配 `prometheus.spec.podMonitorSelector` 中的标签 + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # 定义从关联的 Kubernetes Pod 对象传输到指标数据的标签 + # 根据实际需求设置标签 + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + namespaceSelector: + matchNames: + - demo + selector: + matchLabels: + app.kubernetes.io/instance: kafka-separated-cluster +``` +**PodMonitor 配置指南** + +| 参数 | 必填 | 说明 | +|-----------|----------|-------------| +| `port` | 是 | 必须匹配 exporter 端口名称 ('http-metrics') | +| `namespaceSelector` | 是 | 指定 Kafka 运行的命名空间 | +| `labels` | 是 | 必须匹配 Prometheus 的 podMonitorSelector | +| `path` | 否 | 指标端点路径 (默认: /metrics) | +| `interval` | 否 | 抓取间隔 (默认: 30s) | + + +## 验证监控配置 + +### 1. 检查 Prometheus 目标 +端口转发并访问 Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +浏览器访问: +http://localhost:9090/targets + +检查是否存在与 PodMonitor 对应的抓取任务(任务名应为 'demo/kafka-separated-cluster-pod-monitor')。 + +预期状态: +- 目标状态应为 UP +- 目标标签应包含 podTargetLabels 中定义的标签(如 'app_kubernetes_io_instance') + +### 2. 测试指标收集 +验证指标是否被正确抓取: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="kafka-separated-cluster"}' | jq +``` + +示例输出: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "kafka-separated-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "kafka-broker", + "apps_kubeblocks_io_pod_name": "kafka-separated-cluster-kafka-broker-2", + "container": "jmx-exporter", + "endpoint": "metrics", + "instance": "10.244.0.236:5556", + "job": "kubeblocks", + "namespace": "demo", + "pod": "kafka-separated-cluster-kafka-broker-2" + }, + "value": [ + 1747654851.995, + "1" + ] + }, +... // 更多省略行 +``` +## Grafana 可视化 + +### 1. 访问 Grafana +端口转发并登录: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +浏览器访问 `http://localhost:3000`,使用默认凭证登录: +- 用户名: 'admin' +- 密码: 'prom-operator' (默认) + +### 2. 导入仪表板 +导入 KubeBlocks Kafka 仪表板: + +1. 在 Grafana 中导航至 "+" → "Import" +2. 从 [Grafana Kafka Dashboard](https://github.com/apecloud/kubeblocks-addons/tree/main/addons/kafka/dashboards) 导入仪表板 + +![kafka-jmx-monitoring-grafana-dashboard.png](/img/docs/en/kafka-jmx-monitoring-grafana-dashboard.png) +图 1. Kakfa jmx 仪表板 + + +![kafka-monitoring-grafana-dashboard.png](/img/docs/en/kafka-monitoring-grafana-dashboard.png) +图 2. 
Kafka exporter 仪表板 + + +## 清理资源 +执行以下命令删除所有创建的资源: +```bash +kubectl delete cluster kafka-separated-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor kafka-separated-cluster-pod-monitor -n demo +``` + +## 总结 +本教程演示了如何在 KubeBlocks 中使用 Prometheus Operator 为 Kafka 集群建立可观测性方案。通过配置 `PodMonitor`,我们实现了 Prometheus 对 Kafka exporter 指标的自动抓取,并最终在 Grafana 中实现了指标可视化。这套方案为监控 Kafka 数据库的健康状态和性能表现提供了有力支持。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-kafka/08-monitoring/_category_.yml new file mode 100644 index 00000000..02550e32 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 监控 +position: 8 diff --git a/docs/zh/preview/kubeblocks-for-kafka/_category_.yml b/docs/zh/preview/kubeblocks-for-kafka/_category_.yml new file mode 100644 index 00000000..966aa772 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for Kafka 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-kafka/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_category_.yml new file mode 100644 index 00000000..cd891c2b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 (tpl) +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-kafka/_tpl/_create-cluster.mdx b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..e9b0cf0a --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_create-cluster.mdx @@ -0,0 +1,82 @@ +KubeBlocks 采用声明式方式管理 Kafka 集群。以下是一个包含 3 个组件的 Kafka 集群部署配置示例: + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: kafka-separated-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: kafka + topology: separated_monitor + componentSpecs: + - name: kafka-broker + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + env: + - name: KB_KAFKA_BROKER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_KAFKA_CONTROLLER_HEAP + value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64" + - name: KB_BROKER_DIRECT_POD_ACCESS + value: "true" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-controller + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: metadata + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + - name: kafka-exporter + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "1Gi" + requests: + cpu: "0.1" + memory: "0.2Gi" +``` + +:::note + +这三个组件将严格按照 `ClusterDefinition` 中定义的 `controller->broker->exporter` 顺序创建。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..1cd7d2d5 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_prerequisites.mdx @@ -0,0 +1,11 
@@ +在继续操作之前,请确保满足以下条件: +- 环境准备: + - Kubernetes 集群已启动并正常运行。 + - 已配置 kubectl CLI 工具与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装指南进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..90537077 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-kafka/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +监控集群状态直至其转为 Running(运行中)状态: +```bash +kubectl get cluster kafka-separated-cluster -n demo -w +``` + +预期输出: + +```bash +kubectl get cluster kafka-separated-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +kafka-separated-cluster kafka Delete Creating 13s +kafka-separated-cluster kafka Delete Running 63s +``` + +检查 Pod 状态及其角色: +```bash +kubectl get pods -l app.kubernetes.io/instance=kafka-separated-cluster -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +kafka-separated-cluster-kafka-broker-0 2/2 Running 0 13m +kafka-separated-cluster-kafka-controller-0 2/2 Running 0 13m +kafka-separated-cluster-kafka-exporter-0 1/1 Running 0 12m +``` + +当集群状态显示为 Running 时,表示您的 Kafka 集群已准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/01-overview.mdx b/docs/zh/preview/kubeblocks-for-milvus/01-overview.mdx new file mode 100644 index 00000000..c4ac76c7 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/01-overview.mdx @@ -0,0 +1,88 @@ +--- +description: 了解KubeBlocks Milvus插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本。 +keywords: +- Milvus +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks Milvus 插件概述 +--- +# KubeBlocks Milvus 插件概述 + +Milvus 是一款开源(Apache-2.0 许可)的向量数据库,专为支持嵌入相似性搜索和 AI 应用而构建。Milvus 的架构设计用于处理大规模向量数据集,并包含多种部署模式:Milvus 单机模式和分布式模式,以适应不同的数据规模需求。 + +## 核心特性 + +### 支持的拓扑结构 + +Milvus 支持两种部署模式以满足不同规模需求: + +#### 单机模式 + +适用于开发和测试的轻量级部署: + +- **Milvus 核心**:提供向量搜索和数据库功能 +- **元数据存储(ETCD)**:存储集群元数据和配置 +- **对象存储(MinIO/S3)**:持久化向量数据和索引 + +#### 集群模式 + +面向生产工作负载的分布式部署,包含多个专用组件: + +**接入层** + +- 无状态代理,处理客户端连接和请求路由 + +**计算层** + +- 查询节点:执行搜索操作 +- 数据节点:处理数据摄入和压缩 +- 索引节点:构建和维护向量索引 + +**协调层** + +- 根协调器:管理全局元数据 +- 查询协调器:编排查询执行 +- 数据协调器:管理数据分布 +- 索引协调器:监督索引构建 + +**存储层** + +- 元数据存储(ETCD):集群元数据和配置 +- 对象存储(MinIO/S3):持久化向量数据存储 +- 日志存储(Pulsar):用于变更数据捕获的消息队列 + +### 生命周期管理 + +KubeBlocks 通过全面的生命周期管理简化 Milvus 运维: + +| 特性 | 描述 | +|------------------------------|-----------------------------------------------------------------------------| +| **水平扩展** | 增减副本来调整容量 | +| **垂直扩展** | 调整 Milvus 实例的 CPU/内存资源 | +| **重启操作** | 以最小中断进行受控集群重启 | +| **启动/停止** | 临时暂停/恢复集群操作 | +| **自定义服务** | 暴露专用数据库端点 | +| **副本管理** | 安全地停用或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 放置和资源分配 | +| **监控** | 集成的 Prometheus 指标收集 | +| **日志** | 通过 Loki Stack 实现集中式日志 | + +### 支持版本 + +KubeBlocks Milvus 插件支持以下 Milvus 版本: + +| 主版本 | 支持的次版本 | +|---------------|--------------------------------| +| 2.3 | 2.3.2 | + +可通过以下命令查看支持的版本列表: +```bash +kubectl get cmpv milvus +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-milvus/02-quickstart.mdx new file mode 100644 index 00000000..268a5429 --- /dev/null +++ 
b/docs/zh/preview/kubeblocks-for-milvus/02-quickstart.mdx
@@ -0,0 +1,436 @@
+---
+description: 使用KubeBlocks部署和管理Milvus集群的完整指南,涵盖安装、配置及运维最佳实践。
+keywords:
+- Kubernetes
+- Milvus
+- KubeBlocks
+- Helm
+- Cluster Management
+- QuickStart
+sidebar_label: 快速入门
+sidebar_position: 2
+title: Milvus 快速入门
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Milvus 快速入门
+
+本指南提供了使用 **KubeBlocks Milvus 插件** 部署和管理 Milvus 集群的完整流程,内容包括:
+- 系统前提条件与插件安装
+- 集群创建与配置
+- 运维管理(包括启动/停止流程)
+- 连接方式与集群监控
+
+## 前提条件
+
+### 系统要求
+
+开始前请确保环境满足以下要求:
+
+- 可用的 Kubernetes 集群(推荐 v1.21+ 版本)
+- 已安装 `kubectl` v1.21+ 并配置集群访问权限
+- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/))
+- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks))
+
+### 验证 Milvus 插件
+
+Milvus 插件默认随 KubeBlocks 安装。检查其状态:
+
+```bash
+helm list -n kb-system | grep milvus
+```
+
+示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-milvus kb-system 1 2025-05-21 deployed milvus-1.0.0 +``` +
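如果安装了 kbcli,也可以通过插件子命令确认状态(示意命令):

```bash
kbcli addon list | grep milvus
```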
+ +若插件未启用,请选择安装方式: + + + + + ```bash + # 添加 Helm 仓库 + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # 中国大陆用户若 GitHub 访问困难或缓慢,可使用以下镜像仓库: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # 更新 Helm 仓库 + helm repo update + # 搜索可用插件版本 + helm search repo kubeblocks/milvus --versions + # 安装指定版本(将 替换为所选版本) + helm upgrade -i kb-addon-milvus kubeblocks-addons/milvus --version -n kb-system + ``` + + + + + ```bash + # 添加索引(kubeblocks 索引默认已添加) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # 更新索引 + kbcli addon index update kubeblocks + # 更新所有索引 + kbcli addon index update --all + ``` + + 搜索并安装插件: + + ```bash + # 搜索插件 + kbcli addon search milvus + # 安装指定版本插件(将 替换为所选版本) + kbcli addon install milvus --version + ``` + **示例输出:** + ```bash + ADDON VERSION INDEX + milvus 0.9.0 kubeblocks + milvus 0.9.1 kubeblocks + milvus 1.0.0 kubeblocks + ``` + 启用或禁用插件: + + ```bash + # 启用插件 + kbcli addon enable milvus + # 禁用插件 + kbcli addon disable milvus + ``` + + + + +:::note +**版本兼容性** + +请始终确保 Milvus 插件版本与 KubeBlocks 主版本匹配,以避免兼容性问题。 + +::: + +### 验证支持的 Milvus 版本 + +**列出可用 Milvus 版本:** + +```bash +kubectl get cmpv milvus +``` +
+示例输出 +```text +NAME VERSIONS STATUS AGE +milvus v2.3.2 Available 26d +``` +
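确认可用版本后,可以在 Cluster 的 `componentSpecs` 中通过 `serviceVersion` 字段显式固定所用版本。以下片段仅为示意:

```yaml
componentSpecs:
  - name: milvus
    serviceVersion: v2.3.2   # 必须是 `kubectl get cmpv milvus` 列出的版本之一
```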
+ +### 存储配置 + +Milvus 需要持久化存储。验证可用选项: + +```bash +kubectl get storageclass +``` + +推荐存储特性: +- 最小 20Gi 容量 +- ReadWriteOnce 访问模式 +- 支持存储卷扩容 +- 满足工作负载的性能需求 + +## 部署 Milvus 集群 + +使用默认设置部署一个基础的 Milvus 集群: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/milvus/cluster-standalone.yaml +``` + +该操作将创建: +- 一个包含 3 个副本的 Milvus 集群,分别用于 milvus、etcd 和 minio 组件 +- 默认资源分配(0.5 CPU,0.5Gi 内存) +- 20Gi 持久化存储 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: milvus-standalone + namespace: demo +spec: + # 指定删除集群时的行为策略 + # 有效选项为:[DoNotTerminate, Delete, WipeOut](自 KB 0.9 起 `Halt` 已弃用) + # - `DoNotTerminate`:阻止集群删除。此策略确保所有资源保持完整 + # - `Delete`:在 `Halt` 策略基础上还会移除 PVC,实现包括持久化数据在内的彻底清理 + # - `WipeOut`:激进策略,会删除包括外部存储中的卷快照和备份在内的所有集群资源。这将导致数据完全删除,应谨慎使用,主要适用于非生产环境以避免不可逆的数据丢失 + terminationPolicy: Delete + # 指定创建集群时使用的 ClusterDefinition 名称 + # 注意:请勿更新此字段 + # 该值必须设为 `milvus` 才能创建 Milvus 集群 + clusterDef: milvus + # 指定创建集群时使用的 ClusterTopology 名称 + # 有效选项为:[standalone,cluster] + topology: standalone + # 指定构成集群的组件规格列表 + # 该字段允许对集群中的每个组件进行详细配置 + componentSpecs: + - name: etcd + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: minio + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: milvus + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +更多 API 字段及描述,请参阅 [API 参考](../user_docs/references/api-reference/cluster)。 + +## 验证集群状态 + +当部署包含3个副本的Milvus集群时,请通过以下方式确认部署成功: + +1. 集群状态为`Running`(运行中) +2. 
所有Pod均正常运行 + +可通过以下任一方式检查状态: + + + +```bash +kubectl get cluster milvus-standalone -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-standalone milvus Delete Creating 27s +milvus-standalone milvus Delete Running 64s + +kubectl get pods -l app.kubernetes.io/instance=milvus-standalone -n demo +NAME READY STATUS RESTARTS AGE +milvus-standalone-etcd-0 2/2 Running 0 25m +milvus-standalone-milvus-0 1/1 Running 0 24m +milvus-standalone-minio-0 1/1 Running 0 25m +``` + + + + + 安装`kbcli`后,可查看完整的集群信息: + +```bash +kbcli cluster describe milvus-standalone -n demo + +名称: milvus-standalone 创建时间: 2025年5月19日 11:03 UTC+0800 +命名空间 集群定义 拓扑结构 状态 终止策略 +demo milvus standalone Running Delete + +访问端点: +组件 内部地址 外部地址 + +拓扑结构: +组件 服务版本 实例名称 角色 状态 可用区 节点 创建时间 +etcd 3.5.15 milvus-standalone-etcd-0 leader Running zone-x x.y.z 2025年5月19日 11:03 UTC+0800 +milvus v2.3.2 milvus-standalone-milvus-0 <无> Running zone-x x.y.z 2025年5月19日 11:04 UTC+0800 +minio 8.0.17 milvus-standalone-minio-0 <无> Running zone-x x.y.z 2025年5月19日 11:03 UTC+0800 + +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +etcd 500m / 500m 512Mi / 512Mi data:10Gi <无> +minio 500m / 500m 512Mi / 512Mi data:10Gi <无> +milvus 500m / 500m 512Mi / 512Mi data:10Gi <无> + +镜像信息: +组件 组件定义 镜像地址 +etcd etcd-3-1.0.0 quay.io/coreos/etcd:v3.5.15 +minio milvus-minio-1.0.0 docker.io/minio/minio:RELEASE.2022-03-17T06-34-49Z +milvus milvus-standalone-1.0.0 docker.io/milvusdb/milvus:v2.3.2 + +数据保护: +备份仓库 自动备份 备份计划 备份方法 备份保留期 可恢复时间 + +查看集群事件: kbcli cluster list-events -n demo milvus-standalone +``` + + + + +## 访问 Milvus + +要访问 Milvus 服务,您可以通过创建服务来暴露该服务: + +```bash +kubectl port-forward pod/milvus-standalone-milvus-0 -n demo 19530:19530 +``` + +之后,您可以通过 `localhost:19530` 访问 Milvus 服务。 + +## 停止 Milvus 集群 + +停止集群会暂时暂停运行,同时保留所有数据和配置: + +**关键影响:** +- 计算资源(Pod)会被释放 +- 持久化存储(PVC)保持完整 +- 服务定义得以保留 +- 集群配置不会丢失 +- 降低运行成本 + + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-stop + namespace: demo + spec: + clusterName: milvus-standalone + type: Stop + ``` + + + + 也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群: + + ```bash + kubectl patch cluster milvus-standalone -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/2/stop", + "value": true + } + ]' + ``` + + + +## 启动 Milvus 集群 + +重启已停止的集群可恢复运行,所有数据和配置将保持完整。 + +**关键影响:** +- 计算资源(Pod)会被重新创建 +- 服务将再次可用 +- 集群恢复到之前的状态 + + + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-start + namespace: demo + spec: + clusterName: milvus-standalone + type: Start + ``` + + + + 通过将 `spec.componentSpecs.stop` 设置为 false 来重启集群: + + ```bash + kubectl patch cluster milvus-standalone -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/2/stop" + } + ]' + ``` + + + +## 删除 Milvus 集群 + +请根据数据保留需求谨慎选择删除策略: + +| 策略 | 删除的资源 | 删除的数据 | 适用场景 | +|-----------------|-------------------|-------------------|-------------------------| +| DoNotTerminate | 无 | 无 | 关键生产环境集群 | +| Delete | 所有资源 | PVC存储卷 | 非关键环境 | +| WipeOut | 所有资源 | 全部数据* | 仅限测试环境 | + +*包含外部存储中的快照和备份 + +**删除前检查清单:** +1. 确认没有应用程序正在使用该集群 +2. 确保已存在必要的备份 +3. 确认terminationPolicy设置正确 +4. 
检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster milvus-standalone -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster milvus-standalone -n demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/03-topologies/01-standlone.mdx b/docs/zh/preview/kubeblocks-for-milvus/03-topologies/01-standlone.mdx new file mode 100644 index 00000000..aad97411 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/03-topologies/01-standlone.mdx @@ -0,0 +1,137 @@ +--- +description: 了解如何使用KubeBlocks部署Milvus单机集群。本指南涵盖配置、验证、故障转移测试及超时设置。 +keywords: +- KubeBlocks +- Redis +- Kubernetes +- High Availability +sidebar_label: Milvus 单机模式集群 +sidebar_position: 1 +title: 使用KubeBlocks部署Milvus单机集群 +--- +# 使用 KubeBlocks 部署 Milvus 单机集群 + +单机模式是一种轻量级部署方案,适用于开发和测试环境,包含以下组件: + +- **Milvus 核心**:提供向量搜索和数据库功能 +- **元数据存储 (ETCD)**:存储集群元数据和配置信息 +- **对象存储 (MinIO/S3)**:持久化向量数据和索引 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Milvus 单机集群 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: milvus-standalone + namespace: demo +spec: + terminationPolicy: Delete + # 该值必须为 `milvus` 以创建 Milvus 集群 + clusterDef: milvus + # 有效选项为: [standalone,cluster] + topology: standalone + componentSpecs: + - name: etcd + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + - name: minio + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + - name: milvus + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +``` + +**关键配置说明**: +- `clusterDef: milvus`:指定集群的 ClusterDefinition CR +- `topology: standalone`:配置集群使用单机拓扑 +- `componentSpecs`:定义集群中的组件 + +## 验证部署 + +### 检查集群状态 +集群部署完成后,检查其状态: +```bash +kubectl get cluster milvus-standalone -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-standalone milvus Delete Creating 40s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Updating 71s +milvus-standalone milvus Delete Running 2m55s +``` + +### 验证组件状态 +```bash +kubectl get component -n demo -l app.kubernetes.io/instance=milvus-standalone +``` +预期输出: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +milvus-standalone-etcd etcd-3-1.0.0 3.5.15 Running 3m5s +milvus-standalone-milvus milvus-standalone-1.0.0 v2.3.2 Running 114s +milvus-standalone-minio milvus-minio-1.0.0 8.0.17 Running 3m5s +``` + +## 清理资源 +要删除本教程中创建的所有资源: + +```bash +kubectl delete cluster milvus-standalone -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/03-topologies/02-cluster.mdx b/docs/zh/preview/kubeblocks-for-milvus/03-topologies/02-cluster.mdx new file mode 100644 index 00000000..5e98bd3b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/03-topologies/02-cluster.mdx @@ -0,0 +1,527 @@ +--- +description: 
了解如何使用KubeBlocks部署Redis复制集群。本指南涵盖配置、验证、故障转移测试及超时设置。 +keywords: +- KubeBlocks +- Redis +- Kubernetes +- High Availability +sidebar_label: Milvus 集群 +sidebar_position: 1 +title: 使用KubeBlocks部署Milvus集群 +--- +# 使用 KubeBlocks 部署 Milvus 集群 + +Milvus 集群是一种面向生产工作负载的分布式部署方案,包含多个专用组件: + +**接入层** + +- 无状态代理节点,负责处理客户端连接和请求路由 + +**计算层** + +- 查询节点(Query Nodes):执行搜索操作 +- 数据节点(Data Nodes):处理数据写入和压缩 +- 索引节点(Index Nodes):构建和维护向量索引 + +**协调层** + +- 根协调器(Root Coordinator):管理全局元数据 +- 查询协调器(Query Coordinator):协调查询执行 +- 数据协调器(Data Coordinator):管理数据分布 +- 索引协调器(Index Coordinator):监督索引构建 + +**存储层** + +- 元数据存储(ETCD):集群元数据和配置存储 +- 对象存储(MinIO/S3):向量数据的持久化存储 +- 日志存储(Pulsar):用于变更数据捕获的消息队列 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 Milvus 集群 + +### 步骤 1. 部署 ETCD 集群 + +ETCD 集群用于元数据存储 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: etcdm-cluster + namespace: demo +spec: + terminationPolicy: WipeOut + componentSpecs: + - name: etcd + componentDef: etcd-3-1.0.0 + serviceVersion: 3.5.6 + replicas: 1 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 步骤 2. 部署 MinIO 集群 + +MinIO 用于对象存储 +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: miniom-cluster + namespace: demo +spec: + terminationPolicy: WipeOut + componentSpecs: + - name: minio + componentDef: milvus-minio-1.0.0 + replicas: 1 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 步骤 3. 
部署 Pulsar 集群 + +Pulsar 用于日志存储 +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pulsarm-cluster + namespace: demo +spec: + terminationPolicy: Delete + # 该值必须为 `pulsar` 以创建 Pulsar 集群 + clusterDef: pulsar + topology: pulsar-basic-cluster + services: + - name: broker-bootstrap + serviceName: broker-bootstrap + componentSelector: broker + spec: + type: ClusterIP + ports: + - name: pulsar + port: 6650 + targetPort: 6650 + - name: http + port: 80 + targetPort: 8080 + - name: kafka-client + port: 9092 + targetPort: 9092 + - name: zookeeper + serviceName: zookeeper + componentSelector: zookeeper + spec: + type: ClusterIP + ports: + - name: client + port: 2181 + targetPort: 2181 + componentSpecs: + - name: broker + serviceVersion: 3.0.2 + replicas: 1 + env: + - name: KB_PULSAR_BROKER_NODEPORT + value: "false" + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + serviceVersion: 3.0.2 + replicas: 4 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: zookeeper + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "100m" + memory: "512Mi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi +``` + +### 部署 Milvus 集群 + +该集群将包含以下组件: +- Proxy(代理节点) +- Data Node(数据节点) +- Index Node(索引节点) +- Query Node(查询节点) +- Mixed Coordinator(混合协调器) + +每个组件将通过 `serviceRef` 关联到之前创建的对应服务:etcd、minio 和 pulsar。 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + namespace: demo + name: milvus-cluster +spec: + terminationPolicy: Delete + # 该值必须为 `milvus` 以创建 Milvus 集群 + clusterDef: milvus + # 可选值: [standalone,cluster] + topology: cluster + componentSpecs: + - name: proxy + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # 定义组件的服务引用列表 + serviceRefs: + - name: milvus-meta-storage # 指定服务引用声明的标识符,定义在 `componentDefinition.spec.serviceRefDeclarations[*].name` + namespace: demo # 引用集群的命名空间,按需修改 + # 引用另一个 KubeBlocks 集群提供的服务 + clusterServiceSelector: + cluster: etcdm-cluster # ETCD 集群名称,按需修改 + service: + component: etcd # 组件名称,应为 etcd + service: headless # 引用默认的无头服务 + port: client # 引用端口名称 'client' + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster # Pulsar 集群名称 + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster # Minio 集群名称 + service: + component: minio + service: headless + port: http + credential: # 指定用于认证和建立与引用集群连接的系统账户 + component: minio # 对应组件 'minio' + name: admin # 引用的凭证(系统账户)名称,本例使用 'admin' 账户 + - name: mixcoord + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: 
demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin + - name: datanode + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin + - name: indexnode + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin + - name: querynode + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + serviceRefs: + - name: milvus-meta-storage + namespace: demo + clusterServiceSelector: + cluster: etcdm-cluster + service: + component: etcd + service: headless + port: client + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster + service: + component: broker + service: headless + port: pulsar + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster + service: + component: minio + service: headless + port: http + credential: + component: minio + name: admin +``` + +:::note + +像 Pulsar、MinIO 和 ETCD 这样的集群有多个端口提供不同服务。 +当使用 `serviceRef` 创建集群时,您需要知道哪个 `port` 提供对应的服务。 +例如,在 MinIO 中主要有四个端口:9000、9001、3501 和 3502,它们用于不同的服务或功能。 + +::: + +服务引用通过 `serviceRefs` 指定如下,请根据您的运行环境更新集群名称和端口: +```yaml +# 定义组件的服务引用列表 +serviceRefs: + - name: milvus-meta-storage # 指定服务引用声明的标识符,定义在 `componentDefinition.spec.serviceRefDeclarations[*].name` + namespace: demo # 引用集群的命名空间,按需修改 + # 引用另一个 KubeBlocks 集群提供的服务 + clusterServiceSelector: + cluster: etcdm-cluster # ETCD 集群名称,按需修改 + service: + component: etcd # 组件名称,应为 etcd + service: headless # 引用默认的无头服务 + port: client # 注意:引用端口名称 'client',对应端口号 '3501' + - name: milvus-log-storage + namespace: demo + clusterServiceSelector: + cluster: pulsarm-cluster # Pulsar 集群名称 + service: + component: broker + service: headless + port: pulsar # 注意:引用端口名称 'pulsar',对应端口号 '6650' + - name: milvus-object-storage + namespace: demo + clusterServiceSelector: + cluster: miniom-cluster # Minio 集群名称 + service: + component: minio + service: headless + port: http # 注意:引用端口名称 'http',对应端口号 '9000' + credential: # 指定用于认证和建立与引用集群连接的系统账户 + component: minio # 对应组件 'minio' + name: admin # 注意:引用的凭证(系统账户)名称,本例使用 'admin' 账户 +``` + +## 验证部署 + +### 检查集群状态 +集群部署完成后,检查其状态: +```bash +kubectl get cluster milvus-cluster -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION 
TERMINATION-POLICY STATUS AGE +milvus-cluster milvus Delete Running 4m38s +``` + +### 验证组件和Pod状态 +```bash +kubectl get component -l app.kubernetes.io/instance=milvus-cluster -n demo +``` +预期输出: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +milvus-cluster-datanode milvus-datanode-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-indexnode milvus-indexnode-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-mixcoord milvus-mixcoord-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-proxy milvus-proxy-1.0.0 v2.3.2 Running 5m8s +milvus-cluster-querynode milvus-querynode-1.0.0 v2.3.2 Running 5m8s +``` + +检查Pod状态: + +```bash +kubectl get pods -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +milvus-cluster-datanode-0 1/1 Running 0 5m30s +milvus-cluster-indexnode-0 1/1 Running 0 5m31s +milvus-cluster-mixcoord-0 1/1 Running 0 5m32s +milvus-cluster-proxy-0 1/1 Running 0 5m32s +milvus-cluster-querynode-0 1/1 Running 0 5m31s +milvus-cluster-querynode-1 1/1 Running 0 3m51s +``` + +## 清理资源 +要删除本教程中创建的所有资源,请执行以下命令: + +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete cluster etcdm-cluster -n demo +kubectl delete cluster miniom-cluster -n demo +kubectl delete cluster pulsarm--cluster -n demo +kubectl delete ns demo +``` + +说明: +- 上述命令会依次删除在demo命名空间中创建的Milvus集群、ETCD集群、MinIO集群和Pulsar集群 +- 最后一条命令将删除整个demo命名空间及其下所有资源 +- 请确保这些资源不再需要后再执行删除操作 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/03-topologies/_category_.yml b/docs/zh/preview/kubeblocks-for-milvus/03-topologies/_category_.yml new file mode 100644 index 00000000..8b10f9b7 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/03-topologies/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 拓扑结构 +position: 3 diff --git a/docs/zh/preview/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..63cb741e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,261 @@ +--- +description: 了解如何在KubeBlocks中管理Milvus集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- Milvus +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: Milvus 集群生命周期管理(停止、启动、重启) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Milvus 集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 Milvus 集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运营成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|--------------------------| +| 停止 | 暂停集群,保留存储 | 成本节约、维护窗口 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重新创建组件 Pod | 配置变更、故障排查 | + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Milvus 集群 + +请参考[使用 KubeBlocks 部署 Milvus 集群](../03-topologies/02-cluster)来部署一个 Milvus 集群。 + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 Milvus 集群将: + +1. 终止所有运行中的 Pod +2. 
保留集群配置 + +此操作适用于: +- 临时节省成本 +- 维护窗口期 +- 开发环境暂停 + + + + + +选项 1:OpsRequest API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-stop-ops + namespace: demo +spec: + clusterName: milvus-cluster + type: Stop +``` + + + + +选项 2:Cluster API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-stop-ops + namespace: demo +spec: + clusterName: milvus-cluster + type: Stop +``` + + + +### 验证集群停止 + +确认停止操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Stopping 6m33s + milvus-cluster milvus Delete Stopped 6m55s + ``` + +2. 验证无运行中的 Pod: + ```bash + kubectl get pods -l app.kubernetes.io/instance=milvus-cluster -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +### 启动集群 + +启动已停止的 Milvus 集群: +1. 重新创建所有 Pod +3. 恢复服务端点 + +预期行为: +- 集群恢复到之前状态 +- 不会发生数据丢失 +- 服务自动恢复 + + + + +发起启动操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-start-ops + namespace: demo +spec: + # 指定此操作目标集群资源的名称 + clusterName: milvus-cluster + type: Start +``` + + + +### 验证集群启动 + +确认启动操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Updating 30m + milvus-cluster milvus Delete Updating 32m + milvus-cluster milvus Delete Updating 32m + milvus-cluster milvus Delete Running 33m + milvus-cluster milvus Delete Running 33m + ``` + +2. 验证 Pod 重新创建: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster + ``` + 示例输出: + ```bash + NAME READY STATUS RESTARTS AGE + milvus-cluster-datanode-0 1/1 Running 0 5m24s + milvus-cluster-indexnode-0 1/1 Running 0 5m24s + milvus-cluster-mixcoord-0 1/1 Running 0 5m24s + milvus-cluster-proxy-0 1/1 Running 0 5m24s + milvus-cluster-querynode-0 1/1 Running 0 5m24s + milvus-cluster-querynode-1 1/1 Running 0 3m43s + ``` + +### 重启集群 + +重启操作提供: +- 无需完全停止集群即可重新创建 Pod +- 组件级粒度控制 +- 最小化服务中断 + +使用场景: +- 需要重启的配置变更 +- 资源刷新 +- 故障排查 + +**检查组件** + +Milvus 集群包含五个组件。获取组件列表: +```bash +kubectl get cluster -n demo milvus-cluster -oyaml | yq '.spec.componentSpecs[].name' +``` + +预期输出: +```text +proxy +mixcoord +datanode +indexnode +querynode +``` + +**通过 OpsRequest API 重启 Proxy** + +列出要重启的特定组件: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: milvus-cluster-restart-ops + namespace: demo +spec: + clusterName: milvus-cluster + type: Restart + restart: + - componentName: proxy +``` + +**验证重启完成** + +确认组件重启成功: + +1. 跟踪 OpsRequest 进度: + ```bash + kubectl get opsrequest milvus-cluster-restart-ops -n demo -w + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-restart-ops Restart milvus-cluster Running 0/1 4s + milvus-cluster-restart-ops Restart milvus-cluster Running 1/1 2m12s + milvus-cluster-restart-ops Restart milvus-cluster Running 1/1 2m12s + milvus-cluster-restart-ops Restart milvus-cluster Succeed 1/1 2m12s + ``` + +2. 检查 Pod 状态: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster + ``` + 注意:重启后 Pod 将显示新的创建时间戳。只有属于 `proxy` 组件的 Pod 会被重启。 + +操作完成后,集群将返回 Running 状态。 + +## 总结 +在本指南中,您学习了如何: +1. 停止 Milvus 集群以暂停操作,同时保留持久存储 +2. 启动已停止的集群使其重新上线 +3. 
重启特定集群组件以重新创建其 Pod,而无需停止整个集群 + +通过管理 Milvus 集群的生命周期,您可以优化资源利用率、降低成本并在 Kubernetes 环境中保持灵活性。KubeBlocks 提供了一种无缝执行这些操作的方式,确保高可用性和最小化中断。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..0c402a3d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,192 @@ +--- +description: 了解如何在由KubeBlocks管理的Milvus集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- Milvus +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: Milvus 集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks垂直扩缩Milvus单机集群 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的Milvus集群进行垂直扩缩,同时保持副本数量不变。 + +垂直扩缩会修改Milvus实例的计算资源(CPU和内存)同时保持副本数量。主要特点: + +- **无中断性**:正确配置时,可在扩缩期间保持可用性 +- **精细化**:可独立调整CPU、内存或两者 +- **可逆性**:支持按需扩容或缩容 + +KubeBlocks通过遵循受控的、角色感知的更新策略确保扩缩操作影响最小化: +**角色感知副本(主/从副本)** +- 从副本优先更新 - 先升级非主节点Pod以最小化影响 +- 主节点最后更新 - 仅当所有从节点健康后才重启主Pod +- 集群状态在所有副本稳定后从"更新中"转为"运行中" + +**无角色副本(基于序号的扩缩)** +若副本未定义角色,更新遵循Kubernetes Pod序号顺序: +- 最高序号优先(如pod-2 → pod-1 → pod-0)以确保确定性滚动更新 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Milvus集群 + +请参考[使用KubeBlocks部署Milvus集群](../03-topologies/02-cluster)部署一个milvus集群。 + +## 垂直扩缩 + +**预期工作流程**: + +1. Pod按序号从高到低顺序更新(如pod-2 → pod-1 → pod-0) +1. 集群状态从"更新中"转为"运行中" + +**检查组件** + +Milvus集群包含五个组件。获取组件列表: +```bash +kubectl get cluster -n demo milvus-cluster -oyaml | yq '.spec.componentSpecs[].name' +``` + +预期输出: +```text +proxy +mixcoord +datanode +indexnode +querynode +``` + + + + 选项1:使用VerticalScaling OpsRequest + + 应用以下YAML为**querynode**组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-vscale-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: VerticalScaling + verticalScaling: + - componentName: querynode + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + 可通过以下命令查看扩缩进度: + + ```bash + kubectl -n demo get ops milvus-cluster-vscale-ops -w + ``` + + 预期结果: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-vscale-ops VerticalScaling milvus-cluster Running 0/2 33s + milvus-cluster-vscale-ops VerticalScaling milvus-cluster Running 1/2 55s + milvus-cluster-vscale-ops VerticalScaling milvus-cluster Running 2/2 88s + ``` + + + + + + 选项2:直接更新Cluster API + + 也可通过更新`spec.componentSpecs.resources`字段直接调整资源: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: querynode + replicas: 1 + resources: + requests: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + limits: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + ... 
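        # 提示:将 requests 与 limits 设为相同值可获得 Guaranteed QoS,
        # 与下文"最佳实践与注意事项"中的建议一致。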
+ ``` + + + + +:::note + +Milvus集群包含五个组件。本教程仅展示如何调整单个组件资源。 +其他组件可采用相同方式进行变更。 + +::: + +## 最佳实践与注意事项 + +**规划阶段:** +- 在维护窗口或低流量时段执行扩缩 +- 确认Kubernetes集群有足够资源 +- 操作前检查是否有其他进行中的操作 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证QoS + +**扩缩后:** +- 监控资源利用率和应用性能 +- 必要时调整Milvus参数配置 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe milvus-cluster -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +milvus 1 / 1 1Gi / 1Gi data:20Gi +``` + +## KubeBlocks垂直扩缩的核心优势 +- 无缝扩缩:按特定顺序重建Pod确保最小影响 +- 动态资源调整:根据工作负载需求灵活调整CPU和内存 +- 灵活性:可选择OpsRequest动态扩缩或直接API更新精确控制 +- 高可用性:扩缩过程中集群保持可操作状态 + +## 清理 +删除所有创建的资源,包括Milvus集群及其命名空间: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +本指南中您学会了如何: +1. 部署KubeBlocks管理的Milvus集群 +2. 通过增减资源对milvus组件进行垂直扩缩 +3. 使用OpsRequest和直接Cluster API两种方式调整资源分配 + +垂直扩缩是优化资源利用率和适应工作负载变化的强大工具,可确保您的Milvus集群始终保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..92d5a5b0 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,241 @@ +--- +description: 了解如何通过OpsRequest和直接更新Cluster API,对KubeBlocks管理的Milvus集群执行水平扩缩容(扩容与缩容)。 +keywords: +- KubeBlocks +- Milvus +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks实现Milvus集群的水平扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现Milvus集群的水平扩缩容 + +本指南介绍如何对KubeBlocks管理的Milvus集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用**OpsRequest**和直接修改**Cluster API**两种方式实现这一目标。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Milvus集群 + +请参考[使用KubeBlocks部署Milvus集群](../03-topologies/02-cluster)完成集群部署。 + +## 扩容(增加副本) + +**预期工作流程**: +1. 新Pod被创建,状态从`Pending`转为`Running` +2. 集群状态从`Updating`变为`Running` + + + + + + 方案一:使用水平扩容OpsRequest + + 为milvus组件的querynode增加1个副本: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-scale-out-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: querynode + # 指定组件扩容的副本变化量 + scaleOut: + # 指定该组件的副本变化数 + # 当前组件增加1个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops milvus-cluster-scale-out-ops -n demo -w + ``` + + 预期输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-scale-out-ops HorizontalScaling milvus-cluster Running 0/1 9s + milvus-cluster-scale-out-ops HorizontalScaling milvus-cluster Running 1/1 16s + milvus-cluster-scale-out-ops HorizontalScaling milvus-cluster Succeed 1/1 16s + ``` + + + + + 方案二:直接修改Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: querynode + replicas: 3 # 将副本数从2增加到3 + ... 
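    # 说明:Cluster API 是声明式的,replicas 直接给出期望的最终副本数;
    # 而 OpsRequest 中的 scaleOut.replicaChanges 给出的是相对增量,两者最终效果一致。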
+ ``` + + 或者使用命令直接修改集群CR: + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/4/replicas", + "value": 3 + } +]' +``` + + + +### 验证扩容结果 + +操作完成后,您将看到新Pod被创建,Milvus集群状态从`Updating`变为`Running`,且新建Pod具有`secondary`角色。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster,apps.kubeblocks.io/component-name=querynode +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +milvus-cluster-querynode-0 1/1 Running 0 85m +milvus-cluster-querynode-1 1/1 Running 0 87m +milvus-cluster-querynode-2 1/1 Running 0 99m +``` + +## 缩容(减少副本) + +**预期工作流程**: +1. 选择序号最大的副本进行移除 +2. Pod被优雅终止 +3. 集群状态从`Updating`变为`Running` + + + + + + 方案一:使用水平缩容OpsRequest + + 为milvus组件的querynode减少1个副本: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-scale-in-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: querynode + # 指定组件缩容的副本变化量 + scaleIn: + # 指定该组件的副本变化数 + # 当前组件减少1个副本 + replicaChanges: 1 + ``` + + 监控操作进度: + ```bash + kubectl get ops milvus-cluster-scale-in-ops -n demo -w + ``` + + 预期输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-scale-in-ops HorizontalScaling milvus-cluster Running 0/1 8s + milvus-cluster-scale-in-ops HorizontalScaling milvus-cluster Running 1/1 24s + milvus-cluster-scale-in-ops HorizontalScaling milvus-cluster Succeed 1/1 24s + ``` + + + + + 方案二:直接修改Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: milvus + replicas: 2 # 将副本数从3减少到2 + ``` + +或使用命令直接修改集群CR: + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ +{ + "op": "replace", + "path": "/spec/componentSpecs/4/replicas", + "value": 2 + } +]' +``` + + + + + +### 验证缩容结果 + +示例输出(两个Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster +NAME READY STATUS RESTARTS AGE +milvus-cluster-querynode-0 1/1 Running 0 101m +milvus-cluster-querynode-1 1/1 Running 0 102m +``` + +:::note + +Milvus集群包含五个组件。本教程展示了如何对一个组件进行操作。 +您可以用相同方式对其他组件执行扩缩容。 + +::: + +## 最佳实践 + +执行水平扩缩容时: +- 尽可能选择低流量时段操作 +- 扩缩容过程中监控集群健康状态 +- 扩容前确保有足够的资源 +- 考虑新副本的存储需求 + +## 清理资源 +删除Milvus集群及其命名空间以清除所有资源: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中您学会了: +- 执行扩容操作为Milvus集群增加副本 +- 执行缩容操作减少Milvus集群副本 +- 使用OpsRequest和直接修改Cluster API两种方式进行水平扩缩容 + +KubeBlocks能确保在最小化影响数据库服务的情况下实现无缝扩缩容。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..200a46e1 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,304 @@ +--- +description: 了解如何通过负载均衡器及其他服务类型,在KubeBlocks中配置和管理Milvus服务,实现内外部访问。 +keywords: +- KubeBlocks +- Milvus +- LoadBalancer +- External Service +- Expose +- Kubernetes +sidebar_label: 管理 Milvus 服务 +sidebar_position: 5 +title: 使用KubeBlocks中的声明式集群API创建与销毁Milvus服务 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 在KubeBlocks中使用声明式集群API管理Milvus服务 + +本指南提供了逐步操作说明,指导如何对外部和内部暴露由KubeBlocks管理的Milvus服务。您将学习使用云服务提供商的负载均衡器服务配置外部访问、管理内部服务,以及在不再需要时正确禁用外部暴露功能。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 Milvus 集群 + +请参考[使用 KubeBlocks 部署 
Milvus 集群](../03-topologies/02-cluster)来部署 Milvus 集群。 + +## 查看网络服务 +列出为 Milvus 集群创建的服务: +```bash +kubectl get service -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +示例服务输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +milvus-cluster-proxy ClusterIP 10.96.157.187 19530/TCP,9091/TCP 133m +``` + +说明: +- 该命令会筛选出命名空间 `demo` 中带有标签 `app.kubernetes.io/instance=milvus-cluster` 的所有 Service 资源 +- 输出示例显示了一个 ClusterIP 类型的服务 `milvus-cluster-proxy`,暴露了 19530(Milvus 服务端口)和 9091(指标端口) +- 服务已运行 133 分钟(2 小时 13 分钟) + +## 暴露 Milvus 服务 + +外部服务地址允许公网访问 Milvus,而内部服务地址将访问限制在用户的 VPC 内。 + +### 服务类型对比 + +| 类型 | 使用场景 | 云服务成本 | 安全性 | +|------|----------|------------|--------| +| ClusterIP | 内部服务通信 | 免费 | 最高 | +| NodePort | 开发测试环境 | 低 | 中等 | +| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 | + + + + + + 选项一:使用 OpsRequest + + 要通过 LoadBalancer 对外暴露 Milvus 服务,创建 OpsRequest 资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: milvus-cluster + expose: + - componentName: milvus + services: + - name: internet + # 决定服务暴露方式,默认为 'ClusterIP' + # 可选值:'ClusterIP'、'NodePort' 和 'LoadBalancer' + serviceType: LoadBalancer + ports: + - name: milvus + port: 19530 + protocol: TCP + targetPort: milvus + # 当 ServiceType 为 LoadBalancer 时,包含云服务商相关参数 + # 以下是 AWS EKS 的配置示例 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + switch: Enable + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops milvus-cluster-expose-enable-ops -n demo + ``` + + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-expose-enable-ops Expose milvus-cluster Succeed 1/1 31s + ``` + + + + + + 选项二:使用 Cluster API + + 或者,在 Cluster 资源的 `spec.services` 部分添加 LoadBalancer 服务: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: milvus-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: milvus + # 暴露外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + componentSelector: milvus + name: milvus-internet + serviceName: milvus-internet + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: milvus + port: 19530 + protocol: TCP + targetPort: milvus + type: LoadBalancer # [ClusterIP, NodePort, LoadBalancer] + componentSpecs: + ... 
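    # 说明:KubeBlocks 会依据 serviceName 生成名为 <集群名>-<serviceName> 的 Service,
    # 本例即下文验证步骤中出现的 milvus-cluster-milvus-internet。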
+ ``` + 上述 YAML 配置在 services 部分添加了一个新的外部服务。这个 LoadBalancer 服务包含了 AWS 网络负载均衡器 (NLB) 的注解。 + + :::note + 云服务商注解说明 + + 使用 LoadBalancer 服务时,必须包含对应云服务商的特定注解。以下是不同云服务商的常用注解: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 设为 "false" 则创建面向互联网的负载均衡器 + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 设为 "false" 则创建面向互联网的负载均衡器 + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # 限制负载均衡器仅限内部 VPC 访问。默认不指定时为面向互联网。 + cloud.google.com/l4-rbs: "enabled" # 面向互联网负载均衡器的优化配置 + ``` + + - 阿里云 + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 设为 "intranet" 则创建内部负载均衡器 + ``` + ::: + + + :::note + `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制负载均衡器是内部还是面向互联网。注意此注解在服务创建后不能动态修改。 + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 则使用内部 VPC IP + ``` + 如果在服务创建后将该注解从 "false" 改为 "true",注解会在服务对象中更新,但负载均衡器仍会保留其公网 IP。 + + 正确修改此行为的步骤: + - 首先删除现有的负载均衡器服务 + - 使用更新后的注解重新创建服务(`service.beta.kubernetes.io/aws-load-balancer-internal`: "true") + - 等待新的负载均衡器配置正确的内部或外部 IP + ::: + + + 使用以下命令等待集群状态变为 Running: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Running 18m + ``` + + + + +### 验证暴露的服务 +检查服务详情以确认 LoadBalancer 服务已创建: + +```bash +kubectl get service -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +示例输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +milvus-cluster-milvus-internet LoadBalancer 172.20.60.24 19530:31243/TCP 1m +``` + +## 禁用外部暴露 + + + + + + 选项一:使用 OpsRequest + + 要禁用外部访问,创建一个 OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: milvus-cluster + expose: + - componentName: milvus + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops milvus-cluster-expose-disable-ops -n demo + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + milvus-cluster-expose-disable-ops Expose milvus-cluster Succeed 1/1 24s + ``` + + + + + + 选项二:使用 Cluster API + + 或者,从 Cluster 资源中移除 `spec.services` 字段: + ```bash + kubectl patch cluster milvus-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + 监控集群状态直到变为 Running: + ```bash + kubectl get cluster milvus-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + milvus-cluster milvus Delete Running 44m + ``` + + + +### 验证服务移除 + +确保 'milvus-cluster-milvus-internet' 服务已被移除: + +```bash +kubectl get service -l app.kubernetes.io/instance=milvus-cluster -n demo +``` + +预期结果:'milvus-cluster-milvus-internet' 服务应被移除。 + +## 清理资源 +要删除所有已创建的资源,请连同其命名空间一起删除 Milvus 集群: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +``` + +## 概述 +本指南演示了如何: +- 使用 KubeBlocks 对外或对内暴露 Milvus 服务 +- 通过云服务商特定注解配置负载均衡器服务 +- 通过 OpsRequest 或直接更新 Cluster API 来启用/禁用服务,从而管理外部访问 + +KubeBlocks 为 Kubernetes 环境中的 Milvus 服务管理提供了灵活性和简便性,同时也为 Milvus 服务管理提供了同样的优势。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx 
b/docs/zh/preview/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..603846c6 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,141 @@ +--- +description: 了解如何在由KubeBlocks管理的Milvus集群中下线(停用)特定Pod。 +keywords: +- KubeBlocks +- Milvus +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 停用 Milvus 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的Milvus集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 在 KubeBlocks 托管的 Milvus 集群中下线特定 Pod + +本指南介绍如何在 KubeBlocks 管理的 Milvus 集群中下线(停用)特定 Pod。通过精确控制集群资源的同时保持可用性,此操作适用于工作负载重新平衡、节点维护或故障处理场景。 + +## 为什么选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法下线特定 Pod。StatefulSet 会确保 Pod 的顺序和身份标识,缩减规模时总是移除序号最高的 Pod(例如从 3 个副本缩减时首先移除 `Pod-2`)。这种限制导致无法精确控制下线哪个 Pod,使得维护、工作负载分配或故障处理变得复杂。 + +KubeBlocks 通过允许管理员直接下线特定 Pod 来突破这一限制。这种细粒度控制既能确保高可用性,又能在不中断整个集群的情况下实现更好的资源管理。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Milvus 集群 + +请参考[使用 KubeBlocks 部署 Milvus 集群](../03-topologies/02-cluster)完成集群部署。 + +## 下线 Pod + +**预期工作流程**: +1. 从 `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转为 `Running` + +在下线组件中的特定 Pod 前,请确保该组件拥有多个副本。若不符合条件,请先进行扩容操作。 + +例如,可以通过以下命令修改集群 CR,将 querynode 组件设置为 3 个副本: + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/4/replicas", + "value": 3 + } +]' +``` + +要下线特定 Pod(例如 'milvus-cluster-querynode-1'),可采用以下任一方法: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 将 Pod 标记为下线: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: milvus-cluster-decommission-ops + namespace: demo + spec: + clusterName: milvus-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: querynode + scaleIn: + onlineInstancesToOffline: + - 'milvus-cluster-querynode-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 查看下线操作执行状态: + + ```bash + kubectl get ops milvus-cluster-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +milvus-cluster-decommission-ops HorizontalScaling milvus-cluster Running 0/1 8s +milvus-cluster-decommission-ops HorizontalScaling milvus-cluster Running 1/1 31s +milvus-cluster-decommission-ops HorizontalScaling milvus-cluster Succeed 1/1 31s + ``` + + + + + + 方法二:使用 Cluster API + + 也可以直接更新 Cluster 资源来下线 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: milvus + replicas: 2 # 下线后期望的副本数 + offlineInstances: + - milvus-cluster-querynode-1 # <----- 指定需要下线的 Pod + ... 
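    # 说明:offlineInstances 中的名称必须是该组件当前存在的 Pod 名,
    # 且 replicas 应同步减去下线实例数(本例由 3 减为 2),保持期望状态一致。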
+ ``` + + + + +### 验证下线结果 + +应用更新配置后,验证集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=milvus-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +milvus-cluster-querynode-0 2/2 Running 0 25m +milvus-cluster-querynode-2 2/2 Running 0 24m +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向 Pod 下线 +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保持可用性的同时,提供了精细化的集群管理能力。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-milvus/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..6d33656c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,227 @@ +--- +description: 了解如何在KubeBlocks中通过Prometheus Operator为Milvus集群配置可观测性。设置监控并通过Grafana实现指标可视化。 +keywords: +- KubeBlocks +- Milvus +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: Milvus 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 Milvus 集群的可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 Milvus + +本指南演示如何在 KubeBlocks 中为 Milvus 集群配置全面的监控方案,包括: + +1. 使用 Prometheus Operator 进行指标采集 +2. 通过内置的 Milvus Exporter 暴露指标 +3. 使用 Grafana 实现可视化展示 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 安装监控组件栈 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 Milvus 集群 + +请参考[使用 KubeBlocks 部署 Milvus 集群](../03-topologies/02-cluster)完成部署。 + +## 配置指标采集 + +### 1. 验证 Exporter 端点 + +```bash +kubectl -n demo exec -it pods/milvus-cluster-proxy-0 -- \ + curl -s http://127.0.0.1:9091/metrics | head -n 50 +``` + +需要对所有 Milvus 副本执行验证,包括: +- milvus-cluster-datanode +- milvus-cluster-indexnode +- milvus-cluster-mixcoord +- milvus-cluster-proxy +- milvus-cluster-querynode + +### 2. 
创建 PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: milvus-cluster-pod-monitor + namespace: demo + labels: # 必须与 'prometheus.spec.podMonitorSelector' 中的设置匹配 + release: prometheus +spec: + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + relabelings: + - targetLabel: app_kubernetes_io_name + replacement: milvus + namespaceSelector: + matchNames: + - demo # 目标命名空间 + selector: + matchLabels: + app.kubernetes.io/instance: milvus-cluster +``` +**PodMonitor 配置指南** + +| 参数 | 必填 | 说明 | +|-----------|----------|-------------| +| `port` | 是 | 必须与 exporter 端口名称('http-metrics')匹配 | +| `namespaceSelector` | 是 | 指定 Milvus 运行的命名空间 | +| `labels` | 是 | 必须与 Prometheus 的 podMonitorSelector 匹配 | +| `path` | 否 | 指标端点路径(默认: /metrics) | +| `interval` | 否 | 采集间隔(默认: 30s) | + +该配置创建了一个 `PodMonitor` 来监控 Milvus 集群,并从 Milvus 组件采集指标。 + +```yaml + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + relabelings: + - targetLabel: app_kubernetes_io_name + replacement: milvus # 为目标添加标签: app_kubernetes_io_name=milvus +``` + +## 验证监控配置 + +### 1. 检查 Prometheus 目标 +端口转发并访问 Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +浏览器访问: +http://localhost:9090/targets + +检查是否存在与 PodMonitor 对应的采集任务(任务名应为'demo/milvus-cluster-pod-monitor')。 + +预期状态: +- 目标状态应为 UP +- 目标标签应包含 podTargetLabels 中定义的标签(如'app_kubernetes_io_instance') + +### 2. 测试指标采集 +验证指标是否被正确采集: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=milvus_num_node{app_kubernetes_io_name="milvus"}' | jq +``` + +示例输出: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "milvus_num_node", + "app_kubernetes_io_name": "milvus", + "container": "indexnode", + "endpoint": "metrics", + "instance": "10.244.0.149:9091", + "job": "demo/milvus-cluster-pod-monitor", + "namespace": "demo", + "node_id": "23", + "pod": "milvus-cluster-indexnode-0", + "role_name": "indexnode" + }, + "value": [ + 1747637044.313, + "1" + ] + }, + { + "metric": { + "__name__": "milvus_num_node", + "app_kubernetes_io_name": "milvus", + "container": "querynode", + "endpoint": "metrics", + "instance": "10.244.0.153:9091", + "job": "demo/milvus-cluster-pod-monitor", + "namespace": "demo", + "node_id": "27", + "pod": "milvus-cluster-querynode-1", + "role_name": "querynode" + }, + "value": [ + 1747637044.313, + "1" + ] + }, + ... // 更多输出省略 +``` +## Grafana 可视化 + +### 1. 访问 Grafana +端口转发并登录: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +浏览器访问 `http://localhost:3000`,使用默认凭据登录: +- 用户名: 'admin' +- 密码: 'prom-operator'(默认值) + +### 2. 导入仪表板 +导入 KubeBlocks Milvus 仪表板: + +1. 在 Grafana 中导航至 "+" → "Import" +2. 
从以下地址导入仪表板: [Milvus Dashboard](https://raw.githubusercontent.com/milvus-io/milvus/refs/heads/master/deployments/monitor/grafana/milvus-dashboard.json) + 更多详情请参考 [Milvus 官网](https://milvus.io/) + +![milvus-monitoring-grafana-dashboard.png](/img/docs/en/milvus-monitoring-grafana-dashboard.png) + +## 清理资源 +执行以下命令删除所有创建的资源: +```bash +kubectl delete cluster milvus-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor milvus-cluster-pod-monitor -n demo +``` + +## 总结 +本教程演示了如何在 KubeBlocks 中使用 Prometheus Operator 为 Milvus 集群建立可观测性方案。通过配置 `PodMonitor`,我们实现了 Prometheus 对 Milvus Exporter 指标的采集,最终在 Grafana 中实现了指标可视化。这套方案为监控 Milvus 数据库的健康状态和性能表现提供了有力支持。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-milvus/08-monitoring/_category_.yml new file mode 100644 index 00000000..02550e32 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 监控 +position: 8 diff --git a/docs/zh/preview/kubeblocks-for-milvus/_category_.yml b/docs/zh/preview/kubeblocks-for-milvus/_category_.yml new file mode 100644 index 00000000..f71b2637 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for Milvus 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-milvus/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_category_.yml new file mode 100644 index 00000000..82d8374c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-milvus/_tpl/_create-cluster.mdx b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..3220111e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_create-cluster.mdx @@ -0,0 +1,42 @@ +KubeBlocks 采用声明式方法来管理 Milvus 集群。 +以下是一个部署包含 3 个副本的 Milvus 集群的配置示例。 + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: milvus-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: milvus + topology: clustermode + componentSpecs: + - name: milvus + serviceVersion: 3.13.7 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +注: +1. 配置中 `terminationPolicy: Delete` 表示删除集群时会同时清理相关资源 +2. `topology: clustermode` 指定以集群模式部署 Milvus +3. 每个 Pod 资源限制为 0.5 核 CPU 和 0.5GiB 内存 +4. 
为每个节点配置了 20Gi 的持久卷存储,使用 `ReadWriteOnce` 访问模式 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..be7f1c98 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +在继续操作之前,请确保满足以下条件: +- 环境准备: + - 已有一个正常运行中的 Kubernetes 集群。 + - 已配置 kubectl CLI 工具与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。具体安装指引请参考链接内容。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..d9cc2934 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-milvus/_tpl/_verify-cluster.mdx @@ -0,0 +1,48 @@ +监控集群状态直至其转为运行中状态: +```bash +kubectl get cluster milvus-cluster -n demo -w +``` + +预期输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +milvus-standalone milvus Delete Creating 40s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Creating 71s +milvus-standalone milvus Delete Updating 71s +milvus-standalone milvus Delete Running 2m55s +``` + +检查组件和Pod状态: + +```bash +kubectl get component -n demo -l app.kubernetes.io/instance=milvus-standalone +``` +预期输出: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +milvus-standalone-etcd etcd-3-1.0.0 3.5.15 Running 3m5s +milvus-standalone-milvus milvus-standalone-1.0.0 v2.3.2 Running 114s +milvus-standalone-minio milvus-minio-1.0.0 8.0.17 Running 3m5s +``` + + +```bash +kubectl get pods -l app.kubernetes.io/instance=milvus-standalone -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +milvus-standalone-etcd-0 2/2 Running 0 4m31s +milvus-standalone-milvus-0 1/1 Running 0 3m20s +milvus-standalone-minio-0 1/1 Running 0 4m31s +``` + +当集群状态显示为Running时,您的Milvus集群即可投入使用。 + +:::tip +如果是首次创建集群,可能需要花费一些时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/01-overview.mdx b/docs/zh/preview/kubeblocks-for-mongodb/01-overview.mdx new file mode 100644 index 00000000..c8964c39 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/01-overview.mdx @@ -0,0 +1,78 @@ +--- +description: 了解KubeBlocks MongoDB插件的功能特性,包括部署拓扑结构、生命周期管理、备份恢复机制以及支持的版本信息。 +keywords: +- MongoDB +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks MongoDB 插件概述 +--- +# KubeBlocks MongoDB 插件概述 + +MongoDB 是一种 NoSQL 文档数据库,专为灵活性、可扩展性和高性能而设计。与传统关系型数据库(SQL)不同,MongoDB 以类 JSON 文档(BSON 格式)存储数据,非常适合处理非结构化或半结构化数据。 + +### 支持的拓扑结构 + +**副本集(replicaset)** + +MongoDB 副本集是一组维护相同数据集的 MongoDB 服务器,提供高可用性和数据冗余。副本集是 MongoDB 容错和数据可靠性的基础。通过在多个节点间复制数据,MongoDB 确保当一台服务器发生故障时,另一台可以无缝接管,而不会影响应用程序的可用性。 + +在副本集中,通常有三种类型的节点: + +- **主节点(Primary Node)**:处理所有写操作,默认情况下也处理读请求 +- **从节点(Secondary Nodes)**:维护主节点数据的副本,可选择性地处理读请求 +- **仲裁节点(Arbiter Node)**:参与选举但不存储数据,用于保持副本集中投票成员数量为奇数 + +建议创建至少包含**三个**节点的集群以确保高可用性:一个主节点和两个从节点。 + +### 生命周期管理 + +KubeBlocks 通过全面的生命周期管理简化 MongoDB 操作: + +| 功能 | 描述 | +|------------------------------|-----------------------------------------------------------------------------| +| **水平扩展** | 增减副本数量以调整容量 | +| **垂直扩展** | 调整 MongoDB 实例的 CPU/内存资源 | +| **存储卷扩容** | 动态增加存储容量而无需停机 | +| **重启操作** | 以最小影响进行受控集群重启 | +| **启动/停止** | 
临时暂停/恢复集群操作 | +| **密码管理** | 可在创建时为 MongoDB 集群设置和管理自定义 root 密码 | +| **自定义服务** | 暴露专门的数据库端点 | +| **主从切换** | 计划内的主从角色变更 | +| **副本管理** | 安全地停用或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 放置和资源分配 | +| **监控** | 集成 Prometheus 指标收集 | +| **日志** | 通过 Loki Stack 实现集中式日志记录 | + +### 备份与恢复 + +KubeBlocks 支持多种 MongoDB 备份策略: + +| 功能 | 方法 | 描述 | +|-------------|--------|------------| +| 全量备份 | dump | 使用 MongoDB 工具 `mongodump` 创建数据库内容的二进制导出 | +| 全量备份 | datafile | 备份数据库的数据文件 | +| 持续备份 | archive-oplog | 使用 `wal-g` 持续归档 MongoDB oplog | + +### 支持的版本 + +KubeBlocks MongoDB 插件支持以下 MongoDB 版本: + +| 主版本 | 支持的次版本 | +|---------------|--------------------------------| +| 4.0 | 4.0.28,4.2.24,4.4.29 | +| 5.0 | 5.0.28 | +| 6.0 | 6.0.22,6.0.20,6.0.16 | +| 7.0 | 7.0.19,7.0.16,7.0.12 | +| 8.0 | 8.0.8,8.0.6,8.0.4| + +可通过以下命令查看支持的版本列表: +```bash +kubectl get cmpv mongodb +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-mongodb/02-quickstart.mdx new file mode 100644 index 00000000..012eba28 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/02-quickstart.mdx @@ -0,0 +1,538 @@ +--- +description: 使用KubeBlocks部署和管理MongoDB副本集集群的完整指南,涵盖安装、配置及运维最佳实践。 +keywords: +- Kubernetes +- MongoDB +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: MongoDB 快速入门 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# MongoDB 快速入门 + +本指南提供了使用 **KubeBlocks MongoDB 插件** 部署和管理 MongoDB 副本集集群的完整流程,内容包括: +- 系统前提条件与插件安装 +- 集群创建与配置 +- 运维管理(包括启动/停止流程) +- 连接方式与集群监控 + +## 前置条件 + +### 系统要求 + +开始前请确认您的环境满足以下要求: + +- 可用的 Kubernetes 集群(推荐 v1.21+ 版本) +- 已安装并配置好集群访问权限的 `kubectl` v1.21+ +- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/)) +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +### 验证 MongoDB 插件 + +MongoDB 插件默认随 KubeBlocks 安装。检查其状态: + +```bash +helm list -n kb-system | grep mongodb +``` + +
示例输出:

```bash
NAME               NAMESPACE   REVISION   UPDATED      STATUS     CHART
kb-addon-mongodb   kb-system   1          2025-05-21   deployed   mongodb-1.0.0
```
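除了查询 Helm 发布记录,也可以直接查看 KubeBlocks 的 Addon 自定义资源来确认插件状态(示例命令;假设插件按默认方式注册在 `extensions.kubeblocks.io` 资源组下):

```bash
# 查看 mongodb 插件对应的 Addon CR 及其状态
kubectl get addons.extensions.kubeblocks.io mongodb
```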

如果插件未启用,请选择安装方式:



  ```bash
  # 添加 Helm 仓库
  helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts
  # 中国大陆用户若 GitHub 访问困难或缓慢,可使用以下镜像仓库:
  #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable

  # 更新 Helm 仓库
  helm repo update
  # 搜索可用插件版本
  helm search repo kubeblocks-addons/mongodb --versions
  # 安装指定版本(将 <版本号> 替换为目标版本号)
  helm upgrade -i kb-addon-mongodb kubeblocks-addons/mongodb --version <版本号> -n kb-system
  ```



  ```bash
  # 添加索引(kubeblocks 索引默认已添加)
  kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git
  # 更新索引
  kbcli addon index update kubeblocks
  # 更新所有索引
  kbcli addon index update --all
  ```

  搜索并安装插件:

  ```bash
  # 搜索插件
  kbcli addon search mongodb
  # 安装指定版本插件(将 <版本号> 替换为目标版本号)
  kbcli addon install mongodb --version <版本号>
  ```
  **示例输出(`kbcli addon search mongodb`):**
  ```bash
  ADDON     VERSION   INDEX
  mongodb   0.9.0     kubeblocks
  mongodb   0.9.1     kubeblocks
  mongodb   1.0.0     kubeblocks
  ```
  启用/禁用插件:

  ```bash
  # 启用插件
  kbcli addon enable mongodb
  # 禁用插件
  kbcli addon disable mongodb
  ```



:::note
**版本兼容性说明**

请始终确保 MongoDB 插件版本与 KubeBlocks 主版本相匹配,以避免兼容性问题。

:::

### 验证支持的 MongoDB 版本

**列出可用 MongoDB 版本:**

```bash
kubectl get cmpv mongodb
```
示例输出
```text
NAME      VERSIONS                                                                                           STATUS      AGE
mongodb   8.0.8,8.0.6,8.0.4,7.0.19,7.0.16,7.0.12,6.0.22,6.0.20,6.0.16,5.0.30,5.0.28,4.4.29,4.2.24,4.0.28   Available   26d
```
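如需在脚本中进一步处理这些版本号,可以用 jsonpath 将其逐行输出(示意写法,假设当前插件版本的 `ComponentVersion` 在 `spec.releases[].serviceVersion` 字段中记录各版本):

```bash
# 逐行列出 mongodb ComponentVersion 声明的服务版本,并按版本号排序去重
kubectl get cmpv mongodb -o jsonpath='{range .spec.releases[*]}{.serviceVersion}{"\n"}{end}' | sort -Vu
```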

**检查 ComponentDefinitions 的版本兼容性**

**步骤 1.** 获取与指定 `ComponentVersion` 关联的 `ComponentDefinition` 列表

```bash
kubectl get cmpv mongodb -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n'
```
示例输出
```text
mongodb-1.0.0
```
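得到 `ComponentDefinition` 名称后,可以进一步查看其定义详情(示例;`cmpd` 是 ComponentDefinition 的资源缩写,若该缩写在你的环境中不可用,可改用完整资源名 `componentdefinition`):

```bash
# 查看该 ComponentDefinition 的完整定义
kubectl get cmpd mongodb-1.0.0 -o yaml | less
```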

**步骤 2.** 获取与指定 `ComponentDefinition` 兼容的服务版本列表

```bash
kubectl get cmpv mongodb -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("mongodb"))) | .releases[]'
```

此命令返回与名称以 `mongodb` 开头的 `ComponentDefinition` 兼容的版本:
示例输出
```text
8.0.8
8.0.6
8.0.4
7.0.19
7.0.16
7.0.12
6.0.22
6.0.20
6.0.16
5.0.30
5.0.28
4.4.29
4.2.24
4.0.28
```
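若只想确认某个具体版本是否受支持,可以在上述命令的基础上直接过滤(示例,检查 6.0.16;命令无输出即表示不受支持):

```bash
kubectl get cmpv mongodb -o json \
  | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("mongodb"))) | .releases[]' \
  | grep -x '6.0.16'
```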

### 存储配置

MongoDB 需要持久化存储。验证可用选项:

```bash
kubectl get storageclass
```

推荐存储特性:
- 最小 20Gi 容量
- ReadWriteOnce 访问模式
- 支持存储卷扩容
- 满足工作负载的性能需求

## 部署 MongoDB 副本集集群

使用默认配置部署基础 MongoDB ReplicaSet 集群:

```bash
kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mongodb/cluster.yaml
```

该操作将创建:
- 一个包含 3 个副本(1 个主节点 + 2 个从节点)的 MongoDB ReplicaSet 集群
- 默认资源分配(0.5 CPU,0.5Gi 内存)
- 20Gi 持久化存储
- 自动化的主从节点配置

```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: mongo-cluster
  namespace: demo
spec:
  # 指定删除集群时的行为策略
  # 有效选项:[DoNotTerminate, Delete, WipeOut](KB 0.9 起弃用 `Halt`)
  # - `DoNotTerminate`:阻止集群删除,确保所有资源保持完整
  # - `Delete`:在 `Halt` 策略基础上增加删除 PVC 操作,实现包含持久化数据的彻底清理
  # - `WipeOut`:激进策略,将删除包括外部存储中的卷快照和备份在内的所有集群资源,导致数据完全清除。应谨慎使用,主要适用于非生产环境以避免不可逆数据丢失
  terminationPolicy: Delete
  # 指定创建集群时使用的 ClusterDefinition 名称
  # 注意:请勿修改此字段
  # 必须设置为 `mongodb` 才能创建 MongoDB 集群
  clusterDef: mongodb
  # 指定创建集群时使用的 ClusterTopology 名称
  # 有效选项:[replicaset]
  topology: replicaset
  # 定义组成集群的各个组件的 ClusterComponentSpec 对象列表
  # 该字段允许对集群内每个组件进行详细配置
  componentSpecs:
    - name: mongodb
      # serviceVersion 指定该组件期望部署的服务版本
      # 有效选项:[8.0.8,8.0.6,8.0.4,7.0.19,7.0.16,7.0.12,6.0.22,6.0.20,6.0.16,5.0.30,5.0.28,4.4.29,4.2.24,4.0.28]
      serviceVersion: "6.0.16"
      # 指定该组件的期望副本数
      replicas: 3
      # 指定该组件所需的计算资源
      resources:
        limits:
          cpu: '0.5'
          memory: 0.5Gi
        requests:
          cpu: '0.5'
          memory: 0.5Gi
      # 定义组件存储需求的 PersistentVolumeClaim 模板列表
      volumeClaimTemplates:
        # 引用 componentDefinition.spec.runtime.containers[*].volumeMounts 中定义的 volumeMount 名称
        - name: data
          spec:
            # 声明所需的 StorageClass 名称
            # 若未指定,默认使用标注了 `storageclass.kubernetes.io/is-default-class=true` 的 StorageClass
            storageClassName: ""
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                # 按需设置存储容量
                storage: 20Gi
```

更多 API 字段说明请参阅 [API 参考文档](../user_docs/references/api-reference/cluster)。

### 创建指定版本的 MongoDB ReplicaSet 集群

在应用配置前,通过设置 `spec.componentSpecs.serviceVersion`(主版本.次版本)字段可创建特定版本的集群:



  ```yaml
  componentSpecs:
    - name: mongodb
      serviceVersion: 4.0.28 # 有效选项:[4.0.28,4.2.24,4.4.29]
  ```


  ```yaml
  componentSpecs:
    - name: mongodb
      serviceVersion: 5.0.28 # 有效选项:[5.0.28]
  ```


  ```yaml
  componentSpecs:
    - name: mongodb
      serviceVersion: 6.0.22 # 有效选项:[6.0.22,6.0.20,6.0.16]
  ```


  ```yaml
  componentSpecs:
    - name: mongodb
      serviceVersion: 7.0.19 # 有效选项:[7.0.19,7.0.16,7.0.12]
  ```


  ```yaml
  componentSpecs:
    - name: mongodb
      serviceVersion: 8.0.8 # 有效选项:[8.0.8,8.0.6,8.0.4]
  ```



## 验证集群状态

部署完成的 MongoDB 副本集集群包含 1 个主副本和 2 个从副本。通过以下检查确认部署成功:
1. 集群阶段为`Running`(运行中)
2. 所有Pod均正常运行
3. 
副本角色配置正确 + +可通过以下任一方式检查状态: + + + +```bash +kubectl get cluster mongo-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mongo-cluster mongodb Delete Creating 49s +mongo-cluster mongodb Delete Running 62s + +kubectl get pods -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role -n demo +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 78s primary +mongo-cluster-mongodb-1 2/2 Running 0 63s secondary +mongo-cluster-mongodb-2 2/2 Running 0 48s secondary +``` + + + + + 安装`kbcli`后,可查看完整的集群信息: + +```bash +kbcli cluster describe mongo-cluster -n demo + +名称: mongo-cluster 创建时间: 2025年5月18日 21:16 UTC+0800 +命名空间 集群定义 拓扑结构 状态 终止策略 +demo mongodb replicaset 运行中 Delete + +访问端点: +组件 内部地址 外部地址 +mongodb mongo-cluster-mongodb.demo.svc.cluster.local:27017 <无> + mongo-cluster-mongodb-mongodb.demo.svc.cluster.local:27017 + mongo-cluster-mongodb-mongodb-ro.demo.svc.cluster.local:27017 + +拓扑结构: +组件 服务版本 实例名称 角色 状态 可用区 节点 创建时间 +mongodb 6.0.16 mongo-cluster-mongodb-0 主节点 运行中 zone-x x.y.z 2025年5月18日 21:16 UTC+0800 +mongodb 6.0.16 mongo-cluster-mongodb-1 从节点 运行中 zone-x x.y.z 2025年5月18日 21:16 UTC+0800 +mongodb 6.0.16 mongo-cluster-mongodb-2 从节点 运行中 zone-x x.y.z 2025年5月18日 21:17 UTC+0800 + +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +mongodb 500m / 500m 512Mi / 512Mi data:20Gi <无> + +镜像信息: +组件 组件定义 镜像 +mongodb mongodb-1.0.0 docker.io/library/mongo:6.0.16 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 + +数据保护: +备份仓库 自动备份 备份计划 备份方法 备份保留期 可恢复时间 + +查看集群事件: kbcli cluster list-events -n demo mongo-cluster +``` + + + + +## 访问 MongoDB 副本集集群 + +KubeBlocks 会自动配置以下资源: +1. 凭证信息存储在 Secret `mongo-cluster-mongodb-account-root` 中 +2. 用于读写(主节点)的 ClusterIP 服务 `mongo-cluster-mongodb-mongodb` +3. 用于只读(从节点)的 ClusterIP 服务 `mongo-cluster-mongodb-mongodb-ro` + +### 获取凭证 +```bash +# 获取用户名 +NAME=$(kubectl get secret -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.username}' | base64 --decode) +# 获取密码 +PASSWD=$(kubectl get secret -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.password}' | base64 --decode) +``` + +### 连接方式 + + + + + 直接连接到 Pod: + ```bash + kubectl exec -ti -n demo mongo-cluster-mongodb-0 -- \ + mongosh "mongodb://${NAME}:${PASSWD}@mongo-cluster-mongodb-mongodb:27017/admin" + ``` + + + + + 1. 转发服务端口: + + ```bash + kubectl port-forward svc/mongo-cluster-mongodb-mongodb 27017:27017 -n demo + ``` + + 2. 
通过本地地址连接: + ```bash + mongosh "mongodb://${NAME}:${PASSWD}@127.0.0.1:27017/admin" + ``` + + + +:::note +**生产环境注意事项** + +在生产环境中,应避免使用 `kubectl exec` 和 `port-forward`,建议采用: +- 通过 LoadBalancer 或 NodePort 服务实现外部访问 +- 配置网络策略限制访问权限 +- 启用 TLS 加密确保连接安全 +- 使用连接池提升性能 +::: + +## 停止 MongoDB 副本集集群 + +停止集群会暂时暂停运行,同时保留所有数据和配置: + +**关键影响:** +- 计算资源(Pod)将被释放 +- 持久化存储(PVC)保持完整 +- 服务定义得以保留 +- 集群配置不会丢失 +- 运营成本降低 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mongodb/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-stop + namespace: demo + spec: + clusterName: mongo-cluster + type: Stop + ``` + + + + 也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群: + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + ```yaml + spec: + componentSpecs: + - name: mongodb + stop: true # 设置为停止组件 + replicas: 2 + ``` + + + +## 启动 MongoDB 副本集集群 + +重启已停止的集群可恢复运行,所有数据和配置将保持完整。 + +**关键影响:** +- 计算资源(Pod)会被重新创建 +- 服务将再次可用 +- 集群恢复到之前的状态 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mongodb/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-start + namespace: demo + spec: + clusterName: mongo-cluster + type: Start + ``` + + + + 通过将 `spec.componentSpecs.stop` 设置为 false 来重启集群: + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "remove", + "path": "/spec/componentSpecs/0/stop" +} +]' +``` + + + + +## 删除 MongoDB ReplicaSet 集群 + +请根据数据保留需求谨慎选择删除策略: + +| 策略 | 删除的资源范围 | 数据清除情况 | 适用场景 | +|-----------------|---------------------|-------------------|-----------------------| +| DoNotTerminate | 不删除任何资源 | 保留所有数据 | 关键生产环境集群 | +| Delete | 删除所有Kubernetes资源 | PVC存储卷会被删除 | 非关键环境 | +| WipeOut | 删除所有资源 | 彻底清除所有数据* | 仅限测试环境使用 | + +*包含外部存储中的快照和备份数据 + +**删除前检查清单:** +1. 确认没有应用正在使用该集群 +2. 确保已存在必要的备份 +3. 验证terminationPolicy设置正确 +4. 
检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster mongo-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster mongo-cluster -n demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..a92db83e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,288 @@ +--- +description: 了解如何在KubeBlocks中管理MongoDB副本集集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- MongoDB +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: MongoDB 副本集集群生命周期管理(停止、启动、重启) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# MongoDB ReplicaSet 集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 MongoDB ReplicaSet 集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运营成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|------------------------------| +| 停止 | 暂停集群,保留存储 | 成本节约、维护 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重建组件 Pod | 配置变更、故障排查 | + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 MongoDB ReplicaSet 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 MongoDB ReplicaSet 集群将: + +1. 终止所有运行中的 Pod +2. 保留持久化存储(PVC) +3. 保持集群配置不变 + +此操作适用于: +- 临时节省成本 +- 维护窗口期 +- 开发环境暂停 + + + + + +选项 1:OpsRequest API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-stop-ops + namespace: demo +spec: + clusterName: mongo-cluster + type: Stop +``` + + + + +选项 2:使用 Cluster API Patch + +通过修改 stop 字段直接调整集群规格: + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + + + + +### 验证集群停止 + +确认停止操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster mongodb Delete Stopping 6m3s + mongo-cluster mongodb Delete Stopped 6m55s + ``` + +2. 验证无运行中的 Pod: + ```bash + kubectl get pods -l app.kubernetes.io/instance=mongo-cluster -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +3. 确认持久卷仍然存在: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=mongo-cluster -n demo + ``` + 示例输出: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-mongo-cluster-mongodb-0 Bound pvc-uuid 20Gi RWO 22m + data-mongo-cluster-mongodb-1 Bound pvc-uuid 20Gi RWO 21m + data-mongo-cluster-mongodb-2 Bound pvc-uuid 20Gi RWO 21m + ``` + +### 启动集群 + +启动已停止的 MongoDB ReplicaSet 集群: +1. 重新创建所有 Pod +2. 重新挂载持久化存储 +3. 恢复服务端点 + +预期行为: +- 集群恢复到之前状态 +- 不会发生数据丢失 +- 服务自动恢复 + + + + + +发起启动操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-start-ops + namespace: demo +spec: + # 指定此操作目标集群资源的名称 + clusterName: mongo-cluster + type: Start +``` + + + + + +修改集群规格以恢复运行: +1. 设置 stop: false,或 +2. 
完全移除 stop 字段 + +```bash +kubectl patch cluster mongo-cluster -n demo --type='json' -p='[ +{ + "op": "remove", + "path": "/spec/componentSpecs/0/stop" +} +]' +``` + + + + + +### 验证集群启动 + +确认启动操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster mongodb Delete Updating 24m + mongo-cluster mongodb Delete Running 24m + mongo-cluster mongodb Delete Running 24m + ``` + +2. 验证 Pod 重建: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role + ``` + 示例输出: + ```bash + NAME READY STATUS RESTARTS AGE ROLE + mongo-cluster-mongodb-0 2/2 Running 0 55s primary + mongo-cluster-mongodb-1 2/2 Running 0 44s secondary + mongo-cluster-mongodb-2 2/2 Running 0 33s secondary + ``` + +### 重启集群 + +重启操作提供: +- 无需完全停止集群即可重建 Pod +- 组件级粒度控制 +- 最小化服务中断 + +适用场景: +- 需要重启的配置变更 +- 资源刷新 +- 故障排查 + +**使用 OpsRequest API** + +针对特定组件 `mongodb` 进行重启: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-restart-ops + namespace: demo +spec: + clusterName: mongo-cluster + type: Restart + restart: + - componentName: mongodb +``` + +**验证重启完成** + +确认组件重启成功: + +1. 跟踪 OpsRequest 进度: + ```bash + kubectl get opsrequest mongo-cluster-restart-ops -n demo -w + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-restart-ops Restart mongo-cluster Running 0/3 4s + mongo-cluster-restart-ops Restart mongo-cluster Running 1/3 28s + mongo-cluster-restart-ops Restart mongo-cluster Running 2/3 56s + mongo-cluster-restart-ops Restart mongo-cluster Running 2/3 109s + ``` + +2. 检查 Pod 状态: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster + ``` + 注意:重启后 Pod 将显示新的创建时间戳 + +3. 验证组件健康状态: + ```bash + kbcli cluster describe mongo-cluster -n demo + ``` + +操作完成后,集群将返回 Running 状态。 + +## 总结 +在本指南中,您学习了如何: +1. 停止 MongoDB ReplicaSet 集群以暂停运行同时保留持久化存储 +2. 启动已停止的集群使其重新上线 +3. 重启特定集群组件以重建其 Pod 而无需停止整个集群 + +通过管理 MongoDB ReplicaSet 集群的生命周期,您可以优化资源利用率、降低成本并在 Kubernetes 环境中保持灵活性。KubeBlocks 提供了无缝执行这些操作的方式,确保高可用性和最小化中断。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..006ffef1 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,177 @@ +--- +description: 了解如何在KubeBlocks管理的MongoDB副本集集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- MongoDB +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: MongoDB副本集集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现MongoDB副本集集群的垂直扩缩容 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的MongoDB副本集集群进行垂直扩缩容,同时保持副本数量不变。 + +垂直扩缩容会修改MongoDB实例的计算资源(CPU和内存)但维持副本数不变。主要特点: + +- **无中断性**:正确配置时可保持扩缩容期间的可用性 +- **精细化**:可独立调整CPU或内存资源 +- **可逆性**:支持按需进行扩容或缩容 + +KubeBlocks以最小影响协调扩缩容过程: +1. 优先更新从节点副本 +2. 待从节点健康后再更新主节点 +3. 集群状态从`更新中`过渡到`运行中` + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署MongoDB副本集集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 垂直扩缩容 + +**预期工作流程**: + +1. 首先逐个更新从节点副本 +2. 从节点健康后再更新主节点 +3. 
集群状态从`更新中`转变为`运行中` + + + + 选项一:使用VerticalScaling操作请求 + + 应用以下YAML为mongodb组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-vscale-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: VerticalScaling + verticalScaling: + - componentName: mongodb + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + 垂直扩缩容期间会发生什么? + - 首先重建从节点Pod以确保主节点Pod持续可用 + - 所有从节点Pod更新完成后,主节点Pod将以新资源配置重启 + + + 可通过以下命令查看扩缩容进度: + + ```bash + kubectl -n demo get ops mongo-cluster-vscale-ops -w + ``` + + 预期输出: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 0/3 32s + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 1/3 55s + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 2/3 82s + mongo-cluster-vscale-ops VerticalScaling mongo-cluster Running 3/3 2m13s + ``` + + + + + + 选项二:直接更新集群API + + 也可通过更新`spec.componentSpecs.resources`字段实现垂直扩缩容。 + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: mongodb + replicas: 3 + resources: + requests: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + limits: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + ... + ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 选择维护窗口或低流量时段执行扩缩容 +- 确认Kubernetes集群有充足资源 +- 操作前检查是否有其他进行中的运维任务 + +**执行阶段:** +- 保持CPU与内存的合理配比 +- 设置相同的requests/limits以保证服务质量(QoS) + +**后续监控:** +- 观察资源使用率和应用性能表现 +- 必要时调整MongoDB参数配置 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe mongo-cluster -n demo +``` + +预期输出: +```bash +资源分配情况: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +mongodb 1 / 1 1Gi / 1Gi data:20Gi +``` + +## KubeBlocks垂直扩缩容的核心优势 +- 无缝扩缩容:按特定顺序重建Pod确保最小影响 +- 动态资源调整:根据工作负载灵活调整CPU和内存 +- 操作灵活:可选择动态扩缩容OpsRequest或直接API更新 +- 高可用保障:扩缩容过程中集群持续可用 + +## 清理资源 +删除MongoDB副本集集群及相关命名空间: +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +通过本指南您学会了: +1. 部署KubeBlocks管理的MongoDB副本集集群 +2. 通过增减mongodb组件资源实现垂直扩缩容 +3. 使用OpsRequest和直接Cluster API两种方式调整资源配置 + +垂直扩缩容是优化资源利用率和适应工作负载变化的有效手段,可确保MongoDB副本集集群始终保持高性能与高弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..069f889e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,276 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的MongoDB集群执行水平扩缩容(扩容与缩容)。 +keywords: +- KubeBlocks +- MongoDB +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks实现MongoDB集群的水平扩展 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现MongoDB集群水平扩缩容 + +本指南介绍如何对KubeBlocks管理的MongoDB集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用**OpsRequest**和直接修改**Cluster API**两种方式实现这一目标。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署MongoDB副本集集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## 扩容(增加副本) + +**预期工作流程**: + +1. 新Pod被创建,状态从`Pending`转为`Running`,角色为`secondary` +2. 数据从主节点同步到新副本 +3. 
集群状态从`Updating`变为`Running` + + + + + + 选项1:使用水平扩容OpsRequest + + 通过为mongodb组件增加1个副本来扩容MongoDB集群: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-scale-out-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: mongodb + # 指定组件扩容的副本变更 + scaleOut: + # 指定组件的副本变更数量 + # 当前组件增加1个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops mongo-cluster-scale-out-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-scale-out-ops HorizontalScaling mongo-cluster Running 0/1 9s + mongo-cluster-scale-out-ops HorizontalScaling mongo-cluster Running 1/1 20s + mongo-cluster-scale-out-ops HorizontalScaling mongo-cluster Succeed 1/1 20s + ``` + + + + + 选项2:直接更新Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: mongodb + replicas: 4 # 增加副本数实现扩容 + ... + ``` + + 或者使用命令修补集群CR: + + ```bash + kubectl patch cluster mongo-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### 验证扩容结果 + +操作完成后,您将看到新Pod被创建,MongoDB集群状态从`Updating`变为`Running`,新建Pod的角色为`secondary`。 + +新副本会自动作为从节点加入集群。 +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 6m24s primary +mongo-cluster-mongodb-1 2/2 Running 0 7m19s secondary +mongo-cluster-mongodb-2 2/2 Running 0 5m57s secondary +mongo-cluster-mongodb-3 2/2 Running 0 3m54s secondary +``` + +验证MongoDB内部状态: + +1. 登录任意MongoDB副本 +```bash +kubectl exec -it -n demo mongo-cluster-mongodb-0 -- /bin/bash +mongosh "mongodb://${MONGODB_ROOT_USER}:${MONGODB_ROOT_PASSWORD}@127.0.0.1:27017/admin" +``` +2. 检查MongoDB的`rs.status()` +```bash +# 登录MongoDB并查询 +mongo-cluster-mongodb [direct: secondary] admin> rs.status() +``` + +## 缩容(减少副本) + +**预期工作流程**: + +1. 移除序号最大的副本 +2. 如果移除的是主副本,会先触发自动故障转移 +3. Pod被优雅终止 +4. 
集群状态从`Updating`变为`Running`

:::note
如果被缩容的副本恰好是主节点,KubeBlocks会触发故障转移操作。在故障转移成功前,该Pod不会被终止。
:::




  选项1:使用水平缩容OpsRequest

  通过减少1个副本来缩容MongoDB集群:

  ```yaml
  apiVersion: operations.kubeblocks.io/v1alpha1
  kind: OpsRequest
  metadata:
    name: mongo-cluster-scale-in-ops
    namespace: demo
  spec:
    clusterName: mongo-cluster
    type: HorizontalScaling
    horizontalScaling:
    - componentName: mongodb
      # 指定组件缩容的副本变更
      scaleIn:
        # 指定组件的副本变更数量
        # 当前组件减少1个副本
        replicaChanges: 1
  ```

  监控进度:
  ```bash
  kubectl get ops mongo-cluster-scale-in-ops -n demo -w
  ```

  预期结果:
  ```bash
  NAME                          TYPE                CLUSTER         STATUS    PROGRESS   AGE
  mongo-cluster-scale-in-ops    HorizontalScaling   mongo-cluster   Running   0/1        8s
  mongo-cluster-scale-in-ops    HorizontalScaling   mongo-cluster   Running   1/1        24s
  mongo-cluster-scale-in-ops    HorizontalScaling   mongo-cluster   Succeed   1/1        24s
  ```



  选项2:直接更新Cluster API

  您也可以直接修改Cluster资源中的`replicas`字段:

  ```yaml
  apiVersion: apps.kubeblocks.io/v1
  kind: Cluster
  spec:
    componentSpecs:
      - name: mongodb
        replicas: 1 # 减少副本数实现缩容
  ```

  或者使用命令修补集群CR:

  ```bash
  kubectl patch cluster mongo-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]'
  ```




### 验证缩容结果

示例输出(保留1个Pod):
```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster,apps.kubeblocks.io/component-name=mongodb
NAME                      READY   STATUS    RESTARTS   AGE
mongo-cluster-mongodb-0   2/2     Running   0          18m
```

## 故障排查
如果缩容操作长时间卡住,请检查以下资源:

```bash
# 检查当前主节点和候选主节点的 agent 日志(将 <...> 占位符替换为实际 Pod 名)
kubectl logs -n demo <主节点-pod> -c kbagent
kubectl logs -n demo <候选主节点-pod> -c kbagent

# 检查集群事件中的错误
kubectl get events -n demo --field-selector involvedObject.name=mongo-cluster

# 检查KubeBlocks日志
kubectl -n kb-system logs deploy/kubeblocks
```

如果从主副本收到如下错误:
```text
INFO Action Executed {"action": "switchover", "result": "exit code: 1: failed"}
INFO HTTP API Called {"user-agent": "Go-http-client/1.1", "method": "POST", "path": "/v1.0/action", "status code": 200, "cost": 7}
```

可能是故障转移出错,请检查KubeBlocks日志获取更多细节。

## 最佳实践

执行水平扩缩容时:
- 尽可能在低流量时段进行操作
- 扩缩容过程中监控集群健康状态
- 扩容前确保有足够的资源
- 考虑新副本的存储需求

## 清理资源
删除所有创建的资源,包括MongoDB集群及其命名空间:
```bash
kubectl delete cluster mongo-cluster -n demo
kubectl delete ns demo
```

## 总结
在本指南中您学会了如何:
- 执行扩容操作为MongoDB集群增加副本
- 执行缩容操作从MongoDB集群移除副本
- 使用OpsRequest和直接Cluster API更新两种方式进行水平扩缩容

KubeBlocks确保在最小化影响数据库操作的前提下实现无缝扩缩容。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..45f7af53
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,219 @@
---
description: 了解如何在KubeBlocks管理的MongoDB集群中无停机扩展持久卷声明(PVC)。
keywords:
- KubeBlocks
- MongoDB
- Volume Expansion
- Kubernetes
- PVC
sidebar_label: 存储卷扩容
sidebar_position: 4
title: MongoDB集群中扩展存储卷
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';


# 扩展 MongoDB 集群的存储卷

本指南介绍如何在 **KubeBlocks** 管理的 MongoDB 集群中扩展持久卷声明(PVC)。存储卷扩展功能允许动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持时,此操作可在不中断服务的情况下执行。

存储卷扩展允许您在创建持久卷声明(PVC)后增加其容量。该功能在 Kubernetes v1.11 中引入,并在 Kubernetes v1.24 版本正式发布(GA)。

## 前提条件

import Prerequisites from '../_tpl/_prerequisites.mdx'



### 检查存储类是否支持卷扩展

列出所有可用存储类,并通过检查 `ALLOWVOLUMEEXPANSION` 字段验证是否支持卷扩展:
```bash
kubectl get storageclass
```

示例输出:
```bash
NAME                PROVISIONER             RECLAIMPOLICY 
VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +请确保您使用的存储类将 `ALLOWVOLUMEEXPANSION` 设置为 true。若为 false,则表示该存储类不支持卷扩展。 + +## 使用支持扩展的存储类部署 MongoDB 副本集集群 + +KubeBlocks 采用声明式方式管理 MongoDB 集群。以下是一个部署包含 2 个副本(1 主节点 + 1 从节点)的 MongoDB 集群配置示例。 + +应用以下 YAML 配置部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩展的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `storageClassName`: 指定支持卷扩展的 `StorageClass` 名称。若未设置,将使用标记为 `default` 的 StorageClass。 + +:::note +**ALLOWVOLUMEEXPANSION** + +创建集群时,请确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION` 字段)。 + +::: + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 扩展存储卷 + +:::note +1. 确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`) +2. 新容量必须大于当前容量 +3. 根据存储提供商不同,卷扩展可能需要额外配置 +::: + +可通过以下两种方式扩展存储卷: + + + + + 方法一:使用 VolumeExpansion OpsRequest + + 应用以下 YAML 为 mongodb 组件扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: mongodb + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 通过以下命令监控扩展进度: + + ```bash + kubectl describe ops mongo-cluster-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 操作完成后,PVC 容量将更新。 + + :::note + 若使用的存储类不支持卷扩展,此 OpsRequest 将快速失败并提示: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 方法二:直接更新 Cluster API + + 您也可以直接更新 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段: + + ```yaml + componentSpecs: + - name: mongodb + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量(必须大于当前值) + storage: 30Gi + ``` + KubeBlocks 将根据新配置自动更新 PVC 容量。 + + + +## 验证操作 + +检查更新后的集群配置: +```bash +kbcli cluster describe mongo-cluster -n demo +``` +预期输出: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mongodb 500m / 500m 512Mi / 512Mi data:30Gi +``` +数据卷 PVC 容量已更新为指定值(本例中为 30Gi)。 + +确认 PVC 扩容完成: +```bash +kubectl get pvc -l app.kubernetes.io/instance=mongo-cluster -n demo +``` +预期输出: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +mongo-cluster-mongodb-data-0 Bound pvc-uuid 30Gi RWO 33m +mongo-cluster-mongodb-data-1 Bound pvc-uuid 30Gi RWO 33m +mongo-cluster-mongodb-data-2 Bound pvc-uuid 30Gi RWO 33m +``` + +## 清理资源 +删除 MongoDB 集群及其命名空间以释放所有资源: +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南中您已学习如何: +1. 验证存储类的卷扩展兼容性 +2. 通过以下方式执行卷扩展: + - 使用 OpsRequest 进行动态更新 + - 通过 Cluster API 手动更新 +3. 
验证 PVC 新容量并确认扩容操作完成 + +通过存储卷扩展功能,您可以在不影响服务的情况下高效扩展 MongoDB 集群的存储容量,确保数据库能够随着应用需求同步增长。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..7c9cca98 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,345 @@ +--- +description: 了解如何通过负载均衡器(LoadBalancer)及其他服务类型,在KubeBlocks中配置和管理MongoDB服务,实现内外部访问。 +keywords: +- KubeBlocks +- MongoDB +- LoadBalancer +- External Service +- Expose +- Kubernetes +sidebar_label: 管理 MongoDB 服务 +sidebar_position: 5 +title: 使用KubeBlocks声明式集群API创建与销毁MongoDB服务 +--- +请翻译以下内容: + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 在 KubeBlocks 中使用声明式集群 API 管理 MongoDB 服务 + +本指南提供了逐步说明,用于对外暴露或内部管理由 KubeBlocks 托管的 MongoDB 服务。您将学习如何通过云服务提供商的负载均衡器服务配置外部访问、管理内部服务,以及在不再需要时正确关闭外部暴露功能。 + + + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 MongoDB ReplicaSet 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + + +## 查看网络服务 +列出为 MongoDB 集群创建的服务: +```bash +kubectl get service -l app.kubernetes.io/instance=mongo-cluster -n demo +``` + +示例服务列表: +```bash +# 面向所有副本的服务 +mongo-cluster-mongodb ClusterIP 10.96.249.157 27017/TCP 44m +# 读写服务 +mongo-cluster-mongodb-mongodb ClusterIP 10.96.17.58 27017/TCP 44m +# 只读服务 +mongo-cluster-mongodb-mongodb-ro ClusterIP 10.96.2.71 27017/TCP 44m +``` + +## 暴露 MongoDB 服务 + +外部服务地址允许公网访问 MongoDB,而内部服务地址将访问限制在用户的 VPC 内。 + +### 服务类型对比 + +| 类型 | 使用场景 | 云服务成本 | 安全性 | +|---------------|--------------------|------------|---------| +| ClusterIP | 内部服务通信 | 免费 | 最高 | +| NodePort | 开发测试环境 | 低 | 中等 | +| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 | + + + + + + 选项一:使用 OpsRequest + + 要通过 LoadBalancer 对外暴露 MongoDB 服务,创建一个 OpsRequest 资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: mongo-cluster + expose: + - componentName: mongodb + services: + - name: internet + # 决定服务暴露方式,默认为 'ClusterIP' + # 可选值:'ClusterIP'、'NodePort' 和 'LoadBalancer' + serviceType: LoadBalancer + # 当 ServiceType 为 LoadBalancer 时,包含云服务商相关参数 + # 以下是 AWS EKS 的配置示例 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP + # 指定服务目标角色 + # 若指定,服务将仅暴露给具有匹配角色的 Pod + roleSelector: primary + switch: Enable + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops mongo-cluster-expose-enable-ops -n demo + ``` + + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-expose-enable-ops Expose mongo-cluster Succeed 1/1 31s + ``` + + + + + + 选项二:使用 Cluster API + + 或者,在 Cluster 资源的 `spec.services` 部分添加 LoadBalancer 服务配置: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: mongo-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + # 暴露外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP + componentSelector: mongodb + name: mongodb-internet + serviceName: mongodb-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - 
name: mongodb + port: 27017 + protocol: TCP + targetPort: mongodb + type: LoadBalancer + componentSpecs: + ... + ``` + 上述 YAML 配置在 services 部分新增了一个外部服务。该 LoadBalancer 服务包含 AWS 网络负载均衡器 (NLB) 的注解。 + + :::note + 云服务商注解说明 + + 使用 LoadBalancer 服务时,必须添加对应云服务商的特定注解。以下是常用云服务商的注解示例: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 设为 "false" 表示使用面向公网的 LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 设为 "false" 表示使用面向公网的 LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # 限制 LoadBalancer 仅限内部 VPC 访问。默认不指定时为面向公网。 + cloud.google.com/l4-rbs: "enabled" # 面向公网的 LoadBalancer 优化配置 + ``` + + - 阿里云 + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 设为 "intranet" 表示使用内部 LoadBalancer + ``` + ::: + + + :::note + `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制 LoadBalancer 是内部还是面向公网。注意该注解在服务创建后不能动态修改。 + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP + ``` + 如果在服务创建后将该注解从 "false" 改为 "true",注解可能在服务对象中更新,但 LoadBalancer 仍会保留其公网 IP。 + + 正确修改该行为的步骤: + - 首先删除现有的 LoadBalancer 服务 + - 使用更新后的注解重新创建服务(`service.beta.kubernetes.io/aws-load-balancer-internal`: "true") + - 等待新的 LoadBalancer 配置正确的内部或外部 IP + ::: + + + 使用以下命令等待集群状态变为 Running: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster mongodb Delete Running 18m + ``` + + + + +### 验证暴露的服务 +检查服务详情以确认 LoadBalancer 服务已创建: + +```bash +kubectl get service -l app.kubernetes.io/instance=mongo-cluster -n demo +``` + +示例输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +mongo-cluster-mongodb-internet LoadBalancer 172.20.60.24 27017:31243/TCP 1m +``` + +### 等待 DNS 解析生效 + +LoadBalancer 的 DNS 名称可能需要 2-5 分钟才能解析。验证解析状态: + +```bash +nslookup # 将 替换为实际获取的 IP 地址 +``` + +## 外部连接 MongoDB + +### 获取凭证 + +KubeBlocks 会自动创建一个包含 MongoDB root 凭证的 Secret。获取 MongoDB root 凭证: +```bash +NAME=`kubectl get secrets -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.password}' | base64 -d` +``` + +### 使用 MongoDB 客户端连接 + +现在你可以从外部(例如你的笔记本电脑或 EC2 实例)连接到 MongoDB 数据库: +```bash +mongosh "mongodb://<$NAME>:<$PASSWD>@:27017/admin" +``` + +## 禁用外部访问 + + + + + + 方法一:使用 OpsRequest + + 要禁用外部访问,创建一个 OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: mongo-cluster + expose: + - componentName: mongodb + services: + - name: internet + roleSelector: primary + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops mongo-cluster-expose-disable-ops -n demo + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-expose-disable-ops Expose mongo-cluster Succeed 1/1 24s + ``` + + + + + + 方法二:使用 Cluster API + + 或者,从 Cluster 资源中移除 `spec.services` 字段: + ```bash + kubectl patch cluster mongo-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + 监控集群状态直到变为 Running: + ```bash + kubectl get cluster mongo-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + mongo-cluster 
mongodb Delete Running 24m + ``` + + + +### 验证服务移除 + +确保 'mongo-cluster-mongodb-internet' 服务已被移除: + +```bash +kubectl get service -l app.kubernetes.io/instance=mongo-cluster -n demo +``` + +预期结果:'mongo-cluster-mongodb-internet' 服务应该已被移除。 + +## 清理资源 +要删除所有已创建的资源,请执行以下命令删除 MongoDB 集群及其所在的命名空间: +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## 概述 +本指南演示了如何: +- 使用 KubeBlocks 将 MongoDB 服务暴露给外部或内部访问 +- 通过云服务商特定注解配置负载均衡器(LoadBalancer)服务 +- 通过 OpsRequest 或直接更新 Cluster API 来管理外部访问,实现服务的启用或禁用 + +KubeBlocks 为 Kubernetes 环境中的 MongoDB 服务管理提供了灵活且简化的解决方案。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/08-switchover.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/08-switchover.mdx new file mode 100644 index 00000000..0072f8ff --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/08-switchover.mdx @@ -0,0 +1,186 @@ +--- +description: 使用KubeBlocks对MongoDB集群执行计划内角色切换,实现最短停机时间和可控维护 +keywords: +- MongoDB +- KubeBlocks +- Switchover +- High Availability +- Role Transition +- Kubernetes +sidebar_label: MongoDB 主从切换 +sidebar_position: 8 +title: MongoDB 集群切换 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# MongoDB 集群切换 + +**切换(Switchover)** 是一项有计划的操作,用于将主节点角色从一个 MongoDB 实例转移到另一个实例。与故障转移(failover)不同,切换操作具有以下特点: +- 可控的角色转换 +- 极短的中断时间(通常仅数百毫秒) +- 可预测的维护窗口 + +切换操作适用于以下场景: +- 节点维护/升级 +- 工作负载重新平衡 +- 测试高可用性 +- 有计划的基础设施变更 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 MongoDB 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 检查角色状态 +列出 Pod 及其角色(主节点或从节点): + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster,apps.kubeblocks.io/component-name=mongodb -L kubeblocks.io/role +``` + +示例输出: + +```text +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 20m primary +mongo-cluster-mongodb-1 2/2 Running 0 21m secondary +mongo-cluster-mongodb-2 2/2 Running 0 19m secondary +``` + +## 执行计划内切换 + +要发起计划内切换,请创建如下 OpsRequest 资源: + + + + 选项1:自动切换(不指定候选节点) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-switchover-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: Switchover + switchover: + - componentName: mongodb + instanceName: mongo-cluster-mongodb-0 + ``` + **关键参数:** + - `instanceName`:指定切换操作前作为主节点(leader)的实例(Pod)。 + + + + 选项2:定向切换(指定候选节点) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongodb-switchover-targeted + namespace: demo + spec: + clusterName: mongo-cluster + type: Switchover + switchover: + - componentName: mongodb + # 指定需要转移角色的实例 + # 典型用法是在共识系统中转移 leader 角色 + instanceName: mongo-cluster-mongodb-0 + # 如果指定 candidateName,角色将转移到该实例 + # 名称必须匹配组件中的某个 Pod + # 详情请参考 ComponentDefinition 的 Switchover 生命周期操作 + candidateName: mongo-cluster-mongodb-1 + ``` + + **关键参数:** + - `instanceName`:指定切换操作前作为主节点(leader)的实例(Pod)。 + - `candidateName`:如果指定候选节点名称,角色将转移到该实例。 + + + +## 监控切换过程 + +监控切换进度: + +```bash +kubectl get ops mongodb-switchover-ops -n demo -w +``` + +预期结果: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +mongodb-switchover-ops Switchover mongo-cluster Succeed 1/1 33s +``` + +## 验证切换结果 + +切换完成后,指定的实例将被提升为主节点角色,而原先的主节点实例将转为从节点角色。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role +``` + +预期输出: + +```text +NAME READY STATUS RESTARTS 
AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 23m secondary +mongo-cluster-mongodb-1 2/2 Running 0 24m primary +mongo-cluster-mongodb-2 2/2 Running 0 23m secondary +``` + +在本示例中: +- Pod 'mongo-cluster-mongodb-1' 已被提升为主节点角色 +- Pod 'mongo-cluster-mongodb-0' 已转为从节点角色 + +## 故障排查 + +### 常见切换问题 + +如果切换操作卡住,请检查以下资源: +```bash +# 检查当前主节点和候选节点的 agent 日志 +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# 检查集群事件中的错误信息 +kubectl get events -n demo --field-selector involvedObject.name=mongo-cluster + +# 检查 kubeblocks 日志 +kubectl -n kb-system logs deploy/kubeblocks +``` + +## 总结 + +本指南演示了如何: +1. 部署 MongoDB 高可用集群 +2. 执行自动和定向两种切换操作 +3. 验证角色转换 + +**关键要点:** +- 切换操作可实现可控维护,中断时间极短(约100-500毫秒) +- KubeBlocks 提供声明式操作实现可靠的角色转换 +- 切换后务必验证: + - 集群状态 + - 应用连接性 + - 复制健康状况 +- 排查问题时检查以下日志: + - KubeBlocks operator(位于 kb-system 命名空间) + - 数据库 Pod 上的 kbagent \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..be3d182f --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,142 @@ +--- +description: 了解如何在KubeBlocks管理的MongoDB集群中下线(停用)特定Pod。 +keywords: +- KubeBlocks +- MongoDB +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 停用 MongoDB 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的MongoDB集群中下线特定Pod +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 下线 KubeBlocks 托管的 MongoDB 集群中的特定 Pod + +本指南介绍如何在 KubeBlocks 托管的 MongoDB 集群中下线(停用)特定 Pod。通过精确控制集群资源的同时保持可用性,此功能适用于工作负载再平衡、节点维护或故障处理场景。 + +## 为何选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法下线特定 Pod。StatefulSet 会严格保证 Pod 的顺序和身份标识,缩容操作总是优先移除序号最大的 Pod(例如从 3 个副本缩容时,会先移除 `Pod-2`)。这种限制导致无法精确控制下线目标,给维护工作、负载分配和故障处理带来不便。 + +KubeBlocks 突破了这一限制,允许管理员直接下线指定 Pod。这种细粒度控制既能保障高可用性,又能实现更优的资源管理,且不会影响整个集群。 + +## 前置条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 MongoDB 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 下线 Pod + +**预期流程**: +1. `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转为 `Running` + +要下线特定 Pod(例如 'mongo-cluster-mongodb-1'),可采用以下任一方式: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 标记待下线 Pod: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: mongo-cluster-decommission-ops + namespace: demo + spec: + clusterName: mongo-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: mongodb + scaleIn: + onlineInstancesToOffline: + - 'mongo-cluster-mongodb-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 查看下线操作执行状态: + + ```bash + kubectl get ops mongo-cluster-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + mongo-cluster-decommission-ops HorizontalScaling mongo-cluster Succeed 1/1 5s + ``` + + + + + + 方法二:使用 Cluster API + + 也可直接更新 Cluster 资源来下线 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: mongodb + replicas: 1 # 下线后的预期副本数 + offlineInstances: + - mongo-cluster-mongodb-1 # <----- 指定待下线的 Pod + ... 
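        # 补充说明(非原文内容,仅为提示):`replicas` 应设置为下线后的目标副本数,
        # 即当前副本数减去 offlineInstances 中列出的实例个数;
        # 若两者不一致,KubeBlocks 可能会再增删其他 Pod 以对齐 replicas 的期望值。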
+ ``` + + + + +### 验证下线结果 + +应用更新配置后,检查集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=mongo-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +mongo-cluster-mongodb-0 2/2 Running 0 25m +mongo-cluster-mongodb-2 2/2 Running 0 24m +``` + +登录 MongoDB 副本集验证状态: +```bash +# 登录任意 mongodb 副本: +mongo-cluster-mongodb [direct: secondary] admin> rs.status() +``` +检查 `members` 字段的变化。 + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向下线特定 Pod +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保障可用性的同时,提供了精细化的集群管理能力。 diff --git a/docs/zh/preview/kubeblocks-for-mongodb/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..a6eeb2e0 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,129 @@ +--- +description: 了解如何创建并配置一个使用S3存储桶保存备份数据的KubeBlocks BackupRepo。 +keywords: +- KubeBlocks +- Backup +- BackupRepo +- S3 +- Kubernetes +sidebar_label: 创建备份仓库 +sidebar_position: 1 +title: 为KubeBlocks创建备份仓库 +--- +# 为 KubeBlocks 创建备份仓库 + +本指南将引导您通过使用 S3 存储桶来创建和配置 KubeBlocks 中的备份仓库(BackupRepo),用于存储备份数据。 + +## 前提条件 +- 已配置具有创建 S3 存储桶权限的 AWS CLI +- 拥有 Kubernetes 集群的 kubectl 访问权限 +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)并在 kb-system 命名空间中运行 + +## 步骤 1:创建 S3 存储桶 + +使用 AWS CLI 在目标区域创建 S3 存储桶。将 `` 替换为您所需的 AWS 区域(例如 `us-east-1`、`ap-southeast-1`)。 + +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +示例(us-west-1 区域): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +示例输出: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +验证: +通过列出存储桶内容确认创建成功(初始应为空): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## 步骤 2:创建 Kubernetes Secret 存储 AWS 凭证 + +将您的 AWS 凭证安全地存储在 Kubernetes Secret 中。将 `` 和 `` 替换为实际的 AWS 凭证: + +```bash +# 创建 secret 保存访问密钥 +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## 步骤 3:配置备份仓库 + +备份仓库(BackupRepo)是用于定义备份存储位置的自定义资源。本步骤将通过创建 BackupRepo 资源将您的 S3 存储桶与 KubeBlocks 集成。 + +应用以下 YAML 创建备份仓库。请根据实际情况替换字段(如存储桶名称、区域等)。 + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # 将此备份仓库标记为默认仓库 + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # 当前 KubeBlocks 支持配置多种对象存储服务作为备份仓库 + # - s3 (Amazon Simple Storage Service) + # - oss (阿里云对象存储服务) + # - cos (腾讯云对象存储) + # - gcs (Google 云存储) + # - obs (华为云对象存储) + # - minio 及其他 S3 兼容服务 + storageProviderRef: s3 + # 指定备份仓库的访问方式 + # - Tool + # - Mount + accessMethod: Tool + # 指定此备份仓库创建的 PV 回收策略 + pvReclaimPolicy: Retain + # 指定此备份仓库创建的 PVC 容量 + volumeCapacity: 100Gi + # 存储 StorageProvider 的非敏感配置参数 + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # 引用存储 StorageProvider 凭证的 secret + 
credential: + # name 是在命名空间内引用 secret 资源的唯一标识 + name: s3-credential-for-backuprepo + # namespace 定义了 secret 名称必须唯一的空间范围 + namespace: kb-system +``` + +## 步骤 4:验证备份仓库状态 + +检查 BackupRepo 状态以确保其正确初始化: + +```bash +kubectl get backuprepo s3-repo -w +``` + +预期状态变化: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +故障排除: + - 如果状态变为 Failed: + - 确认存储桶名称和区域与 S3 配置匹配 + - 检查 Secret 中的 AWS 凭证是否正确 + - 验证 KubeBlocks 与 AWS S3 之间的网络连接 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..6bb24a4c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,228 @@ +--- +description: 使用KubeBlocks中的Backup API和OpsRequest API为MongoDB集群创建及验证完整备份的逐步指南 +keywords: +- MongoDB +- Full Backup +- KubeBlocks +- Kubernetes +- Database Backup +- XtraBackup +sidebar_label: 创建完整备份 +sidebar_position: 2 +title: 在KubeBlocks上为MongoDB集群创建完整备份 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 在KubeBlocks上为MongoDB创建全量备份 + +本指南演示如何通过以下两种方式为KubeBlocks上的MongoDB集群创建和验证全量备份: +- 直接使用Backup API进行备份操作 +- 使用OpsRequest API进行托管式备份操作(提供增强的监控能力) + +我们将在[从全量备份恢复](./05-restoring-from-full-backup)指南中介绍如何从备份恢复数据。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署MongoDB集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 备份准备条件 + +创建备份前请确保: +1. 备份仓库已配置: + - 存在`BackupRepo`资源 + - 集群与仓库间网络连通 + - `BackupRepo`状态显示"Ready" + +2. 集群准备就绪: + - 集群状态为"Running" + - 无正在进行的操作(扩缩容、升级等) + +## 查看备份配置 + +检查可用的备份策略和计划: + +```bash +# 列出备份策略 +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=mongo-cluster + +# 列出备份计划 +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=mongo-cluster +``` + +预期输出: +```bash +NAME BACKUP-REPO STATUS AGE +mongo-cluster-mongodb-backup-policy Available 62m + +NAME STATUS AGE +mongo-cluster-mongodb-backup-schedule Available 62m +``` + +查看BackupPolicy CR 'mongo-cluster-mongodb-backup-policy'中支持的备份方法: + +```bash +kubectl get backuppolicy mongo-cluster-mongodb-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` +**备份方法列表** + +KubeBlocks MongoDB支持以下备份方法: + +| 功能 | 方法 | 描述 | +|-------------|--------|------------| +| 全量备份 | dump | 使用MongoDB工具`mongodump`创建数据库内容的二进制导出 | +| 全量备份 | datafile | 备份数据库的数据文件 | +| 持续备份 | archive-oplog | 使用`wal-g`持续归档MongoDB操作日志 | + +## 通过Backup API备份 + +### 1. 创建按需备份 + +`datafile`方法会备份数据库的数据文件 + +应用以下清单创建备份: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: mongodb-backup-datafile + namespace: demo +spec: + # 指定备份策略中定义的备份方法名称 + # - dump + # - volume-snapshot + # - datafile + backupMethod: datafile + # 指定应用于此备份的备份策略 + backupPolicyName: mongo-cluster-mongodb-backup-policy + # 决定当备份自定义资源(CR)被删除时,备份仓库中的备份内容是否应被删除。支持的值是`Retain`和`Delete` + # - `Retain`表示保留备份内容及其在备份仓库中的物理快照 + # - `Delete`表示删除备份内容及其在备份仓库中的物理快照 + deletionPolicy: Delete +``` + +### 2. 
监控备份并验证完成状态 + +跟踪进度直至状态显示"Completed": + +```bash +kubectl get backup mongodb-backup-datafile -n demo -w +``` + +示例输出: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +mongodb-backup-datafile mongo-cluster-mongodb-backup-policy datafile Running 1119698 Delete 2025-05-18T14:21:16Z +mongodb-backup-datafile mongo-cluster-mongodb-backup-policy datafile Running 1119698 Delete 2025-05-18T14:21:16Z +mongodb-backup-datafile mongo-cluster-mongodb-backup-policy datafile Completed 1119698 15s Delete 2025-05-18T14:21:16Z 2025-05-18T14:21:31Z +``` + +### 3. 验证备份 + +通过以下方式确认备份成功: +- 备份状态显示"Completed" +- 备份大小符合预期 +- 检查BackupRepo中的文件 + +`Backup`资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 备份文件大小 + + +## 通过OpsRequest API备份 + +### 1. 创建按需备份 + +使用OpsRequest API执行'datafile'方法备份: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-backup + namespace: demo +spec: + clusterName: mongo-cluster + force: false + backup: + backupPolicyName: mongo-cluster-mongodb-backup-policy + backupMethod: datafile + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + +### 2. 监控备份进度 + +#### 1. 监控操作状态 + +实时跟踪备份进度: +```bash +kubectl get ops mongo-cluster-backup -n demo -w +``` + +预期输出: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +mongo-cluster-backup Backup mongo-cluster Running -/- 5s +mongo-cluster-backup Backup mongo-cluster Succeed -/- 10s +``` + +- 状态'Succeed'表示备份操作成功完成 + +#### 2. 验证完成状态 + +检查最终备份状态: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=mongo-cluster-backup +``` + +示例输出: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-mongo-cluster-20250518142234 mongo-cluster-mongodb-backup-policy datafile kb-oss Completed 1149575 11s Delete 2025-05-18T14:22:34Z 2025-05-18T14:22:44Z 2025-06-17T14:22:44Z +``` + +- 备份状态应显示'Completed' + +### 3. 验证备份 + +通过以下方式确认备份成功: +- 备份状态显示"Completed" +- 备份大小符合预期 +- 检查BackupRepo中的文件 + +`Backup`资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 其他元数据 + +## 总结 + +本指南涵盖: +1. 部署MongoDB复制集群 +2. 使用以下方式创建全量备份: + - 直接Backup API + - 托管式OpsRequest API +3. 监控和验证备份 + +您的MongoDB数据现已安全备份,可在需要时进行恢复。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..1a59a19e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,153 @@ +--- +description: 了解如何使用KubeBlocks部署MongoDB集群,并配置在S3存储库中保留的自动化定时备份。 +keywords: +- MongoDB +- Backup +- KubeBlocks +- Scheduled Backup +- Kubernetes +sidebar_label: 定时备份 +sidebar_position: 3 +title: 在KubeBlocks中设置带定时备份的MongoDB集群 +--- +# 在 KubeBlocks 中设置带定时备份的 MongoDB 集群 + +本指南演示如何使用 KubeBlocks 部署 MongoDB 集群,并配置定时备份到 S3 存储仓库的保留策略。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 MongoDB 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 备份前提条件 + +1. 已配置备份仓库: + - 已配置 `BackupRepo` + - 集群与仓库间网络连通,`BackupRepo` 状态为 `Ready` + +2. 集群运行状态: + - 集群必须处于 `Running` 状态 + - 无正在进行的操作(扩缩容、升级等) + +## 配置定时备份 + +KubeBlocks 在创建集群时会自动生成 `BackupSchedule` 资源。按以下步骤启用并配置定时备份: + +1. 
验证默认备份计划配置: + +```bash +kubectl get backupschedule mongo-cluster-mongodb-backup-schedule -n demo -oyaml +``` + +示例输出: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: mongo-cluster-MongoDB-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── 分钟 (0-59) + # │ ┌───────────── 小时 (0-23) + # │ │ ┌───────────── 月份中的天 (1-31) + # │ │ │ ┌───────────── 月 (1-12) + # │ │ │ │ ┌───────────── 周中的天 (0-6) (0=周日) + # │ │ │ │ │ + # 0 18 * * * + # 每天18:00(UTC)执行此任务 + cronExpression: 0 18 * * * # 按需修改cron表达式 + enabled: true # 设为`true`启用定期基础备份 + retentionPeriod: 7d # 按需设置保留期限 +``` + +2. 启用并自定义备份计划: +```bash +kubectl edit backupschedule mongo-cluster-mongodb-backup-schedule -n demo +``` + +修改以下关键参数: +- `enabled`:设为 `true` 启用定时备份 +- `cronExpression`:使用 cron 语法配置备份频率 +- `retentionPeriod`:设置备份保留时长(如 `7d`、`1mo`) + +每日18:00 UTC备份并保留7天的配置示例: +```yaml +schedules: +- backupMethod: datafile + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. 验证计划配置: +```bash +# 检查计划状态 +kubectl get backupschedule mongo-cluster-mongodb-backup-schedule -n demo -w + +# 查看详细配置 +kubectl describe backupschedule mongo-cluster-mongodb-backup-schedule -n demo +``` + +## 监控与管理备份 + +启用定时备份后,请监控执行情况并管理备份保留: + +1. 查看所有备份: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=mongo-cluster +``` + +2. 检查备份详情: +```bash +kubectl describe backup -n demo +``` + +3. 验证备份文件: +- 状态应显示"Completed" +- 检查备份大小是否符合预期 +- 确认保留策略已生效 +- 验证仓库中存在备份文件 + +4. 管理备份保留: +- 手动删除旧备份: +```bash +kubectl delete backup -n demo +``` +- 修改保留期限: +```bash +kubectl edit backupschedule mongo-cluster-mongodb-backup-schedule -n demo +``` + +## 清理资源 +删除 MongoDB 集群及其命名空间以移除所有资源: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南演示了: +1. MongoDB 自动备份配置 +2. 使用 cron 语法自定义计划 +3. 保留策略管理 +4. 备份验证流程 + +您的 MongoDB 集群现已具备: +- 定期自动备份 +- 可配置的保留策略 +- 完整的备份历史追踪 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx new file mode 100644 index 00000000..7bf48a87 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/04-scheduled-continuous-backup.mdx @@ -0,0 +1,184 @@ +--- +description: 了解如何在KubeBlocks中配置支持定时全量备份与持续增量备份的MongoDB集群。 +keywords: +- MongoDB +- Backup +- PITR +- KubeBlocks +- Kubernetes +sidebar_label: 定时持续备份 +sidebar_position: 4 +title: 在KubeBlocks中设置支持定时持续备份的MongoDB集群 +--- +# 在 KubeBlocks 中配置支持定时持续备份的 MongoDB 集群 + +本指南演示如何在 KubeBlocks 上配置 MongoDB 集群,实现以下功能: +- 定时全量备份(基础备份) +- 持续 WAL(预写日志)归档 +- 时间点恢复(PITR)能力 + +这种组合方案能提供全面的数据保护,并实现最小的恢复点目标(RPO)。 + +## 什么是 PITR? +时间点恢复(PITR)允许您通过结合全量备份和持续的 binlog/wal/归档日志备份,将数据库恢复到特定时间点。 + +有关从全量备份和持续 binlog 备份恢复数据的详细信息,请参阅[从 PITR 恢复](restore-with-pitr.mdx)指南。 + +## 前提条件 + +开始前请确保: +- 环境准备: + - Kubernetes 集群已启动并运行 + - kubectl CLI 工具已配置为可访问您的集群 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处说明进行安装。 +- 命名空间准备:为保持资源隔离,请为本教程创建专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## 备份前提条件 + +1. 备份仓库配置: + - 已配置 `BackupRepo` + - 集群与仓库间网络连通,`BackupRepo` 状态为 `Ready` + +2. 
集群运行状态: + - 集群必须处于 `Running` 状态 + - 没有正在进行的操作(扩缩容、升级等) + +## 备份方法列表 + +KubeBlocks MongoDB 支持以下备份方法: + +| 功能 | 方法 | 描述 | +|-------------|--------|------------| +| 全量备份 | dump | 使用 MongoDB 工具 `mongodump` 创建数据库内容的二进制导出 | +| 全量备份 | datafile | 备份数据库的数据文件 | +| 持续备份 | archive-oplog | 使用 `wal-g` 持续归档 MongoDB oplog | + +## 部署支持备份 API 的 MongoDB 副本集集群 + +部署一个包含 3 个副本的 MongoDB 副本集集群,并指定备份信息: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + backup: + retentionPeriod: 7d + # 全量备份配置 + method: datafile # 全量备份方法名称 + enabled: true + cronExpression: 0 18 * * * # 全量备份调度时间 + # 持续备份配置 + continuousMethod: archive-oplog # 持续备份方法,与 wal-g 配对使用 + pitrEnabled: true # 是否启用持续备份方法 + repoName: s3-repo # 指定备份仓库,如未指定则使用标注为 `default` 的 BackupRepo +``` + +**关键配置字段说明** + +| 字段 | 值 | 描述 | +|-------|-------|-------------| +| `backup.enabled` | `true` | 启用定时备份 | +| `method` | `datafile` | 使用 MongoDB 原生工具进行全量备份 | +| `cronExpression` | `0 18 * * *` | 每天 UTC 时间 18:00 执行全量备份 | +| `retentionPeriod` | `7d` | 备份保留 7 天 | +| `repoName` | `s3-repo` | 备份仓库名称(S3 兼容存储) | +| `pitrEnabled` | `true` | 启用持续 WAL 归档以实现 PITR | +| `continuousMethod` | `archive-oplog` | 持续 WAL 归档方法 | + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 监控持续备份 + +使用以下命令验证持续备份操作: +```bash +# 获取持续备份 +kubectl get backup -l app.kubernetes.io/instance=mongo-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo +# 获取执行持续备份的 Pod +kubectl get pod -l app.kubernetes.io/instance=mongo-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo +``` + +## 验证备份配置 + +KubeBlocks 会自动创建 `BackupSchedule` 资源。检查配置: + +```bash +kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml +``` + +示例输出: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +... +spec: + backupPolicyName: mongo-cluster-mongodb-backup-schedule + schedules: + - backupMethod: datafile + cronExpression: 0 18 * * * + enabled: true # + retentionPeriod: 7d + - backupMethod: archive-oplog + cronExpression: '*/5 * * * *' + enabled: true # 设置为 `true` 启用持续备份 + retentionPeriod: 8d # 根据需要设置保留期限 +``` + +1. **全量备份** (datafile): + - 备份 MongoDB 的数据文件 + - 按配置的计划运行(默认为每日) + - 作为 PITR 的基础 + +2. **持续备份** (archive-oplog): + - 使用 wal-g 持续归档 MongoDB oplog + - 使用 datasafed 作为存储后端,采用 zstd 压缩 + - 维护备份元数据,包括大小和时间范围 + - 自动清理过期备份 + - 验证 MongoDB 主节点状态和进程健康状态 + +## 总结 + +本指南涵盖: +1. 使用 pg-basebackup 配置定时全量备份 +2. 启用 wal-g-archive 持续 WAL 归档 +3. 设置时间点恢复(PITR)能力 +4. 
监控备份操作 + +主要优势: +- 定时全量备份确保定期恢复点 +- 持续 WAL 归档最小化潜在数据丢失 +- PITR 支持恢复到任意时间点 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx new file mode 100644 index 00000000..a12ec53b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/05-restoring-from-full-backup.mdx @@ -0,0 +1,165 @@ +--- +description: 了解如何通过集群注解或OpsRequest API,在KubeBlocks中从现有备份恢复一个新的MongoDB集群。 +keywords: +- MongoDB +- Restore +- Backup +- KubeBlocks +- Kubernetes +sidebar_label: 恢复 MongoDB 集群 +sidebar_position: 5 +title: 从备份恢复 MongoDB 集群 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 从备份恢复MongoDB集群 + +本指南演示在KubeBlocks中从备份恢复MongoDB集群的两种方法: + +1. **集群注解法** - 使用YAML注解的简单声明式方法 +2. **OpsRequest API法** - 支持进度监控的增强型操作控制 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 恢复准备:定位完整备份 +在恢复前,请确保存在可用的完整备份。恢复过程将使用此备份创建新的MongoDB集群。 + +- 新集群可访问的备份仓库 +- 状态为`Completed`的有效完整备份 +- 充足的CPU/内存资源 +- 足够的存储容量 + +查找可用完整备份: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=mongo-cluster # 获取完整备份列表 +``` + +选择状态为`Completed`的任意一个备份。 + +## 方案一:集群注解恢复法 + +### 步骤1:创建恢复集群 +创建包含恢复配置的新集群: + +关键参数: +- `kubeblocks.io/restore-from-backup`注解 +- 从上一步骤获取的备份名称和命名空间 + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster-restored + namespace: demo + annotations: + # 注意:将替换为您的备份名称 + kubeblocks.io/restore-from-backup: '{"mongodb":{"name":"","namespace":"demo","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 步骤2:监控恢复进度 +通过以下命令跟踪恢复状态: + +```bash +# 查看恢复状态 +kubectl get restore -n demo -w + +# 查看集群状态 +kubectl get cluster -n demo -w +``` + +## 方案二:OpsRequest API恢复法 + +### 步骤1:发起恢复操作 +通过OpsRequest API创建恢复请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-restore + namespace: demo +spec: + clusterName: mongo-cluster-restored + force: false + restore: + backupName: + backupNamespace: demo + type: Restore +``` + +### 步骤2:跟踪操作进度 +监控恢复状态: + +```bash +# 查看恢复状态 +kubectl get restore -n demo -w + +# 查看集群状态 +kubectl get cluster -n demo -w +``` + +### 步骤3:验证恢复集群 +确认恢复成功: +```bash +kubectl get cluster mongo-cluster-restored -n demo +``` +示例输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mongo-cluster-restored mongodb Delete Running 3m3s +``` + + +## 清理资源 +删除所有创建的资源,包括MongoDB集群及其命名空间: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete cluster mongo-cluster-restored -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南涵盖两种恢复方法: + +1. **集群注解法** - 基于YAML的简单方案 + - 获取系统凭证 + - 创建带恢复注解的集群 + - 监控进度 + +2. 
**OpsRequest API法** - 增强的操作控制方案 + - 创建恢复请求 + - 跟踪操作状态 + - 验证完成情况 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx new file mode 100644 index 00000000..93370bca --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/06-restore-with-pitr.mdx @@ -0,0 +1,182 @@ +--- +description: 了解如何在KubeBlocks上使用完整备份和持续binlog备份实现MongoDB集群的时间点恢复(PITR)。 +keywords: +- MongoDB +- Full Backup +- PITR +- KubeBlocks +sidebar_label: 使用 PITR 恢复 +sidebar_position: 6 +title: 在KubeBlocks上通过时间点恢复(PITR)从备份还原MongoDB集群 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 在KubeBlocks上通过时间点恢复(PITR)从备份还原MongoDB集群 + +本指南演示如何在KubeBlocks中为MongoDB集群执行时间点恢复(PITR),使用以下要素: +1. 完整基础备份 +2. 持续的WAL(预写日志)备份 +3. 两种恢复方法: + - 集群注解(声明式方法) + - OpsRequest API(操作控制) + +PITR支持恢复到指定`timeRange`时间范围内的任意时间点。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 准备PITR恢复 +执行PITR恢复需要同时具备完整备份和持续备份。如果尚未配置,请参考相关文档进行设置。 + +- 已完成的完整备份 +- 活跃的持续WAL备份 +- 可访问的备份存储库 +- 新集群的充足资源 + +可通过以下步骤确认备份列表: + +### 1. 验证持续备份 +确认存在正在运行或已完成的持续WAL备份: + +```bash +# 每个集群应有且仅有一个持续备份 +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Continuous,app.kubernetes.io/instance=mongo-cluster +``` + +### 2. 检查备份时间范围 +获取有效恢复时间窗口: + +```bash +kubectl get backup -n demo -o yaml | yq '.status.timeRange' +``` + +预期输出: +```text +start: "2025-05-07T09:12:47Z" +end: "2025-05-07T09:22:50Z" +``` + +### 3. 识别完整备份 +查找符合条件的完整备份: +- 状态:已完成 +- 完成时间在持续备份开始时间之后 + +```bash +# 应存在一个或多个完整备份 +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=mongo-cluster +``` + +:::tip +KubeBlocks会自动选择最近符合条件的完整备份作为基础。 +确保存在满足条件的完整备份:其`stopTime`/`completionTimestamp`必须**晚于**持续备份的`startTime`,否则PITR恢复将失败。 +::: + +## 方案一:集群注解恢复 + +### 步骤1:创建恢复集群 +在集群注解中配置PITR参数: + +关键参数: +- `name`: 持续备份名称 +- `restoreTime`: 目标恢复时间(需在备份`timeRange`范围内) + +应用以下YAML配置: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster-restore-pitr + namespace: demo + annotations: + # 注意:将替换为持续备份名称 + # 注意:将替换为备份时间范围内的有效时间 + kubeblocks.io/restore-from-backup: '{"mongodb":{"name":"","namespace":"demo","restoreTime":"","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 步骤2:监控恢复过程 +通过以下命令跟踪恢复进度: + +```bash +# 查看恢复状态 +kubectl get restore -n demo -w + +# 查看集群状态 +kubectl get cluster -n demo -w +``` + +## 方案二:OpsRequest API恢复 + +如需操作控制和监控,可使用OpsRequest API: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mongo-cluster-restore + namespace: demo +spec: + clusterName: mongo-cluster-restore + force: false + restore: + backupName: + backupNamespace: demo + restorePointInTime: + type: Restore +``` + +### 监控恢复过程 +通过以下命令跟踪进度: + +```bash +# 查看恢复操作 +kubectl get restore -n demo -w + +# 验证集群状态 +kubectl get cluster -n demo -w +``` + +## 清理资源 +删除所有创建的资源,包括MongoDB集群及其命名空间: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete cluster mongo-cluster-restore -n demo +kubectl delete ns demo +``` + +## 总结 
+本指南演示了如何在KubeBlocks中使用完整备份和持续备份对MongoDB集群执行时间点恢复(PITR)。关键步骤包括: +- 验证可用备份 +- 提取加密的系统账户凭证 +- 创建带有恢复配置的新MongoDB集群 +- 监控恢复过程 + +通过此方法,您可以将MongoDB集群恢复到特定时间点,确保数据损失最小化和业务连续性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/_category_.yml b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/_category_.yml new file mode 100644 index 00000000..09845f2d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 备份与恢复 +position: 5 diff --git a/docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx b/docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..f36b9be8 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,138 @@ +--- +description: 了解如何在KubeBlocks上部署MongoDB集群,并通过Kubernetes Secrets安全配置自定义根密码。 +keywords: +- MongoDB +- KubeBlocks +- Custom Password +- Kubernetes +- Secrets +sidebar_label: 自定义密码 +sidebar_position: 1 +title: 在KubeBlocks上创建带有自定义根密码的MongoDB集群 +--- +# 在 KubeBlocks 上创建带自定义密码的 MongoDB 集群 + +本指南演示如何在 KubeBlocks 中部署 MongoDB 集群,并将自定义 root 密码存储在 Kubernetes Secret 中。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 MongoDB 副本集集群 + +KubeBlocks 采用声明式方式管理 MongoDB 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)且带有自定义 root 密码的 MongoDB 集群。 + +### 步骤 1:为默认账户创建 Secret + +自定义 root 密码存储在 Kubernetes Secret 中。通过应用以下 YAML 创建 Secret: + +```yaml +apiVersion: v1 +data: + password: Y3VzdG9tcGFzc3dvcmQ= # custompassword + username: cm9vdA== #root +immutable: true +kind: Secret +metadata: + name: custom-secret + namespace: demo +``` +- password: 将 custompassword 替换为您想要的密码,并使用 Base64 编码(`echo -n "custompassword" | base64`)。 +- username: 默认 MongoDB root 用户为 'root',编码为 'cm9vdA=='。 + +### 步骤 2:部署 MongoDB 集群 + +应用以下清单文件部署 MongoDB 集群,并引用步骤 1 中创建的 Secret 作为 root 账户凭据: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + systemAccounts: # 覆盖系统账户密码 + - name: root + secretRef: + name: custom-secret + namespace: demo + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `systemAccounts`: 覆盖引用 `ComponentDefinition` 中定义的系统账户。 + +:::tip + +在 KubeBlocks MongoDB 插件中,预定义了一系列系统账户。只有这些账户可以通过新 Secret 进行自定义。 + +::: + +获取账户列表: +```bash +kubectl get cmpd mongodb-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +预期输出: +```bash +root +``` + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 连接 MongoDB 集群 + +KubeBlocks 会自动创建一个包含 MongoDB root 凭据的 Secret。通过以下命令获取凭据: + +```bash +kubectl get secrets -n demo mongo-cluster-mongodb-account-root -o jsonpath='{.data.password}' | base64 -d +custompassword +``` + +使用 MongoDB 客户端和自定义密码连接到集群的主节点: +```bash +kubectl exec -it -n demo mongo-cluster-mongodb-0 -c mongodb -- mongosh "mongodb://root:custompassword@127.0.0.1:27017/admin" +``` + +## 清理资源 +删除 MongoDB 集群及其命名空间以移除所有创建的资源: + +```bash +kubectl delete cluster mongo-cluster -n demo +kubectl delete secret custom-secret -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中,您完成了以下操作: +- 创建 Kubernetes 
Secret 安全存储自定义 MongoDB root 密码 +- 在 KubeBlocks 中部署带有自定义 root 密码的 MongoDB 集群 +- 验证部署并使用 MongoDB 客户端连接到集群主节点 + +使用 Kubernetes Secret 可以确保 MongoDB 集群凭据的安全管理,而 KubeBlocks 则简化了部署和管理流程。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/_category_.yml b/docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/_category_.yml new file mode 100644 index 00000000..76712392 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/06-custom-secret/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 自定义 Secret +position: 6 diff --git a/docs/zh/preview/kubeblocks-for-mongodb/_category_.yml b/docs/zh/preview/kubeblocks-for-mongodb/_category_.yml new file mode 100644 index 00000000..e8e71e24 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for MongoDB 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_category_.yml new file mode 100644 index 00000000..cd891c2b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 (tpl) +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..7e93f914 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_create-cluster.mdx @@ -0,0 +1,41 @@ +KubeBlocks 采用声明式方法来管理 MongoDB 复制集群。 +以下是一个部署 MongoDB 副本集集群的配置示例,包含一个主副本和两个从副本。 + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mongo-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**配置关键点:** + - `terminationPolicy: Delete` 表示删除集群时会清理所有资源 + - `replicas: 3` 指定了 1 个主节点 + 2 个从节点的副本集架构 + - 存储配置中 `storageClassName: ""` 表示使用默认存储类 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..a6deafb8 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +在继续之前,请确保满足以下条件: +- 环境准备: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装指南操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..1a726e5c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mongodb/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +监控集群状态直至其转为 Running(运行中)状态: +```bash +kubectl get cluster mongo-cluster -n demo -w +``` + +预期输出: + +```bash +kubectl get cluster mongo-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mongo-cluster mongodb Delete Creating 
49s +mongo-cluster mongodb Delete Running 62s +``` + +检查 Pod 状态及其角色: +```bash +kubectl get pods -l app.kubernetes.io/instance=mongo-cluster -L kubeblocks.io/role -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE +mongo-cluster-mongodb-0 2/2 Running 0 78s primary +mongo-cluster-mongodb-1 2/2 Running 0 63s secondary +mongo-cluster-mongodb-2 2/2 Running 0 48s secondary +``` + +当集群状态显示为 Running 时,表示您的 MongoDB 集群已准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/01-overview.mdx b/docs/zh/preview/kubeblocks-for-mysql/01-overview.mdx new file mode 100644 index 00000000..c8d75bf8 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/01-overview.mdx @@ -0,0 +1,66 @@ +--- +description: 了解KubeBlocks MySQL插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本。 +keywords: +- MySQL +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks MySQL 插件概述 +--- +# KubeBlocks MySQL 插件概述 + +**KubeBlocks MySQL 插件**为在 Kubernetes 中部署和管理 MySQL 集群提供了一套完整的解决方案。本文档概述了其功能特性,包括部署拓扑结构、生命周期管理选项、备份恢复功能以及支持的 MySQL 版本。 + + + +## 功能特性 + +### 拓扑结构 +**KubeBlocks Operator** 支持以三种不同拓扑结构部署 MySQL,以满足对性能、一致性和高可用性的多样化需求: + +| 特性 | 描述 +|-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 半同步复制 (SemiSync) | 利用 MySQL 半同步复制机制实现近实时数据一致性。
• 要求至少一个副本节点确认接收事务后主节点才提交。<br/>• 通过降低主节点故障时的数据丢失概率,平衡性能与一致性。 |
+| MySQL 组复制 (MGR)            | 使用 MySQL 原生组复制技术构建分布式多主集群。<br/>• 确保容错操作和节点间自动数据同步。<br/>• 提供内置冲突检测与解决机制,保障数据库持续可用性。 |
+| Orchestrator 集成             | 集成外部 Orchestrator 实现高可用性(HA)管理。<br/>• 增加自动监控与故障转移能力(包括副本提升)。
• 支持动态处理节点故障或性能降级,减少停机时间。 | + +通过这些选项,您可以根据性能、一致性和可用性的具体需求定制 MySQL 部署方案。 + +### 生命周期管理 + +KubeBlocks 提供强大的生命周期管理功能来简化 MySQL 集群运维: + +| 特性 | 描述 | +|-------------------------------|-------------------------------------------------------------------------------------------------------------------------| +| ProxySQL | 使用 ProxySQL 作为数据库负载均衡器和查询路由器,实现连接管理及读写分离 | +| 只读副本 | 从节点副本可提供只读服务 | +| 水平扩展 | 修改副本数量将触发扩缩容操作 | +| 垂直扩展 | 支持调整 MySQL 副本的计算资源(CPU 和内存) | +| 存储卷扩容 | 支持动态扩展 MySQL 副本的持久化存储卷 | +| 重启 | 在保持高可用性的前提下对 MySQL 集群进行受控重启 | +| 停止/启动 | 支持停止和启动 MySQL 集群以进行维护 | +| 自定义 root 密码 | 支持在创建时设置并管理 MySQL 集群的自定义 root 密码 | +| 自定义配置模板 | 在创建时提供定制化的 MySQL 配置文件模板 | +| 动态参数修改 | 无需重启集群即可动态重新配置 MySQL 参数 | +| 暴露自定义服务 | 支持通过自定义配置暴露数据库服务以满足不同访问需求 | +| 主从切换 | 计划内的主从切换操作,可实现最小化停机时间 | +| 下线指定副本 | 安全地将指定 MySQL 副本下线以进行维护或退役 | +| 重建指定副本 | 原地恢复副本或将其重建为新副本以恢复功能 | +| 次版本升级 | 执行 MySQL 集群的次版本升级而不影响可用性 | +| 高级 Pod 管理 | 支持自定义 Pod 资源、自定义调度策略、渐进式滚动更新等高级操作控制 | +| TLS 加密 | 支持启用或禁用 TLS 加密以保障数据库连接安全 | +| Prometheus 集成 | 与 Prometheus Operator 管理的 Prometheus 集成,实现 MySQL 指标的监控和告警 | +| Loki Stack 集成 | 集成 Loki Stack 以收集 MySQL 错误日志、审计日志和慢查询日志,提升可观测性 | + +支持的版本列表可通过以下命令查看: + +``` +kubectl get cmpv mysql +``` + + diff --git a/docs/zh/preview/kubeblocks-for-mysql/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-mysql/02-quickstart.mdx new file mode 100644 index 00000000..23dfd7a2 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/02-quickstart.mdx @@ -0,0 +1,412 @@ +--- +description: 了解如何开始使用KubeBlocks MySQL插件,包括先决条件、启用MySQL插件、创建MySQL集群以及有效管理集群的方法。 +keywords: +- Kubernetes +- MySQL +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: 快速入门 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 快速入门 + +本指南将带您快速上手 **KubeBlocks MySQL 插件**,包括准备工作、启用插件、创建 MySQL 集群以及轻松管理集群的全过程。 + +## 前提条件 + +本教程假设您已安装并运行 Kubernetes 集群,并且已在路径中安装 `kubectl` 命令行工具和 `helm`。请参阅 [Kubernetes 入门指南](https://kubernetes.io/docs/setup/) 和 [Helm 安装文档](https://helm.sh/docs/intro/install/) 获取各平台的安装说明。 + +此外,本示例要求已安装并运行 KubeBlocks。请参阅 [安装 KubeBlocks](../user_docs/overview/install-kubeblocks) 完成安装。 + + +### 启用 MySQL 插件 + +验证 MySQL 插件是否已安装。默认情况下,MySQL 插件会随 KubeBlocks Helm Chart 一同安装。 + +```bash +helm list -A +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +... +kb-addon-mysql kb-system 1 2024-12-16 00:28:52.78819557 +0000 UTC deployed mysql-1.0.0 5.7.44 +``` + +如果 MySQL 插件未启用,您可以按照以下步骤启用它。 + +```bash +# Add Helm repo +helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts +# For users in Mainland China, if github is not accessible or very slow for you, please use following repo instead +#helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + +# Update helm repo +helm repo update +# Search versions of the Addon +helm search repo kubeblocks/mysql --versions +# Install the version you want (replace $version with the one you need) +helm upgrade -i mysql kubeblocks-addons/mysql --version $version -n kb-system +``` + + + + +## 创建 MySQL 集群 + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mysql/cluster.yaml +``` + + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: mysql-cluster + namespace: demo +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. 
This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies a list of ClusterComponentSpec objects used to define the + # individual Components that make up a Cluster. + # This field allows for detailed configuration of each Component within the Cluster + componentSpecs: + - name: mysql + # Specifies the ComponentDefinition custom resource (CR) that defines the + # Component's characteristics and behavior. + # Supports three different ways to specify the ComponentDefinition: + # - the regular expression - recommended + # - the full name - recommended + # - the name prefix + componentDef: "mysql-8.0" # match all CMPD named with 'mysql-8.0-' + # ServiceVersion specifies the version of the Service expected to be + # provisioned by this Component. + # When componentDef is "mysql-8.0", + # Valid options are: [8.0.30,8.0.31,8.0.32,8.0.33,8.0.34,8.0.35,8.0.36,8.0.37,8.0.38,8.0.39] + serviceVersion: 8.0.35 + # Determines whether metrics exporter information is annotated on the + # Component's headless Service. + # Valid options are [true, false] + disableExporter: false + # Specifies the desired number of replicas in the Component + replicas: 2 + # Specifies the resources required by the Component. + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + # Specifies a list of PersistentVolumeClaim templates that define the storage + # requirements for the Component. + volumeClaimTemplates: + # Refers to the name of a volumeMount defined in + # `componentDefinition.spec.runtime.containers[*].volumeMounts + - name: data + spec: + # The name of the StorageClass required by the claim. 
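          # (补充提示,非原文注释)可先执行 `kubectl get sc` 查看可用的
          # StorageClass,并确认其 ALLOWVOLUMEEXPANSION 是否满足后续扩容需求。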
+ # If not specified, the StorageClass annotated with + # `storageclass.kubernetes.io/is-default-class=true` will be used by default + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # Set the storage size as needed + storage: 20Gi +``` + +如果你想创建指定版本的集群,在应用 YAML 文件前设置 `spec.componentSpecs.componentDef`(主版本)和 `spec.componentSpecs.serviceVersion`(主次版本)字段,例如: + + + + +```yaml + componentSpecs: + - name: mysql + # componentDef is "mysql-5.7" means the major version is 5.7 + componentDef: "mysql-5.7" + # Valid options are: [5.7.44] + serviceVersion: 5.7.44 +``` + + + + + +```yaml + componentSpecs: + - name: mysql + # componentDef is "mysql-8.0" means the major version is 8.0 + componentDef: "mysql-8.0" + # Valid options are: [8.0.30,8.0.31,8.0.32,8.0.33,8.0.34,8.0.35,8.0.36,8.0.37,8.0.38,8.0.39] + serviceVersion: 8.0.35 +``` + + + + + +```yaml + componentSpecs: + - name: mysql + # componentDef is "mysql-8.4" means the major version is 8.4 + componentDef: "mysql-8.4" + # Valid options are: [8.4.0, 8.4.1, 8.4.2] + serviceVersion: 8.4.2 +``` + + + + + + +可通过以下命令查看可用的 componentDef 列表: + +```bash +kubectl get cmpd -l app.kubernetes.io/name=mysql +``` + +支持的版本列表可通过以下命令查看: + +```bash +kubectl get cmpv mysql +``` + +创建 MySQL 集群时,KubeBlocks 会自动创建一个包含一个主节点副本和一个从节点副本的 MySQL 集群。主节点和从节点之间通过半同步复制保持同步。 + +当集群的 status.phase 状态变为 Running 时,表示集群已成功创建,且主节点和从节点副本均已启动。 + +```bash +kubectl get cluster mysql-cluster +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +mysql-cluster Delete Running 22m + +kubectl get pods -l app.kubernetes.io/instance=mysql-cluster +NAME READY STATUS RESTARTS AGE +mysql-cluster-mysql-0 4/4 Running 0 31m +mysql-cluster-mysql-1 4/4 Running 0 31m +``` + +如果已安装 `kbcli`,您可以使用 `kbcli` 工具快速查看与集群相关的重要信息。 + +```bash +kbcli cluster describe mysql-cluster +Name: mysql-cluster Created Time: Dec 16,2024 08:37 UTC+0800 +NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY +default Running Delete + +Endpoints: +COMPONENT MODE INTERNAL EXTERNAL +mysql ReadWrite mysql-cluster-mysql.default.svc.cluster.local:3306 + +Topology: +COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql mysql-cluster-mysql-0 secondary Running ap-southeast-1b ip-10-0-2-243.ap-southeast-1.compute.internal/10.0.2.243 Dec 16,2024 08:37 UTC+0800 +mysql mysql-cluster-mysql-1 primary Running ap-southeast-1a ip-10-0-1-215.ap-southeast-1.compute.internal/10.0.1.215 Dec 16,2024 08:37 UTC+0800 + +Resources Allocation: +COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql false 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT TYPE IMAGE +mysql docker.io/apecloud/mysql:8.0.35 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n default mysql-cluster +``` + + + + +## 连接到 MySQL 集群 + +创建 MySQL 集群时,KubeBlocks 会创建一个名为 "mysql-cluster-mysql-account-root" 的 Secret 来存储 MySQL root 用户名和密码。 + +```bash +kubectl get secret -l app.kubernetes.io/instance=mysql-cluster +NAME TYPE DATA AGE +mysql-cluster-mysql-account-kbadmin Opaque 2 61s +mysql-cluster-mysql-account-kbdataprotection Opaque 2 61s +mysql-cluster-mysql-account-kbmonitoring Opaque 2 61s +mysql-cluster-mysql-account-kbprobe Opaque 2 61s +mysql-cluster-mysql-account-kbreplicator Opaque 2 61s +mysql-cluster-mysql-account-proxysql Opaque 2 61s +mysql-cluster-mysql-account-root Opaque 2 61s +``` + +您可以通过以下两条命令从 Secret 'mysql-cluster-mysql-account-root' 中获取 MySQL 的 root 用户名和密码: + + + +```bash 
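# 补充示例(非原文命令;假设集群位于 default 命名空间,否则请为命令追加 -n 参数):
# 可将用户名与密码一次性读入 shell 变量,便于后续连接命令直接引用
# NAME=$(kubectl get secret mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 --decode)
# PASSWD=$(kubectl get secret mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 --decode)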
+kubectl get secret mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 --decode + +kubectl get secret mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 --decode +``` + +KubeBlocks 默认会创建一个名为 "mysql-cluster-mysql" 的 ClusterIP 类型服务,用于访问 MySQL 集群。 + +```bash +kubectl get svc -l app.kubernetes.io/instance=mysql-cluster +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +mysql-cluster-mysql ClusterIP 172.20.253.119 3306/TCP 153m +``` + +您可以通过登录到 Kubernetes 集群中的 Pod(例如 MySQL 集群的主副本),并通过该服务访问数据库。在同一个 Kubernetes 集群内,ClusterIP 是可访问的。 + + + +```bash +kubectl exec -ti -n default mysql-cluster-mysql-0 -- mysql -h mysql-cluster-mysql -uroot -pkni676X2W1 +``` + +或者,您可以使用 `kubectl port-forward` 命令将 MySQL 集群主副本的 3306 端口映射到本地机器的 3306 端口: + +```bash +kubectl port-forward svc/mysql-cluster-mysql 3306:3306 -n default +Forwarding from 127.0.0.1:3306 -> 3306 +Forwarding from [::1]:3306 -> 3306 +``` + +然后,打开另一个 shell 并使用 mysql 命令行工具连接到本地端口 3306: + + + +```bash +mysql -h 127.0.0.1 -P3306 -uroot -pkni676X2W1 +``` + +使用 `kubectl exec` 和 `kubectl port-forward` 是用于快速测试 Operator 功能的方法,不应在生产环境中使用。在生产环境中,应通过服务(Service)访问 MySQL 集群。若需从 Kubernetes 外部访问数据库,则需要配置提供 EXTERNAL-IP 的 LoadBalancer 或 NodePort 类型服务。请参考[访问 MySQL 集群](./04-operations/05-manage-loadbalancer)在您的环境中配置服务。 + +## 停止 MySQL 集群 + +停止集群会释放该集群的所有 Pod,但 PVC、Secret、ConfigMap 和服务资源将被保留。当您希望节省集群成本时,此操作非常有用。 + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mysql/stop.yaml +``` + + + + + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mysql-stop + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. + clusterName: mysql-cluster + type: Stop +``` + +或者,您也可以通过将 `spec.componentSpecs.stop` 字段设置为 true 来停止集群。 + +(严格保持原文格式与换行) + +```bash +kubectl edit cluster mysql-cluster +``` + + + + + +```yaml +spec: + componentSpecs: + - name: mysql + stop: true # set stop `true` to stop the component + replicas: 2 +``` + + + + +## 启动已停止的 MySQL 集群 + +启动已停止的集群 + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/mysql/start.yaml +``` + + + + + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: mysql-start + namespace: demo +spec: + # Specifies the name of the Cluster resource that this operation is targeting. 
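  # (补充注释,非原文内容)Start 会重新创建 Stop 时释放的 Pod;
  # 停止期间保留的 PVC 将被重新挂载,数据不受影响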
  clusterName: mysql-cluster
  type: Start
```

或者,您也可以通过将 `spec.componentSpecs.stop` 字段设置为 `false` 来启动已停止的集群:

```bash
kubectl edit cluster mysql-cluster
```

```yaml
spec:
  componentSpecs:
    - name: mysql
      stop: false # set to `false` (or remove this field) to start the component
      replicas: 2
```

## 销毁 MySQL 集群

您可以使用以下命令删除集群:

```bash
kubectl delete cluster mysql-cluster
```

删除 Cluster 时的行为取决于 `terminationPolicy` 字段的值:
- 如果 terminationPolicy 值为 DoNotTerminate,删除 Cluster 不会移除与该 Cluster 相关的任何资源。
- 如果 terminationPolicy 值为 Delete,删除 Cluster 会移除与该 Cluster 相关的所有资源,包括 PVC、Secret、ConfigMap 和 Service。
- 如果 terminationPolicy 值为 WipeOut,删除 Cluster 会移除与该 Cluster 相关的所有资源,包括 PVC、Secret、ConfigMap 和 Service,以及外部存储中的快照和备份。

在测试环境中,您可以使用以下命令删除 Cluster 以释放所有资源:

```bash
kubectl patch cluster mysql-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge"
kubectl delete cluster mysql-cluster
```

diff --git a/docs/zh/preview/kubeblocks-for-mysql/03-topologies/01-semisync.mdx b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/01-semisync.mdx
new file mode 100644
index 00000000..e9433abe
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/01-semisync.mdx
@@ -0,0 +1,377 @@
---
description: 了解如何使用KubeBlocks部署MySQL半同步复制集群。本指南涵盖配置、验证、故障转移测试及超时设置等内容。
keywords:
- KubeBlocks
- MySQL
- Semi-Synchronous Replication
- Kubernetes
- High Availability
sidebar_label: MySQL 半同步集群
sidebar_position: 1
title: 使用KubeBlocks部署MySQL半同步集群
---
# 使用 KubeBlocks 部署 MySQL 半同步集群

**半同步复制** 通过要求主节点在提交事务前至少等待一个副本节点的确认,从而提高了主节点与副本节点之间的数据一致性。本指南将引导您完成使用 KubeBlocks 设置 MySQL 半同步复制集群的过程。

## 前提条件

在继续之前,请确保满足以下要求:
- 环境准备:
  - 已启动并运行一个 Kubernetes 集群。
  - 已配置 kubectl CLI 工具以与集群通信。
  - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。
- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间:

```bash
kubectl create ns demo
namespace/demo created
```

## 部署 MySQL 半同步集群

KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群。

应用以下 YAML 配置:

```yaml
kubectl apply -f - < 

Topology:
COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME
mysql example-mysql-cluster-mysql-0 primary Running ap-southeast-1a ip-10-0-1-93.ap-southeast-1.compute.internal/10.0.1.93 Dec 24,2024 09:09 UTC+0800
mysql example-mysql-cluster-mysql-1 secondary Running ap-southeast-1b ip-10-0-2-253.ap-southeast-1.compute.internal/10.0.2.253 Dec 24,2024 09:09 UTC+0800

Resources Allocation:
COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS
mysql false 500m / 500m 512Mi / 512Mi data:20Gi

Images:
COMPONENT TYPE IMAGE
mysql docker.io/apecloud/mysql:8.0.35

Data Protection:
BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME

Show cluster events: kbcli cluster list-events -n default example-mysql-cluster
```

### 验证组件状态

```bash
kubectl get component example-mysql-cluster-mysql -n demo
```

预期输出:

```bash
NAME DEFINITION SERVICE-VERSION STATUS AGE
example-mysql-cluster-mysql mysql-8.0-1.0.0 8.0.35 Running 2m28s
```

## 连接到 MySQL 集群

KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。通过以下命令获取凭据:

```bash
kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d
root

kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d 
+z475N4c6ib +``` + +### 连接到主实例 +要连接到集群的主节点,请使用 MySQL 客户端: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pz475N4c6ib +``` + + + + +## 测试半同步复制 + +在本节中,我们将通过验证 Pod 角色并检查其复制状态来测试 MySQL 集群的半同步复制功能。 + +### 1. 验证 Pod 角色 +通过检查 Pod 角色来识别主节点和副本实例: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +预期输出: + +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +### 2. 检查复制状态 +#### 主节点 +运行以下命令检查主节点上的半同步复制状态: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -pz475N4c6ib -e "show status like 'Rpl%_status';" +``` + +示例输出: + +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` + +说明: +- "Rpl_semi_sync_source_status: ON":表示主实例已配置为半同步复制的主节点(master)。 +- "Rpl_semi_sync_replica_status: OFF":表示主实例在当前复制架构中不作为从节点运行。 + +#### 从节点 +检查从节点上的半同步复制状态: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -pz475N4c6ib -e "show status like 'Rpl%_status';" +``` + +示例输出: + +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | ON | +| Rpl_semi_sync_source_status | OFF | ++------------------------------+-------+ +``` + +说明: +- "Rpl_semi_sync_replica_status: ON":表示该从实例正在作为半同步副本运行,并持续接收并确认来自主实例的变更。 +- "Rpl_semi_sync_source_status: OFF":表示该从实例在复制架构中不作为源(或主)节点运行。 + +## 检查与配置超时设置 + +以下是一个检查 'rpl_semi_sync_source_timeout' 变量当前值的示例命令。 +该值通常通过 'SEMI_SYNC_TIMEOUT' 环境变量设置。 +如果未显式设置 'SEMI_SYNC_TIMEOUT' 环境变量,则 'rpl_semi_sync_source_timeout' 的默认值为 10000 毫秒(10 秒)。 + +### 检查当前超时值 + +在以下示例中,可以看到该值已被配置为 3000 毫秒: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pz475N4c6ib -e "show variables like 'rpl_semi_sync_source_timeout';" +``` + +预期输出: + +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| rpl_semi_sync_source_timeout | 3000 | ++------------------------------+-------+ +``` + +### 更新超时时间 +要更新超时时间,请修改集群配置并重新应用 YAML 文件。例如: + +```yaml +kubectl apply -f - < +proxysql example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local:6033 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1b ip-10-0-2-221.ap-southeast-1.compute.internal/10.0.2.221 Feb 10,2025 08:32 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1a ip-10-0-1-188.ap-southeast-1.compute.internal/10.0.1.188 Feb 10,2025 08:32 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-0 Running 
ap-southeast-1b ip-10-0-2-221.ap-southeast-1.compute.internal/10.0.2.221 Feb 10,2025 08:34 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-1 Running ap-southeast-1a ip-10-0-1-188.ap-southeast-1.compute.internal/10.0.1.188 Feb 10,2025 08:34 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi +proxysql 500m / 500m 512Mi / 512Mi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 +proxysql proxysql-mysql-1.0.0 docker.io/apecloud/proxysql:2.4.4 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + + + + +## 连接到 MySQL 集群 + +KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。通过以下命令获取凭据: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +22mue70Hx6 +``` + +### 通过 ProxySQL 连接 +使用 ProxySQL 连接到 MySQL 集群: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local -P6033 -uroot -p22mue70Hx6 +``` + +### 直接连接 MySQL +或者,直接连接到 MySQL 实例: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -p22mue70Hx6 +``` + + + + +## 测试半同步复制 + +### 验证 Pod 角色 +列出集群中的所有 Pod 并检查其角色: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +预期输出: + +```bash +example-mysql-cluster-mysql-0 secondary +example-mysql-cluster-mysql-1 primary +``` + +### 检查复制状态 +验证主节点和副本节点的复制状态: + +### 主节点 +在主节点上运行以下命令: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -p22mue70Hx6 -e "show status like 'Rpl%_status';" +``` + +预期输出: + +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | ON | +| Rpl_semi_sync_source_status | OFF | ++------------------------------+-------+ +``` + +说明: +- "Rpl_semi_sync_replica_status: ON":表示该从节点当前作为半同步副本运行,正在正常接收并确认来自主节点的变更。 +- "Rpl_semi_sync_source_status: OFF":表示该从节点在复制架构中不作为主节点(master)运行。 + +### 副本节点 +在副本节点上执行以下命令: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless.demo.svc.cluster.local -uroot -p22mue70Hx6 -e "show status like 'Rpl%_status';" +``` + +预期输出: + +```sql ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` + +说明: +- "Rpl_semi_sync_source_status: ON":表示主实例已配置为半同步复制的主节点(master)。 +- "Rpl_semi_sync_replica_status: OFF":表示主实例在当前复制架构中不作为从节点运行。 + + + +## 故障转移测试 +### 触发故障转移 +要测试故障转移机制,请删除主节点 Pod: + +```bash +kubectl delete pod example-mysql-cluster-mysql-1 -n demo +``` + 
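如果希望实时观察角色切换的全过程,可以在另一个终端中持续监听 Pod 的角色标签变化(以下命令仅为可选的演示步骤;`-L` 用于额外显示指定标签列,`-w` 表示持续监听):

```bash
# 持续监听 Pod 及其 kubeblocks.io/role 标签的变化(Ctrl+C 退出)
kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster \
  -L kubeblocks.io/role -w
```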
+这将触发故障转移,从节点实例将被提升为主节点角色。您可以通过以下方式验证 Pod 的新角色: +### 验证更新后的角色 + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +预期输出: + +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 +``` + +一段时间后,被删除的 Pod 将会重新创建并作为副本重新加入集群: + +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + + + + +## 清理 +要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + + + +## 概述 +在本指南中,您学习了如何: +- 使用 KubeBlocks 部署带有 ProxySQL 的 MySQL 半同步复制集群 +- 验证集群角色和复制状态 +- 测试高可用性的故障转移机制 + +通过将 MySQL 半同步集群与 ProxySQL 结合使用,您可以为生产级部署实现无缝故障转移、高效的流量管理以及更高的可靠性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/03-topologies/03-mgr.mdx b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/03-mgr.mdx new file mode 100644 index 00000000..ee41be30 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/03-mgr.mdx @@ -0,0 +1,254 @@ +--- +description: 了解如何使用KubeBlocks部署和管理MySQL组复制(MGR)集群。本指南涵盖配置、验证、故障转移测试及清理操作。 +keywords: +- KubeBlocks +- MySQL +- Group Replication +- Kubernetes +- High Availability +sidebar_label: MySQL 组复制集群 +sidebar_position: 3 +title: 使用KubeBlocks部署MySQL组复制集群 +--- +# 使用 KubeBlocks 部署 MySQL 组复制集群 + +**MySQL 组复制 (MGR)** 通过跨多个 MySQL 实例同步数据,提供高可用性和可扩展性。它确保集群中的所有节点无缝参与复制,具备自动故障转移和自我修复能力。本指南将引导您使用 **KubeBlocks** 部署 MySQL 组复制集群,该工具简化了 Kubernetes 中 MySQL 集群的管理和部署。 + +## 前提条件 + +在继续之前,请确保满足以下要求: +- 环境准备: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。 +- 命名空间准备:为了保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 部署 MySQL 组复制集群 + +KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个部署三节点 MySQL 组复制集群的配置示例。 + +应用以下 YAML 配置来部署 MySQL 组复制(MGR)集群: + +```yaml +kubectl apply -f - < + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-155.ap-southeast-1.compute.internal/10.0.3.155 Feb 10,2025 22:23 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-204.ap-southeast-1.compute.internal/10.0.3.204 Feb 10,2025 22:23 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-2 secondary Running ap-southeast-1c ip-10-0-3-75.ap-southeast-1.compute.internal/10.0.3.75 Feb 10,2025 22:23 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-mgr-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + + + + +## 检查集群角色 +要验证 MySQL 实例的角色('主节点' 和 '从节点'),请使用以下命令: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +示例输出: + +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 secondary +``` + + + + +## 连接到 MySQL 集群 +KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。通过以下命令获取凭据: + +```bash +kubectl get secrets -n 
demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +q95G8nd87K +``` + +### 连接到主节点 +要连接到集群的主节点,请使用 MySQL 客户端: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pq95G8nd87K +``` + + + + +## 检查组复制状态 + +运行以下查询来检查组复制集群的状态: + +```sql +mysql> SELECT * FROM performance_schema.replication_group_members; +``` + +示例输出: + +```sql ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| group_replication_applier | a17c375d-e7ba-11ef-8b01-3aa4e0d3963f | example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | +| group_replication_applier | a99688a7-e7ba-11ef-be5b-de475d052d4a | example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless | 3306 | ONLINE | PRIMARY | 8.0.35 | XCom | +| group_replication_applier | c4403516-e7ba-11ef-8f11-8a79c903edf0 | example-mysql-cluster-mysql-2.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +3 rows in set (0.00 sec) +``` + +输出中的角色应与 kubectl 输出中显示的角色保持一致。 + +## 故障转移测试 +### 触发故障转移 +要测试 MySQL 组复制故障转移机制,请删除主节点: + +```bash +kubectl delete pod example-mysql-cluster-mysql-0 -n demo +pod "example-mysql-cluster-mysql-0" deleted +``` + +这将触发故障转移,其中一个从节点将被提升为主节点角色。 + +### 验证新角色 +运行以下命令检查更新后的角色: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +示例输出: + +```bash +example-mysql-cluster-mysql-0 +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` + +当被删除的 Pod('example-mysql-cluster-mysql-0')被重新创建后,它将作为从节点重新加入集群: + +```bash +example-mysql-cluster-mysql-0 secondary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` + +这展示了故障转移机制如何通过在主节点故障时自动将一个从节点提升为主节点角色来确保高可用性。 + +## 清理 +要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + + + +## 总结 +在本指南中,您学习了如何: +- 使用 KubeBlocks 部署 MySQL 组复制集群。 +- 验证集群状态和角色分配。 +- 连接到主节点并检查复制状态。 +- 测试故障转移机制以确保高可用性。 + +通过利用 KubeBlocks,在 Kubernetes 中管理 MySQL 组复制集群变得高效且简单,使您能够为数据库工作负载实现高可用性和可扩展性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx new file mode 100644 index 00000000..121b8ebf --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/04-mgr-with-proxysql.mdx @@ -0,0 +1,294 @@ +--- +description: 了解如何使用KubeBlocks部署集成ProxySQL的MySQL组复制(MGR)集群。本指南涵盖配置、验证、故障转移测试及清理操作。 
+keywords: +- KubeBlocks +- MySQL +- ProxySQL +- Group Replication +- High Availability +- Kubernetes +sidebar_label: MySQL Group Replication 搭配 ProxySQL +sidebar_position: 4 +title: 使用KubeBlocks部署带ProxySQL的MySQL组复制集群 +--- +# 使用 KubeBlocks 部署带 ProxySQL 的 MySQL 组复制集群 + +**MySQL 组复制(MGR)** 通过跨多个 MySQL 实例同步数据来确保高可用性和容错能力。它提供自动故障转移功能,在发生故障时将次级节点提升为主节点,确保持续可用性。 + +**ProxySQL** 是一个高性能 MySQL 代理,作为 MySQL 客户端和数据库服务器之间的中间件。它提供查询路由、负载均衡、查询缓存和无缝故障转移等功能。当与 MGR 结合使用时,ProxySQL 可提升集群性能并实现高效的流量管理。 + +本指南介绍如何使用 **KubeBlocks** 部署 **集成 ProxySQL 的 MySQL 组复制(MGR)集群**,简化在 Kubernetes 中管理 MySQL 集群的过程。 + +## 前提条件 + +在继续之前,请确保满足以下条件: +- 环境设置: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 部署 MySQL 组复制集群 + +KubeBlocks 采用声明式配置方法简化 MySQL 集群管理。以下是一个配置示例,用于部署包含三个 MySQL 节点和两个 ProxySQL 实例的 MySQL 组复制集群。 + +应用以下 YAML 配置: + +```yaml +kubectl apply -f - < +proxysql example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local:6033 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-34.ap-southeast-1.compute.internal/10.0.3.34 Feb 11,2025 12:47 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-228.ap-southeast-1.compute.internal/10.0.3.228 Feb 11,2025 12:47 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-2 secondary Running ap-southeast-1c ip-10-0-3-187.ap-southeast-1.compute.internal/10.0.3.187 Feb 11,2025 12:47 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-0 Running ap-southeast-1c ip-10-0-3-228.ap-southeast-1.compute.internal/10.0.3.228 Feb 11,2025 12:49 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-1 Running ap-southeast-1c ip-10-0-3-187.ap-southeast-1.compute.internal/10.0.3.187 Feb 11,2025 12:49 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi +proxysql 500m / 500m 512Mi / 512Mi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-mgr-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 +proxysql proxysql-mysql-1.0.0 docker.io/apecloud/proxysql:2.4.4 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + + + + +## 检查集群角色 +要验证 MySQL 实例的角色(例如主节点和从节点),请运行: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +示例输出: + +```bash +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 secondary +``` + + + + +## 连接到 MySQL 集群 +KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。通过以下命令获取凭据: + + + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo 
example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +XKNv07D612 +``` + +### 通过 ProxySQL 连接 +使用 ProxySQL 连接到 MySQL 集群: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local -P6033 -uroot -pXKNv07D612 +``` + +### 直接连接 MySQL +或者,直接连接到 MySQL 实例: + + + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql.demo.svc.cluster.local -uroot -pXKNv07D612 +``` + + + + +## 检查组复制状态 +要检查组复制集群的状态,请运行以下查询: + + + +```sql +mysql> SELECT * FROM performance_schema.replication_group_members; +``` + +示例输出: + +```sql ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +| group_replication_applier | a17c375d-e7ba-11ef-8b01-3aa4e0d3963f | example-mysql-cluster-mysql-1.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | +| group_replication_applier | a99688a7-e7ba-11ef-be5b-de475d052d4a | example-mysql-cluster-mysql-0.example-mysql-cluster-mysql-headless | 3306 | ONLINE | PRIMARY | 8.0.35 | XCom | +| group_replication_applier | c4403516-e7ba-11ef-8f11-8a79c903edf0 | example-mysql-cluster-mysql-2.example-mysql-cluster-mysql-headless | 3306 | ONLINE | SECONDARY | 8.0.35 | XCom | ++---------------------------+--------------------------------------+--------------------------------------------------------------------+-------------+--------------+-------------+----------------+----------------------------+ +3 rows in set (0.00 sec) +``` + +输出中的角色应与 kubectl 输出中显示的角色一致。 + +## 故障转移测试 +### 触发故障转移 +要测试 MySQL 组复制故障转移机制,请删除主节点: + +```bash +kubectl delete pod example-mysql-cluster-mysql-0 -n demo +pod "example-mysql-cluster-mysql-0" deleted +``` + +这将触发故障转移,其中一个从节点将被提升为主节点角色。 + +### 验证新角色 +运行以下命令检查更新后的角色: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +示例输出: + +```bash +example-mysql-cluster-mysql-0 +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` + +当被删除的 Pod('example-mysql-cluster-mysql-0')被重新创建后,它将作为从节点重新加入集群: + +```bash +example-mysql-cluster-mysql-0 secondary +example-mysql-cluster-mysql-1 secondary +example-mysql-cluster-mysql-2 primary +``` + +这展示了故障转移机制如何通过在主节点故障时自动将一个从节点提升为主节点角色,从而确保高可用性。 + +## 清理 +要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + + + +## 总结 +在本指南中,您学习了如何: +- 使用 KubeBlocks 部署带有 ProxySQL 的 MySQL 组复制集群。 +- 验证集群的部署和角色分配。 +- 通过 ProxySQL 或直接方式连接到集群。 +- 检查复制状态并测试故障转移机制。 + +通过将 MySQL 组复制与 ProxySQL 结合使用,此配置确保了高可用性、无缝故障转移和高效的流量管理,非常适合生产级部署。 + diff --git a/docs/zh/preview/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx new file mode 100644 index 00000000..d971ff81 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/05-orchestrator.mdx @@ -0,0 +1,412 @@ +--- 
+description: 使用KubeBlocks部署MySQL半同步复制集群(搭配Orchestrator)的逐步指南 +keywords: +- KubeBlocks +- MySQL +- Orchestrator +- Kubernetes +- DBaaS +sidebar_label: 使用Orchestrator的MySQL集群 +sidebar_position: 5 +title: 使用KubeBlocks部署MySQL集群与Orchestrator +--- +# 使用 KubeBlocks 部署 MySQL 集群与 Orchestrator + +半同步复制通过要求至少一个副本节点确认事务提交,从而提升主节点与副本节点间的数据一致性。 + +Orchestrator 是一款强大的 MySQL 高可用性(HA)与故障转移管理工具。它为 MySQL 集群提供自动化监控、故障检测和拓扑管理功能,是管理大规模 MySQL 部署的关键组件。通过 Orchestrator,您可以实现: +- **监控复制拓扑**:Orchestrator 持续监控 MySQL 复制拓扑,并提供集群状态的实时视图 +- **自动化故障转移**:当主节点发生故障时,Orchestrator 会自动将健康的副本节点提升为主节点,确保最短停机时间 +- **拓扑管理**:Orchestrator 让您能够轻松地重新配置、重新平衡和恢复 MySQL 拓扑 + +本指南将带您完成使用 **KubeBlocks** 搭建 MySQL 半同步复制集群,并配合 **Orchestrator** 实现高效故障转移与恢复管理的全过程。 + +## 前提条件 + +在继续之前,请确保满足以下要求: +- 环境设置: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 安装编排器插件 + +1. 查看插件版本。 + +```bash +# including pre-release versions +helm search repo kubeblocks/orchestrator --devel --versions +``` + +2. 安装插件。使用 '--version' 指定版本。 + +```bash +helm install kb-addon-orc kubeblocks/orchestrator --namespace kb-system --create-namespace --version x.y.z +``` + +3. 验证该 Addon 是否已安装。 + +```bash +helm list -A +``` + +预期输出: + +```bash +orchestrator kb-system 1 2025-02-14 11:12:32.286516 +0800 CST deployed orchestrator-1.0.0 3.2.6 +``` + +STATUS 显示已部署,该插件已成功安装。 + +## 使用Orchestrator部署MySQL集群 + +KubeBlocks采用声明式方法管理MySQL集群。以下是一个配置示例,用于部署一个3节点(1主2从)的半同步模式MySQL集群。同时,该配置还会创建一个采用Raft高可用模式的Orchestrator集群,并配置MySQL半同步集群与Orchestrator集群之间的关系。 + +集群配置 + +```yaml +kubectl apply -f - < + example-mysql-cluster-mysql-1.demo.svc.cluster.local:3306 + example-mysql-cluster-mysql-server.demo.svc.cluster.local:3306 +proxysql example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-0.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6032 + example-mysql-cluster-proxysql-proxy-ordinal-1.demo.svc.cluster.local:6033 + example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local:6033 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-233.ap-southeast-1.compute.internal/10.0.3.233 Mar 11,2025 10:21 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-233.ap-southeast-1.compute.internal/10.0.3.233 Mar 11,2025 10:22 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-0 Running ap-southeast-1c ip-10-0-3-55.ap-southeast-1.compute.internal/10.0.3.55 Mar 11,2025 10:23 UTC+0800 +proxysql 2.4.4 example-mysql-cluster-proxysql-1 Running ap-southeast-1c ip-10-0-3-40.ap-southeast-1.compute.internal/10.0.3.40 Mar 11,2025 10:23 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi +proxysql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-orc-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 +proxysql proxysql-mysql-1.0.0 docker.io/apecloud/proxysql:2.4.4 + +Show cluster events: kbcli cluster 
list-events -n demo example-mysql-cluster +``` + +要获取有关 Orchestrator 集群的详细信息: + +```bash +kbcli cluster describe example-orc-cluster -n demo +``` + +示例输出: + +```bash +Name: example-orc-cluster Created Time: Mar 11,2025 10:21 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo orchestrator raft Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +orchestrator example-orc-cluster-orchestrator.demo.svc.cluster.local:80 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +orchestrator 3.2.6 example-orc-cluster-orchestrator-0 primary Running ap-southeast-1c ip-10-0-3-55.ap-southeast-1.compute.internal/10.0.3.55 Mar 11,2025 10:21 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-1 secondary Running ap-southeast-1c ip-10-0-3-233.ap-southeast-1.compute.internal/10.0.3.233 Mar 11,2025 10:21 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-2 secondary Running ap-southeast-1c ip-10-0-3-55.ap-southeast-1.compute.internal/10.0.3.55 Mar 11,2025 10:22 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +orchestrator 500m / 500m 512Mi / 512Mi data:20Gi kb-default-sc + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +orchestrator orchestrator-raft docker.io/apecloud/orchestrator:v3.2.6 + +Show cluster events: kbcli cluster list-events -n demo example-orc-cluster +``` + + + + +## 连接到 MySQL 集群 +KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。通过以下命令获取凭据: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +d3a5iS499Z +``` + +### 通过 ProxySQL 连接 +使用 ProxySQL 连接到 MySQL 集群: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-proxysql-proxy-server.demo.svc.cluster.local -P6033 -uroot -pd3a5iS499Z +``` + +### 直接连接 MySQL +或者,直接连接到 MySQL 实例: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-server.demo.svc.cluster.local -uroot -pd3a5iS499Z +``` + + + + +## 测试半同步复制 + +在本节中,我们将通过验证 Pod 角色并检查其复制状态,来测试 MySQL 集群的半同步复制功能。 + +首先,列出集群中的所有 Pod 及其角色,以识别主节点和从节点实例: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +示例输出: + +``` +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +从输出中,我们可以看到以下信息: +- 'example-mysql-cluster-mysql-0' 是主节点实例。 +- 'example-mysql-cluster-mysql-1' 是从节点实例。 +'kubeblocks.io/role' 标签帮助我们轻松区分复制设置中各实例的角色。 + + +接下来,连接到主节点实例('example-mysql-cluster-mysql-0')并检查其半同步复制状态。使用以下命令在 MySQL Pod 内部执行查询: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.demo.svc.cluster.local -uroot -pd3a5iS499Z -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. 
+------------------------------+-------+
| Variable_name | Value |
+------------------------------+-------+
| Rpl_semi_sync_replica_status | OFF |
| Rpl_semi_sync_source_status | ON |
+------------------------------+-------+
```

说明:
- "Rpl_semi_sync_source_status: ON":表示主实例已配置为半同步复制的主节点(master)。
- "Rpl_semi_sync_replica_status: OFF":表示主实例在当前复制架构中不作为从节点运行。

```bash
kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.demo.svc.cluster.local -uroot -pd3a5iS499Z -e "show status like 'Rpl%_status';"
mysql: [Warning] Using a password on the command line interface can be insecure.
+------------------------------+-------+
| Variable_name | Value |
+------------------------------+-------+
| Rpl_semi_sync_replica_status | ON |
| Rpl_semi_sync_source_status | OFF |
+------------------------------+-------+
```

说明:
- "Rpl_semi_sync_replica_status: ON":表示该从节点当前作为半同步副本运行,正在主动接收并确认来自主节点的变更。
- "Rpl_semi_sync_source_status: OFF":表示该从节点在复制架构中不作为主节点运行。

## 故障转移测试

以下步骤演示如何在 MySQL 集群中触发故障转移并验证 Pod 的角色变化。

要发起故障转移,请删除当前被分配为主节点角色的 Pod:

```bash
kubectl delete pod example-mysql-cluster-mysql-0 -n demo
pod "example-mysql-cluster-mysql-0" deleted
```

这将触发故障转移,从节点实例将被提升为主节点角色。
一段时间后,被终止的 Pod 将被重新创建并担任从节点角色:

```bash
kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}'
```

预期输出:

```bash
example-mysql-cluster-mysql-0 secondary
example-mysql-cluster-mysql-1 primary
```

此过程演示了故障转移机制如何在故障发生时自动将从节点实例提升为主节点角色,从而确保高可用性。

## 清理
要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群:

```bash
kubectl delete cluster example-mysql-cluster -n demo
kubectl delete cluster example-orc-cluster -n demo
kubectl delete ns demo
```

## 概述

本指南演示了如何使用 KubeBlocks 部署一个支持半同步复制的 MySQL 集群,并通过集成 Orchestrator 实现高可用性和故障转移管理。借助声明式配置方法,您可以轻松在 Kubernetes 环境中扩展和管理 MySQL 集群。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx
new file mode 100644
index 00000000..10a171c1
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/06-orchestrator-with-proxysql.mdx
@@ -0,0 +1,422 @@
---
description: 了解如何使用KubeBlocks部署带有Orchestrator和ProxySQL的MySQL半同步复制集群,实现高可用性和高效查询路由。
keywords:
- KubeBlocks
- MySQL
- Orchestrator
- ProxySQL
- Kubernetes
- DBaaS
sidebar_label: MySQL 搭配 Orchestrator 和 ProxySQL
sidebar_position: 6
title: 使用KubeBlocks部署带有Orchestrator和ProxySQL的MySQL集群
---
# 使用 KubeBlocks 部署 MySQL 集群及 Orchestrator 和 ProxySQL

半同步复制通过要求至少一个副本节点确认事务提交,增强了主节点与副本节点之间的数据一致性。

本指南演示如何使用 **KubeBlocks** 部署 MySQL 集群,其中包含用于高可用性和故障转移管理的 **Orchestrator**,以及用于高级查询路由和负载均衡的 **ProxySQL**。这些工具共同构建了一个健壮且高效的 MySQL 集群基础设施。

### **什么是 Orchestrator?**

Orchestrator 是一个强大的 MySQL 高可用性(HA)和故障转移管理工具。它自动化了 MySQL 集群的监控、故障检测和拓扑管理,非常适合管理大规模部署。主要功能包括:

- **复制拓扑监控**:提供 MySQL 复制拓扑的实时视图。
- **自动化故障转移**:在发生故障时将健康的副本提升为主节点,确保最短停机时间。
- **拓扑管理**:简化 MySQL 集群的重新配置、重新平衡和恢复。

### **什么是 ProxySQL?**

ProxySQL 是一个高性能的 MySQL 代理,充当 MySQL 客户端和数据库服务器之间的中间件。它通过以下功能提升集群性能:

- **查询路由**:根据查询目的(如读或写)将查询定向到适当的服务器。
- **负载均衡**:在副本间分配流量以优化资源使用。
- **查询缓存**:通过缓存频繁查询减少数据库负载。
- **故障转移支持**:无缝处理故障转移场景,不中断应用服务。

## 前提条件

在继续之前,请确保满足以下条件:
- 环境设置:
  - 已启动并运行一个 Kubernetes 集群。
  - 已配置 kubectl CLI 工具以与集群通信。
  - 已安装 [KubeBlocks 
CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 安装编排器插件 + +1. 查看插件版本。 + +```bash +# including pre-release versions +helm search repo kubeblocks/orchestrator --devel --versions +``` + +2. 安装插件。使用 '--version' 指定版本。 + +```bash +helm install kb-addon-orc kubeblocks/orchestrator --namespace kb-system --create-namespace --version x.y.z +``` + +3. 验证该 Addon 是否已安装。 + +```bash +helm list -A +``` + +预期输出: + +```bash +orchestrator kb-system 1 2025-02-14 11:12:32.286516 +0800 CST deployed orchestrator-1.0.0 3.2.6 +``` + +状态为已部署,此插件已成功安装。 + +## 使用 Orchestrator 部署 MySQL 集群 + +KubeBlocks 提供了一种声明式的方法来部署 MySQL 集群。以下是一个配置示例,用于在半同步模式下部署包含 2 个节点(1 个主节点和 1 个从节点)的 MySQL 集群。此配置还集成了 Orchestrator(3 个节点)用于故障转移管理,以及 ProxySQL(2 个节点)用于查询路由和负载均衡。 + +集群配置 + +```yaml +kubectl apply -f - < + example-mysql-cluster-mysql-1.demo.svc.cluster.local:3306 + example-mysql-cluster-mysql-server.demo.svc.cluster.local:3306 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +mysql 8.0.35 example-mysql-cluster-mysql-0 primary Running ap-southeast-1c ip-10-0-3-68.ap-southeast-1.compute.internal/10.0.3.68 Mar 10,2025 16:43 UTC+0800 +mysql 8.0.35 example-mysql-cluster-mysql-1 secondary Running ap-southeast-1c ip-10-0-3-225.ap-southeast-1.compute.internal/10.0.3.225 Mar 10,2025 16:44 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +mysql mysql-orc-8.0-1.0.0 docker.io/apecloud/mysql:8.0.35 + docker.io/apecloud/mysqld-exporter:0.15.1 + apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:1.0.0 + +Show cluster events: kbcli cluster list-events -n demo example-mysql-cluster +``` + +要获取有关 Orchestrator 集群的详细信息: + +```bash +kbcli cluster describe example-orc-cluster -n demo +``` + +示例输出: + +```bash +Name: example-orc-cluster Created Time: Mar 10,2025 16:43 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo orchestrator raft Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +orchestrator example-orc-cluster-orchestrator.demo.svc.cluster.local:80 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +orchestrator 3.2.6 example-orc-cluster-orchestrator-0 primary Running ap-southeast-1c ip-10-0-3-225.ap-southeast-1.compute.internal/10.0.3.225 Mar 10,2025 16:43 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-1 secondary Running ap-southeast-1c ip-10-0-3-68.ap-southeast-1.compute.internal/10.0.3.68 Mar 10,2025 16:43 UTC+0800 +orchestrator 3.2.6 example-orc-cluster-orchestrator-2 secondary Running ap-southeast-1c ip-10-0-3-225.ap-southeast-1.compute.internal/10.0.3.225 Mar 10,2025 16:44 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +orchestrator 500m / 500m 512Mi / 512Mi data:20Gi kb-default-sc + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +orchestrator orchestrator-raft docker.io/apecloud/orchestrator:v3.2.6 + +Show cluster events: kbcli cluster list-events -n demo example-orc-cluster +``` + + + + +## 连接到 MySQL 集群 +KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。通过以下命令获取凭据: + +```bash +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o 
jsonpath='{.data.username}' | base64 -d +root + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +GX596H32Oz +``` + +要连接到集群的主节点,请使用 MySQL 客户端: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-server.demo.svc.cluster.local -uroot -pGX596H32Oz +``` + + + + +## 测试半同步复制 + +在本节中,我们将通过验证 Pod 角色并检查其复制状态,来测试 MySQL 集群的半同步复制功能。 + +首先,列出集群中的所有 Pod 及其角色,以识别主节点和从节点实例: + +```bash +kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}' +``` + +示例输出: + +``` +example-mysql-cluster-mysql-0 primary +example-mysql-cluster-mysql-1 secondary +``` + +从输出中,我们可以看到以下信息: +- 'example-mysql-cluster-mysql-0' 是主节点实例。 +- 'example-mysql-cluster-mysql-1' 是从节点实例。 + 'kubeblocks.io/role' 标签帮助我们轻松区分复制设置中各实例的角色。 + + +接下来,连接到主节点实例('example-mysql-cluster-mysql-0')并检查其半同步复制状态。使用以下命令在 MySQL Pod 内部执行查询: + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-0.demo.svc.cluster.local -uroot -pGX596H32Oz -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. ++------------------------------+-------+ +| Variable_name | Value | ++------------------------------+-------+ +| Rpl_semi_sync_replica_status | OFF | +| Rpl_semi_sync_source_status | ON | ++------------------------------+-------+ +``` + +说明: +- "Rpl_semi_sync_source_status: ON":表示主实例已配置为半同步复制的主节点(master)。 +- "Rpl_semi_sync_replica_status: OFF":表示主实例在复制设置中不作为从节点运行。 + + +```bash +kubectl exec -it -n demo example-mysql-cluster-mysql-0 -c mysql -- mysql -h example-mysql-cluster-mysql-1.demo.svc.cluster.local -uroot -pGX596H32Oz -e "show status like 'Rpl%_status';" +mysql: [Warning] Using a password on the command line interface can be insecure. 
+------------------------------+-------+
| Variable_name | Value |
+------------------------------+-------+
| Rpl_semi_sync_replica_status | ON |
| Rpl_semi_sync_source_status | OFF |
+------------------------------+-------+
```

说明:
- "Rpl_semi_sync_replica_status: ON":表示该从节点当前作为半同步副本运行,正在主动接收并确认来自主节点的变更。
- "Rpl_semi_sync_source_status: OFF":表示该从节点在复制架构中不作为源节点(即主节点)运行。

## 故障转移测试

以下步骤演示如何触发 MySQL 集群的故障转移并验证 Pod 的角色变化。

要发起故障转移,请删除当前被分配为主节点角色的 Pod:

```bash
kubectl delete pod example-mysql-cluster-mysql-0 -n demo
pod "example-mysql-cluster-mysql-0" deleted
```

这将触发故障转移,从节点实例将被提升为主节点角色。
一段时间后,被终止的 Pod 将被重新创建并承担从节点角色:

```bash
kubectl get pods -n demo -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}'
```

预期输出:

```bash
example-mysql-cluster-mysql-0 secondary
example-mysql-cluster-mysql-1 primary
```

此过程演示了故障转移机制如何在故障发生时自动将从节点实例提升为主节点角色,从而确保高可用性。

## 清理
要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群:

```bash
kubectl delete cluster example-mysql-cluster -n demo
kubectl delete cluster example-orc-cluster -n demo
kubectl delete ns demo
```

## 概述

本指南演示了如何使用 KubeBlocks 部署一个具备以下特性的 MySQL 集群:
- 半同步复制
- 集成 Orchestrator 实现高可用性
- 通过 ProxySQL 实现查询路由和负载均衡

通过声明式配置方法,您可以轻松在 Kubernetes 环境中扩展和管理 MySQL 集群。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-mysql/03-topologies/_category_.yml b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/_category_.yml
new file mode 100644
index 00000000..8b10f9b7
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mysql/03-topologies/_category_.yml
@@ -0,0 +1,4 @@
collapsed: true
collapsible: true
label: 拓扑结构
position: 3
diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx b/docs/zh/preview/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx
new file mode 100644
index 00000000..61d8eac9
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/01-stop_start_restart.mdx
@@ -0,0 +1,308 @@
---
description: 了解如何在KubeBlocks中管理MySQL集群的生命周期,包括停止、启动和重启集群,以优化资源使用并保持灵活性。
keywords:
- KubeBlocks
- MySQL
- Cluster Management
- Stop
- Start
- Restart
sidebar_label: 生命周期管理
sidebar_position: 1
title: 管理MySQL集群生命周期(停止、启动与重启)
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';


# 管理 MySQL 集群生命周期

本指南演示如何在 **KubeBlocks** 中管理 MySQL 集群的生命周期,包括停止、启动和重启集群。适当的生命周期管理有助于优化资源使用、降低运维成本,并确保 Kubernetes 环境中的灵活性。


## 前提条件

在继续之前,请确保满足以下要求:
- 环境准备:
  - 已启动并运行一个 Kubernetes 集群。
  - 已配置 kubectl CLI 工具以与集群通信。
  - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。
- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间:

```bash
kubectl create ns demo
namespace/demo created
```

## 部署 MySQL 半同步集群

KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群。

应用以下 YAML 配置来部署集群:

```yaml
kubectl apply -f - <


选项 1:使用 OpsRequest

您可以通过 OpsRequest 停止集群:

```yaml
kubectl apply -f - <


选项 2:使用声明式集群 API

或者,您可以通过在集群配置中将 `spec.componentSpecs.stop` 字段设置为 `true` 来停止集群:

```bash
kubectl patch cluster example-mysql-cluster -n demo --type='json' -p='[
  {
    "op": "add",
    "path": "/spec/componentSpecs/0/stop",
    "value": true
  }
]'
```

### 验证集群停止
监控集群状态,确保其已转为"已停止(Stopped)"状态:

```bash
kubectl get cluster -n demo -w
```

示例输出:

```bash
NAME 
CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster mysql Delete Stopping 93s +example-mysql-cluster mysql Delete Stopped 101s +``` + +集群中没有正在运行的 Pod,但持久存储仍被保留。 + +```bash +kubectl get pods -n demo +``` + +预期输出: + +```bash +No resources found in demo namespace. +``` + + + + + +```bash +kubectl get pvc -n demo +``` + + + + +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE +data-example-mysql-cluster-mysql-0 Bound pvc-98ce87ab-acc6-4f95-8638-16e8052f98d8 20Gi RWO kb-default-sc 16m +data-example-mysql-cluster-mysql-1 Bound pvc-5bb87b23-7c38-45de-bf04-f2822051d897 20Gi RWO kb-default-sc 16m +``` + +### 启动集群 + +启动集群会重新创建 Pod 并使集群恢复在线状态。 + + + + +选项 1:使用 OpsRequest + +您可以通过 OpsRequest 启动已停止的集群: + +```yaml +kubectl apply -f - < + + + +选项1:使用声明式集群API + +或者,您也可以通过以下方式启动集群: +- 将`spec.componentSpecs.stop`字段设置为false,或者 +- 完全移除`spec.componentSpecs.stop`字段。 + + + +```bash +kubectl patch cluster example-mysql-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } +]' +``` + + + + + +### 验证集群启动 + +监控集群状态以确保其恢复至运行状态: + +```bash +kubectl get cluster -n demo -w +``` + +示例输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster mysql Delete Updating 5m54s +example-mysql-cluster mysql Delete Running 6m6s +``` + +### 重启集群 + +重启集群允许您为特定组件(如mysql)重新创建Pod,而无需删除或停止整个集群。 + +#### 使用 OpsRequest + +要重启特定组件(例如mysql),请使用以下OpsRequest: + +```yaml +kubectl apply -f - < + + +选项一:使用 VerticalScaling OpsRequest + +应用以下 YAML 为 mysql 组件扩容资源: + +```yaml +kubectl apply -f - < + + +选项二:直接通过集群API更新 + +或者,您也可以通过更新 `spec.componentSpecs.resources` 字段来实现垂直扩缩容,将其值设置为期望的资源规格。 + + + +```yaml +kubectl apply -f - < + + + + + +## 验证 +通过检查集群配置或 Pod 详细信息来验证更新后的资源: + +```bash +kbcli cluster describe example-mysql-cluster -n demo +``` + +预期输出: + +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +mysql 1 / 1 1Gi / 1Gi data:20Gi +``` + + + + +## KubeBlocks 垂直扩展的核心优势 +- **无缝扩缩容**:Pod 按特定顺序重建,确保业务影响最小化 +- **动态资源调整**:根据工作负载需求轻松调整 CPU 和内存配置 +- **灵活控制**:可选择 OpsRequest 实现动态扩缩容,或直接通过 API 更新实现精准控制 +- **高可用保障**:扩缩过程中集群持续可用,保持高可用性 + + + +## 总结 +在本指南中,您学习了如何: +1. 部署一个由 KubeBlocks 管理的 MySQL 集群。 +2. 通过增加或减少 mysql 组件的资源来进行垂直扩展。 +3. 
使用 OpsRequest 和直接更新 Cluster API 两种方式来调整资源分配。

垂直扩展是优化资源利用率和适应不断变化的工作负载需求的强大工具,可确保您的 MySQL 集群保持高性能和弹性。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx
new file mode 100644
index 00000000..f2536eca
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/03-horizontal-scaling.mdx
@@ -0,0 +1,262 @@
---
description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的MySQL集群执行水平扩缩容(横向扩展与收缩)。
keywords:
- KubeBlocks
- MySQL
- Horizontal Scaling
- Scale-Out
- Scale-In
- Kubernetes
sidebar_label: 水平扩展
sidebar_position: 3
title: 使用KubeBlocks水平扩展MySQL集群
---
# 使用 KubeBlocks 水平扩展 MySQL 集群

## 前提条件

在继续之前,请确保满足以下要求:
- 环境准备:
  - 已启动并运行一个 Kubernetes 集群。
  - 已配置 kubectl CLI 工具以与集群通信。
  - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。
- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间:

```bash
kubectl create ns demo
namespace/demo created
```

## 部署 MySQL 半同步集群

部署一个 2 节点的 MySQL 集群(1 个主节点,1 个从节点),采用半同步复制:

```yaml
kubectl apply -f - <


### 选项 1:使用 OpsRequest
通过增加 1 个副本来扩展 MySQL 集群:

```yaml
kubectl apply -f - <


### 选项 2:直接通过 Cluster API 更新

或者,您可以直接更新 Cluster 资源中的 `replicas` 字段:

```bash
kubectl patch cluster example-mysql-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]'
```

### 验证扩容操作

应用操作后,您将看到一个新的 Pod 被创建,MySQL 集群状态从 `Updating` 变为 `Running`,且新创建的 Pod 具有新角色 `secondary`。

```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster
```

示例输出:

```bash
NAME READY STATUS RESTARTS AGE
example-mysql-cluster-mysql-0 4/4 Running 0 4m30s
example-mysql-cluster-mysql-1 4/4 Running 0 4m30s
example-mysql-cluster-mysql-2 4/4 Running 0 49s
```

新副本会自动加入作为从节点。

```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.labels.kubeblocks\.io/role}{"\n"}{end}'
```

示例输出:

```bash
example-mysql-cluster-mysql-0 primary
example-mysql-cluster-mysql-1 secondary
example-mysql-cluster-mysql-2 secondary
```

## 缩容(移除副本)

选项 1:使用 OpsRequest
通过移除 1 个副本来缩容 MySQL 集群:

```yaml
kubectl apply -f - <


选项 1:使用 VolumeExpansion OpsRequest

应用以下 YAML 来增加 mysql 组件的存储卷大小:

```yaml
kubectl apply -f - <


选项 2:直接通过 Cluster API 更新

或者,您可以将 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段更新为所需大小。

```yaml
kubectl apply -f - <


## 验证

使用以下命令检查更新后的集群配置:

```bash
kbcli cluster describe example-mysql-cluster -n demo
```

预期输出:

```bash
Resources Allocation:
COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS
mysql 500m / 500m 512Mi / 512Mi data:30Gi
```

数据 PVC 的存储卷大小已更新为指定值(例如本例中的 30Gi)。

检查集群中 PVC 的状态以确认扩容操作已完成:

```bash
kubectl get pvc -l app.kubernetes.io/instance=example-mysql-cluster -n demo
```

预期输出:

```bash
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
example-mysql-cluster-mysql-data-0 Bound pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 30Gi RWO kb-default-sc 10m
example-mysql-cluster-mysql-data-1 Bound pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx 30Gi RWO kb-default-sc 10m
```

## 关键注意事项
1. 确保存储类支持存储卷扩展(检查 `ALLOWVOLUMEEXPANSION`)。
2. 新容量必须大于当前容量。
3. 根据存储提供商的不同,存储卷扩展可能需要额外配置。

## 总结
在本指南中,您学习了如何:
1. 验证存储类是否支持存储卷扩容。
2. 通过以下两种方式执行存储卷扩容:
   - 使用 OpsRequest 进行动态更新。
   - 通过 Cluster API 进行手动更新。
3. 验证更新后的 PVC 容量并确保扩容操作完成。

通过存储卷扩容功能,您可以高效地扩展 MySQL 集群的存储容量而无需中断服务,确保数据库能够随着应用需求增长而扩展。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx
new file mode 100644
index 00000000..c61ed077
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/05-manage-loadbalancer.mdx
@@ -0,0 +1,465 @@
---
description: 了解如何在KubeBlocks中通过负载均衡器及其他服务类型配置和管理MySQL服务,实现内外网访问。
keywords:
- KubeBlocks
- MySQL
- LoadBalancer
- External Service
- Expose
- Kubernetes
sidebar_label: 管理 MySQL 服务
sidebar_position: 5
title: 使用KubeBlocks声明式集群API创建与销毁MySQL服务
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';


# 使用 KubeBlocks 声明式集群 API 管理 MySQL 服务

本指南提供逐步说明,介绍如何对外或对内暴露由 KubeBlocks 管理的 MySQL 服务。您将学习如何通过云提供商的 LoadBalancer 服务配置外部访问、管理内部服务,以及在不再需要时正确禁用外部暴露。

## 前提条件

在继续之前,请确保满足以下要求:
- 环境准备:
  - 已启动并运行一个 Kubernetes 集群。
  - 已配置 kubectl CLI 工具以与集群通信。
  - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。
- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间:

```bash
kubectl create ns demo
namespace/demo created
```

## 部署 MySQL 半同步集群

KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群。

集群配置

```yaml
kubectl apply -f - < 3306/TCP 5m16s
example-mysql-cluster-mysql-headless ClusterIP None 3306/TCP,3601/TCP,9104/TCP,3501/TCP,3502/TCP,9901/TCP 5m16s
```

## 对外或对内暴露 MySQL 服务

外部地址允许公网访问 MySQL 服务,而内部地址将访问限制在用户的 VPC 内。

### 服务类型对比

| 类型 | 使用场景 | 云成本 | 安全性 |
|----|---|----|---|
| ClusterIP | 内部服务通信 | 免费 | 最高 |
| NodePort | 开发/测试 | 低 | 中等 |
| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 |


### 选项 1:使用 OpsRequest

要通过 LoadBalancer 对外暴露 MySQL 服务,创建一个 OpsRequest 资源:

```yaml
kubectl apply -f - <


### 验证暴露的服务
检查服务详情以确认负载均衡器服务已创建:

```bash
kubectl get services -n demo
```

示例输出:

```bash
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
example-mysql-cluster-mysql ClusterIP 172.20.129.84 <none> 3306/TCP 4h39m
example-mysql-cluster-mysql-headless ClusterIP None <none> 3306/TCP,3601/TCP,9104/TCP,3501/TCP,3502/TCP,9901/TCP 4h39m
example-mysql-cluster-mysql-internet LoadBalancer 172.20.60.24 a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com 3306:30985/TCP 13s
```

### 等待 DNS 传播

负载均衡器的 DNS 名称可能需要 2-5 分钟才能解析。请通过以下命令验证解析状态:

```bash
nslookup a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com
```

示例输出:

```bash
Server: 192.168.101.1
Address: 192.168.101.1#53

Non-authoritative answer:
Name: a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com
Address: 54.251.110.4
```

## 外部连接 MySQL

### 获取凭据

KubeBlocks 会自动创建一个包含 MySQL root 凭据的 Secret。获取 MySQL root 凭据:

```bash
kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d
KI260UK7E9
```

### 使用 MySQL 客户端连接

您现在可以从外部(例如您的笔记本电脑或 EC2)连接到 MySQL 数据库:

```bash
mysql -h a1d37733683d244e0bebad8559cbf24d-3728431cad3e24b5.elb.ap-southeast-1.amazonaws.com -uroot -pKI260UK7E9
```

## 禁用外部暴露

### 选项 1:使用 OpsRequest
要禁用外部访问,创建一个 OpsRequest:

```yaml
kubectl 
apply -f - < + + + +### 选项 2:使用 Cluster API + +或者,从 Cluster 资源中移除 `spec.services` 字段: + +```bash +kubectl patch cluster example-mysql-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } +]' +``` + +监控集群状态直至其变为 Running: + +```bash +kubectl get cluster example-mysql-cluster -n demo -w +``` + + + + +``` +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster mysql Delete Running 23m +``` + + + + + +### 验证服务删除 + +确保 'example-mysql-connect-mysql-internet' 服务已被移除: + +```bash +kubectl get service -n demo +``` + +预期结果:应移除 'example-mysql-cluster-mysql-internet' 服务。 + +## 概述 +本指南演示了如何: +- 使用 KubeBlocks 将 MySQL 服务暴露到外部或内部网络 +- 通过云服务商特定注解配置 LoadBalancer 服务 +- 通过 OpsRequest 或直接更新 Cluster API 来管理外部访问的启用/禁用 + +KubeBlocks 为 Kubernetes 环境中的 MySQL 服务管理提供了灵活且简化的解决方案。 + diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx b/docs/zh/preview/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx new file mode 100644 index 00000000..82a75394 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/06-minior-version-upgrade.mdx @@ -0,0 +1,257 @@ +--- +description: 了解如何以最短停机时间部署和升级由KubeBlocks管理的MySQL集群。 +keywords: +- KubeBlocks +- MySQL +- Upgrade +- Rolling Upgrade +- Kubernetes +sidebar_label: 次版本升级 +sidebar_position: 6 +title: 在KubeBlocks中升级MySQL集群的次版本 +--- +# 在 KubeBlocks 中升级 MySQL 集群的次版本 + +本指南将引导您完成由 KubeBlocks 管理的 MySQL 集群的部署和次版本升级过程,确保升级期间实现最小化停机时间。 + +为了最大限度减少对数据库可用性的影响,升级过程会优先从副本(从节点实例)开始。待所有副本升级完成后,系统会执行主从切换操作,将其中一个已升级的副本提升为主节点。该切换过程非常迅速,通常可在几百毫秒内完成。切换完成后,系统会对原主节点实例进行升级,从而确保对应用程序的影响降至最低。 + +## 前提条件 + +在继续之前,请确保满足以下要求: +- 环境准备: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 部署 MySQL 半同步集群 + +部署一个 2 节点的半同步 MySQL 集群(1 个主节点,1 个从节点): + +```yaml +kubectl apply -f - < SHOW VARIABLES LIKE 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 83 | ++-----------------+-------+ +1 row in set (0.00 sec) + +mysql> SHOW VARIABLES LIKE 'performance_schema'; ++--------------------+-------+ +| Variable_name | Value | ++--------------------+-------+ +| performance_schema | OFF | ++--------------------+-------+ +1 row in set (0.00 sec) +``` + + + + +## 动态参数示例:修改 max_connections + +动态参数可以在不重启数据库的情况下进行修改。例如,更新 'max_connections' 参数可以允许更多并发连接到 MySQL 实例。 + +预期行为是修改配置后,新设置会立即生效,无需重启数据库。 + +要将 'max_connections' 参数从 83 修改为 100,请应用以下重新配置操作请求(Reconfiguring OpsRequest): + +```yaml +kubectl apply -f - < SHOW VARIABLES LIKE 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 100 | ++-----------------+-------+ +1 row in set (0.00 sec) +``` + +输出确认'max_connections'参数已成功更新为100。 + +## 静态参数示例:修改 performance_schema + +静态参数(如 'performance_schema')需要重启数据库才能生效。本示例中,我们将把 performance_schema 设置为 ON。 + +创建一个重配置操作请求(Reconfigure OpsRequest)。应用以下 OpsRequest YAML 来更新 'performance_schema': + + + +```yaml +kubectl apply -f - < SHOW VARIABLES LIKE 'performance_schema'; ++--------------------+-------+ +| Variable_name | Value | ++--------------------+-------+ +| performance_schema | ON | ++--------------------+-------+ +1 row in set (0.00 sec) +``` + + + + +## 清理 +要移除所有已创建的资源,请删除 MySQL 集群及其命名空间: + +```bash +kubectl delete 
cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + + + +## 总结 +本指南演示了如何通过 KubeBlocks 的 Reconfiguring OpsRequest 修改 MySQL 的动态参数(如 `max_connections`)和静态参数(如 `performance_schema`)。动态参数变更会立即生效,而静态参数变更需要重启数据库。通过利用 KubeBlocks 的声明式自动化管理能力,可以高效实施这些配置更新,并将停机时间降至最低。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/08-switchover.mdx b/docs/zh/preview/kubeblocks-for-mysql/04-operations/08-switchover.mdx new file mode 100644 index 00000000..53bf4e4a --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/08-switchover.mdx @@ -0,0 +1,195 @@ +--- +description: 了解如何使用KubeBlocks在MySQL集群中执行计划内切换,以确保最短停机时间和无缝角色转换。 +keywords: +- KubeBlocks +- MySQL +- Switchover +- High Availability +- Kubernetes +sidebar_label: MySQL 计划内切换 +sidebar_position: 8 +title: MySQL 集群中的计划内切换 +--- +# MySQL 集群中的计划性切换 + +**切换(switchover)** 是一项计划性操作,指 MySQL 集群中的主实例主动将其角色转移给从实例。与意外故障时触发的非计划性故障转移(failover)不同,切换能确保以可控且可预测的方式完成角色转换,同时将服务中断降至最低。 + +## **切换操作的优势** +1. **最小化停机时间**:主实例主动将其角色转移至从实例,服务中断时间极短(通常仅数百毫秒) +2. **可控的过渡**:相比需要检测故障并恢复的故障转移(通常导致数秒或更长的延迟),能确保更平滑且可预测的角色变更 +3. **维护友好**:非常适合计划内的维护任务(如节点升级或下线),同时保障服务不中断 + + +## **切换(Switchover)与故障转移(Failover)对比** + +| **对比维度** | **切换(Switchover)** | **故障转移(Failover)** | +|---------------------------|-------------------------------------------|---------------------------------------| +| **触发方式** | 计划内手动触发 | 意外故障自动触发 | +| **停机时间** | 数百毫秒级 | 数秒或更长时间 | +| **主节点角色转换** | 主动移交 | 被动晋升 | +| **适用场景** | 计划维护(如升级) | 处理突发故障 | + +使用切换(Switchover)可以确保平滑过渡和最小化服务中断,是计划维护活动的首选方案。 + +## 前提条件 + +在继续之前,请确保满足以下条件: +- 环境设置: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 部署 MySQL 半同步集群 + +部署一个 2 节点的半同步 MySQL 集群(1 个主节点,1 个从节点): + +```yaml +kubectl apply -f - < + + + +### 选项 1:使用 OpsRequest +创建一个 OpsRequest 将 Pod 标记为离线: + +```yaml +kubectl apply -f - < + + + +### 选项 2:使用 Cluster API +或者,直接更新 Cluster 资源来下线 Pod: + +```yaml +kubectl apply -f - < + + + +### 验证下线操作 + +应用更新后的配置后,验证集群中剩余的 Pod: + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster +``` + +示例输出: + +```bash +NAME READY STATUS RESTARTS AGE +example-mysql-cluster-mysql-0 4/4 Running 0 6m38s +example-mysql-cluster-mysql-2 4/4 Running 0 6m38s +``` + + + + +## 总结 +在本指南中,您了解到: +- 传统基于 StatefulSet 的扩缩容在 Kubernetes 中的局限性。 +- KubeBlocks 如何实现对特定 Pod 的精准下线。 +- 两种下线 Pod 的方法:使用 OpsRequest 或直接更新 Cluster API。 + +通过利用 KubeBlocks,您可以对 MySQL 集群进行细粒度管理,确保高可用性,并为维护和工作负载分配提供灵活性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx b/docs/zh/preview/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx new file mode 100644 index 00000000..14e75577 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/11-rebuild-replica.mdx @@ -0,0 +1,478 @@ +--- +description: 如何通过原地修复与非原地修复方法恢复KubeBlocks管理的半同步集群中的MySQL副本 +keywords: +- KubeBlocks +- MySQL +- Replica Recovery +- In-Place Repair +- Non-In-Place Repair +sidebar_label: 恢复 MySQL 副本 +sidebar_position: 11 +title: 在KubeBlocks中恢复MySQL副本 +--- +# 在KubeBlocks中恢复MySQL副本 + +本指南演示如何在KubeBlocks管理的MySQL半同步集群中执行以下任务: +- 向主节点写入记录并验证副本上的复制情况 +- 停止高可用性,中断复制,修改副本上的数据并移除复制 +- 使用"原地"修复和"非原地"修复两种方法重建副本 +- 验证副本上的数据恢复情况 + +> **注意**:上述步骤仅用于测试目的。禁用高可用性、中断复制以及在副本上修改数据可能会破坏数据库一致性。请勿在生产数据库上执行这些操作。 + +## 
前提条件 + +在继续之前,请确保满足以下条件: +- 环境准备: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 部署 MySQL 半同步集群 + +KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群。 + +集群配置 + +```yaml +kubectl apply -f - < CREATE DATABASE test; +mysql> USE test; +mysql> CREATE TABLE t1 (id INT PRIMARY KEY, name VARCHAR(255)); +mysql> INSERT INTO t1 VALUES (1, 'John Doe'); +``` + +### 步骤3:验证数据复制 +连接到副本实例(example-mysql-cluster-mysql-0)以验证数据是否已复制: + +```bash +kubectl exec -ti -n demo example-mysql-cluster-mysql-0 -- mysql -uroot -pR0z5Z1DS02 +``` + +注意:如果主实例是 'example-mysql-cluster-mysql-0',则应连接到 'example-mysql-cluster-mysql-1'。连接前请务必检查每个实例的角色。 + +(严格保留原文格式与换行) + +```sql +mysql> SELECT * FROM test.t1; +``` + +示例输出: + +```bash ++----+----------+ +| id | name | ++----+----------+ +| 1 | John Doe | ++----+----------+ +``` + + + + +## 中断高可用性与复制并修改副本 + +### 步骤 1:禁用高可用性 + +获取高可用性配置: + +```bash +kubectl get configmap -n demo example-mysql-cluster-mysql-haconfig -o yaml +``` + +预期输出: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + annotations: + MaxLagOnSwitchover: "10" + enable: "true" + ttl: "15" +``` + +修补 ConfigMap 以禁用高可用性: + +(保持原有格式和换行) + +```bash +kubectl patch configmap -n demo example-mysql-cluster-mysql-haconfig --type merge -p '{"metadata":{"annotations":{"enable":"false"}}}' +``` + +### 步骤 2:停止复制 + +在副本实例上停止复制: + +```sql +mysql> STOP REPLICA; +``` + +### 步骤 3:将副本切换为读写模式 + +将副本实例切换为读写模式: + +```sql +mysql> SET GLOBAL super_read_only = OFF; +mysql> SET GLOBAL read_only = OFF; +``` + +### 步骤 4:从副本删除数据 +删除副本上的数据: + +```sql +mysql> DELETE FROM test.t1 WHERE id = 1; +``` + +### 步骤5:将副本切换为只读模式 +将副本恢复为只读模式: + +```sql +mysql> SET GLOBAL super_read_only = ON; +mysql> SET GLOBAL read_only = ON; +``` + +### 步骤6:启用高可用性 +通过修补ConfigMap重新启用高可用性: + +```bash +kubectl patch configmap -n demo example-mysql-cluster-mysql-haconfig --type merge -p '{"metadata":{"annotations":{"enable":"true"}}}' +``` + +### 步骤7:验证数据删除 +验证数据是否已删除: + +```sql +mysql> SELECT * FROM test.t1; +Empty set (0.00 sec) +``` + + + + +## 重建副本 + +KubeBlocks 提供两种副本重建方式:原地修复和非原地修复。 + +### 原地修复 + +通过以下配置原地重建副本: + +```bash +kubectl apply -f - < SELECT * FROM test.t1; ++----+----------+ +| id | name | ++----+----------+ +| 1 | John Doe | ++----+----------+ +1 row in set (0.01 sec) +``` + +### 非原地修复 + +通过创建新实例重建副本: + +```bash +kubectl apply -f - < SELECT * FROM test.t1; ++----+----------+ +| id | name | ++----+----------+ +| 1 | John Doe | ++----+----------+ +1 row in set (0.01 sec) +``` + +#### 检查所有 Pod +运行以下命令列出 MySQL 集群中的所有 Pod: + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=example-mysql-cluster +``` + +示例输出: + +```bash +NAME READY STATUS RESTARTS AGE +example-mysql-cluster-mysql-1 4/4 Running 0 13m +example-mysql-cluster-mysql-2 4/4 Running 0 2m14s +``` + +此时,您可以看到两个 Pod:'example-mysql-cluster-mysql-1' 和 'example-mysql-cluster-mysql-2'。原有的 Pod 'example-mysql-cluster-mysql-0' 已被删除。 + +要验证集群状态,请检查集群资源: + +```bash +kubectl get cluster example-mysql-cluster -n demo -oyaml +``` + +示例输出: + +```yaml + offlineInstances: + - example-mysql-cluster-mysql-0 +``` + +实例 'example-mysql-cluster-mysql-0' 已被标记为离线。 + +## 总结 +- 原地修复(In-Place Repair):成功重建副本并恢复已删除数据。 +- 非原地修复(Non-In-Place Repair):创建新的副本实例并成功恢复数据。 + +两种方法均能有效恢复副本并确保数据一致性。 + 
+(严格保持原文格式与换行,技术术语采用标准译法) \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-mysql/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx b/docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..1d439996 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,143 @@ +--- +description: 了解如何为KubeBlocks创建并配置一个使用S3存储桶存放备份数据的BackupRepo。 +keywords: +- KubeBlocks +- Backup +- BackupRepo +- S3 +- Kubernetes +sidebar_label: 创建备份仓库 +sidebar_position: 1 +title: 为KubeBlocks创建备份存储库 +--- +# 为 KubeBlocks 创建 BackupRepo + +本指南将引导您通过使用 S3 存储桶来创建和配置 KubeBlocks 中的 BackupRepo,用于存储备份数据。 + +## 前提条件 +- 已配置具有创建 S3 存储桶权限的 AWS CLI +- 拥有 Kubernetes 集群的 kubectl 访问权限 +- 已安装并运行 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +## 步骤 1:创建 S3 存储桶 + +使用 AWS CLI 在目标区域创建 S3 存储桶。将 `` 替换为您所需的 AWS 区域(例如 `us-east-1`、`ap-southeast-1`)。 + +(严格保持原文格式与换行) + +```bash +aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +示例(适用于 us-west-1 区域): + +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +示例输出: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +验证: +通过列出存储桶内容确认其已创建(初始状态下应为空): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + + + + +## 步骤 2:为 AWS 凭证创建 Kubernetes Secret + +将您的 AWS 凭证安全地存储在 Kubernetes Secret 中。请将 `` 和 `` 替换为实际的 AWS 凭证: + +```bash +# Create a secret to save the access key +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + + + + +## 步骤 3:配置备份仓库 + +BackupRepo 是一种自定义资源,用于定义备份的存储仓库。在此步骤中,您将通过创建 BackupRepo 资源将您的 S3 存储桶与 KubeBlocks 集成。 + +应用以下 YAML 来创建 BackupRepo。请将字段(例如存储桶名称、区域)替换为您的具体配置。 + +```yaml +kubectl apply -f - < 0/1 Init:0/1 0 6s +restore-preparedata-XXXXX- 1/1 Running 0 12s +restore-preparedata-XXXXX- 0/1 Completed 0 20s +``` + +这些 Pod 会将备份数据复制到持久卷(PVC)中。 + + +2. MySQL 集群 Pod: + +```bash +example-mysql-cluster-restored-mysql-0 0/4 Pending 0 0s +example-mysql-cluster-restored-mysql-0 4/4 Running 0 20s +``` + +Pod 使用恢复的数据进行初始化并启动 MySQL 服务。 + +## 通过 Ops API 执行恢复 + +或者,使用 Ops API 发起恢复流程: + +### 步骤 1:创建恢复请求 + +```yaml +kubectl apply -f - < 0/1 Init:0/1 0 6s +restore-preparedata-XXXXX- 1/1 Running 0 12s +restore-preparedata-XXXXX- 0/1 Completed 0 20s +``` + +这些 Pod 会将备份数据复制到持久卷(PVCs)中。 + + +2. 
MySQL 集群 Pods: + +```bash +example-mysql-cluster-restored-mysql-0 0/4 Pending 0 0s +example-mysql-cluster-restored-mysql-0 4/4 Running 0 20s +``` + +恢复完成后,MySQL集群的Pod会使用恢复的数据进行初始化并启动MySQL服务。 + +#### 步骤2:验证集群状态 +检查已恢复集群的状态: + +```bash +kubectl get cluster example-mysql-cluster-restored -n demo +``` + +成功输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +example-mysql-cluster-restored Delete Running 97s +``` + + + + +## 清理 +要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete cluster example-mysql-cluster-restored -n demo +kubectl delete ns demo +``` + + + + +## 总结 +本指南演示了如何在 KubeBlocks 中通过完整备份和持续的 binlog 备份实现 MySQL 集群的时间点恢复(PITR)。关键步骤包括: +- 验证可用备份 +- 提取加密的系统账户凭证 +- 创建带有恢复配置的新 MySQL 集群 +- 监控恢复过程 + +通过此方法,您可以将 MySQL 集群恢复到特定时间点,确保数据丢失最小化并保持业务连续性。 + diff --git a/docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/_category_.yml b/docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/_category_.yml new file mode 100644 index 00000000..09845f2d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 备份与恢复 +position: 5 diff --git a/docs/zh/preview/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx b/docs/zh/preview/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..16e7ef7a --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,167 @@ +--- +description: 了解如何在KubeBlocks上部署MySQL集群,并通过Kubernetes Secrets安全配置自定义root密码。 +keywords: +- MySQL +- KubeBlocks +- Custom Password +- Kubernetes +- Secrets +sidebar_label: 自定义密码 +sidebar_position: 1 +title: 在KubeBlocks上创建带自定义root密码的MySQL集群 +--- +# 在 KubeBlocks 中使用自定义密码创建 MySQL 集群 + +本指南演示如何在 KubeBlocks 中部署 MySQL 集群,并将自定义 root 密码存储在 Kubernetes Secret 中。 + +## 前提条件 + +在继续之前,请确保满足以下要求: +- 环境准备: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +``` + +预期输出: + +```bash +namespace/demo created +``` + + + + +## 部署 MySQL 半同步集群 + +KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群,并设置自定义 root 密码。 + +### 步骤 1:为 root 账户创建 Secret + +自定义 root 密码存储在 Kubernetes Secret 中。通过应用以下 YAML 创建 Secret: + +```yaml +kubectl apply -f - < STATUS; +-------------- + +SSL: Cipher in use is TLS_AES_256_GCM_SHA384 +``` + +如果 SSL 字段显示加密套件,则表示连接已成功通过 TLS 加密。 + +## 清理 +要删除本教程中创建的所有资源,请运行以下命令: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + + + +## 概述 +在本指南中,您学习了如何: +- 使用 KubeBlocks 部署 MySQL 集群,并启用 TLS 加密以确保 MySQL 客户端与服务器之间的安全通信。 +- 建立基于 TLS 的安全 MySQL 连接。 +- 使用 MySQL shell 验证安全连接。 + +TLS 加密通过加密传输中的数据来确保通信安全,保护敏感信息。按照这些步骤,您可以轻松地使用 KubeBlocks 在 Kubernetes 上部署安全的 MySQL 集群。 + diff --git a/docs/zh/preview/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx b/docs/zh/preview/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx new file mode 100644 index 00000000..9699df41 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/07-tls/02-tls-custom-cert.mdx @@ -0,0 +1,231 @@ +--- +description: 了解如何在KubeBlocks上部署一个使用用户提供的TLS证书的MySQL集群,以实现安全通信。本指南涵盖证书生成、集群部署及安全连接验证的全过程。 +keywords: +- KubeBlocks +- MySQL +- Kubernetes +- TLS +- Secure Communication +- User-Provided Certificates +sidebar_label: 使用用户提供TLS的MySQL集群 
+sidebar_position: 2 +title: 在KubeBlocks上部署用户提供TLS的MySQL集群 +--- +# 在 KubeBlocks 上部署使用用户提供 TLS 的 MySQL 集群 + +本指南说明如何使用 KubeBlocks 部署一个带有**用户提供 TLS 证书**的 MySQL 集群。通过提供自己的证书,您可以完全控制 MySQL 客户端与服务器之间加密通信的安全配置。本指南涵盖证书生成、集群部署和安全连接验证。 + +## 前提条件 + +在继续之前,请确保满足以下条件: +- 环境设置: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 生成证书 + +要启用TLS加密,您需要提供证书颁发机构(CA)、服务器证书和私钥。按照以下步骤使用OpenSSL生成这些文件: + +1. 生成根证书(CA) + +```bash +# Create the CA private key (password optional) +openssl genrsa -aes256 -out ca-key.pem 4096 + +# Generate a self-signed root certificate (valid for 10 years) +openssl req -x509 -new -nodes -key ca-key.pem -sha256 -days 3650 -out ca.pem +# Enter the required information (e.g., Common Name can be "MySQL Root CA") +``` + +2. 生成服务器证书和密钥 + +```bash +# Generate the server private key +openssl genrsa -out server-key.pem 4096 + +# Create a Certificate Signing Request (CSR) +openssl req -new -key server-key.pem -out server-req.pem +# Enter server identification details, such as: +# Common Name (CN) = Server domain name or IP (must match the MySQL server address!) + +# Sign the server certificate with the CA (valid for 10 years) +openssl x509 -req -in server-req.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -days 3650 -sha256 +``` + +3. 验证证书 +验证服务器证书是否有效且由CA签发: + + + +```bash +# Verify the server certificate +openssl verify -CAfile ca.pem server-cert.pem +``` + +预期输出: + +```bash +server-cert.pem: OK +``` + + + + +## 创建 Kubernetes Secret +将生成的证书和密钥存储在 Kubernetes Secret 中,以便您的 MySQL 集群可以访问它们: + +```bash +kubectl create secret generic mysql-tls-secret \ + --namespace=demo \ + --from-file=ca.crt=ca.pem \ + --from-file=tls.crt=server-cert.pem \ + --from-file=tls.key=server-key.pem \ + --type=kubernetes.io/tls +``` + +此 Secret 包含在 MySQL 集群上启用 mTLS 所需的 CA、服务器证书和私钥。 + +## 部署 MySQL 半同步集群 + +KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群,并使用用户提供的 TLS 证书: + +```yaml +kubectl apply -f - < STATUS; +-------------- + +SSL: Cipher in use is TLS_AES_256_GCM_SHA384 +``` + +如果输出中显示 SSL 信息,则表示连接已成功通过 TLS 加密。 + +## 清理 +测试完成后移除所有资源: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete secret mysql-tls-secret -n demo +kubectl delete ns demo +``` + + + + +## 总结 +在本指南中,您学习了如何: +- 使用 OpenSSL 生成自签名的 CA 和服务器证书。 +- 将证书存储在 Kubernetes Secret 中。 +- 使用 KubeBlocks 部署启用 TLS 加密的 MySQL 集群。 +- 通过 TLS 安全连接 MySQL 集群并验证连接。 + +使用 TLS 可以确保 MySQL 客户端与服务器之间的通信安全,保护传输中的敏感数据。通过遵循这些步骤,您可以轻松地使用 KubeBlocks 在 Kubernetes 上设置和管理安全的 MySQL 集群。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/07-tls/03-mtls.mdx b/docs/zh/preview/kubeblocks-for-mysql/07-tls/03-mtls.mdx new file mode 100644 index 00000000..d8a0b3be --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/07-tls/03-mtls.mdx @@ -0,0 +1,314 @@ +--- +description: 了解如何在KubeBlocks上为MySQL集群配置双向TLS(mTLS)加密。本指南将逐步介绍证书生成、集群部署、mTLS用户设置以及安全连接验证的全过程。 +keywords: +- KubeBlocks +- MySQL +- Kubernetes +- mTLS +- Mutual TLS +- Secure Communication +sidebar_label: 支持mTLS的MySQL集群 +sidebar_position: 3 +title: 在KubeBlocks上部署支持mTLS的MySQL集群 +--- +# 在 KubeBlocks 上创建支持 mTLS 的 MySQL 集群 + +本指南说明如何使用 KubeBlocks 配置支持**双向 TLS (mTLS)** 加密的 MySQL 集群。mTLS 确保服务器和客户端在建立连接时相互认证,为您的数据库基础设施提供增强的安全性。本指南涵盖证书生成、集群部署、mTLS 用户配置以及安全连接验证等内容。 + + + 
+## 什么是 mTLS? +双向 TLS(mTLS)是一种增强的安全协议,确保服务器和客户端在建立连接时相互验证身份。与传统 TLS 仅由客户端验证服务器身份不同,mTLS 通过要求双方提供由可信证书颁发机构(CA)签发的有效证书,额外增加了一层安全防护。 + +## 前提条件 + +在继续之前,请确保满足以下条件: +- 环境准备: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 生成证书 + +要启用TLS加密,您需要提供证书颁发机构(CA)、服务器证书和私钥。按照以下步骤使用OpenSSL生成这些文件: + +1. 生成根证书(CA) + +```bash +# Create the CA private key (password optional) +openssl genrsa -aes256 -out ca-key.pem 4096 + +# Generate a self-signed root certificate (valid for 10 years) +openssl req -x509 -new -nodes -key ca-key.pem -sha256 -days 3650 -out ca.pem +# Enter the required information (e.g., Common Name can be "MySQL Root CA") +``` + +2. 生成服务器证书和密钥 + +```bash +# Generate the server private key +openssl genrsa -out server-key.pem 4096 + +# Create a Certificate Signing Request (CSR) +openssl req -new -key server-key.pem -out server-req.pem +# Enter server identification details, such as: +# Common Name (CN) = Server domain name or IP (must match the MySQL server address!) + +# Sign the server certificate with the CA (valid for 10 years) +openssl x509 -req -in server-req.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -days 3650 -sha256 +``` + +3. 生成客户端证书和密钥 + +```bash +# Generate the client private key +openssl genrsa -out client-key.pem 4096 + +# Create a Certificate Signing Request (CSR) +openssl req -new -key client-key.pem -out client-req.pem +# Enter client identification details, such as: +# Common Name (CN) = Client username (e.g., "mysql_client_1") + +# Sign the client certificate with the CA (valid for 1 year) +openssl x509 -req -in client-req.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -days 365 -sha256 +``` + +4. 
验证证书 +验证服务器证书是否有效且由CA签发: + +```bash +# Verify the server certificate +openssl verify -CAfile ca.pem server-cert.pem +``` + +预期输出: + +```bash +server-cert.pem: OK +``` + + + + + +```bash +# Verify the client certificate +openssl verify -CAfile ca.pem client-cert.pem +``` + +预期输出: + +```bash +client-cert.pem: OK +``` + + + + +## 创建 Kubernetes Secret +将生成的证书和密钥存储在 Kubernetes Secret 中,以便您的 MySQL 集群可以访问它们。 + +```bash +kubectl create secret generic mysql-tls-secret \ + --namespace=demo \ + --from-file=ca.crt=ca.pem \ + --from-file=tls.crt=server-cert.pem \ + --from-file=tls.key=server-key.pem \ + --type=kubernetes.io/tls +``` + +此 Secret 包含在 MySQL 集群上启用 mTLS 所需的 CA、服务器证书和私钥。 + +## 部署 MySQL 半同步集群 + +KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群,并使用用户提供的 TLS 证书: + +```yaml +kubectl apply -f - < CREATE USER 'mtls_user'@'%' IDENTIFIED BY 'kni676X2W1' REQUIRE X509; +Query OK, 0 rows affected (0.01 sec) + +mysql> GRANT ALL PRIVILEGES ON *.* TO 'mtls_user'@'%'; +Query OK, 0 rows affected (0.01 sec) + +mysql> FLUSH PRIVILEGES; +Query OK, 0 rows affected (0.01 sec) +``` + + + + +## 通过 mTLS 连接 MySQL 集群 + +使用 `kubectl port-forward` 命令将 MySQL 集群主副本的 3306 端口映射到本地机器的 3306 端口: + +```bash +kubectl port-forward svc/mysql-cluster-mysql 3306:3306 -n default +Forwarding from 127.0.0.1:3306 -> 3306 +Forwarding from [::1]:3306 -> 3306 +``` + +然后,打开另一个 shell 并使用 mysql 命令行工具连接到本地端口 3306。 + +如果未使用客户端证书连接,您将看到错误: + +```bash +mysql -h 127.0.0.1 -umtls_user -pkni676X2W1 --ssl-mode=REQUIRED +``` + +预期输出: + +```bash +ERROR 1045 (28000): Access denied for user 'mtls_user'@'127.0.0.1' (using password: YES) +``` + +要成功连接,需提供客户端证书和密钥: + +```bash +mysql -h 127.0.0.1 -umtls_user -pkni676X2W1 --ssl-mode=REQUIRED --ssl-ca=/path/to/ca.pem --ssl-cert=/path/to/client-cert.pem --ssl-key=/path/to/client-key.pem +``` + +验证 MySQL shell 中的 TLS 连接状态: + +```sql +mysql> STATUS; +-------------- + +SSL: Cipher in use is TLS_AES_256_GCM_SHA384 +``` + + + + +## 清理 +删除本教程中创建的所有资源: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete secret mysql-tls-secret -n demo +kubectl delete ns demo +``` + + + + +## 总结 + +在本指南中,您学习了如何: +1. 使用 OpenSSL 生成自签名的 CA、服务器和客户端证书。 +2. 在 KubeBlocks 上部署启用 mTLS 的 MySQL 集群。 +3. 
配置 mTLS 用户并验证安全连接。 + +mTLS 通过确保客户端和服务器的双向认证,提供了额外的信任和安全层。通过遵循本指南,您可以使用 KubeBlocks 在 Kubernetes 上安全地部署和管理支持 mTLS 的 MySQL 集群。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/07-tls/_category_.yml b/docs/zh/preview/kubeblocks-for-mysql/07-tls/_category_.yml new file mode 100644 index 00000000..18f17cf0 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/07-tls/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: TLS(传输层安全协议) +position: 7 diff --git a/docs/zh/preview/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..cd9540db --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,286 @@ +--- +description: 了解如何在KubeBlocks中通过Prometheus Operator为MySQL集群配置可观测性。设置监控并通过Grafana实现指标可视化。 +keywords: +- KubeBlocks +- MySQL +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: MySQL 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 MySQL 集群可观测性 +--- +# 使用 Prometheus Operator 实现 MySQL 集群的可观测性 + +(保持原始 Markdown 格式与空行不变) + +## 前提条件 + +在继续之前,请确保满足以下条件: +- 环境设置: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 安装 Prometheus Operator + +如果尚未安装 Prometheus Operator,可以使用 Helm 进行安装: + +```bash +kubectl create namespace monitoring +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack -n monitoring --create-namespace +``` + +或者,您可以按照[如何安装 Prometheus Operator](../docs/install-prometheus.md)中的步骤来安装 Prometheus Operator。 + +检查已部署 Pod 的状态: + +```bash +kubectl get pods -n monitoring +``` + +预期输出: + +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + + + + + +## 部署 MySQL 半同步集群 + +KubeBlocks 采用声明式方法管理 MySQL 集群。以下是一个配置示例,用于部署包含 2 个节点(1 个主节点,1 个副本节点)的半同步模式 MySQL 集群。 + +应用以下 YAML 配置来部署集群: + +```yaml +kubectl apply -f - < 91m v1.31.5-eks-5d632ec +ip-10-0-1-183.ap-southeast-1.compute.internal Ready 71m v1.31.5-eks-5d632ec +ip-10-0-1-217.ap-southeast-1.compute.internal Ready 2m13s v1.31.5-eks-5d632ec +ip-10-0-2-186.ap-southeast-1.compute.internal Ready 91m v1.31.5-eks-5d632ec +ip-10-0-2-252.ap-southeast-1.compute.internal Ready 71m v1.31.5-eks-5d632ec +ip-10-0-2-71.ap-southeast-1.compute.internal Ready 2m24s v1.31.5-eks-5d632ec +ip-10-0-3-143.ap-southeast-1.compute.internal Ready 91m v1.31.5-eks-5d632ec +ip-10-0-3-205.ap-southeast-1.compute.internal Ready 36s v1.31.5-eks-5d632ec +ip-10-0-3-238.ap-southeast-1.compute.internal Ready 91m v1.31.5-eks-5d632ec +``` + 
+从输出中可以看到,每个可用区(AZ)中有三个节点:ap-southeast-1a、ap-southeast-1b 和 ap-southeast-1c。 + + + +## 跨可用区部署 MySQL 集群 + +### 创建 MySQL 集群 +要跨不同可用区部署一个 3 节点半同步 MySQL 集群(1 主节点,2 从节点),请使用以下 YAML 配置: + +```yaml +kubectl apply -f - < +mysql 500m / 500m 512Mi / 512Mi data:20Gi +``` + +**观察结果**: +- 默认副本配置为 0.5 CPU 和 0.5Gi 内存。 +- 自定义副本配置为 1 CPU 和 1Gi 内存。 + +## 将自定义 Pod 暴露为服务 +要通过独立服务暴露自定义 Pod,请使用以下配置: + +```yaml +kubectl apply -f - < 3306/TCP 12m +example-mysql-cluster-mysql ClusterIP 172.20.11.166 3306/TCP 12m +example-mysql-cluster-mysql-headless ClusterIP None 3306/TCP,3601/TCP,9104/TCP,3501/TCP,3502/TCP,9901/TCP 12m +``` + +### 通过服务访问自定义Pod +获取根凭据: + +```bash +kubectl get secrets -n demo example-semisync-mysql-mysql-account-root -o jsonpath='{.data.username}' | base64 -d + +kubectl get secrets -n demo example-mysql-cluster-mysql-account-root -o jsonpath='{.data.password}' | base64 -d +``` + +预期输出: + +```bash +root + +uk263gR24s +``` + +从 MySQL 容器内部连接到自定义 Pod: + +```bash +kubectl exec -it example-mysql-cluster-mysql-0 -n demo -- mysql -hexample-mysql-cluster-custom-pod -uroot -puk263gR24s +``` + +此自定义 Pod 预配置了额外资源,非常适合运行复杂查询或分析型工作负载。 + +## 清理 +要删除所有已创建的资源,请连同其命名空间一起删除 MySQL 集群: + +```bash +kubectl delete cluster example-mysql-cluster -n demo +kubectl delete ns demo +``` + + + + + +## 结论 +通过 KubeBlocks 自定义 Pod 资源配置和标签,您可以构建灵活且资源高效的 MySQL 环境。无论您需要强大的主实例还是专用于报表生成的副本,KubeBlocks Operator 都能让您根据工作负载需求精细调整每个 Pod 的 CPU、内存和存储配置。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx b/docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx new file mode 100644 index 00000000..01ed69ec --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-mysql/09-advanced-pod-management/03-parallel-pod-management-concurrency.mdx @@ -0,0 +1,305 @@ +--- +description: 了解如何通过`parallelPodManagementConcurrency`参数在KubeBlocks中配置MySQL集群,以控制Pod在创建、扩缩容及删除过程中的并行度。 +keywords: +- KubeBlocks +- MySQL +- Pod Management +- Parallelism +- Kubernetes +sidebar_label: Pod 管理并行度 +sidebar_position: 3 +title: 在KubeBlocks中配置MySQL集群并控制Pod创建、扩缩容及删除的并行度 +--- +# 在 KubeBlocks 中配置 MySQL 集群的 Pod 创建、扩缩容及删除并行度控制 + +本指南演示如何使用 `parallelPodManagementConcurrency` 参数控制 KubeBlocks 中 MySQL 集群的 Pod 创建、扩缩容及删除并行度。通过定义可并行管理的 Pod 最大数量,用户可以在操作速度与系统稳定性之间取得平衡。与 StatefulSet 中仅提供两种固定选项(`OrderedReady` 或 `Parallel`)的 `podManagementPolicy` 不同,`parallelPodManagementConcurrency` 提供了更高的灵活性,使其同时适用于资源敏感型环境和生产环境。 + +## 前提条件 + +在继续之前,请确保满足以下要求: +- 环境设置: + - 已启动并运行一个 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处的安装说明进行操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + + + + +## 部署 MySQL 半同步集群 + +部署一个 2 节点的半同步 MySQL 集群(1 个主节点,1 个从节点),并将 `parallelPodManagementConcurrency` 参数设置为 1 以强制顺序创建 Pod。 + +```yaml +kubectl apply -f - < +示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-postgresql kb-system 1 2025-05-21 deployed postgresql-1.0.0 +``` + + +若插件未启用,请选择以下安装方式: + + + + + ```bash + # 添加 Helm 仓库 + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # 中国大陆用户若 GitHub 访问困难或缓慢,可使用以下镜像仓库: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # 更新 Helm 仓库 + helm repo update + # 搜索可用插件版本 + helm search repo kubeblocks/postgresql --versions + # 安装指定版本(将 替换为您选择的版本号) + helm upgrade -i 
kb-addon-postgresql kubeblocks-addons/postgresql --version -n kb-system + ``` + + + + + ```bash + # 添加索引(kubeblocks 索引默认已添加) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # 更新索引 + kbcli addon index update kubeblocks + # 更新所有索引 + kbcli addon index update --all + ``` + + 搜索并安装插件: + + ```bash + # 搜索插件 + kbcli addon search postgresql + # 安装指定版本插件(将 替换为您选择的版本号) + kbcli addon install postgresql --version + ``` + **示例输出:** + ```bash + ADDON VERSION INDEX + postgresql 0.9.1 kubeblocks + postgresql 0.9.2 kubeblocks + postgresql 0.9.3 kubeblocks + postgresql 1.0.0 kubeblocks + ``` + 启用或禁用插件: + + ```bash + # 启用插件 + kbcli addon enable postgresql + # 禁用插件 + kbcli addon disable postgresql + ``` + + + + +:::note +**版本兼容性说明** + +请始终确保 PostgreSQL 插件版本与 KubeBlocks 主版本相匹配,以避免兼容性问题。 + +::: + +### 验证支持的 PostgreSQL 版本 + +**列出可用 PostgreSQL 版本:** + +```bash +kubectl get cmpv postgresql +``` +
+示例输出 +```text +NAME VERSIONS STATUS AGE +postgresql 16.4.0,15.7.0,14.8.0,14.7.2,12.15.0,12.14.1,12.14.0 Available 33d +``` +
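+如果需要在脚本中逐个处理版本号,可以把上面的版本列表展开为每行一个。下面只是一个示意写法(`spec.compatibilityRules` 即下文兼容性检查所用的字段;`sort -V` 假设环境中为 GNU coreutils):
+
+```bash
+# 展开所有兼容性规则中的版本号,去重并按版本号排序(示意)
+kubectl get cmpv postgresql -o jsonpath='{.spec.compatibilityRules[*].releases[*]}' \
+  | tr ' ' '\n' | sort -Vu
+```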
+ +**检查 ComponentDefinitions 的版本兼容性** + +**步骤 1.** 获取与指定 `ComponentVersion` 关联的 `ComponentDefinition` 列表 + +```bash +kubectl get cmpv postgresql -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+示例输出 +```text +postgresql-12-1.0.0 +postgresql-14-1.0.0 +postgresql-15-1.0.0 +postgresql-16-1.0.0 +``` +
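+在自动化脚本中,也可以复用上面这条命令,反向确认某个 `ComponentDefinition` 是否出现在兼容列表中。以下仅为示意(以列表中的 `postgresql-16-1.0.0` 为例,利用 `grep -qx` 的退出码做条件判断):
+
+```bash
+kubectl get cmpv postgresql -ojson \
+  | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' \
+  | tr ',' '\n' | grep -qx 'postgresql-16-1.0.0' && echo compatible
+```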
+ +**步骤 2.** 获取与指定 `ComponentDefinition` 兼容的版本列表 + +```bash +kubectl get cmpv postgresql -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("postgresql-14"))) | .releases[]' +``` + +此命令返回与名为 `postgresql-14` 的 `ComponentDefinition` 兼容的版本: + +
+示例输出 +```text +14.7.2 +14.8.0 +``` +
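+反向查询同样常用:给定目标服务版本,列出所有兼容的 `ComponentDefinition` 前缀。以下示意命令沿用与上文相同的 `compatibilityRules` 字段,此处以 `14.8.0` 为例:
+
+```bash
+# 给定 serviceVersion,筛选出兼容的 ComponentDefinition(示意)
+kubectl get cmpv postgresql -o json \
+  | jq -r '.spec.compatibilityRules[] | select(.releases | index("14.8.0")) | .compDefs[]'
+```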
+ +### 存储配置 + +PostgreSQL 需要持久化存储。检查可用选项: + +```bash +kubectl get storageclass +``` + +推荐存储特性: +- 最小 20Gi 容量 +- ReadWriteOnce 访问模式 +- 支持存储卷扩容 +- 满足工作负载的性能需求 + +## 部署 PostgreSQL 集群 + +使用默认配置部署基础 PostgreSQL 集群: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/postgresql/cluster.yaml +``` + +该操作将创建: +- 一个 2 副本的 PostgreSQL 集群 +- 默认资源分配(0.5 CPU,0.5Gi 内存) +- 20Gi 持久化存储 +- 自动主从节点配置 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + # 指定删除 Cluster 时的行为策略 + # 有效选项:[DoNotTerminate, Delete, WipeOut](KB 0.9 起弃用 `Halt`) + # - `DoNotTerminate`:阻止删除 Cluster,确保所有资源保持完整 + # - `Delete`:在 `Halt` 策略基础上同时移除 PVC,实现彻底清理(包括持久化数据) + # - `WipeOut`:激进策略,删除所有 Cluster 资源(包括外部存储中的卷快照和备份) + # 该策略会导致数据完全删除,应谨慎使用(建议仅在非生产环境使用以避免不可逆数据丢失) + terminationPolicy: Delete + # 指定创建 Cluster 时使用的 ClusterDefinition 名称 + # 注意:请勿修改此字段 + # 创建 PostgreSQL Cluster 时该值必须为 `postgresql` + clusterDef: postgresql + # 指定创建 Cluster 时使用的 ClusterTopology 名称 + # 有效选项:[replication] + topology: replication + # 定义组成 Cluster 的各个组件的详细配置列表 + componentSpecs: + - name: postgresql + # 指定该组件期望部署的服务版本 + # 有效选项:[12.14.0,12.14.1,12.15.0,14.7.2,14.8.0,15.7.0,16.4.0] + serviceVersion: "14.7.2" + # 是否在组件的 headless Service 上标注指标导出器信息 + # 有效选项:[true, false] + disableExporter: false + # 为组件管理的 Pod/PVC/账户/TLS 密钥/服务添加或覆盖标签 + labels: + # PostgreSQL 的 CMPD 通过环境变量指定 `KUBERNETES_SCOPE_LABEL=apps.kubeblocks.postgres.patroni/scope` + # 该标签用于 Patroni 识别 Kubernetes 资源的归属范围(集群) + # + # 注意:请勿移除此标签 + # 值必须遵循格式 -postgresql + # 本例中为 pg-cluster-postgresql + # 请将 `pg-cluster` 替换为您的集群名称 + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + # 按需调整副本数 + replicas: 2 + # 指定组件所需的计算资源 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # 定义组件存储需求的持久卷声明模板列表 + volumeClaimTemplates: + # 引用 componentDefinition.spec.runtime.containers[*].volumeMounts 中定义的挂载卷名称 + - name: data + spec: + # 声明所需的 StorageClass 名称 + # 若未指定,默认使用标注了 `storageclass.kubernetes.io/is-default-class=true` 的 StorageClass + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # 按需设置存储容量 + storage: 20Gi +``` + +更多 API 字段说明请参阅 [API 参考文档](../user_docs/references/api-reference/cluster)。 + +### 创建指定版本的 PostgreSQL 集群 + +在应用配置前,通过设置 `spec.componentSpecs.serviceVersion`(主.次版本)字段可创建特定版本的集群: + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 12.15.0 # 有效选项:[12.15.0,12.14.1,12.14.0] + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 14.7.2 # 有效选项:[14.8.0,14.7.2] + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 15.7.0 + ``` + + + + ```yaml + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + ``` + + + +查看可用的 `ComponentDefinition` 和 `ComponentVersion`: + +```bash +kubectl get cmpd -l app.kubernetes.io/name=postgresql +``` + +
+示例输出 +```bash +NAME SERVICE SERVICE-VERSION STATUS AGE +postgresql-12-1.0.0 postgresql 12.15.0 Available 22d +postgresql-14-1.0.0 postgresql 14.8.0 Available 22d +postgresql-15-1.0.0 postgresql 15.7.0 Available 22d +postgresql-16-1.0.0 postgresql 16.4.0 Available 22d +``` +
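+如果只关心定义名与服务版本的对应关系,也可以用 jsonpath 自定义输出列。以下写法仅为示意(假设 `ComponentDefinition` 的服务版本记录在 `spec.serviceVersion` 字段):
+
+```bash
+kubectl get cmpd -l app.kubernetes.io/name=postgresql \
+  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.serviceVersion}{"\n"}{end}'
+```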
+ +```bash +kubectl get cmpv -l app.kubernetes.io/name=postgresql +``` + +
+示例输出 +```bash +NAME VERSIONS STATUS AGE +postgresql 16.4.0,15.7.0,14.8.0,14.7.2,12.15.0,12.14.1,12.14.0 Available 22d +``` +
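+确认名称后,除了通过 `serviceVersion` 选择版本,也可以在集群规格的 `componentSpecs` 中显式固定 `componentDef`。以下片段仅为示意(`componentDef` 为可选字段,取值需与所选 `serviceVersion` 兼容):
+
+```yaml
+componentSpecs:
+  - name: postgresql
+    # 显式固定 ComponentDefinition,名称取自上面的列表(示意)
+    componentDef: postgresql-16-1.0.0
+    serviceVersion: 16.4.0
+```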
+ +## 验证集群状态 + +当部署包含2个副本的PostgreSQL集群时,KubeBlocks会自动配置: +- 主副本(读写操作) +- 从副本(只读操作) + +通过以下检查确认部署成功: +1. 集群状态为`Running`(运行中) +2. 所有Pod均正常运行 +3. 副本角色配置正确 + +可通过以下任一方式查看状态: + + + +```bash +kubectl get cluster pg-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Running 107s + +kubectl get pods -l app.kubernetes.io/instance=pg-cluster -n demo +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 31m +pg-cluster-postgresql-1 4/4 Running 0 31m +``` + + + + +安装`kbcli`后,可查看完整的集群信息: + +```bash +kbcli cluster describe pg-cluster -n demo +Name: pg-cluster Created Time: May 15,2025 14:23 UTC+0800 +NAMESPACE CLUSTER-DEFINITION TOPOLOGY STATUS TERMINATION-POLICY +demo postgresql replication Running Delete + +Endpoints: +COMPONENT INTERNAL EXTERNAL +postgresql pg-cluster-postgresql-postgresql.demo.svc.cluster.local:5432 + pg-cluster-postgresql-postgresql.demo.svc.cluster.local:6432 + +Topology: +COMPONENT SERVICE-VERSION INSTANCE ROLE STATUS AZ NODE CREATED-TIME +postgresql 14.7.2 pg-cluster-postgresql-0 primary Running zone-1a ip-x-y-z Dec 16,2024 08:37 UTC+0800 +postgresql 14.7.2 pg-cluster-postgresql-1 secondary Running zone-1b ip-x-y-z Dec 16,2024 08:37 UTC+0800 + +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +postgresql 500m / 500m 512Mi / 512Mi data:20Gi + +Images: +COMPONENT COMPONENT-DEFINITION IMAGE +postgresql postgresql-14-1.0.0 docker.io/apecloud/spilo:14.7.2-pgvector-v0.6.1 + docker.io/bitnami/pgbouncer:1.19.0 + +Data Protection: +BACKUP-REPO AUTO-BACKUP BACKUP-SCHEDULE BACKUP-METHOD BACKUP-RETENTION RECOVERABLE-TIME + +查看集群事件:kbcli cluster list-events -n demo pg-cluster +``` + + + + +## 访问 PostgreSQL 集群 + +KubeBlocks 自动提供以下资源: +1. 凭证信息存储在 Secret `pg-cluster-postgresql-account-postgres` 中 +2. ClusterIP 服务 `pg-cluster-postgresql-postgresql` + +### 获取凭证 +```bash +# 获取用户名 +NAME=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 --decode) + +# 获取密码 +PASSWD=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 --decode) +``` + +### 连接方式 + + + + +直接连接到 Pod: +```bash +kubectl exec -ti -n demo pg-cluster-postgresql-0 -- \ + env PGUSER=${NAME} PGPASSWORD=${PASSWD} \ + psql -h pg-cluster-postgresql-postgresql +``` + + + + +1. 转发服务端口: + ```bash + kubectl port-forward svc/pg-cluster-postgresql-postgresql 5432:5432 -n demo + ``` + +2. 
通过本地地址连接: + ```bash + psql -h 127.0.0.1 -U${NAME} -W + ``` + + + +:::note +**生产环境注意事项** + +在生产环境中,应避免使用 `kubectl exec` 和 `port-forward`,建议采用以下方案: +- 使用 LoadBalancer 或 NodePort 服务实现外部访问 +- 配置网络策略限制访问权限 +- 启用 TLS 加密确保连接安全 +- 使用连接池提升性能 +::: + +## 停止 PostgreSQL 集群 + +停止集群会暂时暂停运行,同时保留所有数据和配置: + +**关键影响:** +- 计算资源(Pod)会被释放 +- 持久化存储(PVC)保持完整 +- 服务定义得以保留 +- 集群配置不会丢失 +- 运行成本降低 + + + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/postgresql/stop.yaml +``` + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-stop + namespace: demo +spec: + clusterName: pg-cluster + type: Stop +``` + + + +也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群: + +```bash +kubectl patch cluster pg-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + +```yaml +spec: + componentSpecs: + - name: postgresql + stop: true # 设置为停止组件 + replicas: 2 +``` + + + +## 启动 PostgreSQL 集群 + +重启已停止的集群可恢复运行,所有数据与配置均保持不变。 + +**关键影响:** +- 计算资源(Pod)会被重新创建 +- 服务将再次可用 +- 集群恢复到之前状态 + + + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/postgresql/start.yaml +``` + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-start + namespace: demo +spec: + clusterName: pg-cluster + type: Start +``` + + + +通过将 `spec.componentSpecs.stop` 设为 false 来启动集群: + +```bash +kubectl patch cluster pg-cluster -n demo --type='json' -p='[ +{ + "op": "remove", + "path": "/spec/componentSpecs/0/stop" +} +]' +``` + +```yaml +spec: + componentSpecs: + - name: postgresql + stop: false # 设为false表示启动组件 + replicas: 2 +``` + + + +## 删除 PostgreSQL 集群 + +请根据数据保留需求谨慎选择删除策略: + +| 策略类型 | 删除的资源 | 数据清除情况 | 适用场景 | +|-----------------|---------------------|--------------------|------------------------| +| DoNotTerminate | 无 | 无 | 关键生产集群 | +| Delete | 所有资源 | PVC存储卷被删除 | 非关键环境 | +| WipeOut | 所有资源 | 全部数据* | 仅限测试环境 | + +*包含外部存储中的快照和备份 + +**删除前检查清单:** +1. 确认没有应用正在使用该集群 +2. 确保已存在必要的备份 +3. 验证terminationPolicy设置正确 +4. 
检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster pg-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster pg-cluster -n demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..1bfaa334 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,288 @@ +--- +description: 了解如何在KubeBlocks中管理PostgreSQL集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- PostgreSQL +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: PostgreSQL 集群生命周期管理(停止、启动、重启) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL 集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 PostgreSQL 集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运维成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|--------------------------| +| 停止 | 暂停集群,保留存储 | 成本节约、维护窗口 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重建组件 Pod | 配置变更、故障排查 | + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 PostgreSQL 集群将: + +1. 终止所有运行中的 Pod +2. 保留持久化存储(PVC) +3. 维持集群配置 + +此操作适用于: +- 临时成本节约 +- 维护窗口期 +- 开发环境暂停 + + + + + +选项 1:使用 OpsRequest API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-stop-ops + namespace: demo +spec: + clusterName: pg-cluster + type: Stop +``` + + + + +选项 2:使用 Cluster API 补丁 + +通过修改 stop 字段直接调整集群规格: + +```bash +kubectl patch cluster pg-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } +]' +``` + + + + + +### 验证集群停止 + +确认停止操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Stopping 6m3s + pg-cluster postgresql Delete Stopped 6m55s + ``` + +2. 验证无运行中的 Pod: + ```bash + kubectl get pods -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +3. 确认持久卷仍然存在: + ```bash + kubectl get pvc -n demo + ``` + 示例输出: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + data-pg-cluster-postgresql-0 Bound pvc-dcfb1ebc-2773-4edd-9898-e11da76062c4 20Gi RWO standard 19m + data-pg-cluster-postgresql-1 Bound pvc-36366e01-0178-43fa-b1a0-4168b057dd10 20Gi RWO standard 19m + ``` + +### 启动集群 + +启动已停止的 PostgreSQL 集群将: +1. 重新创建所有 Pod +2. 重新挂载持久化存储 +3. 恢复服务端点 + +预期行为: +- 集群恢复到之前状态 +- 不会发生数据丢失 +- 服务自动恢复 + + + + +发起启动操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-start-ops + namespace: demo +spec: + # 指定此操作目标集群资源的名称 + clusterName: pg-cluster + type: Start +``` + + + + + +修改集群规格以恢复运行: +1. 设置 stop: false,或 +2. 完全移除 stop 字段 + + ```bash + kubectl patch cluster pg-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + +### 验证集群启动 + +确认启动操作成功: + +1. 
检查集群状态转换: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Updating 22m + pg-cluster postgresql Delete Running 22m + ``` + +2. 验证 Pod 重建: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster + ``` + 示例输出: + ```bash + NAME READY STATUS RESTARTS AGE + pg-cluster-postgresql-0 1/1 Running 0 2m + pg-cluster-postgresql-1 1/1 Running 0 1m + ``` + +3. 检查服务端点: + ```bash + kubectl get endpoints pg-cluster-postgresql -n demo + ``` + +### 重启集群 + +重启操作提供: +- 无需完全停止集群即可重建 Pod +- 组件级粒度控制 +- 最小化服务中断 + +使用场景: +- 需要重启的配置变更 +- 资源刷新 +- 故障排查 + +**使用 OpsRequest API** + +针对特定组件进行重启: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-restart-ops + namespace: demo +spec: + clusterName: pg-cluster + type: Restart + restart: + - componentName: postgresql +``` + +**验证重启完成** + +确认组件重启成功: + +1. 跟踪 OpsRequest 进度: + ```bash + kubectl get opsrequest pg-cluster-restart-ops -n demo -w + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-restart-ops Restart pg-cluster Running 0/2 10s + pg-cluster-restart-ops Restart pg-cluster Running 1/2 65s + pg-cluster-restart-ops Restart pg-cluster Running 2/2 2m5s + pg-cluster-restart-ops Restart pg-cluster Succeed 2/2 2m5s + ``` + +2. 检查 Pod 状态: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster + ``` + 注意:重启后 Pod 将显示新的创建时间戳 + +3. 验证组件健康状态: + ```bash + kbcli cluster describe pg-cluster -n demo + ``` + +操作完成后,集群将返回 Running 状态。 + +## 总结 +在本指南中,您学会了如何: +1. 停止 PostgreSQL 集群以暂停运行同时保留持久化存储 +2. 启动已停止的集群使其重新上线 +3. 重启特定集群组件以重建其 Pod 而无需停止整个集群 + +通过管理 PostgreSQL 集群的生命周期,您可以优化资源利用率、降低成本并在 Kubernetes 环境中保持灵活性。KubeBlocks 提供了执行这些操作的无缝方式,确保高可用性和最小化服务中断。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..16cea991 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,194 @@ +--- +description: 了解如何在KubeBlocks管理的PostgreSQL集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- PostgreSQL +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: PostgreSQL 集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks对PostgreSQL集群进行垂直扩缩容 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的PostgreSQL集群进行垂直扩缩容,同时保持副本数量不变。 + +垂直扩缩容会修改PostgreSQL实例的计算资源(CPU和内存)但保持副本数不变。主要特性包括: + +- **非中断性**:正确配置时可在扩缩容期间保持可用性 +- **精细化**:可独立调整CPU、内存或两者 +- **可逆性**:可根据需求进行扩容或缩容 + +KubeBlocks以最小影响协调扩缩容过程: +1. 从节点副本优先更新 +2. 主节点在所有从节点健康后最后更新 +3. 集群状态从`更新中`过渡到`运行中` + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署PostgreSQL集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 垂直扩缩容 + +**预期工作流程**: + +1. 从节点副本优先更新(每次更新一个) +2. 主节点在所有从节点健康后最后更新 +3. 
集群状态从`更新中`过渡到`运行中` + + + + 选项1:使用VerticalScaling OpsRequest + + 应用以下YAML为postgresql组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-vscale-ops + namespace: demo + spec: + clusterName: pg-cluster + type: VerticalScaling + verticalScaling: + - componentName: postgresql + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + 垂直扩缩容期间会发生什么? + - 优先重建从节点Pod以确保主节点Pod保持可用 + - 所有从节点Pod更新完成后,主节点Pod将以新资源配置重启 + + + 可通过以下命令查看扩缩容操作进度: + + ```bash + kubectl -n demo get ops pg-cluster-vscale-ops -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 0/2 52s + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 1/2 64s + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 2/2 2m6s + pg-cluster-vscale-ops VerticalScaling pg-cluster Running 2/2 2m6s + pg-cluster-vscale-ops VerticalScaling pg-cluster Succeed 2/2 2m6s + ``` + + + + + 选项2:直接更新Cluster API + + 也可通过更新`spec.componentSpecs.resources`字段实现垂直扩缩容。 + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + requests: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + limits: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 在维护窗口或低流量时段进行扩缩容 +- 确认Kubernetes集群有足够资源 +- 开始前检查是否有其他正在进行的操作 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证服务质量(QoS) + +**扩缩容后:** +- 监控资源利用率和应用性能 +- 必要时调整PostgreSQL参数 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe pg-cluster -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +postgresql 1 / 1 1Gi / 1Gi data:20Gi standard +``` + +## KubeBlocks垂直扩缩容的核心优势 +- 无缝扩缩容:按特定顺序重建Pod确保最小中断 +- 动态资源调整:根据工作负载需求轻松调整CPU和内存 +- 灵活性:可选择动态扩缩容的OpsRequest或精确控制的直接API更新 +- 高可用性:扩缩容过程中集群保持运行状态 + +## 清理资源 +删除PostgreSQL集群及其命名空间以清除所有创建的资源: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +本指南中您学会了如何: +1. 部署KubeBlocks管理的PostgreSQL集群 +2. 通过增减postgresql组件资源进行垂直扩缩容 +3. 
使用OpsRequest和直接Cluster API更新两种方式调整资源分配 + +垂直扩缩容是优化资源利用率和适应工作负载变化的强大工具,可确保PostgreSQL集群始终保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..cf8da84f --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,310 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的PostgreSQL集群执行水平扩缩容(横向扩展与收缩)。 +keywords: +- KubeBlocks +- PostgreSQL +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks实现PostgreSQL集群的水平扩展 +--- +请翻译以下内容: + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现PostgreSQL集群的水平扩展 + +本指南将介绍如何对由KubeBlocks管理的PostgreSQL集群执行水平扩展(扩容和缩容)。您将学习如何通过**OpsRequest**和直接修改**Cluster API**两种方式来实现这一操作。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + + + +## 水平扩展(增加副本) + +**预期工作流程**: + +1. 新 Pod 被创建,状态从 `Pending` 转变为 `Running`,角色为 `secondary` +2. 数据从主节点同步至新副本 +3. 集群状态从 `Updating` 变为 `Running` + + + + + + 选项1:使用水平扩展运维请求 + + 通过增加1个副本来扩展 PostgreSQL 集群: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-scale-out-ops + namespace: demo + spec: + clusterName: pg-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + # 指定组件扩缩容的副本变更 + scaleOut: + # 指定组件的副本变更数量 + # 当前组件增加1个副本 + replicaChanges: 1 + ``` + + 监控扩展操作进度: + + ```bash + kubectl get ops pg-cluster-scale-out-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-scale-out HorizontalScaling pg-cluster Running 0/1 8s + pg-scale-out HorizontalScaling pg-cluster Running 1/1 24s + pg-scale-out HorizontalScaling pg-cluster Succeed 1/1 24s + ``` + + + + + 选项2:直接修改集群API + + 您也可以直接更新 Cluster 资源中的 `replicas` 字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 3 # 增加副本数实现扩展 + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + 或者使用命令直接修改集群CR: + + ```bash + kubectl patch cluster pg-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]' + ``` + + + +### 验证扩展结果 + +操作完成后,您将看到新创建的 Pod,PostgreSQL 集群状态从 `Updating` 变为 `Running`,且新建 Pod 的角色为 `secondary`。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +示例输出(3个Pod): +```bash +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 13m +pg-cluster-postgresql-1 4/4 Running 0 12m +pg-cluster-postgresql-2 4/4 Running 0 5m5s +``` + +新增副本会自动加入为从节点。 +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE 
+pg-cluster-postgresql-0 4/4 Running 0 13m primary +pg-cluster-postgresql-1 4/4 Running 0 12m secondary +pg-cluster-postgresql-2 4/4 Running 0 5m54s secondary +``` + +## 缩容(移除副本) + +**预期工作流程**: + +1. 选定的副本(序号最大的那个)被移除 +2. 如果移除的是主副本,会先进行自动切换 +3. Pod 被优雅终止 +4. 集群状态从 `Updating` 变为 `Running` + +:::note +如果被缩容的副本恰好是主副本,KubeBlocks 会触发切换操作。该 Pod 只有在切换操作成功后才会被终止。 +::: + + + + + + 选项1:使用水平扩缩容 OpsRequest + + 通过移除一个副本对 PostgreSQL 集群进行缩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-scale-in-ops + namespace: demo + spec: + clusterName: pg-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + # 指定缩容组件的副本变更 + scaleIn: + # 指定组件的副本变更 + # 从当前组件移除一个副本 + replicaChanges: 1 + ``` + + 监控进度: + ```bash + kubectl get ops pg-cluster-scale-in-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-scale-in HorizontalScaling pg-cluster Running 0/1 8s + pg-scale-in HorizontalScaling pg-cluster Running 1/1 24s + pg-scale-in HorizontalScaling pg-cluster Succeed 1/1 24s + ``` + + + + + 选项2:直接更新 Cluster API + + 或者,您可以直接更新 Cluster 资源中的 `replicas` 字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 1 # 减少副本数量以缩容 + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "1" + memory: "1Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + 您也可以通过命令修补集群 CR: + + ```bash + kubectl patch cluster pg-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]' + ``` + + + + +### 验证缩容 + +示例输出(一个 Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 16m +``` + +## 最佳实践 + +在进行水平扩展时: +- 尽可能选择低流量时段执行扩展操作 +- 在扩展过程中持续监控集群健康状况 +- 在横向扩容前确保有足够的资源可用 +- 考虑新增副本所需的存储需求 + + + +## 清理资源 +要删除所有已创建的资源,请执行以下命令删除 PostgreSQL 集群及其所在的命名空间: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中,您学习了如何: +- 执行水平扩展操作,向 PostgreSQL 集群添加副本。 +- 执行水平收缩操作,从 PostgreSQL 集群移除副本。 +- 通过 OpsRequest 和直接修改 Cluster API 两种方式实现水平扩缩容。 + +KubeBlocks 确保扩缩容过程无缝衔接,对数据库业务的影响降至最低。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx new file mode 100644 index 00000000..812ad0c5 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/04-volume-expansion.mdx @@ -0,0 +1,242 @@ +--- +description: 了解如何在KubeBlocks管理的PostgreSQL集群中无停机扩展持久卷声明(PVC)。 +keywords: +- KubeBlocks +- PostgreSQL +- Volume Expansion +- Kubernetes +- PVC +sidebar_label: 存储卷扩容 +sidebar_position: 4 +title: 在 PostgreSQL 集群中扩展存储卷 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# PostgreSQL 集群存储卷扩容指南 + +本文档介绍如何在 **KubeBlocks** 管理的 PostgreSQL 集群中扩展持久卷声明(PVC)。存储卷扩容功能允许动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持时,此操作可在不中断服务的情况下执行。 + +存储卷扩容功能允许您在创建持久卷声明(PVC)后增加其容量大小。该特性自 Kubernetes v1.11 版本引入,并在 Kubernetes v1.24 版本正式发布(GA)。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + 
+ + +### 检查存储类是否支持卷扩容 + +列出所有可用存储类,通过检查 `ALLOWVOLUMEEXPANSION` 字段确认是否支持卷扩容: +```bash +kubectl get storageclass +``` + +示例输出: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +请确保您使用的存储类已将 `ALLOWVOLUMEEXPANSION` 设为 true。若为 false,则表示该存储类不支持卷扩容。 + +## 使用支持扩容的存储类部署 PostgreSQL 复制集群 + +KubeBlocks 采用声明式方式管理 PostgreSQL 集群。以下是部署包含 2 个副本(1 主节点 + 1 从节点)的 PostgreSQL 集群配置示例。 + +应用以下 YAML 配置部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩容的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `storageClassName`: 指定支持卷扩容的 `StorageClass` 名称。若未设置,将使用标记为 `default` 的 StorageClass。 + +:::note +**ALLOWVOLUMEEXPANSION** + +创建集群时请确保存储类支持卷扩容(检查 `ALLOWVOLUMEEXPANSION` 字段)。 + +::: + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 扩容存储卷 + +:::note +1. 确保存储类支持卷扩容(检查 `ALLOWVOLUMEEXPANSION`) +2. 新容量必须大于当前容量 +3. 根据存储提供商不同,卷扩容可能需要额外配置 +::: + +可通过以下两种方式扩容存储卷: + + + + + 方法一:使用 VolumeExpansion OpsRequest + + 应用以下 YAML 为 postgresql 组件扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: pg-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: postgresql + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 通过以下命令监控扩容进度: + + ```bash + kubectl describe ops pg-cluster-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 完成后,PVC 容量将更新。 + + :::note + 若使用的存储类不支持卷扩容,此 OpsRequest 将快速失败并返回类似信息: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 方法二:直接更新 Cluster API + + 也可通过更新 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段实现扩容。 + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量,确保大于当前容量 + storage: 30Gi + ``` + KubeBlocks 将根据新配置自动更新 PVC 容量。 + + + +## 验证 + +检查更新后的集群配置: +```bash +kbcli cluster describe pg-cluster -n demo +``` +预期输出: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +postgresql 500m / 500m 512Mi / 512Mi data:30Gi +``` +数据 PVC 的存储容量已更新至指定值(本例中为 30Gi)。 + +确认 PVC 扩容完成: 
+
```bash
kubectl get pvc -l app.kubernetes.io/instance=pg-cluster -n demo
```
预期输出:
```bash
NAME                           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-pg-cluster-postgresql-0   Bound    pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx   30Gi       RWO                           33m
data-pg-cluster-postgresql-1   Bound    pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx   30Gi       RWO                           33m
```

## 清理资源
删除 PostgreSQL 集群及其命名空间以释放所有资源:
```bash
kubectl delete cluster pg-cluster -n demo
kubectl delete ns demo
```

## 总结

本指南中您已学习:
1. 验证存储类对卷扩容的兼容性
2. 通过以下方式执行卷扩容:
   - 使用 OpsRequest 进行动态更新
   - 通过 Cluster API 手动更新
3. 验证 PVC 新容量并确认扩容操作完成

通过存储卷扩容功能,您可以在不影响服务的情况下高效扩展 PostgreSQL 集群的存储容量,确保数据库能够随着应用需求同步增长。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx
new file mode 100644
index 00000000..93b81f32
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/05-manage-loadbalancer.mdx
@@ -0,0 +1,400 @@
+---
+description: 了解如何在KubeBlocks中通过负载均衡器及其他服务类型配置和管理PostgreSQL服务,实现内外部访问。
+keywords:
+- KubeBlocks
+- PostgreSQL
+- LoadBalancer
+- External Service
+- Expose
+- Kubernetes
+sidebar_label: 管理 PostgreSQL 服务
+sidebar_position: 5
+title: 使用KubeBlocks声明式集群API创建和销毁PostgreSQL服务
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# 使用 KubeBlocks 声明式集群 API 管理 PostgreSQL 服务
+
+本指南提供了逐步操作说明,指导如何在外部和内部暴露由 KubeBlocks 管理的 PostgreSQL 服务。您将学习使用云服务提供商的负载均衡器服务配置外部访问、管理内部服务,以及在不再需要时正确禁用外部暴露功能。
+
+## 前提条件
+
+import Prerequisites from '../_tpl/_prerequisites.mdx'
+
+<Prerequisites />
+
+## 部署 PostgreSQL 集群
+
+import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx'
+
+<CreatePGCluster />
+
+## 验证部署
+
+import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx'
+
+<VerifyPGCluster />
+
+## 查看网络服务
列出为 PostgreSQL 集群创建的服务:
```bash
kubectl get service -l app.kubernetes.io/instance=pg-cluster -n demo
```

示例服务输出:
```bash
NAME                               TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
pg-cluster-postgresql-postgresql   ClusterIP   10.96.19.237   <none>        5432/TCP,6432/TCP   157m
```

:::note

这里显示两个端口 5432 和 6432,其中 5432 用于 PostgreSQL 服务,6432 用于 PgBouncer 连接池。

:::


## 暴露 PostgreSQL 服务

外部服务地址允许公网访问 PostgreSQL,而内部服务地址将访问限制在用户的 VPC 内。

### 服务类型对比

| 类型 | 使用场景 | 云服务成本 | 安全性 |
|---------------|------------------|------------|-------------|
| ClusterIP | 内部服务通信 | 免费 | 最高 |
| NodePort | 开发测试环境 | 低 | 中等 |
| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 |



  选项一:使用 OpsRequest

  要通过 LoadBalancer 对外暴露 PostgreSQL 服务,创建一个 OpsRequest 资源:

  ```yaml
  apiVersion: operations.kubeblocks.io/v1alpha1
  kind: OpsRequest
  metadata:
    name: pg-cluster-expose-enable-ops
    namespace: demo
  spec:
    type: Expose
    clusterName: pg-cluster
    expose:
    - componentName: postgresql
      services:
      - name: internet
        # 决定服务暴露方式,默认为 'ClusterIP'
        # 有效选项为 'ClusterIP'、'NodePort' 和 'LoadBalancer'
        serviceType: LoadBalancer
        # 如果 ServiceType 是 LoadBalancer,包含云服务商相关参数
        # 以下是 AWS EKS 的示例配置
        annotations:
          service.beta.kubernetes.io/aws-load-balancer-type: nlb
          service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP
        # 指定服务目标角色
        # 如果指定,服务将仅暴露给具有匹配角色的 Pod
        roleSelector: primary
      switch: Enable
  ```

  等待 OpsRequest 完成:
  ```bash
  kubectl get ops pg-cluster-expose-enable-ops -n demo
  ```

  示例输出:
  ```bash
  NAME                           TYPE     CLUSTER      STATUS    PROGRESS   AGE
  pg-cluster-expose-enable-ops   Expose   pg-cluster   Succeed   1/1        31s
  ```



  选项二:使用 Cluster API

  或者,在 
Cluster 资源的 `spec.services` 部分添加 LoadBalancer 服务: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + # 暴露外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP + componentSelector: postgresql + name: postgresql-internet + serviceName: postgresql-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: tcp-postgresql + port: 5432 + protocol: TCP + targetPort: tcp-postgresql + type: LoadBalancer + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + 上述 YAML 配置在 services 部分添加了一个新的外部服务。这个 LoadBalancer 服务包含了 AWS 网络负载均衡器 (NLB) 的注解。 + + :::note + 云服务商注解 + + 使用 LoadBalancer 服务时,必须包含特定云服务商的注解。以下是不同云服务商的常用注解: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 设为 "false" 表示面向互联网的负载均衡器 + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 设为 "false" 表示面向互联网的负载均衡器 + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # 限制负载均衡器仅内部 VPC 访问。默认不指定时为面向互联网。 + cloud.google.com/l4-rbs: "enabled" # 面向互联网负载均衡器的优化配置 + ``` + + - 阿里云 + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 设为 "intranet" 表示内部负载均衡器 + ``` + ::: + + + :::note + `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制负载均衡器是内部还是面向互联网。注意该注解在服务创建后不能动态修改。 + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为 "true" 表示使用内部 VPC IP + ``` + 如果在服务创建后将该注解从 "false" 改为 "true",注解可能在服务对象中更新,但负载均衡器仍会保留其公网 IP。 + + 要正确修改此行为: + - 首先删除现有的负载均衡器服务 + - 使用更新后的注解重新创建服务 (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true") + - 等待新的负载均衡器配置正确的内部或外部 IP + ::: + + + 使用以下命令等待集群状态变为 Running: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Running 18m + ``` + + + + +### 验证暴露的服务 +检查服务详情以确认 LoadBalancer 服务已创建: + +```bash +kubectl get service -l app.kubernetes.io/instance=pg-cluster -n demo +``` + +示例输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +pg-cluster-postgresql-postgresql ClusterIP 10.96.19.237 5432/TCP,6432/TCP 33m +pg-cluster-postgresql-internet LoadBalancer 172.20.60.24 5432:31243/TCP 1m +``` + +### 等待 DNS 解析 + +负载均衡器 DNS 名称可能需要 2-5 分钟才能解析。验证解析状态: + +```bash +nslookup # 将 替换为实际输出中的 IP +``` + +## 外部连接 PostgreSQL + +### 获取凭据 + +KubeBlocks 会自动创建一个包含 PostgreSQL postgres 凭据的 Secret。获取 PostgreSQL postgres 凭据的方法如下: +```bash +NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d` +``` + +### 使用 PostgreSQL 客户端连接 + +现在你可以从外部(例如你的笔记本电脑或 EC2 实例)连接到 PostgreSQL 数据库: +```bash +psql -h -U${NAME} -W +``` + +## 禁用外部访问 + + + + + + 方法一:使用 OpsRequest + + 要禁用外部访问,创建一个 OpsRequest: + ```yaml + 
apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: pg-cluster + expose: + - componentName: postgresql + services: + - name: internet + roleSelector: primary + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops pg-cluster-expose-disable-ops -n demo + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-expose-disable-ops Expose pg-cluster Succeed 1/1 12s + ``` + + + + + + 方法二:使用 Cluster API + + 或者,从 Cluster 资源中移除 `spec.services` 字段: + ```bash + kubectl patch cluster pg-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + 监控集群状态直到变为 Running: + ```bash + kubectl get cluster pg-cluster -n demo -w + ``` + + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + pg-cluster postgresql Delete Running 23m + ``` + + + +### 验证服务移除 + +确保 'pg-cluster-postgresql-internet' 服务已被移除: + +```bash +kubectl get service -n demo +``` + +预期结果:'pg-cluster-postgresql-internet' 服务应被移除。 + +## 暴露 PgBouncer 服务 + +PostgreSQL 采用多进程架构,每个连接都会创建一个独立的后端进程。过多的连接会消耗大量内存,降低数据库的吞吐量和稳定性。KubeBlocks 通过 PgBouncer(PostgreSQL 集群的连接池组件)来解决这一问题。 + +要暴露 PgBouncer 服务,只需按以下方式更新端口信息为 pgbouncer 的配置: + +```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" + componentSelector: postgresql + name: postgresql-internet + serviceName: postgresql-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: tcp-pgbouncer + port: 6432 + protocol: TCP + targetPort: tcp-pgbouncer + type: LoadBalancer + componentSpecs: + - name: postgresql +... 
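# 其余 componentSpecs 配置(replicas、resources、volumeClaimTemplates 等)与前文创建集群时保持一致,此处省略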
+``` + +## 清理资源 +要删除所有已创建的资源,请执行以下命令删除 PostgreSQL 集群及其所在的命名空间: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## 概述 +本指南演示了如何: +- 使用 KubeBlocks 将 PostgreSQL 服务暴露至外部或内部网络 +- 通过云服务商特定注解配置负载均衡器(LoadBalancer)服务 +- 通过 OpsRequest 或直接更新 Cluster API 来管理外部访问,实现服务的启用或禁用 + +KubeBlocks 为 Kubernetes 环境中的 PostgreSQL 服务管理提供了灵活且简化的解决方案。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx new file mode 100644 index 00000000..ae8ab710 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/06-minior-version-upgrade.mdx @@ -0,0 +1,308 @@ +--- +description: 了解如何以最短停机时间部署和升级由KubeBlocks管理的PostgreSQL集群。 +hidden: true +keywords: +- KubeBlocks +- PostgreSQL +- Upgrade +- Rolling Upgrade +- Kubernetes +sidebar_label: 次版本升级 +sidebar_position: 6 +title: 在KubeBlocks中升级PostgreSQL集群的次版本 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 在 KubeBlocks 中升级 PostgreSQL 集群的次版本 + +本指南将带您完成由 KubeBlocks 管理的 PostgreSQL 集群的部署及次版本升级过程,确保升级期间实现最小化停机时间。 + +为了将对数据库可用性的影响降至最低,升级流程会优先从副本(从节点实例)开始。待所有副本升级完成后,系统会执行主从切换操作,将其中一个已升级的副本提升为主节点。切换过程非常迅速,通常能在几百毫秒内完成。切换完成后,原主节点实例将进行升级,从而确保对应用程序的干扰最小化。 + +## 前提条件 + +在开始之前,请确保满足以下要求: + +- **环境准备**: + - 已部署并运行一个 Kubernetes 集群。 + - 已配置 `kubectl` CLI 工具,能够与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。具体安装步骤请参考链接指引。 + +- **命名空间准备**:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## 部署 PostgreSQL 复制集群 + +KubeBlocks 采用声明式方式管理 PostgreSQL 集群。以下是一个部署包含 2 个副本(1 个主节点,1 个从节点)的 PostgreSQL 集群配置示例。 + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 14.7.2 # 此处使用 14.7.2 版本用于测试次版本升级 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +说明: +1. 配置中明确指定了集群拓扑为复制模式(replication) +2. 通过 replicas: 2 设置了一个主节点和一个从节点的复制集群 +3. 为测试次版本升级功能,特别指定了 PostgreSQL 14.7.2 版本 +4. 资源配置部分定义了每个 Pod 的 CPU 和内存限制 +5. 
通过 volumeClaimTemplates 声明了 20Gi 的持久卷存储 + +## 验证部署 +监控集群状态直至其转为运行状态(Running): +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +示例输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` +当集群状态显示为 Running 时,表示您的 PostgreSQL 集群已准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能进入运行状态。 + +::: + + +## 列出所有可用的 PostgreSQL 版本 + +使用以下命令可查看当前 KubeBlocks 安装支持的 PostgreSQL 版本列表: +```bash +kubectl get cmpv postgresql +``` +预期输出示例: +```bash +NAME VERSIONS STATUS AGE +postgresql 16.4.0,15.7.0,14.8.0,14.7.2,12.15.0,12.14.1,12.14.0 Available 33d +``` + +注意:实际支持的版本列表可能因您使用的 KubeBlocks 版本不同而有所差异。 + +## PostgreSQL 版本升级指南 + +### 识别当前主从实例 + +执行以下命令查看集群实例的角色分布: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 66m primary +pg-cluster-postgresql-1 4/4 Running 0 65m secondary +``` + +### 检查同组件定义的兼容版本 + +**步骤1.** 获取与指定`ComponentVersion`关联的`ComponentDefinition`列表 + +```bash +kubectl get cmpv postgresql -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
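该命令读取 `ComponentVersion` 资源上的 `componentversion.kubeblocks.io/compatible-definitions` 注解,并将其中以逗号分隔的 `ComponentDefinition` 列表逐行展开。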
+
示例输出:
```text
postgresql-12-1.0.0
postgresql-14-1.0.0
postgresql-15-1.0.0
postgresql-16-1.0.0
```
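列表中的每一项对应一个 PostgreSQL 主版本(12、14、15、16)的 `ComponentDefinition`。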
+ +**步骤2.** 获取与指定`ComponentDefinition`关联的兼容版本 + +```bash +kubectl get cmpv postgresql -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("postgresql-14"))) | .releases[]' +``` + +该命令返回与名为`postgresql-14`的`ComponentDefinition`兼容的版本: + +
+
示例输出:
```text
14.7.2
14.8.0
```
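在实际执行升级前,可以先用一小段脚本确认目标版本确实出现在兼容列表中(以下为示意脚本,其中 `TARGET_VERSION` 为本文假设的变量名):

```bash
# 示意:检查目标版本是否在 postgresql-14 的兼容版本列表中
TARGET_VERSION="14.8.0"
if kubectl get cmpv postgresql -o json \
  | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("postgresql-14"))) | .releases[]' \
  | grep -qx "${TARGET_VERSION}"; then
  echo "版本 ${TARGET_VERSION} 在兼容列表中,可以升级"
else
  echo "版本 ${TARGET_VERSION} 不在兼容列表中,请先确认再升级"
fi
```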
+ +### 执行版本升级 + +**预期工作流程**: +1. 从节点副本优先升级(逐个进行) +2. 待从节点健康后最后升级主节点 +3. 集群状态从`Updating`转变为`Running` + +通过修改Cluster资源中的serviceVersion字段实现版本升级。本例将PostgreSQL从`14.7.2`升级至`14.8.0` + + + + + + 方案一:使用运维请求(OpsRequest) + + 可通过创建OpsRequest执行升级: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-upgrade + namespace: demo + spec: + # 指定目标Cluster资源名称 + clusterName: pg-cluster + type: Upgrade + upgrade: + components: + - componentName: postgresql + # 指定组件目标服务版本 + serviceVersion: "14.8.0" + ``` + + + + 方案二:使用声明式Cluster API + + 也可通过修改集群配置中的`spec.componentSpecs.serviceVersion`字段实现: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 14.8.0 # 设置为14.8.0进行升级 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + +### 监控升级过程 +升级过程中观察Pod状态变化: +```bash +kubectl get pods -n demo -w +``` +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 97s +pg-cluster-postgresql-1 4/4 Running 0 50s +pg-cluster-postgresql-1 3/4 Running 2 (2s ago) 68s +pg-cluster-postgresql-0 4/4 Running 2 (6s ago) 2m6s +``` +**关键观察点**: +- 从节点副本('pg-cluster-postgresql-1')优先升级 +- 发生主从切换操作,原从节点成为新主节点 +- 最终原主节点('pg-cluster-postgresql-0')完成升级 + +升级完成后角色发生切换: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` +更新后的角色分布: +```bash +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 2m secondary +pg-cluster-postgresql-1 4/4 Running 0 2m primary +``` + +## 验证 + +### 检查集群状态 +确保集群处于 Running(运行中)状态: +```bash +kubectl get cluster pg-cluster -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Running 17m +``` + +### 验证 PostgreSQL 版本 +获取 PostgreSQL 的 postgres 账户凭据: +```bash +NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d` +``` + +连接到升级后的实例并验证 PostgreSQL 版本: +```bash +kubectl exec -ti -n demo pg-cluster-postgresql-1 -- \ + env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -c "SELECT VERSION();" +``` + +## 总结 +在本指南中,您学习了如何: +- 使用 KubeBlocks 部署 PostgreSQL 复制集群 +- 以最小停机时间执行 PostgreSQL 次版本的滚动升级 +- 验证升级是否成功 + +这种滚动升级策略通过先升级副本、执行主从切换,再升级原主节点的方式,确保了高可用性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx new file mode 100644 index 00000000..784d9dff --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/07-modify-parameters.mdx @@ -0,0 +1,263 @@ +--- +description: 了解如何通过Reconfiguring OpsRequest在KubeBlocks中修改PostgreSQL的动态与静态参数,以优化数据库性能及可用性。 +keywords: +- PostgreSQL +- KubeBlocks +- OpsRequest +- dynamic parameters +- static parameters +- database configuration +sidebar_label: 修改 PostgreSQL 参数 +sidebar_position: 7 +title: 修改 PostgreSQL 参数 +--- +# 修改 PostgreSQL 参数 + +数据库重新配置涉及修改参数、设置或配置以优化性能、安全性或可用性。参数变更分为两类: + +| 类型 
| 需重启 | 生效范围 | 示例参数 | +|------|------------------|-------|--------------------| +| **动态参数** | 否 | 立即生效 | `max_connections` | +| **静态参数** | 是 | 重启后生效 | `shared_buffers` | + +对于静态参数,KubeBlocks 通过以下方式最小化停机时间: +1. 先修改并重启副本节点 +2. 执行主从切换,将更新后的副本提升为主节点(通常毫秒级完成) +3. 重启原主节点 + +本指南演示如何使用 Reconfiguring OpsRequest 修改 KubeBlocks 管理的 PostgreSQL 集群的动态和静态参数。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 检查参数值 + +### 获取凭据 +KubeBlocks 会自动创建包含 PostgreSQL postgres 凭据的 Secret。通过以下命令获取凭据: +```bash +NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d` +``` + +### 访问 PostgreSQL 集群 +使用 PostgreSQL 客户端连接集群主节点: +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql +``` + +### 查询参数值 + +连接后,可以查询 'max_connections' 和 'shared_buffers' 的当前值: +```sql +postgres=# SHOW max_connections; + max_connections +----------------- + 56 +(1 row) + +postgres=# show pgaudit.log; + pgaudit.log +------------- + ddl,read,write +(1 row) + +postgres=# show shared_buffers; + shared_buffers +---------------- + 128MB +(1 row) +``` + +## 动态参数示例:修改 max_connections 和 pgaudit.log + +像 `max_connections` 这样的动态参数无需重启 PostgreSQL 即可修改。变更会立即生效,允许您: +- 动态调整连接数限制 +- 修改审计日志级别 +- 调优性能参数 + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-reconfigure-dynamic + namespace: demo +spec: + clusterName: pg-clusters + reconfigures: + - componentName: postgresql + parameters: + - key: max_connections + value: '100' + - key: pgaudit.log + value: ddl + type: Reconfiguring +``` + +此配置: +- 将 `pgaudit.log` 从默认的 `ddl,read,write` 改为仅记录 `ddl` +- 将 `max_connections` 从 56 增加到 100 + +`pgaudit.log` 参数控制审计日志粒度。可用选项: + +| 值 | 描述 | +|----------|-------------| +| none | 不执行额外日志记录 | +| ddl | 记录所有数据定义语言(DDL)语句| +| dml | 记录所有数据操作语言(DML)语句 +| role | 记录所有角色相关命令 | +| read | 记录所有读操作| +| write | 记录所有写操作| +| function | 记录所有函数调用| +| misc | 记录杂项命令| +| all | 记录所有操作| + + +等待 OpsRequest 完成: +```bash +kubectl get ops pg-reconfigure-dynamic -n demo -w +``` + +示例输出: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +pg-reconfigure-dynamic Reconfiguring pg-cluster Running -/- 11s +pg-reconfigure-dynamic Reconfiguring pg-cluster Succeed -/- 31s +``` + +**验证配置变更** + +登录 PostgreSQL 实例确认 `max_connections` 和 `pgaudit.log` 参数已更新: + +```sql +postgres=# show max_connections; + max_connections +----------------- + 100 +(1 row) + +postgres=# show pgaudit.log; + pgaudit.log +------------- + ddl +(1 row) +``` + +输出验证了两个参数均已更新: +- `max_connections` 增加到 100 +- `pgaudit.log` 缩减为仅记录 DDL + +## 静态参数示例:修改 shared_buffers + +像 `shared_buffers` 这样的静态参数需要重启。本示例将缓冲区从 128MB 增加到 256MB。 + +创建 Reconfigure OpsRequest。应用以下 OpsRequest YAML 来更新 'shared_buffers': + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: postgresql-reconfigure-static + namespace: demo +spec: + clusterName: pg-cluster + force: false + reconfigures: + - componentName: postgresql + parameters: + - key: shared_buffers + value: '256MB' + preConditionDeadlineSeconds: 0 + type: Reconfiguring +``` + +检查 OpsRequest 状态直至完成: + +```bash +kubectl get ops postgresql-reconfigure-static -n demo -w +``` +示例输出: +```bash 
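NAME                            TYPE            CLUSTER      STATUS    PROGRESS   AGE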
+postgresql-reconfigure-static Reconfiguring pg-cluster Running -/- 5s +postgresql-reconfigure-static Reconfiguring pg-cluster Succeed -/- 31s +``` + +**验证配置变更** + +登录 PostgreSQL 实例确认 `shared_buffers` 参数已更新: + +```sql +postgres=# show shared_buffers; + shared_buffers +---------------- + 256MB +(1 row) +``` + +## 重新配置的有效性检查 + +KubeBlocks 在应用变更前会验证参数。例如,`max_connections` 遵循以下规则: + +```cue +max_connections?: int & >=6 & <=8388607 +``` +这意味着 `max_connections` 必须是 6 到 8388607 之间的整数。 + +如果您尝试为此参数设置字符串值: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: postgresql-reconfigure-invalid + namespace: demo +spec: + type: Reconfiguring + clusterName: pg-cluster + reconfigures: + - componentName: postgresql + parameters: + - key: max_connections + value: 'abc' +``` + +通过检查 OpsRequest 状态 +```bash +kubectl get ops postgresql-reconfigure-invalid -n demo +``` + +此 OpsRequest 会快速失败。要查看详情,可以描述 `Parameter` CR: + +```bash +kubectl describe parameter postgresql-reconfigure-invalid -n demo +``` + +您将看到消息 `failed to validate updated config: [failed to parse field max_connections: [strconv.Atoi: parsing "STRING": invalid syntax]]` + +## 清理 +要删除所有创建的资源,删除 PostgreSQL 集群及其命名空间: +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +本指南介绍了通过 KubeBlocks 修改 PostgreSQL 参数: +- 动态变更(如 `max_connections`)立即生效 +- 静态变更(如 `shared_buffers`)需要重启但停机时间最短 +- 所有变更在应用前都会经过验证 +- 配置遵循声明式管理原则 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/08-switchover.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/08-switchover.mdx new file mode 100644 index 00000000..9bc1471e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/08-switchover.mdx @@ -0,0 +1,184 @@ +--- +description: 使用KubeBlocks在PostgreSQL集群中执行计划内角色切换,实现最短停机时间和可控维护 +keywords: +- PostgreSQL +- KubeBlocks +- Switchover +- High Availability +- Role Transition +- Kubernetes +sidebar_label: PostgreSQL 主从切换 +sidebar_position: 8 +title: PostgreSQL 集群切换 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# PostgreSQL 集群切换(Switchover) + +**切换(Switchover)** 是一种有计划的操作,用于将主节点角色从一个 PostgreSQL 实例转移到另一个实例。与故障转移(failover)不同,切换操作具有以下特点: +- 可控的角色转换 +- 极短停机时间(通常仅数百毫秒) +- 可预测的维护窗口 + +切换操作适用于以下场景: +- 节点维护/升级 +- 工作负载重新平衡 +- 测试高可用性 +- 有计划的基础设施变更 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 检查角色状态 +列出所有 Pod 及其角色(主节点或从节点): + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +示例输出: + +```text +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 9m59s primary +pg-cluster-postgresql-1 4/4 Running 0 11m secondary +``` + +## 执行计划内切换 + +要发起计划内切换,请创建如下 OpsRequest 资源: + + + + 选项 1:自动切换(不指定候选节点) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-switchover-ops + namespace: demo + spec: + clusterName: pg-cluster + type: Switchover + switchover: + - componentName: postgresql + instanceName: pg-cluster-postgresql-0 + ``` + **关键参数:** + - `instanceName`:指定切换操作前的主节点(Pod)名称 + + + + 选项 2:定向切换(指定候选节点) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-switchover-targeted + namespace: demo + spec: + clusterName: pg-cluster + type: 
Switchover + switchover: + - componentName: postgresql + # 指定需要转移角色的实例 + # 典型用法是转移共识系统中的领导者角色 + instanceName: pg-cluster-postgresql-0 + # 如果指定 candidateName,角色将转移到该实例 + # 名称必须匹配组件中的某个 Pod + # 详情请参考 ComponentDefinition 的 Switchover 生命周期操作 + candidateName: pg-cluster-postgresql-1 + ``` + + **关键参数:** + - `instanceName`:指定切换操作前的主节点(Pod)名称 + - `candidateName`:如果指定候选节点名称,角色将转移到该实例 + + + +## 监控切换过程 + +监控切换操作进度: + +```bash +kubectl get ops pg-switchover-ops -n demo -w +``` + +预期结果: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +pg-switchover-ops Switchover pg-cluster Succeed 1/1 17s +``` + +## 验证切换结果 + +切换完成后,指定的实例将被提升为主节点,而原先的主节点将降级为从节点。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role +``` + +预期输出: + +```text +NAME READY STATUS RESTARTS AGE ROLE +pg-cluster-postgresql-0 4/4 Running 0 19m59s secondary +pg-cluster-postgresql-1 4/4 Running 0 21m primary +``` + +在本示例中: +- Pod 'pg-cluster-postgresql-1' 已被提升为主节点 +- Pod 'pg-cluster-postgresql-0' 已降级为从节点 + +## 故障排查 + +### 常见切换问题 + +如果切换操作卡住,请检查以下资源: +```bash +# 检查当前主节点和候选节点的 agent 日志 +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# 检查集群事件中的错误 +kubectl get events -n demo --field-selector involvedObject.name=pg-cluster + +# 检查 kubeblocks 日志 +kubectl -n kb-system logs deploy/kubeblocks +``` + +## 总结 + +本指南演示了如何: +1. 部署 PostgreSQL 高可用集群 +2. 执行自动和定向两种切换方式 +3. 验证角色转换 + +**关键要点:** +- 切换操作可实现可控维护,停机时间极短(约100-500毫秒) +- KubeBlocks 提供声明式操作实现可靠的角色转换 +- 切换后必须验证: + - 集群状态 + - 应用连接性 + - 复制健康状况 +- 排查问题时检查以下日志: + - KubeBlocks operator(位于 kb-system 命名空间) + - 数据库 Pod 上的 kbagent \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..951001ce --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,157 @@ +--- +description: 了解如何在由KubeBlocks管理的PostgreSQL集群中下线(停用)特定Pod。 +keywords: +- KubeBlocks +- PostgreSQL +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 停用 PostgreSQL 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的PostgreSQL集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 下线 KubeBlocks 管理的 PostgreSQL 集群中的特定 Pod + +本文档介绍如何在 KubeBlocks 管理的 PostgreSQL 集群中下线(停用)特定 Pod。下线操作可在保持集群可用性的同时,实现对资源的精确控制。此功能适用于工作负载重平衡、节点维护或故障处理等场景。 + +## 为什么选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法下线特定 Pod。StatefulSet 会确保 Pod 的顺序和身份标识,缩容时总是优先移除序号最大的 Pod(例如从 3 个副本缩容时,会先移除 `Pod-2`)。这种限制使得管理员无法精确控制要下线的 Pod,给维护工作、负载分配或故障处理带来不便。 + +KubeBlocks 突破了这一限制,允许管理员直接下线指定 Pod。这种细粒度控制既能保障高可用性,又能在不影响整个集群的前提下优化资源管理。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 下线 Pod + +**预期工作流程**: +1. `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 
集群状态从 `Updating` 转为 `Running` + +要下线特定 Pod(例如 'pg-cluster-postgresql-1'),可采用以下任一方法: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 将 Pod 标记为下线: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: pg-cluster-decommission-ops + namespace: demo + spec: + clusterName: pg-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: postgresql + scaleIn: + onlineInstancesToOffline: + - 'pg-cluster-postgresql-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 检查下线操作执行状态: + + ```bash + kubectl get ops pg-cluster-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + pg-cluster-decommission-ops HorizontalScaling pg-cluster Succeed 1/1 33s + ``` + + + + + + 方法二:使用 Cluster API + + 也可直接更新 Cluster 资源来下线 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: pg-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 1 + offlineInstances: + - pg-cluster-postgresql-1 # <----- 指定要下线的 Pod + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + + +### 验证下线结果 + +应用更新配置后,检查集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +pg-cluster-postgresql-0 4/4 Running 0 6m12s +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向下线特定 Pod +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保障可用性的同时,提供了细粒度的集群管理能力。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx new file mode 100644 index 00000000..0093dfb8 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/11-rebuild-replica.mdx @@ -0,0 +1,331 @@ +--- +description: 如何使用原地与非原地方法重建KubeBlocks管理的PostgreSQL复制集群中的副本 +keywords: +- KubeBlocks +- PostgreSQL +- Replica Rebuild +- In-Place +- Non-In-Place +sidebar_label: 恢复PostgreSQL副本 +sidebar_position: 11 +title: 在KubeBlocks中恢复PostgreSQL副本 +--- +# 在KubeBlocks中重建PostgreSQL副本 + +本指南演示如何使用原地和非原地两种方法重建副本。 + +**什么是副本重建**? + +副本重建是指从零开始或从备份重新创建PostgreSQL副本的过程,同时保持: +- **数据一致性**:确保副本拥有与主节点完全一致的数据副本 +- **高可用性**:在重建过程中最小化停机时间 + +在此过程中: +1. 识别并隔离有问题的副本 +2. 从主节点获取新的基础备份 +3. 流式传输WAL(预写日志)段以追赶进度 +4. 副本重新加入复制集群 + +**何时需要重建PostgreSQL实例**? 

在以下常见场景中需要进行重建:
- 副本落后主节点过多(不可恢复的延迟),或复制槽损坏
- 无法自动解决的 WAL 文件间隙
- 数据损坏:存储级损坏(磁盘/存储卷问题)、主从节点间数据不一致等
- 基础设施问题:节点故障、存储设备故障或跨可用区/地域迁移

## 前提条件

import Prerequisites from '../_tpl/_prerequisites.mdx'

<Prerequisites />

## 部署 PostgreSQL 集群

import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx'

<CreatePGCluster />

## 验证部署

import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx'

<VerifyPGCluster />

## 连接到 PostgreSQL 主副本并写入模拟数据

使用以下命令检查副本角色:
```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=pg-cluster -L kubeblocks.io/role
```

示例输出:
```bash
NAME                      READY   STATUS    RESTARTS   AGE   ROLE
pg-cluster-postgresql-0   4/4     Running   0          13m   secondary
pg-cluster-postgresql-1   4/4     Running   0          12m   primary
```

### 步骤 1:连接到主实例

KubeBlocks 会自动创建一个包含 PostgreSQL postgres 凭据的 Secret。获取 PostgreSQL postgres 凭据:

```bash
NAME=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 -d`
PASSWD=`kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d`
```

通过服务 `pg-cluster-postgresql-postgresql` 连接,该服务会将请求路由到主副本。

```bash
kubectl exec -ti -n demo pg-cluster-postgresql-0 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h pg-cluster-postgresql-postgresql
```

### 步骤 2:向主实例写入数据
连接到主实例并向数据库写入一条记录:

```sql
postgres=# CREATE DATABASE test;
postgres=# \c test
test=# CREATE TABLE t1 (id INT PRIMARY KEY, name VARCHAR(255));
test=# INSERT INTO t1 VALUES (1, 'John Doe');
```

### 步骤 3:验证数据复制

连接到副本实例(例如 pg-cluster-postgresql-0)以验证数据是否已复制:
```bash
kubectl exec -ti -n demo pg-cluster-postgresql-0 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h 127.0.0.1
```
:::note
如果主实例是 'pg-cluster-postgresql-0',则应连接到 'pg-cluster-postgresql-1'。在连接前请确保检查每个实例的角色。
:::

```sql
postgres=# \c test
test=# SELECT * FROM t1;
```

示例输出:
```bash
 id |   name
----+----------
  1 | John Doe
(1 row)
```

## 副本重建

KubeBlocks 提供两种副本重建方式:原地重建(in-place)和非原地重建(non-in-place)。

### 原地重建

**工作流程**:
1. 终止原 Pod(例如 'pg-cluster-postgresql-0')
2. 创建同名新 Pod 并分配新 PVC
3. 从主节点同步数据
使用以下配置进行原地重建:
```yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: pg-rebuild-replica-inplace
  namespace: demo
spec:
  clusterName: pg-cluster
  force: true
  preConditionDeadlineSeconds: 0
  rebuildFrom:
  - componentName: postgresql
    inPlace: true # 设置为原地重建模式
    instances:
    - name: pg-cluster-postgresql-0
  type: RebuildInstance
```

配置中的 "pg-cluster-postgresql-0" 表示需要修复的实例名称(Pod 名称)。

监控重建操作:
```bash
kubectl get ops pg-rebuild-replica-inplace -n demo -w
```
示例输出:
```bash
NAME                         TYPE              CLUSTER      STATUS    PROGRESS   AGE
pg-rebuild-replica-inplace   RebuildInstance   pg-cluster   Running   0/1        5s
pg-rebuild-replica-inplace   RebuildInstance   pg-cluster   Running   0/1        5s
pg-rebuild-replica-inplace   RebuildInstance   pg-cluster   Running   0/1        46s
pg-rebuild-replica-inplace   RebuildInstance   pg-cluster   Running   1/1        46s
pg-rebuild-replica-inplace   RebuildInstance   pg-cluster   Succeed   1/1        47s
```

验证 Pod 状态,确认副本("pg-cluster-postgresql-0")及其 PVC 和 PV 已重建:
```bash
kubectl get po,pvc,pv -l app.kubernetes.io/instance=pg-cluster -n demo
```
示例输出:
```bash
NAME                          READY   STATUS    RESTARTS   AGE
pod/pg-cluster-postgresql-0   4/4     Running   0          5m6s
pod/pg-cluster-postgresql-1   4/4     Running   0          14m

NAME                                                 STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
persistentvolumeclaim/data-pg-cluster-postgresql-0   Bound    pvc-xxx   20Gi       RWO                                                   5m6s
persistentvolumeclaim/data-pg-cluster-postgresql-1   Bound    pvc-yyy   20Gi       RWO                                                   14m
```

连接到副本并验证数据恢复情况:

```bash
kubectl exec -ti -n demo pg-cluster-postgresql-0 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h 127.0.0.1
```

```sql
postgres=# \c test
test=# select * from t1;
 id |   name
----+----------
  1 | John Doe
(1 row)
```

### 非原地重建

**工作流程**:
1. 创建新 Pod(例如 'pg-cluster-postgresql-2')
2. 从主节点同步数据
3. 新副本就绪后终止原 Pod
通过创建新实例进行重建:

```yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: pg-rebuild-replica-non-inplace
  namespace: demo
spec:
  clusterName: pg-cluster
  force: true
  preConditionDeadlineSeconds: 0
  rebuildFrom:
  - componentName: postgresql
    inPlace: false
    instances:
    - name: pg-cluster-postgresql-0
  type: RebuildInstance
```

配置中的 "pg-cluster-postgresql-0" 表示需要修复的实例名称(Pod 名称)。

监控重建操作:
```bash
kubectl get ops pg-rebuild-replica-non-inplace -n demo -w
```
示例输出:
```bash
NAME                             TYPE              CLUSTER      STATUS    PROGRESS   AGE
pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   0/1        5s
pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   0/1        5s
pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   0/1        46s
pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Running   1/1        46s
pg-rebuild-replica-non-inplace   RebuildInstance   pg-cluster   Succeed   1/1        47s
```

```bash
kubectl get pods -l app.kubernetes.io/instance=pg-cluster -n demo -w
NAME                      READY   STATUS            RESTARTS   AGE
pg-cluster-postgresql-0   4/4     Running           0          53m
pg-cluster-postgresql-1   4/4     Running           0          2m52s
pg-cluster-postgresql-2   0/4     Pending           0          0s
pg-cluster-postgresql-2   0/4     Pending           0          4s
pg-cluster-postgresql-2   0/4     Init:0/4          0          4s
pg-cluster-postgresql-2   0/4     Init:1/4          0          5s
pg-cluster-postgresql-2   0/4     Init:2/4          0          6s
pg-cluster-postgresql-2   0/4     Init:3/4          0          7s
pg-cluster-postgresql-2   0/4     PodInitializing   0          8s
pg-cluster-postgresql-2   2/4     Running           0          9s
pg-cluster-postgresql-2   2/4     Running           0          12s
pg-cluster-postgresql-2   2/4     Running           0          14s
pg-cluster-postgresql-2   3/4     Running           0          14s
pg-cluster-postgresql-2   3/4     Running           0          16s
pg-cluster-postgresql-2   4/4     Running           0          3m30s
pg-cluster-postgresql-0   4/4     Terminating       0          4m3s
pg-cluster-postgresql-0   4/4     Terminating       0          4m3s
pg-cluster-postgresql-0   4/4     Terminating       0          4m3s
```

连接到新副本实例('pg-cluster-postgresql-2')并验证数据:

```bash
kubectl exec -ti -n demo pg-cluster-postgresql-2 -- env PGUSER=${NAME} PGPASSWORD=${PASSWD} psql -h 127.0.0.1
```

```sql
postgres=# \c test
test=# select * from t1;
 id |   name
----+----------
  1 | John Doe
(1 row)
```

### 从备份重建

以下配置展示通过指定 `backupName` 从已知备份恢复故障副本:

```yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: pg-rebuild-from-backup
  namespace: demo
spec:
  clusterName: pg-cluster
  force: true
  rebuildFrom:
  - backupName: <BACKUP_NAME> # 替换为实际备份名称
    componentName: postgresql
    inPlace: true
    instances:
    - name: pg-cluster-postgresql-1
  type: RebuildInstance
```

### 指定节点重建

如需将新副本重建到特定节点,可使用 `targetNodeName` 参数:

```yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: pg-rebuild-to-target-node
  namespace: demo
spec:
  clusterName: pg-cluster
  force: true
  rebuildFrom:
  - backupName: <BACKUP_NAME>
    componentName: postgresql
    inPlace: true
    instances:
    - name: pg-cluster-postgresql-1
      targetNodeName: <NODE_NAME> # 新 Pod 将被调度到指定节点
  type: RebuildInstance
```

## 总结
关键要点:
- 原地重建:在原 Pod 位置重建副本并成功恢复数据。
- 非原地重建:创建了新的副本实例并成功恢复数据。

两种方法均能有效恢复副本并确保数据一致性。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/_category_.yml
new file mode 100644
index 00000000..a7461723
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/04-operations/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: false
+collapsible: true
+label: 操作
+position: 4
diff --git 
a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..e0d9c6cf --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,166 @@ +--- +description: 了解如何创建并配置一个使用S3存储桶保存备份数据的KubeBlocks BackupRepo。 +keywords: +- KubeBlocks +- Backup +- BackupRepo +- S3 +- Kubernetes +sidebar_label: 创建备份仓库 +sidebar_position: 1 +title: 为KubeBlocks创建备份存储库 +--- +# 为 KubeBlocks 创建备份仓库(BackupRepo) + +本指南将引导您通过使用 S3 存储桶创建和配置 KubeBlocks 的备份仓库(BackupRepo)来存储备份数据。 + +## 前提条件 +- 已配置具有创建 S3 存储桶权限的 AWS CLI +- 拥有 Kubernetes 集群的 kubectl 访问权限 +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)并在 kb-system 命名空间中运行 + +## 步骤 1:创建 S3 存储桶 + +使用 AWS CLI 在目标区域创建 S3 存储桶。将 `` 替换为您所需的 AWS 区域(例如 `us-east-1`、`ap-southeast-1`)。 + +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +示例(us-west-1 区域): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +示例输出: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +验证: +通过列出存储桶内容确认创建成功(初始应为空): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## 步骤 2:为 AWS 凭证创建 Kubernetes Secret + +将您的 AWS 凭证安全地存储在 Kubernetes Secret 中。将 `` 和 `` 替换为实际的 AWS 凭证: + +```bash +# 创建 secret 保存访问密钥 +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## 步骤 3:配置备份仓库 + +BackupRepo 是用于定义备份存储仓库的自定义资源。本步骤将通过创建 BackupRepo 资源将您的 S3 存储桶与 KubeBlocks 集成。 + +应用以下 YAML 创建 BackupRepo。请替换字段值(如存储桶名称、区域)为您的具体配置。 + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # 将此备份仓库标记为默认仓库 + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # 当前 KubeBlocks 支持配置多种对象存储服务作为备份仓库 + # - s3 (Amazon Simple Storage Service) + # - oss (阿里云对象存储服务) + # - cos (腾讯云对象存储) + # - gcs (Google 云存储) + # - obs (华为云对象存储) + # - minio 及其他 S3 兼容服务 + storageProviderRef: s3 + # 指定备份仓库的访问方式 + # - Tool + # - Mount + accessMethod: Tool + # 指定此备份仓库创建的 PV 回收策略 + pvReclaimPolicy: Retain + # 指定此备份仓库创建的 PVC 容量 + volumeCapacity: 100Gi + # 存储 StorageProvider 的非敏感配置参数 + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # 引用存储 StorageProvider 凭证的 secret + credential: + # name 是在命名空间内引用 secret 资源的唯一标识 + name: s3-credential-for-backuprepo + # namespace 定义了 secret 名称必须唯一的空间范围 + namespace: kb-system +``` + +:::note + +注解 `dataprotection.kubeblocks.io/is-default-repo: 'true'` 将此 BackupRepo 标记为默认仓库。创建备份时,如果未指定特定 BackupRepo,KubeBlocks 将使用标记为默认的仓库。 + +::: + +## 步骤 4:验证备份仓库状态 + +检查 BackupRepo 状态以确保其正确初始化: + +```bash +kubectl get backuprepo s3-repo -w +``` + +预期状态变化: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +**故障排除** + +创建新 BackupRepo 时,KubeBlocks 会运行预检查作业来测试连接和读写能力。如果 BackupRepo 状态显示 `Failed`,请按照以下步骤排查: + +1. 检查 BackupRepo 状态和错误详情: + ```bash + kubectl describe backuprepo + ``` + +2. 
验证您的配置: + - 确认存储桶名称和区域与 S3 设置匹配 + - 再次检查 Secret 中的 AWS 凭证是否有效 + - 确保 KubeBlocks 与 AWS S3 之间存在网络连接 + +预检查作业必须成功完成,BackupRepo 才能变为 `Ready` 状态可供使用。 + +## 如何为其他存储提供商配置 BackupRepo + +KubeBlocks 支持以下存储提供商作为备份仓库: + +| 存储提供商 | 描述 | +|-----------------|-------------| +| OSS | 阿里云对象存储服务 | +| S3 | Amazon 简单存储服务 | +| COS | 腾讯云对象存储 | +| GCS | Google 云存储 | +| OBS | 华为云对象存储 | +| MinIO | 自托管对象存储 | +| S3-compatible | 其他 S3 兼容存储服务 | + +获取已安装的 `StorageProvider` 完整列表: +```bash +kubectl get storageproviders.dataprotection.kubeblocks.io +``` + +有关为其他存储提供商配置 BackupRepo 的详细说明,请参阅[备份仓库介绍](../../user_docs/backup-restore/backuprepo)。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..97072adc --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,254 @@ +--- +description: KubeBlocks 中通过 Backup API 和 OpsRequest API 创建并验证 PostgreSQL 集群完整备份的逐步指南 +keywords: +- PostgreSQL +- Full Backup +- KubeBlocks +- Kubernetes +- Database Backup +- XtraBackup +sidebar_label: 创建完整备份 +sidebar_position: 2 +title: 在KubeBlocks上为PostgreSQL集群创建完整备份 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 在KubeBlocks上为PostgreSQL创建全量备份 + +本指南演示如何通过以下两种方式为KubeBlocks上的PostgreSQL集群创建和验证全量备份(使用`pg-basebackup`方法): +- Backup API(直接备份操作) +- OpsRequest API(带增强监控的托管备份操作) + +我们将在[从全量备份恢复](./05-restoring-from-full-backup)指南中介绍如何从备份恢复数据。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署PostgreSQL集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 备份前提条件 + +创建备份前请确保: +1. 备份仓库已配置: + - 存在`BackupRepo`资源 + - 集群与仓库之间的网络连通性 + - `BackupRepo`状态显示"Ready" + +2. 集群准备就绪: + - 集群状态为"Running" + - 没有正在进行的操作(扩缩容、升级等) + +## 识别备份配置 + +检查可用的备份策略和计划: + +```bash +# 列出备份策略 +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=pg-cluster + +# 列出备份计划 +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +预期输出: +```bash +NAME BACKUP-REPO STATUS AGE +pg-cluster-postgresql-backup-policy Available 58m + +NAME STATUS AGE +pg-cluster-postgresql-backup-schedule Available 60m +``` + +查看BackupPolicy CR 'pg-cluster-postgresql-backup-policy'中支持的备份方法: + +```bash +kubectl get backuppolicy pg-cluster-postgresql-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` + +示例输出: +```bash +pg-basebackup +wal-g +wal-g-incremental +archive-wal +wal-g-archive +``` + + +**备份方法列表** + +KubeBlocks PostgreSQL支持以下备份方法: + +| 功能 | 方法 | 描述 | +|-------------------|-----------------|-------------| +| 全量备份 | pg-basebackup | 使用PostgreSQL工具`pg_basebackup`创建基础备份 | +| 全量备份 | wal-g | 使用`wal-g`创建全量备份(需要WAL-G配置) | +| 持续备份 | postgresql-pitr | 定期将PostgreSQL预写日志(WAL)文件上传到备份仓库,通常与`pg-basebackup`配合使用| +| 持续备份 | wal-g-archive | 定期将PostgreSQL预写日志(WAL)文件上传到备份仓库,通常与`wal-g`配合使用| + +## 通过Backup API备份 + +### 1. 
创建按需备份 + +`pg-basebackup`方法使用PostgreSQL原生工具`pg_basebackup`。 + +应用以下清单创建备份: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: pg-cluster-pg-basebackup + namespace: demo +spec: + backupMethod: pg-basebackup + backupPolicyName: pg-cluster-postgresql-backup-policy + # 决定当备份自定义资源(CR)被删除时,备份仓库中的备份内容是否应被删除。 + # 支持的值为`Retain`和`Delete`。 + # - `Retain`表示保留备份内容及其在备份仓库中的物理快照。 + # - `Delete`表示删除备份内容及其在备份仓库中的物理快照。 + deletionPolicy: Delete +``` + +### 2. 监控备份并验证完成 + +跟踪进度直到状态显示"Completed": + +```bash +kubectl get backup pg-cluster-pg-basebackup -n demo -w +``` + +示例输出: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +pg-cluster-pg-basebackup pg-cluster-postgresql-backup-policy pg-basebackup Completed 4722262 10s Delete 2025-05-16T02:53:45Z 2025-05-16T02:53:55Z +``` + +### 3. 验证备份 + +通过以下方式确认成功完成: +- 备份状态显示"Completed" +- 备份大小符合预期 +- 检查BackupRepo中的文件 + +`Backup`资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 备份文件大小 + + +## 通过OpsRequest API备份 + +### 1. 创建按需备份 + +使用OpsRequest API执行'pg-basebackup'方法备份: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pg-cluster-backup + namespace: demo +spec: + clusterName: pg-cluster + force: false + backup: + backupPolicyName: pg-cluster-postgresql-backup-policy + backupMethod: pg-basebackup + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + +### 2. 监控备份进度 + +#### 1. 监控操作状态 + +实时跟踪备份进度: +```bash +kubectl get ops pg-cluster-backup -n demo -w +``` + +预期输出: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +pg-cluster-backup Backup pg-cluster Succeed -/- 35s +``` + +- STATUS为'Succeed'表示备份操作成功完成。 + +#### 2. 验证完成 + +检查最终备份状态: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=pg-cluster-backup +``` + +示例输出: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-pg-cluster-20250516025810 pg-cluster-postgresql-backup-policy pg-basebackup Completed 4725590 10s Delete 2025-05-16T02:58:10Z 2025-05-16T02:58:20Z 2025-06-15T02:58:20Z +``` +- 备份状态应显示'Completed'。 + +### 3. 验证备份 + +通过以下方式确认成功完成: +- 备份状态显示"Completed" +- 备份大小符合预期 +- BackupRepo中的文件 + +`Backup`资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 其他元数据 + +## 故障排除 + +当遇到备份问题时,例如备份状态为`Failed`或长时间卡在`Running`状态,请按照以下步骤诊断和解决问题: + +1. 检查Backup资源中的错误事件或状态更新: + ```bash + kubectl describe backup -n demo + ``` + +2. 验证备份作业状态并检查其日志: + ```bash + kubectl -n demo get job -l app.kubernetes.io/instance=pg-cluster,app.kubernetes.io/managed-by=kubeblocks-dataprotection + ``` + 并检查Pod日志: + ```bash + kubectl -n demo logs + ``` +3. 查看KubeBlocks控制器日志获取详细错误信息: + ```bash + kubectl -n kb-system logs deploy/kubeblocks -f + ``` + +## 总结 + +本指南涵盖: +1. 部署PostgreSQL复制集群 +2. 使用以下方式创建全量备份: + - 直接Backup API + - 托管OpsRequest API +3. 
监控和验证备份 + +您的PostgreSQL数据现已安全备份,可在需要时进行恢复。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..a30d9be2 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,153 @@ +--- +description: 了解如何使用KubeBlocks部署PostgreSQL集群,并配置在S3存储库中保留的自动化定时备份。 +keywords: +- PostgreSQL +- Backup +- KubeBlocks +- Scheduled Backup +- Kubernetes +sidebar_label: 定时备份 +sidebar_position: 3 +title: 在KubeBlocks中设置带定时备份的PostgreSQL集群 +--- +# 在 KubeBlocks 中设置带定时备份的 PostgreSQL 集群 + +本指南演示如何使用 KubeBlocks 部署 PostgreSQL 集群,并配置保留到 S3 存储库的定时备份。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 备份前提条件 + +1. 已配置备份存储库: + - 配置好 `BackupRepo` + - 集群与存储库之间网络连通,`BackupRepo` 状态为 `Ready` + +2. 集群运行正常: + - 集群必须处于 `Running` 状态 + - 没有正在进行的操作(扩缩容、升级等) + +## 配置定时备份 + +KubeBlocks 在创建集群时会自动创建 `BackupSchedule` 资源。按照以下步骤启用和配置定时备份: + +1. 验证默认备份计划配置: + +```bash +kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml +``` + +示例输出: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: pg-cluster-postgresql-backup-policy + schedules: + - backupMethod: pg-basebackup + # ┌───────────── 分钟 (0-59) + # │ ┌───────────── 小时 (0-23) + # │ │ ┌───────────── 月份中的天 (1-31) + # │ │ │ ┌───────────── 月 (1-12) + # │ │ │ │ ┌───────────── 星期中的天 (0-6) (0=周日) + # │ │ │ │ │ + # 0 18 * * * + # 每天18:00(UTC时间)执行此任务 + cronExpression: 0 18 * * * # 根据需要更新cron表达式 + enabled: false # 设置为`true`以定期执行基础备份 + retentionPeriod: 7d # 根据需要设置保留期限 +``` + +2. 启用并自定义备份计划: +```bash +kubectl edit backupschedule pg-cluster-postgresql-backup-schedule -n demo +``` + +更新以下关键参数: +- `enabled`:设置为 `true` 以激活定时备份 +- `cronExpression`:使用 cron 语法配置备份频率 +- `retentionPeriod`:设置备份保留时长(如 `7d`、`1mo`) + +每日 UTC 时间 18:00 备份并保留 7 天的示例配置: +```yaml +schedules: +- backupMethod: pg-basebackup + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. 验证计划配置: +```bash +# 检查计划状态 +kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -w + +# 查看详细配置 +kubectl describe backupschedule pg-cluster-postgresql-backup-schedule -n demo +``` + +## 监控和管理备份 + +启用定时备份后,监控其执行情况并管理备份保留: + +1. 查看所有备份: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=pg-cluster +``` + +2. 检查备份详情: +```bash +kubectl describe backup -n demo +``` + +3. 验证备份文件: +- 状态应显示为 "Completed" +- 检查备份大小是否符合预期 +- 确认保留期限已应用 +- 验证存储库中存在备份文件 + +4. 管理备份保留: +- 手动删除旧备份: +```bash +kubectl delete backup -n demo +``` +- 修改保留期限: +```bash +kubectl edit backupschedule pg-cluster-postgresql-backup-schedule -n demo +``` + +## 清理 +要删除所有创建的资源,删除 PostgreSQL 集群及其命名空间: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南演示了: +1. PostgreSQL 自动备份配置 +2. 使用 cron 语法自定义计划 +3. 保留策略管理 +4. 
备份验证流程 + +您的 PostgreSQL 集群现在具备: +- 定期自动备份 +- 可配置的保留策略 +- 完整的备份历史记录跟踪 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx new file mode 100644 index 00000000..75b29996 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/04-scheduled-continuous-backup.mdx @@ -0,0 +1,329 @@ +--- +description: 了解如何在KubeBlocks中设置PostgreSQL集群,并启用定时全量备份与持续增量备份功能。 +keywords: +- PostgreSQL +- Backup +- PITR +- KubeBlocks +- Kubernetes +sidebar_label: 定时持续备份 +sidebar_position: 4 +title: 在KubeBlocks中设置支持定时持续备份的PostgreSQL集群 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 在 KubeBlocks 中配置启用定时持续备份的 PostgreSQL 集群 + +本指南将演示如何在 KubeBlocks 上配置 PostgreSQL 集群,实现以下功能: + +- 定时全量备份(基础备份) +- 持续 WAL(预写式日志)归档 +- 时间点恢复(PITR)能力 + +这种组合方案能以最小的恢复点目标(RPO)提供全面的数据保护。 + +## 什么是 PITR? +时间点恢复(Point-In-Time Recovery,简称 PITR)是一种通过结合全量备份与连续的二进制日志(binlog)/预写式日志(WAL)/归档日志(archive log)备份,将数据库恢复到特定时间点的技术。 + +关于如何从全量备份和连续二进制日志备份中恢复数据的详细操作,请参阅[从 PITR 恢复](restore-with-pitr.mdx)指南。 + +## 前提条件 + +在开始之前,请确保满足以下要求: + +- **环境准备**: + - 已部署并运行一个 Kubernetes 集群。 + - 已配置 `kubectl` CLI 工具,确保其可与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。具体安装步骤请参考链接中的说明。 + +- **命名空间准备**:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## 备份前提条件 + +1. 已配置备份仓库: + - 已配置 `BackupRepo` + - 集群与仓库间网络连通,且 `BackupRepo` 状态为 `Ready` + +2. 集群处于运行状态: + - 集群必须处于 `Running` 状态 + - 无正在进行的操作(如扩缩容、升级等) + +## 备份方法列表 + +KubeBlocks PostgreSQL 支持以下备份方法: + +| 功能特性 | 方法 | 描述说明 | +|-------------------|-------------------|----------| +| 全量备份 | pg-basebackup | 使用 PostgreSQL 工具 `pg_basebackup` 创建基础备份 | +| 全量备份 | wal-g | 使用 `wal-g` 工具创建全量备份(需配置 WAL-G) | +| 持续备份 | postgresql-pitr | 定期将 PostgreSQL 预写式日志(WAL)文件上传至备份仓库 | +| 持续备份 | wal-g-archive | 定期将 PostgreSQL 预写式日志(WAL)文件上传至备份仓库 | + +## 使用备份API部署PostgreSQL集群 + +部署一个2节点的PostgreSQL复制集群(1个主节点,1个从节点)并指定备份配置。 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + backup: + retentionPeriod: 7d + # 全量备份配置 + method: pg-basebackup # 全量备份方法名称 + enabled: true + cronExpression: 0 18 * * * # 全量备份调度时间 + # 持续备份配置 + continuousMethod: archive-wal # 持续备份方法 + pitrEnabled: true # 是否启用持续备份 + repoName: s3-repo # 指定备份仓库,若未指定则使用标注为`default`的BackupRepo +``` + +或者可以通过补丁方式为现有集群启用定时持续备份: +```bash +kubectl patch cluster pg-cluster -n demo --type='merge' -p=' +{ + "spec": { + "backup": { + "retentionPeriod": "7d", + "method": "pg-basebackup", + "enabled": true, + "cronExpression": "0 18 * * *", + "continuousMethod": "archive-wal", + "pitrEnabled": true, + "repoName": "s3-repo" + } + } +}' +``` + +**关键配置字段说明** + +| 字段 | 取值 | 说明 | +|-------|-------|-------------| +| `backup.enabled` | `true` | 启用定时备份功能 | +| `method` | `pg-basebackup` | 使用PostgreSQL原生工具进行全量备份 | +| `cronExpression` | `0 18 * * 
*` | 每天UTC时间18:00执行全量备份 |
| `retentionPeriod` | `7d` | 备份保留7天 |
| `repoName` | `s3-repo` | 备份仓库名称(S3兼容存储) |
| `pitrEnabled` | `true` | 启用WAL持续归档以实现时间点恢复(PITR) |
| `continuousMethod` | `archive-wal` | WAL持续归档方法,与上方集群配置保持一致 |

## 验证部署

监控集群状态直至其转为 Running(运行中)状态:
```bash
kubectl get cluster pg-cluster -n demo -w
```

示例输出:

```bash
NAME         CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS     AGE
pg-cluster   postgresql           Delete               Creating   50s
pg-cluster   postgresql           Delete               Running    4m2s
```
当集群状态显示为 Running 时,表示您的 PostgreSQL 集群已准备就绪可供使用。

## 监控持续备份

通过以下命令验证持续备份的运行状态:
```bash
# 获取持续备份
kubectl get backup -l app.kubernetes.io/instance=pg-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
# 获取处理持续备份的 StatefulSet
kubectl get sts -l app.kubernetes.io/instance=pg-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
# 获取处理持续备份的 Pod
kubectl get pod -l app.kubernetes.io/instance=pg-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
```

其中标签说明:
- `app.kubernetes.io/instance=pg-cluster` 用于通过集群名称标识资源
- `dataprotection.kubeblocks.io/backup-type=Continuous` 用于按备份类型(持续/全量)进行标识

## 验证备份配置

KubeBlocks 会自动创建一个 `BackupSchedule` 资源。请检查以下配置:

```bash
kubectl get backupschedule pg-cluster-postgresql-backup-schedule -n demo -oyaml
```

示例输出:
```yaml
apiVersion: dataprotection.kubeblocks.io/v1alpha1
kind: BackupSchedule
...
spec:
  backupPolicyName: pg-cluster-postgresql-backup-policy
  schedules:
  - backupMethod: pg-basebackup
    cronExpression: 0 18 * * *
    enabled: true
    retentionPeriod: 7d
  - backupMethod: archive-wal
    cronExpression: '*/5 * * * *'
    enabled: true
    name: archive-wal
    retentionPeriod: 7d
```

1. **全量备份** (pg-basebackup):
   - 创建完整的集群快照
   - 按配置的计划运行(默认每日执行)
   - 作为时间点恢复(PITR)的基础

2. **持续备份** (archive-wal):
   - 每5分钟归档一次WAL日志
   - 支持恢复到任意时间点
   - 需要全量备份作为起始点

## 配置 WAL-G 实现全量备份与持续备份

:::note

如需使用任何基于 wal-g 的备份方法(例如用于持续备份的 wal-g-archive 和用于全量备份的 wal-g),您需要先在集群中配置 wal-g。

配置 wal-g 需要预先触发一次 `config-wal-g` 备份。
:::

### 创建 PostgreSQL 集群

部署一个 2 节点的 PostgreSQL 复制集群(1 主节点,1 从节点):

```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: pg-cluster
  namespace: demo
spec:
  terminationPolicy: Delete
  clusterDef: postgresql
  topology: replication
  componentSpecs:
    - name: postgresql
      serviceVersion: 16.4.0
      labels:
        apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql
      disableExporter: true
      replicas: 2
      resources:
        limits:
          cpu: "0.5"
          memory: "0.5Gi"
        requests:
          cpu: "0.5"
          memory: "0.5Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```

### 使用 `config-wal-g` 备份方法配置 wal-g

配置 wal-g 需要创建一个采用 `config-wal-g` 备份方法的 `Backup` 资源。这是一种特殊备份方法,会为集群中的每个副本创建 wal-g 配置文件。

```yaml
apiVersion: dataprotection.kubeblocks.io/v1alpha1
kind: Backup
metadata:
  name: pg-cluster-config-wal-g
  namespace: demo
spec:
  backupMethod: config-wal-g
  backupPolicyName: pg-cluster-postgresql-backup-policy
  deletionPolicy: Delete
```

如果忘记配置 wal-g,您会看到备份任务显示为 `Failed`,且在备份 Pod 中出现以下错误:
```bash
fatal: unable to switch to directory: /home/postgres/pgdata/wal-g/env: file does not exist
```

### 设置定时备份

通过补丁方式为现有集群启用定时持续备份(注意:JSON 补丁内容中不能包含注释,各字段含义见下文说明):

```bash
kubectl patch cluster pg-cluster -n demo --type='merge' -p='
{
  "spec": {
    "backup": {
      "retentionPeriod": "7d",
      "method": "wal-g",
"enabled": true, # 启用全量备份 + "cronExpression": "0 18 * * *", # 全量备份计划 + "continuousMethod": "wal-g-archive", # 持续备份方法 + "pitrEnabled": true, # 是否启用持续备份。时间点恢复需要全量备份支持,因此需先启用全量备份 + "repoName": "s3-repo" # 指定备份仓库,若未指定则使用标注为 `default` 的 BackupRepo + } + } +}' +``` + +集群补丁完成后,您会立即看到一个采用 `wal-g-archive` 方法的持续备份任务开始运行。 +```bash +kubectl get backup -l app.kubernetes.io/instance=pg-cluster,dataprotection.kubeblocks.io/backup-type=Continuous -n demo +``` + +## 概述 + +本指南涵盖以下内容: +1. 使用 pg-basebackup 配置定时全量备份 +2. 通过 wal-g-archive 启用持续 WAL 归档 +3. 设置时间点恢复(PITR)能力 +4. 监控备份操作 + +核心优势: +- 定时全量备份确保定期恢复点 +- 持续 WAL 归档最大限度减少潜在数据丢失 +- 时间点恢复支持恢复到任意时间点 + diff --git a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx new file mode 100644 index 00000000..7621753d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/05-restoring-from-full-backup.mdx @@ -0,0 +1,132 @@ +--- +description: 了解如何通过Cluster Annotation或OpsRequest API,在KubeBlocks中从现有备份恢复一个新的PostgreSQL集群。 +keywords: +- PostgreSQL +- Restore +- Backup +- KubeBlocks +- Kubernetes +sidebar_label: 恢复 PostgreSQL 集群 +sidebar_position: 5 +title: 从备份恢复 PostgreSQL 集群 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 从备份恢复 PostgreSQL 集群 + +本指南演示在 KubeBlocks 中从备份恢复 PostgreSQL 集群的两种方法: + +1. **集群注解法** - 使用 YAML 注解的声明式简单方法 +2. **OpsRequest API 法** - 支持进度监控的高级操作控制 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 恢复准备:定位完整备份 +开始恢复前,请确保存在可用的完整备份。恢复过程将使用该备份创建新的 PostgreSQL 集群。 + +- 新集群可访问的备份仓库 +- 状态为 `Completed` 的有效完整备份 +- 充足的 CPU/内存资源 +- 足够的存储容量 + +查找可用完整备份: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=pg-cluster # 获取完整备份列表 +``` + +选择状态为 `Completed` 的任意一个备份。 + +### 步骤1:创建恢复集群 +通过恢复配置创建新集群: + +关键参数: +- `kubeblocks.io/restore-from-backup` 注解 +- 从上一步获取的备份名称和命名空间 + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-restored + namespace: demo + annotations: + # 注意:将 替换为实际备份名称 + kubeblocks.io/restore-from-backup: '{"postgresql":{"name":"","namespace":"demo","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + disableExporter: true + labels: + apps.kubeblocks.postgres.patroni/scope: pg-restored-postgresql + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 步骤3:监控恢复进度 +通过以下命令跟踪恢复进度: + +```bash +# 观察恢复状态 +kubectl get restore -n demo -w + +# 观察集群状态 +kubectl get cluster -n demo -w +``` + +:::note +目前不支持通过 `kbcli` 或 `OpsRequest` 恢复 PostgreSQL 集群。 + +您可以按照上述步骤通过 `kubectl` 恢复 PostgreSQL 集群。 + +::: + + +## 清理资源 +删除所有已创建的资源,包括 PostgreSQL 集群及其命名空间: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete cluster pg-cluster-restored -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南涵盖两种恢复方法: + +1. **集群注解法** - 基于 YAML 的简单方法 + - 获取系统凭证 + - 创建带恢复注解的集群 + - 监控进度 + +2. 
**OpsRequest API 法** - 增强的操作控制
   - 创建恢复请求
   - 跟踪操作状态
   - 验证完成情况
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx
new file mode 100644
index 00000000..75c614e3
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr.mdx
@@ -0,0 +1,162 @@
+---
+description: 了解如何在KubeBlocks上使用完整备份和持续WAL备份实现PostgreSQL集群的时间点恢复(PITR)。
+keywords:
+- PostgreSQL
+- Full Backup
+- PITR
+- KubeBlocks
+sidebar_label: 使用 PITR 恢复
+sidebar_position: 6
+title: 在KubeBlocks上通过时间点恢复(PITR)从备份还原PostgreSQL集群
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 在KubeBlocks中使用时间点恢复(PITR)从备份恢复PostgreSQL集群

本指南演示如何在KubeBlocks中为PostgreSQL集群执行时间点恢复(PITR),使用以下要素:
1. 完整基础备份
2. 连续的WAL(预写日志)备份
3. 两种恢复方法:
   - 集群注解(声明式方法)
   - OpsRequest API(操作控制)

PITR允许恢复到指定`timeRange`内的任意时间点。

## 前提条件

import Prerequisites from '../_tpl/_prerequisites.mdx'

<Prerequisites />

## 准备PITR恢复

要执行PITR恢复,需要同时具备完整备份和连续备份。如果尚未配置,请参考相关文档进行设置。

- 已完成完整备份
- 活跃的连续WAL备份
- 可访问的备份存储库
- 新集群的充足资源

可通过以下步骤识别完整备份和连续备份列表:

### 1. 验证连续备份
确认您有一个正在运行或已完成的连续WAL备份:

```bash
# 每个集群应有且仅有一个连续备份
kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Continuous,app.kubernetes.io/instance=pg-cluster
```

### 2. 检查备份时间范围
获取有效的恢复时间窗口:

```bash
# 注意:将 <CONTINUOUS_BACKUP_NAME> 替换为上一步得到的连续备份名称
kubectl get backup <CONTINUOUS_BACKUP_NAME> -n demo -o yaml | yq '.status.timeRange'
```

预期输出:
```text
start: "2025-05-07T09:12:47Z"
end: "2025-05-07T09:22:50Z"
```

### 3. 识别完整备份
查找符合条件的完整备份:
- 状态:已完成
- 完成时间晚于连续备份开始时间

```bash
# 应有一个或多个完整备份
kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=pg-cluster
```

:::tip
KubeBlocks会自动选择最近符合条件的完整备份作为基础。
确保存在满足以下条件的完整备份:其`stopTime`/`completionTimestamp`必须**晚于**连续备份的`startTime`,否则PITR恢复将失败。
:::

## 方案一:集群注解恢复

### 步骤1:创建恢复集群
在集群注解中配置PITR参数:

关键参数:
- `name`: 连续备份名称
- `restoreTime`: 目标恢复时间(需在备份`timeRange`范围内)

应用以下YAML配置:
```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: pg-restore-pitr
  namespace: demo
  annotations:
    # 注意:将 <CONTINUOUS_BACKUP_NAME> 替换为连续备份名称
    # 注意:将 <RESTORE_TIME> 替换为备份时间范围内的有效时间
    kubeblocks.io/restore-from-backup: '{"postgresql":{"name":"<CONTINUOUS_BACKUP_NAME>","namespace":"demo","restoreTime":"<RESTORE_TIME>","volumeRestorePolicy":"Parallel"}}'
spec:
  terminationPolicy: Delete
  clusterDef: postgresql
  topology: replication
  componentSpecs:
    - name: postgresql
      serviceVersion: "14.7.2"
      disableExporter: true
      labels:
        # 注意:根据实际情况更新标签
        apps.kubeblocks.postgres.patroni/scope: pg-restore-pitr-postgresql
      replicas: 1
      resources:
        limits:
          cpu: "0.5"
          memory: "0.5Gi"
        requests:
          cpu: "0.5"
          memory: "0.5Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: ""
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```

### 步骤2:监控恢复过程
通过以下命令跟踪恢复进度:

```bash
# 查看恢复状态
kubectl get restore -n demo -w

# 查看集群状态
kubectl get cluster -n demo -w
```

:::note
目前不支持通过`kbcli`或`OpsRequest`恢复PostgreSQL集群。

您可以按照上述步骤通过`kubectl`恢复PostgreSQL集群。

:::

## 清理资源
要删除所有创建的资源,请删除PostgreSQL集群及其命名空间:

```bash
kubectl delete cluster pg-cluster -n demo
kubectl delete cluster pg-restore-pitr -n demo
kubectl delete ns demo
```

## 总结
本指南演示了如何在KubeBlocks中使用完整备份和连续备份进行PostgreSQL集群的时间点恢复(PITR)。关键步骤包括:
- 验证可用备份
- 创建带有恢复配置的新PostgreSQL集群
- 监控恢复过程

通过此方法,您可以将PostgreSQL集群恢复到特定时间点,确保数据丢失最小化和业务连续性。
\ No newline at end of file
diff --git 
a/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/_category_.yml new file mode 100644 index 00000000..09845f2d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 备份与恢复 +position: 5 diff --git a/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx b/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx new file mode 100644 index 00000000..821c69eb --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,142 @@ +--- +description: 了解如何在KubeBlocks上部署PostgreSQL集群,并通过Kubernetes Secret安全配置自定义root密码。 +keywords: +- PostgreSQL +- KubeBlocks +- Custom Password +- Kubernetes +- Secrets +sidebar_label: 自定义密码 +sidebar_position: 1 +title: 在KubeBlocks上创建带自定义根密码的PostgreSQL集群 +--- +# 在 KubeBlocks 上创建带自定义密码的 PostgreSQL 集群 + +本指南演示如何在 KubeBlocks 中部署 PostgreSQL 集群,并将自定义的 root 密码存储在 Kubernetes Secret 中。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 复制集群 + +KubeBlocks 采用声明式方法管理 PostgreSQL 集群。以下是一个部署包含 2 个节点(1 个主节点,1 个副本节点)且使用自定义 root 密码的 PostgreSQL 集群配置示例。 + +### 步骤 1:为 root 账户创建 Secret + +自定义 root 密码存储在 Kubernetes Secret 中。通过应用以下 YAML 创建 Secret: + +```yaml +apiVersion: v1 +data: + password: Y3VzdG9tcGFzc3dvcmQ= # custompassword + username: cm9vdA== #root +immutable: true +kind: Secret +metadata: + name: custom-pg-secret + namespace: demo +``` +- password: 将 custompassword 替换为您想要的密码,并使用 Base64 编码(`echo -n "custompassword" | base64`)。 +- username: 默认的 PostgreSQL postgres 用户是 'root',编码为 'cm9vdA=='。 + +### 步骤 2:部署 PostgreSQL 集群 + +应用以下清单文件部署 PostgreSQL 集群,并引用步骤 1 中创建的 Secret 作为 root 账户凭据: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + systemAccounts: + - name: postgres + secretRef: + name: custom-pg-secret + namespace: demo + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `systemAccounts`: 覆盖引用的 `ComponentDefinition` 中定义的系统账户。 + +:::tip + +在 KubeBlocks PostgreSQL 插件中,预定义了一系列系统账户。只有这些账户才能通过新 Secret 进行自定义配置。 + +::: + +获取账户列表: +```bash +kubectl get cmpd postgresql-16-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +预期输出: +```bash +postgres +kbadmin +... 
+``` + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 连接 PostgreSQL 集群 + +KubeBlocks 会自动创建包含 PostgreSQL postgres 凭据的 Secret。通过以下命令获取凭据: + +```bash +kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d +custompassword +``` + +使用自定义密码连接集群的主节点: +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- env PGUSER=postgres PGPASSWORD=custompassword psql +``` + +## 清理资源 +删除 PostgreSQL 集群及其命名空间以移除所有创建的资源: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete secret custom-pg-secret -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中,您完成了以下操作: +- 创建 Kubernetes Secret 安全存储自定义的 PostgreSQL postgres 密码 +- 在 KubeBlocks 中部署了使用自定义 root 密码的 PostgreSQL 集群 +- 验证了部署并使用 PostgreSQL 客户端连接到集群主节点 + +使用 Kubernetes Secret 可以确保 PostgreSQL 集群凭据的安全管理,而 KubeBlocks 则简化了部署和管理流程。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx b/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx new file mode 100644 index 00000000..f2b3e37f --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/02-custom-password-generation-policy.mdx @@ -0,0 +1,122 @@ +--- +description: 了解如何在KubeBlocks中部署PostgreSQL集群,并通过为root用户配置自定义密码生成策略以增强安全性。 +keywords: +- PostgreSQL +- KubeBlocks +- Password Policy +- Kubernetes +- Security +sidebar_label: 自定义密码策略 +sidebar_position: 2 +title: 在KubeBlocks上部署采用自定义密码生成策略的PostgreSQL集群 +--- +# 在 KubeBlocks 上创建采用自定义密码生成策略的 PostgreSQL 集群 + +本指南将介绍如何在 KubeBlocks 中部署一个 PostgreSQL 集群,并为 root 用户设置自定义密码生成策略。通过定义特定的密码规则,您可以确保集群使用强密码凭证。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 复制集群 + +KubeBlocks 采用声明式方法管理 PostgreSQL 集群。以下是一个部署包含 2 个节点(1 主节点,1 副本节点)且采用自定义密码生成策略的 PostgreSQL 集群配置示例。 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + systemAccounts: + - name: postgres + passwordConfig: + length: 20 # 密码长度:20个字符 + numDigits: 4 # 至少包含4位数字 + numSymbols: 2 # 至少包含2个符号 + letterCase: MixedCases # 包含大小写字母 + symbolCharacters: '!' # 设置密码生成时允许使用的符号 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**关键字段说明** +- `systemAccounts`: 覆盖所引用 `ComponentDefinition` 中定义的系统账户 +- `passwordConfig`: 为 `postgres` 用户自定义密码生成策略 +- `symbolCharacters`: 设置密码生成时允许使用的符号 + +:::tip + +在 KubeBlocks PostgreSQL 插件中,预定义了一系列系统账户。只有这些账户才能通过新密钥进行自定义配置。 + +::: + +获取账户列表: +```bash +kubectl get cmpd postgresql-16-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +预期输出: +```bash +postgres +kbadmin +... 
+``` + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-pg-replication-cluster.mdx' + + + +## 连接 PostgreSQL 集群 + +KubeBlocks 会自动创建一个包含 PostgreSQL postgres 凭证的 Secret。通过以下命令获取凭证: + +```bash +PASSWORD=$(kubectl get secrets -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 -d) +``` + +使用自定义密码连接到集群主节点: +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- env PGUSER=postgres PGPASSWORD=$PASSWORD psql +``` + +## 清理资源 +删除 PostgreSQL 集群及其命名空间以移除所有创建的资源: + +```bash +kubectl delete cluster pg-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中,您完成了以下操作: +- 在 KubeBlocks 中部署了采用自定义密码生成策略的 PostgreSQL 集群 +- 验证了部署并通过 PostgreSQL 客户端连接到集群主节点 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/_category_.yml new file mode 100644 index 00000000..76712392 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/06-custom-secret/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 自定义 Secret +position: 6 diff --git a/docs/zh/preview/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx b/docs/zh/preview/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx new file mode 100644 index 00000000..f7f569b2 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/07-tls/01-tls-overview.mdx @@ -0,0 +1,200 @@ +--- +description: 了解如何在KubeBlocks上部署支持TLS加密的PostgreSQL集群以实现安全通信。本指南涵盖部署配置、安全连接及资源清理操作。 +keywords: +- KubeBlocks +- PostgreSQL +- Kubernetes +- TLS +- Secure Communication +sidebar_label: 支持 TLS 的 PostgreSQL 集群 +sidebar_position: 1 +title: 在KubeBlocks上部署支持TLS的PostgreSQL集群 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 在 KubeBlocks 上部署启用 TLS 的 PostgreSQL 集群 + +本指南演示如何使用 KubeBlocks 部署启用 TLS 加密的 PostgreSQL 集群。传输层安全协议(TLS)通过加密传输中的数据,确保 PostgreSQL 客户端与服务器之间的通信安全,防止敏感信息被截获。您将学习如何: + +- 部署启用 TLS 的 PostgreSQL 集群 +- 使用不同 TLS 模式建立安全连接 +- 验证 TLS 配置 +- 测试完成后清理资源 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 PostgreSQL 复制集群 + +KubeBlocks 采用声明式方式管理 PostgreSQL 集群。以下是一个启用 TLS 的 PostgreSQL 集群(1 主 1 从)部署配置示例: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + tls: true # 启用 TLS 加密 + issuer: + name: KubeBlocks # 使用 KubeBlocks 内置证书颁发机构 + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**关键配置字段**: +- `tls: true`:为所有连接启用 TLS 加密 +- `issuer: KubeBlocks`:使用 KubeBlocks 内置证书颁发机构(也可选择 `UserProvided` 使用自定义证书) + +## 验证部署 + +监控集群状态直至变为 `Running`: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` + +验证 PostgreSQL 实例上的 TLS 配置: +```sql +postgres=# show ssl; + ssl +----- + on +(1 row) + +postgres=# show ssl_ca_file; + ssl_ca_file +--------------------- + /etc/pki/tls/ca.pem +(1 row) + +postgres=# show ssl_cert_file; + ssl_cert_file +---------------------- + /etc/pki/tls/cert.pem +(1 row) + 
+postgres=# show ssl_key_file;
     ssl_key_file
---------------------
 /etc/pki/tls/key.pem
(1 row)
```

验证 KubeBlocks 生成的 TLS 证书:
```bash
kubectl get secret -l app.kubernetes.io/instance=pg-cluster -n demo | grep tls
```

预期输出:
```bash
pg-cluster-postgresql-tls-certs   Opaque   3   24m
```

## 安全访问 PostgreSQL 集群

### 步骤 1:获取凭据

KubeBlocks 创建了包含 PostgreSQL 凭据的 Secret:
```bash
NAME=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 --decode)
PASSWD=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 --decode)
```

### 步骤 2:使用 TLS 连接

将 PostgreSQL 端口转发到本地:
```bash
kubectl port-forward svc/pg-cluster-postgresql-postgresql 5432:5432 -n demo
```

<Tabs>

<TabItem value="require" label="sslmode=require" default>

```bash
psql "host=127.0.0.1 dbname=postgres user=${NAME} password=${PASSWD} sslmode=require"
```

示例输出:
```bash
SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off, ALPN: none)
Type "help" for help.

postgres=#
```

</TabItem>

<TabItem value="verify-full" label="sslmode=verify-full">

1. 获取并保存根证书:
```bash
kubectl get -n demo secrets pg-cluster-postgresql-tls-certs -oyaml | yq '.data."ca.pem"' | base64 -d > /tmp/ca.crt
```

2. 使用证书验证连接:
```bash
psql "host=127.0.0.1 dbname=postgres user=${NAME} password=${PASSWD} sslmode=verify-full sslrootcert=/tmp/ca.crt"
```

示例输出:
```bash
SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off, ALPN: none)
Type "help" for help.

postgres=#
```

</TabItem>

</Tabs>

## 清理

删除所有教程资源:
```bash
kubectl delete cluster pg-cluster -n demo
kubectl delete ns demo
```

## 总结

在本指南中,您学会了如何:

1. 使用 KubeBlocks 部署启用 TLS 加密的 PostgreSQL 集群
2. 验证 TLS 配置和证书生成
3. 使用不同 TLS 模式建立安全连接:
   - `require`:基础加密
   - `verify-full`:完整证书验证
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx b/docs/zh/preview/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx
new file mode 100644
index 00000000..b499b172
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/07-tls/02-tls-custom-cert.mdx
@@ -0,0 +1,207 @@
+---
+description: 使用自定义TLS证书在KubeBlocks上部署PostgreSQL集群的逐步指南。涵盖证书生成、集群部署及连接验证,确保通信安全。
+keywords:
+- KubeBlocks
+- PostgreSQL
+- Kubernetes
+- TLS
+- Security
+- Custom Certificates
+sidebar_label: 使用自定义TLS的PostgreSQL集群
+sidebar_position: 2
+title: 在KubeBlocks上部署带自定义TLS证书的PostgreSQL集群
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 在 KubeBlocks 上部署使用自定义 TLS 证书的 PostgreSQL 集群

本指南演示如何使用 KubeBlocks 部署带有**自定义 TLS 证书**的 PostgreSQL 集群。通过提供自有证书,您可以完全控制客户端-服务端加密通信的安全配置。

## 先决条件

import Prerequisites from '../_tpl/_prerequisites.mdx'

<Prerequisites />

## 生成证书

使用 OpenSSL 生成所需证书:

1. **根证书 (CA)**
```bash
# 生成受密码保护的CA私钥
openssl genrsa -aes256 -out ca-key.pem 4096

# 创建自签名根证书(10年有效期)
openssl req -x509 -new -nodes -key ca-key.pem -sha256 -days 3650 -out ca.pem
# 输入证书信息(如 Common Name = "PostgreSQL Root CA")
```

2. **服务器证书**
```bash
# 生成服务器私钥
openssl genrsa -out server-key.pem 4096

# 创建证书签名请求
openssl req -new -key server-key.pem -out server-req.pem
# 输入服务器信息(Common Name必须匹配PostgreSQL服务地址)

# 使用CA签发服务器证书(10年有效期)
openssl x509 -req -in server-req.pem -CA ca.pem -CAkey ca-key.pem \
  -CAcreateserial -out server-cert.pem -days 3650 -sha256
```

:::important

Common Name (CN) 必须与 PostgreSQL 服务地址匹配(例如服务名称 `pg-cluster-postgresql-postgresql`)。

:::

3. 
**验证证书** +```bash +openssl verify -CAfile ca.pem server-cert.pem +# 示例输出:server-cert.pem: OK +``` + +## 创建 Kubernetes Secret + +将证书存入 Kubernetes Secret 供集群访问: + +```bash +kubectl create secret generic postgresql-tls-secret \ + --namespace=demo \ + --from-file=ca.crt=ca.pem \ + --from-file=tls.crt=server-cert.pem \ + --from-file=tls.key=server-key.pem \ + --type=kubernetes.io/tls +``` + +## 部署 PostgreSQL 集群 + +部署启用 TLS 的 2 节点 PostgreSQL 集群(1 主节点,1 副本): + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pg-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + tls: true + issuer: + name: UserProvided + secretRef: + name: postgresql-tls-secret + namespace: demo + ca: ca.crt + cert: tls.crt + key: tls.key + labels: + apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**关键配置**: +- `tls: true`:启用 TLS 加密 +- `issuer.name: UserProvided`:指定使用自定义证书 +- `issuer.secretRef`:关联证书 Secret + +## 验证部署 + +监控集群状态直至变为 Running: + +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +在副本节点验证 SSL 配置: + +```sql +postgres=# show ssl; + ssl +----- + on + +postgres=# show ssl_ca_file; + ssl_ca_file +--------------------- + /etc/pki/tls/ca.pem + +postgres=# show ssl_cert_file; + ssl_cert_file +---------------------- + /etc/pki/tls/cert.pem + +postgres=# show ssl_key_file; + ssl_key_file +---------------------- + /etc/pki/tls/key.pem +``` + +## 访问 PostgreSQL 集群 + +### 获取凭据 + +```bash +NAME=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.username}' | base64 --decode) +PASSWD=$(kubectl get secret -n demo pg-cluster-postgresql-account-postgres -o jsonpath='{.data.password}' | base64 --decode) +``` + +### 安全连接 + + + + +```bash +kubectl port-forward svc/pg-cluster-postgresql-postgresql 5432:5432 -n demo + +psql "host=127.0.0.1 dbname=postgres user=${NAME} password=${PASSWD} sslmode=require" +# 输出显示SSL连接详情 +``` + + + + + +```bash +kubectl exec -it -n demo pg-cluster-postgresql-0 -c postgresql -- \ + env PGUSER=${NAME} PGPASSWORD=${PASSWD} \ + psql 'host=pg-cluster-postgresql-postgresql sslmode=verify-full sslrootcert=/etc/pki/tls/ca.pem' +# 输出显示SSL连接详情 +``` + + + + +## 总结 + +本指南中您完成了: +1. 生成自签名CA和服务器证书 +2. 将证书存入Kubernetes Secret +3. 部署启用TLS的PostgreSQL集群 +4. 
验证安全连接 + +使用自定义TLS证书可确保PostgreSQL客户端与服务端之间的通信加密,保护传输中的敏感数据。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-postgresql/07-tls/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/07-tls/_category_.yml new file mode 100644 index 00000000..18f17cf0 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/07-tls/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: TLS(传输层安全协议) +position: 7 diff --git a/docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..e0fae833 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,271 @@ +--- +description: 了解如何在KubeBlocks中通过Prometheus Operator为PostgreSQL集群配置可观测性。设置监控并通过Grafana实现指标可视化。 +keywords: +- KubeBlocks +- PostgreSQL +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: PostgreSQL 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 PostgreSQL 集群可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 PostgreSQL + +本指南演示如何在 KubeBlocks 中为 PostgreSQL 集群配置全面的监控方案,包含以下组件: + +1. Prometheus Operator 用于指标采集 +2. 内置 PostgreSQL exporter 用于指标暴露 +3. Grafana 用于可视化展示 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 安装监控套件 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 PostgreSQL 集群 + +import CreatePGCluster from '../_tpl/_create-pg-replication-cluster.mdx' + + + +**关键监控配置** +- `disableExporter: false` 启用内置指标导出器 +- 导出器以边车容器形式运行于每个 PostgreSQL Pod 中 +- 通过 9187 端口采集 PostgreSQL 指标 + +## 验证部署 +监控集群状态直至其转为 Running(运行中)状态: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +示例输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` +当集群状态显示为 Running 时,表示您的 PostgreSQL 集群已准备就绪可供使用。 + +## 配置指标收集 + +### 1. 验证Exporter端点 +确认指标已暴露: + +```bash +kubectl get po pg-cluster-postgresql-0 -n demo -oyaml | \ + yq '.spec.containers[] | select(.name=="exporter") | .ports' +``` + +示例输出: +```yaml +- containerPort: 9187 + name: http-metrics # 用于PodMonitor + protocol: TCP +``` + +测试指标端点: + +```bash +kubectl -n demo exec -it pods/pg-cluster-postgresql-0 -- \ + curl -s http://127.0.0.1:9187/metrics | head -n 50 +``` + +### 2. 
创建PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: pg-cluster-pod-monitor + namespace: demo + labels: # 必须与'prometheus.spec.podMonitorSelector'中的设置匹配 + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # 定义从关联的Kubernetes 'Pod'对象传输到采集指标的标签 + # 根据实际需求设置标签 + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: http-metrics # 必须与exporter端口名称匹配 + scheme: http + namespaceSelector: + matchNames: + - demo # 目标命名空间 + selector: + matchLabels: + app.kubernetes.io/instance: pg-cluster + apps.kubeblocks.io/component-name: postgresql +``` +**PodMonitor配置指南** + +| 参数 | 是否必填 | 说明 | +|-----------|----------|-------------| +| `port` | 是 | 必须与exporter端口名称('http-metrics')匹配 | +| `namespaceSelector` | 是 | 指定PostgreSQL运行的命名空间 | +| `labels` | 是 | 必须与Prometheus的podMonitorSelector匹配 | +| `path` | 否 | 指标端点路径(默认:/metrics) | +| `interval` | 否 | 采集间隔(默认:30s) | + +## 验证监控配置 + +### 1. 检查 Prometheus 监控目标 +转发并访问 Prometheus 用户界面: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +在浏览器中打开: +http://localhost:9090/targets + +检查是否存在与 PodMonitor 对应的抓取任务(任务名称为 'demo/pg-cluster-pod-monitor')。 + +预期状态: +- 目标状态应为 UP(正常运行)。 +- 目标的标签应包含 podTargetLabels 中定义的标签(例如 'app_kubernetes_io_instance')。 + +### 2. 测试指标收集 +验证指标是否正在被采集: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="pg-cluster"}' | jq +``` + +示例输出: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "pg-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "postgresql", + "apps_kubeblocks_io_pod_name": "pg-cluster-postgresql-1", + "container": "exporter", + "endpoint": "http-metrics", + "instance": "10.244.0.129:9187", + "job": "demo/pg-cluster-pod-monitor", + "namespace": "demo", + "pod": "pg-cluster-postgresql-1" + }, + "value": [ + 1747377596.792, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "pg-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "postgresql", + "apps_kubeblocks_io_pod_name": "pg-cluster-postgresql-0", + "container": "exporter", + "endpoint": "http-metrics", + "instance": "10.244.0.128:9187", + "job": "demo/pg-cluster-pod-monitor", + "namespace": "demo", + "pod": "pg-cluster-postgresql-0" + }, + "value": [ + 1747377596.792, + "1" + ] + } + ] + } +} +``` + +## 在 Grafana 中可视化监控数据 + +### 1. 访问 Grafana +通过端口转发登录 Grafana: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +在浏览器中访问 `http://localhost:3000`,使用默认凭据登录: +- 用户名:'admin' +- 密码:'prom-operator'(默认值) + +### 2. 导入仪表板 +导入 KubeBlocks PostgreSQL 监控仪表板: + +1. 在 Grafana 中导航至 "+" → "Import" +2. 
选择以下任一方式导入:
   - 粘贴仪表板 URL:
     `https://raw.githubusercontent.com/apecloud/kubeblocks-addons/main/addons/postgresql/dashboards/postgresql.json`
   - 或直接上传 JSON 文件

**仪表板包含:**
- 集群状态概览
- 查询性能指标
- 连接数统计
- 复制健康状况

![postgresql-monitoring-grafana-dashboard.png](/img/docs/en/postgresql-monitoring-grafana-dashboard.png)

## 删除
要删除所有已创建的资源,请运行以下命令(注意应先删除命名空间内的 PodMonitor,再删除命名空间本身):
```bash
kubectl delete cluster pg-cluster -n demo
kubectl delete podmonitor pg-cluster-pod-monitor -n demo
kubectl delete ns demo
```

## 总结
在本教程中,我们使用 Prometheus Operator 为 KubeBlocks 中的 PostgreSQL 集群配置了可观测性方案。
通过配置 `PodMonitor` 资源,我们实现了 Prometheus 对 PostgreSQL exporter 指标的自动抓取。
最终在 Grafana 中完成了这些指标的可视化。该方案为监控 PostgreSQL 数据库的健康状态和性能表现提供了有效的数据洞察。

diff --git a/docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/_category_.yml
new file mode 100644
index 00000000..02550e32
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/08-monitoring/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: true
+collapsible: true
+label: 监控
+position: 8
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/_category_.yml
new file mode 100644
index 00000000..1c23cd4d
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: true
+collapsible: true
+label: KubeBlocks for PostgreSQL 社区版
+position: 12
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_category_.yml
new file mode 100644
index 00000000..cd891c2b
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_category_.yml
@@ -0,0 +1,5 @@
+collapsed: false
+collapsible: true
+hidden: true
+label: 模板 (tpl)
+position: 100
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx
new file mode 100644
index 00000000..bdc0e94f
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_create-pg-replication-cluster.mdx
@@ -0,0 +1,37 @@
+KubeBlocks 采用声明式方法来管理 PostgreSQL 集群。以下是一个部署包含 2 个副本(1 个主节点,1 个从节点)的 PostgreSQL 集群的配置示例。

应用以下 YAML 配置来部署集群:

```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: pg-cluster
  namespace: demo
spec:
  terminationPolicy: Delete
  clusterDef: postgresql
  topology: replication
  componentSpecs:
    - name: postgresql
      serviceVersion: 16.4.0
      labels:
        apps.kubeblocks.postgres.patroni/scope: pg-cluster-postgresql
      disableExporter: true
      replicas: 2
      resources:
        limits:
          cpu: "0.5"
          memory: "0.5Gi"
        requests:
          cpu: "0.5"
          memory: "0.5Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx
new file mode 100644
index 00000000..69249862
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_prerequisites.mdx
@@ -0,0 +1,11 @@
+在继续之前,请确保满足以下条件:
+- 环境准备:
+  - 已有一个运行中的 Kubernetes 集群。
+  - 已配置 kubectl CLI 工具与集群通信。
+  - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。具体安装步骤请参考链接指引。
+- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间:

```bash
kubectl create ns demo
namespace/demo created
```
\ No newline at end of file
diff --git 
a/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx new file mode 100644 index 00000000..7c161e81 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-postgresql/_tpl/_verify-pg-replication-cluster.mdx @@ -0,0 +1,18 @@ +监控集群状态直至其转为运行状态: +```bash +kubectl get cluster pg-cluster -n demo -w +``` + +预期输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +pg-cluster postgresql Delete Creating 50s +pg-cluster postgresql Delete Running 4m2s +``` +当集群状态显示为 Running 时,表示您的 PostgreSQL 集群已准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/01-overview.mdx b/docs/zh/preview/kubeblocks-for-qdrant/01-overview.mdx new file mode 100644 index 00000000..038b4996 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/01-overview.mdx @@ -0,0 +1,63 @@ +--- +description: 了解KubeBlocks Qdrant插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本。 +keywords: +- Qdrant +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks Qdrant 插件概述 +--- +# KubeBlocks Qdrant 插件概述 + +Qdrant 是一个开源的向量搜索引擎和向量数据库,专为高效相似性搜索和高维向量存储而设计。它针对AI驱动的应用场景进行了优化,例如语义搜索、推荐系统以及大语言模型(LLMs)中的检索增强生成(RAG)。 + +## 核心特性 + +### 生命周期管理 + +KubeBlocks 通过全面的生命周期管理简化 Qdrant 运维: + +| 特性 | 描述 | +|------------------------------|-----------------------------------------------------------------------------| +| **水平扩展** | 动态增减副本来调整容量 | +| **垂直扩展** | 调整 Qdrant 实例的 CPU/内存资源 | +| **存储卷扩容** | 无需停机即可动态增加存储容量 | +| **重启操作** | 以最小影响实现集群受控重启 | +| **启动/停止** | 临时暂停/恢复集群操作 | +| **密码管理** | 支持在创建时设置并管理 Qdrant 集群的自定义 root 密码 | +| **自定义服务** | 暴露专用的数据库端点 | +| **副本管理** | 安全地停用或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 放置和资源分配 | +| **监控** | 集成 Prometheus 指标采集 | +| **日志** | 通过 Loki Stack 实现集中式日志管理 | + + +### 备份与恢复 + +KubeBlocks 支持 Qdrant 的多种备份策略: + +| 特性 | 方法 | 描述 | +|-------------|--------|------------| +| 全量备份 | 数据文件 | 使用 HTTP API `snapshot` 为所有集合创建快照 | + +### 支持版本 + +KubeBlocks Qdrant 插件支持以下 Qdrant 版本: + +| 主版本 | 支持的次版本 | +|---------------|--------------------------------| +| 1.5 | 1.5.0 | +| 1.7 | 1.7.3 | +| 1.8 | 1.8.1,1.8.4 | +| 1.10| 1.10.0 | + +可通过以下命令查看支持的版本列表: +```bash +kubectl get cmpv qdrant +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-qdrant/02-quickstart.mdx new file mode 100644 index 00000000..c8e57ead --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/02-quickstart.mdx @@ -0,0 +1,443 @@ +--- +description: 使用KubeBlocks部署和管理Qdrant ReplicaSet集群的完整指南,涵盖安装、配置及运维最佳实践。 +keywords: +- Kubernetes +- Qdrant +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: Qdrant 快速入门 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Qdrant 快速入门 + +本指南提供了使用 **KubeBlocks Qdrant 插件** 部署和管理 Qdrant ReplicaSet 集群的完整流程,内容包括: +- 系统前提条件与插件安装 +- 集群创建与配置 +- 启停操作等运维管理 +- 连接方式与集群监控 + +## 前置条件 + +### 系统要求 + +开始前请确保您的环境满足以下要求: + +- 可用的 Kubernetes 集群(推荐 v1.21+ 版本) +- 已安装并配置好集群访问权限的 `kubectl` v1.21+ 工具 +- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/)) +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +### 验证 Qdrant 插件 + +Qdrant 插件默认包含在 KubeBlocks 中。检查其状态: + +```bash +helm list -n kb-system | grep qdrant +``` + +
+示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-qdrant kb-system 1 2025-05-21 deployed qdrant-1.0.0 +``` +
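
除了 Helm 之外,也可以直接查看 KubeBlocks 的 Addon 资源来确认插件的注册与启用情况。以下是一个基于 `extensions.kubeblocks.io` Addon CRD 的示意命令,资源组名与输出列可能随 KubeBlocks 版本略有差异:

```bash
# 查看 qdrant 插件对应的 Addon 资源(示意命令,具体资源名以实际环境为准)
kubectl get addons.extensions.kubeblocks.io qdrant
```
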
+ +如果插件未启用,请选择安装方式: + + + + + ```bash + # 添加 Helm 仓库 + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # 中国大陆用户如果 GitHub 访问困难或缓慢,可使用以下镜像仓库: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # 更新 Helm 仓库 + helm repo update + # 搜索可用插件版本 + helm search repo kubeblocks/qdrant --versions + # 安装指定版本(将 替换为您选择的版本号) + helm upgrade -i kb-addon-qdrant kubeblocks-addons/qdrant --version -n kb-system + ``` + + + + + ```bash + # 添加索引(kubeblocks 默认已添加) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # 更新索引 + kbcli addon index update kubeblocks + # 更新所有索引 + kbcli addon index update --all + ``` + + 搜索并安装插件: + + ```bash + # 搜索插件 + kbcli addon search qdrant + # 安装指定版本插件(将 替换为您选择的版本号) + kbcli addon install qdrant --version + ``` + **示例输出:** + ```bash + ADDON VERSION INDEX + qdrant 0.9.0 kubeblocks + qdrant 0.9.1 kubeblocks + qdrant 1.0.0 kubeblocks + ``` + 启用或禁用插件: + + ```bash + # 启用插件 + kbcli addon enable qdrant + # 禁用插件 + kbcli addon disable qdrant + ``` + + + + +:::note +**版本兼容性说明** + +请始终确保 Qdrant 插件版本与您的 KubeBlocks 主版本匹配,以避免兼容性问题。 + +::: + +### 验证支持的 Qdrant 版本 + +**列出可用 Qdrant 版本:** + +```bash +kubectl get cmpv qdrant +``` +
+示例输出 +```text +NAME VERSIONS STATUS AGE +qdrant 1.14.0,1.10.0,1.8.4,1.8.1,1.7.3,1.5.0 Available 26d +``` +
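
如果需要在脚本中复用该版本列表,可以直接用 jsonpath 提取。下面的命令假设版本列表记录在 ComponentVersion 的 `status.serviceVersions` 字段中(即上表 VERSIONS 列的来源),字段路径请以实际 CRD 为准:

```bash
# 以逗号分隔的字符串形式输出支持的服务版本(字段路径为假设值)
kubectl get cmpv qdrant -o jsonpath='{.status.serviceVersions}'
```
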
+ +**检查 ComponentDefinitions 的版本兼容性** + +**步骤 1.** 获取与指定 `ComponentVersion` 关联的 `ComponentDefinition` 列表 + +```bash +kubectl get cmpv qdrant -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+示例输出 +```text +qdrant-1.0.0 +``` +
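
拿到兼容的 `ComponentDefinition` 名称后,可以进一步查看其定义细节(例如运行时镜像、系统账户等),帮助确认该定义满足需求。以下命令沿用文档前文使用的 `cmpd` 缩写:

```bash
# 查看该 ComponentDefinition 的完整定义(输出较长,这里仅截取开头部分)
kubectl get cmpd qdrant-1.0.0 -oyaml | head -n 40
```
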
+ +**步骤 2.** 获取与指定 `ComponentVersion` 关联的 `ComponentDefinition` 列表 + +```bash +kubectl get cmpv qdrant -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("^qdrant"))) | .releases[]' +``` + +该命令返回与名为 `qdrant` 的 `ComponentDefinition` 兼容的版本: + +
+示例输出 +```text +1.5.0 +1.7.3 +1.8.1 +1.8.4 +1.10.0 +1.14.0 +``` +
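
确定要使用的版本后,可以在真正创建集群前用服务端 dry-run 预检清单,确认所选 `serviceVersion` 等字段能通过校验。以下命令假设集群清单已保存在本地的 cluster.yaml 文件中:

```bash
# 服务端校验集群清单,不会实际创建任何资源
kubectl apply -f cluster.yaml --dry-run=server
```
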
+ +### 存储配置 + +Qdrant 需要持久化存储。验证可用选项: + +```bash +kubectl get storageclass +``` + +推荐的存储特性: +- 最小 20Gi 容量 +- ReadWriteOnce 访问模式 +- 支持存储卷扩容 +- 满足工作负载的性能需求 + +## 部署 Qdrant 集群 + +使用默认配置部署基础版 Qdrant 集群: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/qdrant/cluster.yaml +``` + +该操作将创建: +- 包含 3 个副本的 Qdrant 集群 +- 默认资源分配(0.5 CPU,0.5Gi 内存) +- 20Gi 持久化存储 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + # 指定集群删除时的行为策略 + # 可选值: [DoNotTerminate, Delete, WipeOut] (KB 0.9 起弃用 `Halt`) + # - `DoNotTerminate`: 阻止集群删除,确保所有资源保留完整 + # - `Delete`: 在 `Halt` 策略基础上同时移除 PVC,实现包括持久化数据在内的彻底清理 + # - `WipeOut`: 激进策略,将删除所有集群资源(包括外部存储中的卷快照和备份)。该操作会导致数据完全清除,应谨慎使用(建议仅在非生产环境使用以避免不可逆数据丢失) + terminationPolicy: Delete + # 指定创建集群时使用的 ClusterDefinition 名称 + # 注意:请勿修改此字段 + # 必须设置为 `qdrant` 才能创建 Qdrant 集群 + clusterDef: qdrant + # 指定创建集群时使用的 ClusterTopology 名称 + # 可选值: [cluster] + topology: cluster + componentSpecs: + - name: qdrant + # 指定该组件需要部署的服务版本 + # 可选值: [1.10.0,1.5.0,1.7.3,1.8.1,1.8.4] + serviceVersion: 1.10.0 + # 按需调整副本数 + # 推荐值: [3,5,7] + replicas: 3 + # 指定组件所需的计算资源 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # 定义组件存储需求的持久卷声明模板列表 + volumeClaimTemplates: + # 引用 componentDefinition.spec.runtime.containers[*].volumeMounts 中定义的挂载卷名称 + - name: data + spec: + # 声明所需的存储类名称 + # 若未指定,默认使用标注了 `storageclass.kubernetes.io/is-default-class=true` 的存储类 + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # 按需设置存储容量 + storage: 20Gi +``` + +更多 API 字段说明请参阅 [API 参考文档](../user_docs/references/api-reference/cluster)。 + +### 创建指定版本的 Qdrant 集群 + +在应用配置前,通过设置 `spec.componentSpecs.serviceVersion` 字段(主版本.次版本格式)可创建特定版本的集群: + + + + ```yaml + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 # 可选值: [1.10.0,1.5.0,1.7.3,1.8.1,1.8.4] + ``` + + + +## 验证集群状态 + +当部署一个包含3个副本的Qdrant集群时,请通过以下方式确认部署成功: + +1. 集群阶段显示为`Running`(运行中) +2. 
所有Pod均处于正常运行状态 + +可通过以下任一方式检查状态: + + + +```bash +kubectl get cluster qdrant-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Creating 27s +qdrant-cluster qdrant Delete Running 64s + +kubectl get pods -l app.kubernetes.io/instance=qdrant-cluster -n demo +qdrant-cluster-qdrant-0 2/2 Running 0 92s +qdrant-cluster-qdrant-1 2/2 Running 0 77s +qdrant-cluster-qdrant-2 2/2 Running 0 63s +``` + + + + + 若已安装`kbcli`,可查看完整的集群信息: + +```bash +kbcli cluster describe qdrant-cluster -n demo + +名称: qdrant-cluster 创建时间: 2025年5月18日 23:05 UTC+0800 +命名空间 集群定义 拓扑结构 状态 终止策略 +demo qdrant cluster 运行中 Delete + +访问端点: +组件 内部地址 外部地址 +qdrant qdrant-cluster-qdrant-qdrant.demo.svc.cluster.local:6333 <无> + qdrant-cluster-qdrant-qdrant.demo.svc.cluster.local:6334 + +拓扑信息: +组件 服务版本 实例名称 角色 状态 可用区 节点 创建时间 +qdrant 1.10.0 qdrant-cluster-qdrant-0 <无> 运行中 zone-x x.y.z 2025年5月18日 23:05 UTC+0800 +qdrant 1.10.0 qdrant-cluster-qdrant-1 <无> 运行中 zone-x x.y.z 2025年5月18日 23:06 UTC+0800 +qdrant 1.10.0 qdrant-cluster-qdrant-2 <无> 运行中 zone-x x.y.z 2025年5月18日 23:06 UTC+0800 + +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +qdrant 500m / 500m 512Mi / 512Mi data:20Gi <无> + +镜像信息: +组件 组件定义 镜像 +qdrant qdrant-1.0.0 docker.io/qdrant/qdrant:v1.10.0 + docker.io/apecloud/curl-jq:0.1.0 + +数据保护: +备份仓库 自动备份 备份计划 备份方法 备份保留期 可恢复时间 + +查看集群事件: kbcli cluster list-events -n demo qdrant-cluster +``` + + + + +## 停止 Qdrant 集群 + +停止集群会暂时暂停运行,同时保留所有数据和配置: + +**关键影响:** +- 计算资源(Pod)将被释放 +- 持久化存储(PVC)保持完整 +- 服务定义得以保留 +- 集群配置不会丢失 +- 运营成本降低 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/qdrant/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-stop + namespace: demo + spec: + clusterName: qdrant-cluster + type: Stop + ``` + + + + 也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } + ]' + ``` + + ```yaml + spec: + componentSpecs: + - name: qdrant + stop: true # 设置为 true 停止组件 + replicas: 3 + ``` + + + +## 启动 Qdrant 集群 + +重启已停止的集群可恢复运行,所有数据和配置将保持完整。 + +**关键影响:** +- 计算资源(Pod)会被重新创建 +- 服务将再次可用 +- 集群将恢复到之前的状态 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/qdrant/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-start + namespace: demo + spec: + clusterName: qdrant-cluster + type: Start + ``` + + + + 通过将 `spec.componentSpecs.stop` 设为 false 来重启集群: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + +## 删除 Qdrant 集群 + +请根据数据保留需求谨慎选择删除策略: + +| 策略类型 | 删除的资源范围 | 数据清除情况 | 适用场景 | +|-----------------|---------------------|--------------------|------------------------| +| DoNotTerminate | 不删除任何资源 | 保留所有数据 | 关键生产环境集群 | +| Delete | 删除所有资源 | 清除PVC数据 | 非关键环境 | +| WipeOut | 删除所有资源 | 彻底清除所有数据* | 仅限测试环境 | + +*包含外部存储中的快照和备份 + +**删除前检查清单:** +1. 确认没有应用正在使用该集群 +2. 确保存在必要的备份 +3. 验证terminationPolicy设置正确 +4. 
检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster qdrant-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster qdrant-cluster -n demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..49b9b4dd --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,285 @@ +--- +description: 了解如何在KubeBlocks中管理Qdrant集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- Qdrant +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: Qdrant 集群生命周期管理(停止、启动、重启) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Qdrant 集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 Qdrant 集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运维成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|------------------------------| +| 停止 | 暂停集群,保留存储 | 成本节约、维护 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重建组件 Pod | 配置变更、故障排查 | + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Qdrant 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 Qdrant 集群将: + +1. 终止所有运行中的 Pod +2. 保留持久化存储(PVC) +3. 保持集群配置 + +此操作适用于: +- 临时节省成本 +- 维护窗口期 +- 开发环境暂停 + + + + + +选项 1:OpsRequest API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-stop-ops + namespace: demo +spec: + clusterName: qdrant-cluster + type: Stop +``` + + + + +选项 2:Cluster API 补丁 + +通过修改 stop 字段直接调整集群规格: + +```bash +kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + + + +### 验证集群停止 + +确认停止操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster qdrant-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + qdrant-cluster qdrant Delete Stopping 6m3s + qdrant-cluster qdrant Delete Stopped 6m55s + ``` + +2. 验证无运行中的 Pod: + ```bash + kubectl get pods -l app.kubernetes.io/instance=qdrant-cluster -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +3. 确认持久卷仍然存在: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=qdrant-cluster -n demo + ``` + 示例输出: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-qdrant-cluster-qdrant-0 Bound pvc-uuid 20Gi RWO 22m + data-qdrant-cluster-qdrant-1 Bound pvc-uuid 20Gi RWO 21m + data-qdrant-cluster-qdrant-2 Bound pvc-uuid 20Gi RWO 21m + ``` + +### 启动集群 + +启动已停止的 Qdrant 集群: +1. 重新创建所有 Pod +2. 重新挂载持久化存储 +3. 恢复服务端点 + +预期行为: +- 集群恢复到之前状态 +- 不会发生数据丢失 +- 服务自动恢复 + + + + +发起启动操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-start-ops + namespace: demo +spec: + # 指定此操作目标集群资源的名称 + clusterName: qdrant-cluster + type: Start +``` + + + + + +修改集群规格以恢复运行: +1. 设置 stop: false,或 +2. 完全移除 stop 字段 + ```bash + kubectl patch cluster qdrant-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + + +### 验证集群启动 + +确认启动操作成功: + +1. 
检查集群状态转换: + ```bash + kubectl get cluster qdrant-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + qdrant-cluster qdrant Delete Updating 24m + qdrant-cluster qdrant Delete Running 24m + qdrant-cluster qdrant Delete Running 24m + ``` + +2. 验证 Pod 重建: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster + ``` + 示例输出: + ```bash + NAME READY STATUS RESTARTS AGE + qdrant-cluster-qdrant-0 2/2 Running 0 55s + qdrant-cluster-qdrant-1 2/2 Running 0 44s + qdrant-cluster-qdrant-2 2/2 Running 0 33s + ``` + +### 重启集群 + +重启操作提供: +- 无需完全停止集群即可重建 Pod +- 组件级粒度控制 +- 最小化服务中断 + +使用场景: +- 需要重启的配置变更 +- 资源刷新 +- 故障排查 + +**使用 OpsRequest API** + +针对特定组件 `qdrant` 进行重启: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-restart-ops + namespace: demo +spec: + clusterName: qdrant-cluster + type: Restart + restart: + - componentName: qdrant +``` + +**验证重启完成** + +确认组件重启成功: + +1. 跟踪 OpsRequest 进度: + ```bash + kubectl get opsrequest qdrant-cluster-restart-ops -n demo -w + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-restart-ops Restart qdrant-cluster Running 0/3 4s + qdrant-cluster-restart-ops Restart qdrant-cluster Running 1/3 28s + qdrant-cluster-restart-ops Restart qdrant-cluster Running 2/3 56s + qdrant-cluster-restart-ops Restart qdrant-cluster Running 2/3 109s + ``` + +2. 检查 Pod 状态: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster + ``` + 注意:重启后 Pod 将显示新的创建时间戳 + +3. 验证组件健康状态: + ```bash + kbcli cluster describe qdrant-cluster -n demo + ``` + +操作完成后,集群将返回 Running 状态。 + +## 总结 +在本指南中,您学习了如何: +1. 停止 Qdrant 集群以暂停运行同时保留持久化存储 +2. 启动已停止的集群使其重新上线 +3. 重启特定集群组件以重建其 Pod 而无需停止整个集群 + +通过管理 Qdrant 集群的生命周期,您可以优化资源利用率、降低成本并在 Kubernetes 环境中保持灵活性。KubeBlocks 提供了执行这些操作的无缝方式,确保高可用性和最小化中断。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..639630b6 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,178 @@ +--- +description: 了解如何在KubeBlocks管理的Qdrant集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- Qdrant +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: Qdrant 集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks垂直扩缩Qdrant集群 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的Qdrant集群进行垂直扩缩,同时保持副本数量不变。 + +垂直扩缩会修改Qdrant实例的计算资源(CPU和内存)但保持副本数不变。主要特点: + +- **无中断性**:正确配置时,可在扩缩期间保持可用性 +- **精细化**:可独立调整CPU、内存或两者 +- **可逆性**:根据需要随时进行扩容或缩容 + +KubeBlocks通过遵循受控的、角色感知的更新策略来确保扩缩操作期间的影响最小化: + +**角色感知副本(主/从副本)** +- 从副本优先更新 - 先升级非领导者Pod以最小化中断 +- 主副本最后更新 - 仅当所有从副本健康后才重启主Pod +- 集群状态在所有副本稳定后从"更新中"转为"运行中" + +**无角色副本(基于序号的扩缩)** +若副本未定义角色,更新遵循Kubernetes Pod序号顺序: +- 从最高序号开始(如pod-2 → pod-1 → pod-0)以确保确定性滚动更新 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Qdrant集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 垂直扩缩 + +**预期工作流程**: + +1. Pod按序号从高到低顺序更新(如pod-2 → pod-1 → pod-0) +1. 
集群状态从"更新中"过渡到"运行中" + + + + 选项1:使用VerticalScaling OpsRequest + + 应用以下YAML为qdrant组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-vscale-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: VerticalScaling + verticalScaling: + - componentName: qdrant + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + 可通过以下命令查看扩缩操作进度: + + ```bash + kubectl -n demo get ops qdrant-cluster-vscale-ops -w + ``` + + 预期结果: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 0/3 32s + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 1/3 55s + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 2/3 82s + qdrant-cluster-vscale-ops VerticalScaling qdrant-cluster Running 3/3 2m13s + ``` + + + + + + 选项2:直接更新Cluster API + + 也可通过更新`spec.componentSpecs.resources`字段来直接调整资源。 + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 3 + resources: + requests: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + limits: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + ... + ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 在维护窗口或低流量时段进行扩缩 +- 确认Kubernetes集群有足够资源 +- 开始前检查是否有其他操作正在进行 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证QoS + +**扩缩后:** +- 监控资源利用率和应用性能 +- 根据需要调整Qdrant参数 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe qdrant-cluster -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +qdrant 1 / 1 1Gi / 1Gi data:20Gi +``` + +## KubeBlocks垂直扩缩的核心优势 +- 无缝扩缩:按特定顺序重建Pod确保最小中断 +- 动态资源调整:根据工作负载需求轻松调整CPU和内存 +- 灵活性:可选择OpsRequest动态扩缩或直接API更新实现精确控制 +- 高可用性:扩缩过程中集群保持运行状态 + +## 清理 +删除所有创建的资源,包括Qdrant集群及其命名空间: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中您学会了: +1. 部署由KubeBlocks管理的Qdrant集群 +2. 通过增减qdrant组件资源进行垂直扩缩 +3. 使用OpsRequest和直接Cluster API更新两种方式调整资源分配 + +垂直扩缩是优化资源利用率和适应工作负载变化的强大工具,可确保您的Qdrant集群始终保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..b8bfbfd3 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,279 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的Qdrant集群执行水平扩缩容(扩容与缩容)。 +keywords: +- KubeBlocks +- Qdrant +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks实现Qdrant集群的水平扩展 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用 KubeBlocks 实现 Qdrant 集群水平扩缩容 + +本指南介绍如何对 KubeBlocks 管理的 Qdrant 集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用 **OpsRequest** 和直接修改 **Cluster API** 两种方式实现这一目标。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Qdrant 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## 扩容(增加副本) + +**预期工作流程**: + +1. 新 Pod 被创建,状态从 `Pending` 转变为 `Running` +2. 
集群状态从 `Updating` 转变为 `Running` + +:::note + +Qdrant 使用 **Raft 共识协议** 来维护集群拓扑和集合结构的一致性。 +建议保持副本数为奇数(如 3、5、7),以避免扩容/缩容后出现脑裂场景。 + +::: + + + + + + + 选项一:使用水平扩容 OpsRequest + + 为 qdrant 组件增加 1 个副本实现集群扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-scale-out-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: qdrant + # 指定组件扩容的副本变更数 + scaleOut: + # 指定组件的副本变更数 + # 为当前组件增加 1 个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops qdrant-cluster-scale-out-ops -n demo -w + ``` + + 预期输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-scale-out-ops HorizontalScaling qdrant-cluster Running 0/1 9s + qdrant-cluster-scale-out-ops HorizontalScaling qdrant-cluster Running 1/1 16s + qdrant-cluster-scale-out-ops HorizontalScaling qdrant-cluster Succeed 1/1 16s + ``` + + + + + 选项二:直接修改 Cluster API + + 您也可以直接更新 Cluster 资源中的 `replicas` 字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 4 # 增加副本数实现扩容 + ... + ``` + + 或者使用命令修补集群 CR: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### 验证扩容结果 + +操作完成后,您将看到新创建的 Pod,且 Qdrant 集群状态从 `Updating` 变为 `Running`,新建的 Pod 会被赋予 `secondary` 角色。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 6m24s +qdrant-cluster-qdrant-1 2/2 Running 0 7m19s +qdrant-cluster-qdrant-2 2/2 Running 0 5m57s +qdrant-cluster-qdrant-3 2/2 Running 0 3m54s +``` + +## 缩容(减少副本) + +**预期工作流程**: + +1. 移除序号最大的副本 +3. Pod 被优雅终止 +4. 集群状态从 `Updating` 转变为 `Running` + +:::note + +Qdrant 缩容时,数据将在剩余副本间重新分配。请确保集群有足够容量容纳数据。 +数据重分配过程耗时取决于数据量,该过程由 Qdrant 的 `MemberLeave` 操作处理,在数据重分配(即 `MemberLeave` 操作)成功完成前,相关 Pod 不会被删除。 + +::: + + + + + + 选项一:使用水平缩容 OpsRequest + + 通过减少 1 个副本实现 Qdrant 集群缩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-scale-in-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: qdrant + # 指定组件缩容的副本变更数 + scaleIn: + # 指定组件的副本变更数 + # 从当前组件移除 1 个副本 + replicaChanges: 1 + ``` + + 监控操作进度: + ```bash + kubectl get ops qdrant-cluster-scale-in-ops -n demo -w + ``` + + 预期输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + qdrant-cluster-scale-in-ops HorizontalScaling qdrant-cluster Running 0/1 8s + qdrant-cluster-scale-in-ops HorizontalScaling qdrant-cluster Running 1/1 24s + qdrant-cluster-scale-in-ops HorizontalScaling qdrant-cluster Succeed 1/1 24s + ``` + + + + + 选项二:直接修改 Cluster API + + 您也可以直接更新 Cluster 资源中的 `replicas` 字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 1 # 减少副本数实现缩容 + ``` + + 或者使用命令修补集群 CR: + + ```bash + kubectl patch cluster qdrant-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]' + ``` + + + + +### 验证缩容结果 + +示例输出(保留 1 个 Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 18m +``` + +## 故障排查 + +缩容时,KubeBlocks Qdrant 会按以下步骤重分配数据: + +1. 集群信息收集: +- 识别待移除成员 +- 获取包含节点 ID 和领导者信息的集群状态 + +2. 
数据迁移:
- 发现待移除成员上的所有集合
- 为每个集合定位所有本地分片
- 将每个分片迁移至集群领导者
- 在继续操作前验证分片转移是否成功

3. 集群成员更新:
- 从集群成员中移除待离开节点
- 使用文件锁防止并发移除操作

若缩容操作长时间停滞,请检查以下资源:

```bash
# 检查 agent 日志(将 <pod-name> 替换为待移除副本的 Pod 名称)
kubectl logs <pod-name> -n demo -c kbagent

# 检查集群错误事件
kubectl get events -n demo --field-selector involvedObject.name=qdrant-cluster

# 检查 kubeblocks 日志
kubectl -n kb-system logs deploy/kubeblocks
```

## 最佳实践

执行水平扩缩容时建议:
- 尽可能在低流量时段操作
- 扩缩容过程中监控集群健康状态
- 扩容前确保有足够资源
- 考虑新副本的存储需求

## 清理资源
删除 Qdrant 集群及其命名空间以释放所有资源:
```bash
kubectl delete cluster qdrant-cluster -n demo
kubectl delete ns demo
```

## 总结
本指南中您已学习如何:
- 通过增加副本实现 Qdrant 集群扩容
- 通过减少副本实现 Qdrant 集群缩容
- 使用 OpsRequest 和直接修改 Cluster API 两种方式进行水平扩缩容

KubeBlocks 能确保在最小化影响数据库服务的情况下实现无缝扩缩容。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx
new file mode 100644
index 00000000..0f64fee3
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/04-volume-expansion.mdx
@@ -0,0 +1,219 @@
+---
+description: 了解如何在KubeBlocks管理的Qdrant集群中无停机扩展持久卷声明(PVC)。
+keywords:
+- KubeBlocks
+- Qdrant
+- Volume Expansion
+- Kubernetes
+- PVC
+sidebar_label: 存储卷扩容
+sidebar_position: 4
+title: 扩容 Qdrant 集群存储卷
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+# 扩展 Qdrant 集群的存储卷

本指南介绍如何在 **KubeBlocks** 管理的 Qdrant 集群中扩展持久卷声明(PVC)。存储卷扩展功能允许动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持时,此操作可以在不中断服务的情况下执行。

存储卷扩展允许您在创建持久卷声明(PVC)后增加其容量。该功能在 Kubernetes v1.11 中引入,并在 Kubernetes v1.24 中正式发布(GA)。

## 前提条件

import Prerequisites from '../_tpl/_prerequisites.mdx'

<Prerequisites />

### 检查存储类是否支持卷扩展

列出所有可用存储类,并通过检查 `ALLOWVOLUMEEXPANSION` 字段验证是否支持卷扩展:
```bash
kubectl get storageclass
```

示例输出:
```bash
NAME                PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
gp2                 kubernetes.io/aws-ebs   Delete          WaitForFirstConsumer   false                  4d10h
kb-default-sc       ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   3d7h
sc-s3-repo-2qsxfh   ru.yandex.s3.csi        Retain          Immediate              false                  3d7h
```
确保您使用的存储类将 `ALLOWVOLUMEEXPANSION` 设置为 true。如果为 false,则该存储类不支持卷扩展。

## 使用 StorageClass 部署 Qdrant 集群

KubeBlocks 采用声明式方法管理 Qdrant 集群。以下是一个部署包含 3 个副本的 Qdrant 集群的配置示例。

应用以下 YAML 配置部署集群:

```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: qdrant-cluster
  namespace: demo
spec:
  terminationPolicy: Delete
  clusterDef: qdrant
  topology: cluster
  componentSpecs:
    - name: qdrant
      serviceVersion: 1.10.0
      replicas: 3
      resources:
        limits:
          cpu: "0.5"
          memory: "0.5Gi"
        requests:
          cpu: "0.5"
          memory: "0.5Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            # 指定支持卷扩展的存储类名称
            storageClassName: <STORAGE_CLASS_NAME>
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```
**关键字段说明**
- `storageClassName`: 指定支持卷扩展的 `StorageClass` 名称。如果未设置,将使用标注为 `default` 的 StorageClass。

:::note
**ALLOWVOLUMEEXPANSION**

创建集群时,请确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`)。

:::

## 验证部署

import VerifyCluster from '../_tpl/_verify-cluster.mdx'

<VerifyCluster />

## 扩展存储卷

:::note
1. 确保存储类支持卷扩展(检查 `ALLOWVOLUMEEXPANSION`)。
2. 新容量必须大于当前容量。
3. 
根据存储提供商的不同,卷扩展可能需要额外配置。 +::: + +您可以通过以下两种方式之一扩展存储卷: + + + + + 选项 1:使用 VolumeExpansion OpsRequest + + 应用以下 YAML 为 qdrant 组件增加存储卷容量: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: qdrant + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 使用以下命令监控扩展进度: + + ```bash + kubectl describe ops qdrant-cluster-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 完成后,PVC 容量将更新。 + + :::note + 如果使用的存储类不支持卷扩展,此 OpsRequest 会快速失败并显示类似信息: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 选项 2:直接更新 Cluster API + + 您也可以直接更新 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段至目标容量。 + + ```yaml + componentSpecs: + - name: qdrant + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量,确保大于当前容量 + storage: 30Gi + ``` + KubeBlocks 将根据新配置自动更新 PVC 容量。 + + + +## 验证 + +验证更新后的集群配置: +```bash +kbcli cluster describe qdrant-cluster -n demo +``` +预期输出: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +qdrant 500m / 500m 512Mi / 512Mi data:30Gi +``` +数据 PVC 的存储容量已更新至指定值(本例中为 30Gi)。 + +确认 PVC 扩容完成: +```bash +kubectl get pvc -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` +预期输出: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +qdrant-cluster-qdrant-data-0 Bound pvc-uuid 30Gi RWO 33m +qdrant-cluster-qdrant-data-1 Bound pvc-uuid 30Gi RWO 33m +qdrant-cluster-qdrant-data-2 Bound pvc-uuid 30Gi RWO 33m +``` + +## 清理 +删除所有创建的资源,包括 Qdrant 集群及其命名空间: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +在本指南中您学习了如何: +1. 验证存储类对卷扩展的兼容性。 +2. 通过以下方式执行卷扩展: + - 使用 OpsRequest 进行动态更新。 + - 通过 Cluster API 进行手动更新。 +3. 
验证更新后的 PVC 容量并确保扩容操作完成。

通过存储卷扩展功能,您可以高效地扩展 Qdrant 集群的存储容量而无需中断服务,确保数据库能够随着应用需求增长而扩展。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx
new file mode 100644
index 00000000..a24e9dde
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/05-manage-loadbalancer.mdx
@@ -0,0 +1,320 @@
---
description: 了解如何在KubeBlocks中通过负载均衡器及其他服务类型配置和管理Qdrant服务,实现内外网访问。
keywords:
- KubeBlocks
- Qdrant
- LoadBalancer
- External Service
- Expose
- Kubernetes
sidebar_label: 管理 Qdrant 服务
sidebar_position: 5
title: 使用KubeBlocks声明式集群API创建与销毁Qdrant服务
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';


# 使用 KubeBlocks 声明式集群 API 管理 Qdrant 服务

本指南提供了逐步操作说明,指导如何对外部和内部暴露由 KubeBlocks 管理的 Qdrant 服务。您将学习如何通过云服务提供商的负载均衡器服务配置外部访问、管理内部服务,以及在不再需要时正确禁用外部暴露功能。



## 前提条件

import Prerequisites from '../_tpl/_prerequisites.mdx'




## 部署 Qdrant 集群

import CreateCluster from '../_tpl/_create-cluster.mdx'




## 验证部署

import VerifyCluster from '../_tpl/_verify-cluster.mdx'




## 查看网络服务
列出为 Qdrant 集群创建的服务:
```bash
kubectl get service -l app.kubernetes.io/instance=qdrant-cluster -n demo
```

示例服务输出:
```bash
NAME                           TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
qdrant-cluster-qdrant-qdrant   ClusterIP   10.96.111.81   <none>        6333/TCP,6334/TCP   28m
```

说明:
- 该命令会筛选出标签 `app.kubernetes.io/instance=qdrant-cluster` 且位于 `demo` 命名空间下的所有 Service 资源
- 输出结果包含服务名称、类型、集群内部 IP、外部 IP(如有)、暴露端口以及创建时长
- 对于 Qdrant 服务,默认会暴露 6333(HTTP/REST)和 6334(gRPC)两个端口

## 暴露 Qdrant 服务

外部服务地址允许公网访问 Qdrant,而内部服务地址将访问限制在用户的 VPC 内。

### 服务类型对比

| 类型 | 使用场景 | 云成本 | 安全性 |
|------|----------|------------|----------|
| ClusterIP | 内部服务通信 | 免费 | 最高 |
| NodePort | 开发/测试 | 低 | 中等 |
| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 |





  选项一:使用 OpsRequest

  要通过 LoadBalancer 将 Qdrant 服务暴露到外部,创建一个 OpsRequest 资源:

  ```yaml
  apiVersion: operations.kubeblocks.io/v1alpha1
  kind: OpsRequest
  metadata:
    name: qdrant-cluster-expose-enable-ops
    namespace: demo
  spec:
    type: Expose
    clusterName: qdrant-cluster
    expose:
    - componentName: qdrant
      services:
      - name: internet
        # 决定服务暴露方式,默认为 'ClusterIP'
        # 有效选项为 'ClusterIP'、'NodePort' 和 'LoadBalancer'
        serviceType: LoadBalancer
        # 如果 ServiceType 是 LoadBalancer,则包含云服务商相关参数
        # 以下是 AWS EKS 的示例
        annotations:
          service.beta.kubernetes.io/aws-load-balancer-type: nlb
          service.beta.kubernetes.io/aws-load-balancer-internal: "false"  # 或 "true" 表示内部 VPC IP
      switch: Enable
  ```

  等待 OpsRequest 完成:
  ```bash
  kubectl get ops qdrant-cluster-expose-enable-ops -n demo
  ```

  示例输出:
  ```bash
  NAME                               TYPE     CLUSTER          STATUS    PROGRESS   AGE
  qdrant-cluster-expose-enable-ops   Expose   qdrant-cluster   Succeed   1/1        31s
  ```




  选项二:使用 Cluster API

  或者,在 Cluster 资源的 `spec.services` 部分更新以包含 LoadBalancer 服务:
  ```yaml
  apiVersion: apps.kubeblocks.io/v1
  kind: Cluster
  metadata:
    name: qdrant-cluster
    namespace: demo
  spec:
    terminationPolicy: Delete
    clusterDef: qdrant
    # 暴露一个外部服务
    services:
    - annotations:
        service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器
        service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 或 "true" 表示内部 VPC IP
      componentSelector: qdrant
      name: qdrant-internet
      serviceName: qdrant-internet
      spec:
        ipFamilyPolicy: PreferDualStack
        ports:
        - name: tcp-qdrant
          port: 6333
          protocol: TCP
          targetPort: tcp-qdrant
        type: LoadBalancer
  componentSpecs:
    ...
  ```
  上述 YAML 配置在 services 部分添加了一个新的外部服务。这个 LoadBalancer 服务包含了 AWS 网络负载均衡器(NLB)的注解。

  :::note
  云服务商注解

  使用 LoadBalancer 服务时,必须包含特定于云服务商的适当注解。以下是不同云服务商常用的注解列表:

  - AWS
  ```yaml
  service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器
  service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 使用 "false" 表示面向互联网的 LoadBalancer
  ```

  - Azure
  ```yaml
  service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 使用 "false" 表示面向互联网的 LoadBalancer
  ```

  - GCP
  ```yaml
  networking.gke.io/load-balancer-type: "Internal" # 将 LoadBalancer 限制为仅内部 VPC 访问。默认情况下如果不指定则为面向互联网。
  cloud.google.com/l4-rbs: "enabled" # 面向互联网的 LoadBalancer 优化
  ```

  - 阿里云
  ```yaml
  service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 使用 "intranet" 表示内部面向的 LoadBalancer
  ```
  :::


  :::note
  `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制 LoadBalancer 是内部还是面向互联网的。请注意,此注解在服务创建后不能动态修改。
  ```yaml
  service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 使用 "true" 表示内部 VPC IP
  ```
  如果在服务创建后将此注解从 "false" 更改为 "true",注解可能会在服务对象中更新,但 LoadBalancer 仍将保留其公共 IP。

  要正确修改此行为:
  - 首先,删除现有的 LoadBalancer 服务。
  - 使用更新的注解(`service.beta.kubernetes.io/aws-load-balancer-internal`: "true")重新创建服务。
  - 等待新的 LoadBalancer 使用正确的内部或外部 IP 进行配置。
  :::


  使用以下命令等待 Cluster 状态变为 Running:
  ```bash
  kubectl get cluster qdrant-cluster -n demo -w
  ```
  ```
  NAME             CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
  qdrant-cluster   qdrant               Delete               Running   18m
  ```



### 验证暴露的服务
检查服务详情以确认 LoadBalancer 服务已创建:

```bash
kubectl get service -l app.kubernetes.io/instance=qdrant-cluster -n demo
```

示例输出:
```bash
NAME                             TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
qdrant-cluster-qdrant-internet   LoadBalancer   172.20.60.24                 6333:31243/TCP   1m
```

## 禁用外部暴露




  选项一:使用 OpsRequest

  要禁用外部访问,创建一个 OpsRequest:
  ```yaml
  apiVersion: operations.kubeblocks.io/v1alpha1
  kind: OpsRequest
  metadata:
    name: qdrant-cluster-expose-disable-ops
    namespace: demo
  spec:
    clusterName: qdrant-cluster
    expose:
    - componentName: qdrant
      services:
      - name: internet
        serviceType: LoadBalancer
      switch: Disable
    preConditionDeadlineSeconds: 0
    type: Expose
  ```

  等待 OpsRequest 完成:
  ```bash
  kubectl get ops qdrant-cluster-expose-disable-ops -n demo
  ```
  示例输出:
  ```bash
  NAME                                TYPE     CLUSTER          STATUS    PROGRESS   AGE
  qdrant-cluster-expose-disable-ops   Expose   qdrant-cluster   Succeed   1/1        24s
  ```




  选项二:使用 Cluster API

  或者,从 Cluster 资源中移除 `spec.services` 字段:
  ```bash
  kubectl patch cluster qdrant-cluster -n demo --type=json -p='[
    {
      "op": "remove",
      "path": "/spec/services"
    }
  ]'
  ```

  监控集群状态直到变为 Running:
  ```bash
  kubectl get cluster qdrant-cluster -n demo -w
  ```

  ```bash
  NAME             CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
  qdrant-cluster   qdrant               Delete               Running   24m
  ```



### 验证服务移除

确保 'qdrant-cluster-qdrant-internet' 服务已被移除:

```bash
kubectl get service -l app.kubernetes.io/instance=qdrant-cluster -n demo
```

预期结果:'qdrant-cluster-qdrant-internet' 服务应被移除。

## 清理资源
要删除所有已创建的资源,请执行以下命令删除 Qdrant 集群及其所在的命名空间:
```bash
kubectl delete cluster qdrant-cluster -n demo
kubectl delete ns demo
```

## 概述
本指南演示了如何:
- 使用 KubeBlocks 将 Qdrant 服务暴露至外部或内部网络
- 通过云服务商特定注解配置 LoadBalancer 服务
- 通过 OpsRequest 或直接更新 Cluster API 来管理外部访问的启用/禁用

KubeBlocks 为 Kubernetes 环境中的 Qdrant 服务管理提供了灵活且简化的解决方案。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx
new file mode 100644
index 00000000..22cc92fc
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/06-minior-version-upgrade.mdx
@@ -0,0 +1,275 @@
---
description: 了解如何通过KubeBlocks部署和升级Qdrant集群,实现最小化停机时间。
keywords:
- KubeBlocks
- Qdrant
- Upgrade
- Rolling Upgrade
- Kubernetes
sidebar_label: 次版本升级
sidebar_position: 6
title: 在KubeBlocks中升级Qdrant集群的次版本
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# 在KubeBlocks中升级Qdrant集群的次版本

本指南将带您完成由KubeBlocks管理的Qdrant集群的部署和次版本升级过程,确保升级期间服务中断最小化。

## 前提条件

开始前请确保:
- 环境准备:
  - Kubernetes集群已就绪并正常运行
  - kubectl CLI工具已配置可访问集群
  - 已安装[KubeBlocks CLI](../../user_docs/references/install-kbcli)和[KubeBlocks Operator](../../user_docs/overview/install-kubeblocks),安装指引见链接
- 命名空间准备:为隔离资源,请先创建专用命名空间:

```bash
kubectl create ns demo
namespace/demo created
```

## 部署Qdrant集群

KubeBlocks采用声明式方式管理Qdrant集群。以下是部署3副本Qdrant集群的配置示例。

应用以下YAML配置部署集群:

```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: qdrant-cluster
  namespace: demo
spec:
  terminationPolicy: Delete
  clusterDef: qdrant
  topology: cluster
  componentSpecs:
    - name: qdrant
      serviceVersion: 1.10.0
      replicas: 3
      resources:
        limits:
          cpu: "0.5"
          memory: "0.5Gi"
        requests:
          cpu: "0.5"
          memory: "0.5Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: ""
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```

## 验证部署
监控集群状态直至转为Running状态:
```bash
kubectl get cluster qdrant-cluster -n demo -w
```

示例输出:

```bash
NAME             CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS     AGE
qdrant-cluster   qdrant               Delete               Creating   49s
qdrant-cluster   qdrant               Delete               Running    62s
```
当集群状态变为Running时,Qdrant集群即可使用。

:::tip
如果是首次创建集群,可能需要等待镜像拉取完成后才会运行。

:::

## 列出所有可用Qdrant版本

使用以下命令查看当前KubeBlocks支持的Qdrant版本:
```bash
kubectl get cmpv qdrant
```
预期输出:
```bash
NAME     VERSIONS                                STATUS      AGE
qdrant   1.14.0,1.10.0,1.8.4,1.8.1,1.7.3,1.5.0   Available   26d
```

注意:实际支持的版本列表可能因KubeBlocks版本而异。

## 升级Qdrant版本

### 检查相同ComponentDefinition下的兼容版本

**步骤1.** 获取与给定ComponentVersion关联的ComponentDefinition列表

```bash
kubectl get cmpv qdrant -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n'
```
+示例输出 +```text +qdrant-1.0.0 +``` +
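如需进一步确认上述 ComponentDefinition 的详细信息(例如其状态是否为 `Available`),可以直接查询对应的集群级资源。以下命令仅为示例,假设上一步输出的名称为 `qdrant-1.0.0`:

```bash
kubectl get componentdefinition qdrant-1.0.0
```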
+ +**步骤2.** 获取与qdrant ComponentDefinition兼容的版本列表 + +```bash +kubectl get cmpv qdrant -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("^qdrant"))) | .releases[]' +``` + +这将返回与名为qdrant的ComponentDefinition兼容的版本: + +
+示例输出 +```text +1.5.0 +1.7.3 +1.8.1 +1.8.4 +1.10.0 +1.14.0 +``` +
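在执行升级之前,建议先确认集群当前使用的 `serviceVersion`,并与上述兼容版本列表进行对照。以下为示例命令,假设集群名为 `qdrant-cluster` 且位于 `demo` 命名空间:

```bash
kubectl get cluster qdrant-cluster -n demo -ojson | jq -r '.spec.componentSpecs[] | select(.name == "qdrant") | .serviceVersion'
```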
+ +### 执行升级 + +要升级Qdrant版本,需修改Cluster资源中的serviceVersion字段。本示例将Qdrant版本从1.10.0升级至1.14.0 + + + + + + 选项1:使用OpsRequest + + 可通过OpsRequest升级集群: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-upgrade + namespace: demo + spec: + # 指定操作目标Cluster资源名称 + clusterName: qdrant-cluster + type: Upgrade + upgrade: + components: + - componentName: qdrant + # 指定组件目标服务版本 + serviceVersion: "1.14.0" + ``` + + + + 选项2:使用声明式Cluster API + + 也可通过直接修改集群配置中的spec.componentSpecs.serviceVersion字段实现升级: + + ```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.14.0 # 升级目标版本设为1.14.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + ``` + + + +## 验证过程 + + +### 监控升级进度 + +检查OpsRequest进度: +```bash +kubectl get ops -n demo qdrant-upgrade -w +``` + +示例输出: +``` +NAME TYPE CLUSTER STATUS PROGRESS AGE +qdrant-upgrade Upgrade qdrant-cluster Succeed 3/3 8m13s +``` + +检查Pod状态: +``` +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 1 (7m23s ago) 13m +qdrant-cluster-qdrant-1 2/2 Running 1 (7m49s ago) 12m +qdrant-cluster-qdrant-2 2/2 Running 1 (7m59s ago) 12m +``` + +**关键观察点:** +- Pod未被重建,RESTARTS计数器增加1 +- Pod按序号从高到低依次更新 + + +### 检查集群状态 +确认集群处于Running状态: +```bash +kubectl get cluster qdrant-cluster -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Running 17m +``` + +### 验证Qdrant版本 + +连接到升级后的实例验证版本: +```bash +kubectl exec -ti -n demo qdrant-cluster-qdrant-0 -c kbagent -- \ + curl http://127.0.0.1:6333 +``` + +预期输出: +``` +curl http://127.0.0.1:6333 +{"title":"qdrant - vector search engine","version":"1.14.0","commit":"3617a0111fc8590c4adcc6e88882b63ca4dda9e7"}% +``` + +## 总结 +通过本指南您学会了: +- 使用KubeBlocks部署Qdrant集群 +- 以最小停机时间完成Qdrant次版本的滚动升级 +- 验证升级是否成功 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..b9d13367 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,135 @@ +--- +description: 了解如何在由KubeBlocks管理的Qdrant集群中下线(停用)特定Pod。 +keywords: +- KubeBlocks +- Qdrant +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 下线 Qdrant 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的Qdrant集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 下线 KubeBlocks 管理的 Qdrant 集群中的特定 Pod + +本指南说明如何在 KubeBlocks 管理的 Qdrant 集群中下线(停用)特定 Pod。通过精确控制集群资源的同时保持可用性,此功能适用于工作负载重新平衡、节点维护或故障处理场景。 + +## 为什么选择 KubeBlocks 下线 Pod? 
+ +在传统的基于 StatefulSet 的部署中,Kubernetes 无法下线特定 Pod。StatefulSet 会确保 Pod 的顺序和身份标识,缩减规模时总是移除序号最高的 Pod(例如从 3 个副本缩减时优先移除 `Pod-2`)。这种限制导致无法精确控制下线目标,给维护、工作负载分配和故障处理带来不便。 + +KubeBlocks 通过允许管理员直接下线特定 Pod 解决了这一限制。这种细粒度控制既能保证高可用性,又能实现更优的资源管理,且不会影响整个集群。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Qdrant 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 下线 Pod + +**预期工作流**: +1. `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转为 `Running` + +要下线特定 Pod(例如 'qdrant-cluster-qdrant-1'),可采用以下任一方法: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 将 Pod 标记为下线: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: qdrant-cluster-decommission-ops + namespace: demo + spec: + clusterName: qdrant-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: qdrant + scaleIn: + onlineInstancesToOffline: + - 'qdrant-cluster-qdrant-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线过程 + 检查下线操作进度: + + ```bash + kubectl get ops qdrant-cluster-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +qdrant-cluster-decommission-ops HorizontalScaling qdrant-cluster Running 0/1 8s +qdrant-cluster-decommission-ops HorizontalScaling qdrant-cluster Running 1/1 31s +qdrant-cluster-decommission-ops HorizontalScaling qdrant-cluster Succeed 1/1 31s + ``` + + + + + + 方法二:使用 Cluster API + + 也可直接更新 Cluster 资源来下线 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: qdrant + replicas: 1 # 下线后期望的副本数 + offlineInstances: + - qdrant-cluster-qdrant-1 # <----- 指定要下线的 Pod + ... + ``` + + + + +### 验证下线结果 + +应用更新配置后,验证集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 25m +qdrant-cluster-qdrant-2 2/2 Running 0 24m +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向 Pod 下线 +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保持可用性的同时,提供了精细化的集群管理能力。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..e18f8fa6 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,129 @@ +--- +description: 了解如何创建并配置一个使用S3存储桶保存备份数据的KubeBlocks BackupRepo。 +keywords: +- KubeBlocks +- Backup +- BackupRepo +- S3 +- Kubernetes +sidebar_label: 创建备份仓库 +sidebar_position: 1 +title: 为KubeBlocks创建备份仓库 +--- +# 为 KubeBlocks 创建备份存储库 + +本指南将引导您通过使用 S3 存储桶来创建和配置 KubeBlocks 中的 BackupRepo(备份存储库),用于存储备份数据。 + +## 先决条件 +- 已配置具有创建 S3 存储桶权限的 AWS CLI +- 拥有 Kubernetes 集群的 kubectl 访问权限 +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks))并在 kb-system 命名空间中运行 + +## 步骤 1:创建 S3 存储桶 + +使用 AWS CLI 在目标区域创建 S3 存储桶。将 `` 替换为您所需的 AWS 区域(例如 `us-east-1`、`ap-southeast-1`)。 + +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region 
--create-bucket-configuration LocationConstraint= +``` + +示例(us-west-1 区域): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +示例输出: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +验证: +通过列出存储桶内容确认创建成功(初始应为空): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## 步骤 2:创建 Kubernetes Secret 存储 AWS 凭证 + +将 AWS 凭证安全地存储在 Kubernetes Secret 中。将 `` 和 `` 替换为您的实际 AWS 凭证: + +```bash +# 创建 secret 保存访问密钥 +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## 步骤 3:配置备份存储库 + +BackupRepo 是用于定义备份存储位置的自定义资源。本步骤将通过创建 BackupRepo 资源将您的 S3 存储桶与 KubeBlocks 集成。 + +应用以下 YAML 创建 BackupRepo。请根据实际情况替换字段值(如存储桶名称、区域等)。 + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # 将此备份存储库标记为默认存储库 + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # 当前 KubeBlocks 支持配置多种对象存储服务作为备份存储库 + # - s3 (Amazon Simple Storage Service) + # - oss (阿里云对象存储服务) + # - cos (腾讯云对象存储) + # - gcs (Google 云存储) + # - obs (华为云对象存储) + # - minio 及其他 S3 兼容服务 + storageProviderRef: s3 + # 指定备份存储库的访问方式 + # - Tool + # - Mount + accessMethod: Tool + # 指定此备份存储库创建的 PV 回收策略 + pvReclaimPolicy: Retain + # 指定此备份存储库创建的 PVC 容量 + volumeCapacity: 100Gi + # 存储 StorageProvider 的非敏感配置参数 + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # 引用存储 StorageProvider 凭证的 secret + credential: + # name 是在命名空间内引用 secret 资源的唯一标识 + name: s3-credential-for-backuprepo + # namespace 定义了 secret 名称必须唯一的空间范围 + namespace: kb-system +``` + +## 步骤 4:验证备份存储库状态 + +检查 BackupRepo 状态以确保其正确初始化: + +```bash +kubectl get backuprepo s3-repo -w +``` + +预期状态变化: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +故障排除: + - 如果状态变为 Failed: + - 确认存储桶名称和区域与 S3 配置匹配 + - 检查 Secret 中的 AWS 凭证是否正确 + - 验证 KubeBlocks 与 AWS S3 之间的网络连接 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..e473fd24 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,222 @@ +--- +description: KubeBlocks 中利用 Backup API 和 OpsRequest API 创建与验证 Qdrant 集群完整备份的逐步指南 +keywords: +- Qdrant +- Full Backup +- KubeBlocks +- Kubernetes +- Database Backup +- XtraBackup +sidebar_label: 创建完整备份 +sidebar_position: 2 +title: 在KubeBlocks上为Qdrant集群创建完整备份 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 在 KubeBlocks 上为 Qdrant 创建全量备份 + +本指南演示如何通过以下两种方式在 KubeBlocks 上为 Qdrant 集群创建和验证全量备份: +- 直接使用 Backup API(直接备份操作) +- 使用 OpsRequest API(带增强监控的托管备份操作) + +我们将在[从全量备份恢复](./05-restoring-from-full-backup)指南中介绍如何从备份恢复数据。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Qdrant 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 备份前提条件 + +创建备份前请确保: +1. 备份仓库已配置: + - 存在 `BackupRepo` 资源 + - 集群与仓库之间网络连通 + - `BackupRepo` 状态显示为 "Ready" + +2. 
集群准备就绪: + - 集群状态为 "Running" + - 没有正在进行的操作(扩缩容、升级等) + +## 查看备份配置 + +检查可用的备份策略和计划: + +```bash +# 列出备份策略 +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=qdrant-cluster + +# 列出备份计划 +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +预期输出: +```bash +NAME BACKUP-REPO STATUS AGE +qdrant-cluster-qdrant-backup-policy Available 36m + +NAME STATUS AGE +qdrant-cluster-qdrant-backup-schedule Available 36m +``` + +查看 BackupPolicy CR 'qdrant-cluster-qdrant-backup-policy' 中支持的备份方法: + +```bash +kubectl get backuppolicy qdrant-cluster-qdrant-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` +**备份方法列表** + +KubeBlocks Qdrant 支持以下备份方法: + +| 特性 | 方法 | 描述 | +|-------------|--------|------------| +| 全量备份 | datafile | 使用 HTTP API `snapshot` 为所有集合创建快照。 | + +## 通过 Backup API 备份 + +### 1. 创建按需备份 + +`datafile` 方法会备份数据库的数据文件 + +应用以下清单创建备份: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: qdrant-backup-datafile + namespace: demo +spec: + # 指定备份策略中定义的备份方法名称 + # - datafile + backupMethod: datafile + # 指定应用于此备份的备份策略 + backupPolicyName: qdrant-cluster-qdrant-backup-policy + # 决定当备份自定义资源(CR)被删除时,是否应删除备份仓库中的备份内容。支持的值是 `Retain` 和 `Delete` + # - `Retain` 表示保留备份内容及其在备份仓库中的物理快照 + # - `Delete` 表示删除备份内容及其在备份仓库中的物理快照 + deletionPolicy: Delete +``` + +### 2. 监控备份并验证完成状态 + +跟踪进度直到状态显示为 "Completed": + +```bash +kubectl get backup qdrant-backup-datafile -n demo -w +``` + +示例输出: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +qdrant-backup-datafile qdrant-cluster-qdrant-backup-policy datafile Completed 0 10s Delete 2025-05-18T15:43:53Z 2025-05-18T15:44:02Z +``` + +### 3. 验证备份 + +通过以下方式确认备份成功完成: +- 备份状态显示为 "Completed" +- 备份大小符合预期 +- 检查 BackupRepo 中的文件 + +`Backup` 资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 备份文件大小 + + +## 通过 OpsRequest API 备份 + +### 1. 创建按需备份 + +使用 OpsRequest API 执行 'pg-basebackup' 方法备份: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-backup + namespace: demo +spec: + clusterName: qdrant-cluster + force: false + backup: + backupPolicyName: qdrant-cluster-qdrant-backup-policy + backupMethod: datafile + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + +### 2. 监控备份进度 + +#### 1. 监控操作状态 + +实时跟踪备份进度: +```bash +kubectl get ops qdrant-cluster-backup -n demo -w +``` + +预期输出: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +qdrant-cluster-backup Backup qdrant-cluster Running -/- 5s +qdrant-cluster-backup Backup qdrant-cluster Succeed -/- 10s +``` + +- 状态显示为 'Succeed' 表示备份操作成功完成。 + +#### 2. 验证完成状态 + +检查最终备份状态: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=qdrant-cluster-backup +``` + +示例输出: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-qdrant-cluster-20250518154515 qdrant-cluster-qdrant-backup-policy datafile Completed 0 10s Delete 2025-05-18T15:45:15Z 2025-05-18T15:45:25Z 2025-06-17T15:45:25Z +``` + +- 备份状态应显示为 'Completed'。 + +### 3. 验证备份 + +通过以下方式确认备份成功完成: +- 备份状态显示为 "Completed" +- 备份大小符合预期 +- 检查 BackupRepo 中的文件 + +`Backup` 资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 其他元数据 + +## 总结 + +本指南涵盖: +1. 部署 Qdrant 集群 +2. 使用以下方式创建全量备份: + - 直接 Backup API + - 托管 OpsRequest API +3. 
监控和验证备份 + +您的 Qdrant 数据现已安全备份,可在需要时进行恢复。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..a7a47b83 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,153 @@ +--- +description: 了解如何使用KubeBlocks部署Qdrant集群,并配置在S3存储库中实现带保留策略的自动化定时备份。 +keywords: +- Qdrant +- Backup +- KubeBlocks +- Scheduled Backup +- Kubernetes +sidebar_label: 定时备份 +sidebar_position: 3 +title: 在KubeBlocks中设置带定时备份的Qdrant集群 +--- +# 在 KubeBlocks 中设置带定时备份的 Qdrant 集群 + +本指南演示如何使用 KubeBlocks 部署 Qdrant 集群,并配置在 S3 存储库中保留定时备份。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Qdrant 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 备份先决条件 + +1. 已配置备份存储库: + - 配置好 `BackupRepo` + - 集群与存储库之间网络连通,`BackupRepo` 状态为 `Ready` + +2. 集群正在运行: + - 集群必须处于 `Running` 状态 + - 没有正在进行的操作(扩缩容、升级等) + +## 配置定时备份 + +KubeBlocks 在创建集群时会自动创建 `BackupSchedule` 资源。按照以下步骤启用和配置定时备份: + +1. 验证默认备份计划配置: + +```bash +kubectl get backupschedule qdrant-cluster-qdrant-backup-schedule -n demo -oyaml +``` + +示例输出: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: qdrant-cluster-Qdrant-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── 分钟 (0-59) + # │ ┌───────────── 小时 (0-23) + # │ │ ┌───────────── 月份中的天 (1-31) + # │ │ │ ┌───────────── 月 (1-12) + # │ │ │ │ ┌───────────── 星期中的天 (0-6) (周日=0) + # │ │ │ │ │ + # 0 18 * * * + # 每天下午6点(18:00)执行此任务 + cronExpression: 0 18 * * * # 根据需要更新cronExpression + enabled: true # 设置为`true`以定期调度基础备份 + retentionPeriod: 7d # 根据需要设置保留期限 +``` + +2. 启用并自定义备份计划: +```bash +kubectl edit backupschedule qdrant-cluster-qdrant-backup-schedule -n demo +``` + +更新以下关键参数: +- `enabled`:设置为 `true` 以激活定时备份 +- `cronExpression`:使用 cron 语法配置备份频率 +- `retentionPeriod`:设置备份保留时长(如 `7d`、`1mo`) + +每日下午6点UTC备份并保留7天的示例配置: +```yaml +schedules: +- backupMethod: datafile + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. 验证计划配置: +```bash +# 检查计划状态 +kubectl get backupschedule qdrant-cluster-qdrant-backup-schedule -n demo -w + +# 查看详细配置 +kubectl describe backupschedule qdrant-cluster-qdrant-backup-schedule -n demo +``` + +## 监控和管理备份 + +启用定时备份后,监控其执行情况并管理备份保留: + +1. 查看所有备份: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=qdrant-cluster +``` + +2. 检查备份详情: +```bash +kubectl describe backup -n demo +``` + +3. 验证备份文件: +- 状态应显示"Completed" +- 检查备份大小是否符合预期 +- 确认保留期限已应用 +- 验证存储库中存在备份文件 + +4. 管理备份保留: +- 手动删除旧备份: +```bash +kubectl delete backup -n demo +``` +- 修改保留期限: +```bash +kubectl edit backupschedule qdrant-cluster-qdrant-backup-schedule -n demo +``` + +## 清理 +要删除所有创建的资源,删除 Qdrant 集群及其命名空间: + +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南演示了: +1. Qdrant 自动备份配置 +2. 使用 cron 语法自定义计划 +3. 保留策略管理 +4. 
备份验证流程 + +您的 Qdrant 集群现在具备: +- 定期自动备份 +- 可配置的保留策略 +- 完整的备份历史记录跟踪 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx new file mode 100644 index 00000000..7c34ea5b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/05-restoring-from-full-backup.mdx @@ -0,0 +1,164 @@ +--- +description: 了解如何通过集群注解或OpsRequest API,在KubeBlocks中从现有备份恢复一个新的Qdrant集群。 +keywords: +- Qdrant +- Restore +- Backup +- KubeBlocks +- Kubernetes +sidebar_label: 恢复 Qdrant 集群 +sidebar_position: 5 +title: 从备份恢复 Qdrant 集群 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 从备份恢复 Qdrant 集群 + +本指南演示在 KubeBlocks 中从备份恢复 Qdrant 集群的两种方法: + +1. **集群注解法** - 使用 YAML 注解的简单声明式方法 +2. **OpsRequest API 法** - 支持进度监控的增强型操作控制 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 恢复准备:定位完整备份 +开始恢复前,请确保存在可用的完整备份。恢复过程将使用该备份创建新的 Qdrant 集群。 + +- 新集群可访问的备份仓库 +- 状态为 `Completed` 的有效完整备份 +- 充足的 CPU/内存资源 +- 足够的存储容量 + +查找可用完整备份: + +```bash +kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=qdrant-cluster # 获取完整备份列表 +``` + +从状态为 `Completed` 的备份中选择一个。 + +## 方案一:集群注解恢复法 + +### 步骤 1:创建恢复集群 +通过恢复配置创建新集群: + +关键参数: +- `kubeblocks.io/restore-from-backup` 注解 +- 上一步骤中定位的备份名称和命名空间 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster-restored + namespace: demo + annotations: + # 注意:将 替换为您的备份名称 + kubeblocks.io/restore-from-backup: '{"qdrant":{"name":"","namespace":"demo","volumeRestorePolicy":"Parallel"}}' +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 步骤 2:监控恢复进度 +通过以下命令跟踪恢复状态: + +```bash +# 查看恢复状态 +kubectl get restore -n demo -w + +# 查看集群状态 +kubectl get cluster -n demo -w +``` + +## 方案二:OpsRequest API 恢复法 + +### 步骤 1:发起恢复操作 +通过 OpsRequest API 创建恢复请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: qdrant-cluster-restore + namespace: demo +spec: + clusterName: qdrant-cluster-restored + force: false + restore: + backupName: + backupNamespace: demo + type: Restore +``` + +### 步骤 2:跟踪操作进度 +监控恢复状态: + +```bash +# 查看恢复状态 +kubectl get restore -n demo -w + +# 查看集群状态 +kubectl get cluster -n demo -w +``` + +### 步骤 3:验证恢复集群 +确认恢复成功: +```bash +kubectl get cluster qdrant-cluster-restored -n demo +``` +示例输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster-restored qdrant Delete Running 3m3s +``` + +## 清理资源 +删除所有创建的资源,包括 Qdrant 集群及其命名空间: + +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete cluster qdrant-cluster-restored -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南涵盖两种恢复方法: + +1. **集群注解法** - 基于 YAML 的简单方案 + - 获取系统凭证 + - 创建带恢复注解的集群 + - 监控进度 + +2. 
**OpsRequest API 法** - 增强的操作控制 + - 创建恢复请求 + - 跟踪操作状态 + - 验证完成情况 diff --git a/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/_category_.yml b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/_category_.yml new file mode 100644 index 00000000..09845f2d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/05-backup-restore/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 备份与恢复 +position: 5 diff --git a/docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..b18f38fe --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,247 @@ +--- +description: 了解如何在KubeBlocks中通过Prometheus Operator为Qdrant集群配置可观测性。设置监控并通过Grafana可视化指标。 +keywords: +- KubeBlocks +- Qdrant +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: Qdrant 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 Qdrant 集群的可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 Qdrant + +本指南演示如何在 KubeBlocks 中为 Qdrant 集群配置全面的监控方案,包含以下组件: + +1. Prometheus Operator 用于指标采集 +2. Qdrant 内置导出器用于指标暴露 +3. Grafana 用于可视化展示 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 安装监控套件 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 Qdrant 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 配置指标采集 + +### 1. 验证导出器端点 + +```bash +kubectl -n demo exec -it pods/qdrant-cluster-qdrant-0 -c kbagent -- \ + curl -s http://127.0.0.1:6333/metrics | head -n 50 +``` + +### 2. 
创建 PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: qdrant-cluster-pod-monitor + namespace: demo + labels: # 必须与 'prometheus.spec.podMonitorSelector' 中的设置匹配 + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # 定义从关联的 Kubernetes Pod 对象传输到采集指标的标签 + # 根据实际需求设置标签 + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: tcp-qdrant # 必须与导出器端口名称匹配 + scheme: http + namespaceSelector: + matchNames: + - demo # 目标命名空间 + selector: + matchLabels: + app.kubernetes.io/instance: qdrant-cluster +``` +**PodMonitor 配置指南** + +| 参数 | 必填 | 说明 | +|-----------|----------|-------------| +| `port` | 是 | 必须与导出器端口名称 ('http-metrics') 匹配 | +| `namespaceSelector` | 是 | 指定 Qdrant 运行的命名空间 | +| `labels` | 是 | 必须与 Prometheus 的 podMonitorSelector 匹配 | +| `path` | 否 | 指标端点路径 (默认: /metrics) | +| `interval` | 否 | 采集间隔 (默认: 30s) | + +## 验证监控配置 + +### 1. 检查 Prometheus 目标 +端口转发并访问 Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +浏览器访问: +http://localhost:9090/targets + +检查是否存在与 PodMonitor 对应的采集任务(任务名称为 'demo/qdrant-cluster-pod-monitor')。 + +预期状态: +- 目标状态应为 UP +- 目标标签应包含 podTargetLabels 中定义的标签(如 'app_kubernetes_io_instance') + +### 2. 测试指标采集 +验证指标是否被正确采集: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="qdrant-cluster"}' | jq +``` + +示例输出: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "qdrant-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "qdrant", + "apps_kubeblocks_io_pod_name": "qdrant-cluster-qdrant-3", + "container": "qdrant", + "endpoint": "tcp-qdrant", + "instance": "10.244.0.64:6333", + "job": "kubeblocks", + "namespace": "demo", + "pod": "qdrant-cluster-qdrant-3" + }, + "value": [ + 1747583924.040, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "qdrant-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "qdrant", + "apps_kubeblocks_io_pod_name": "qdrant-cluster-qdrant-0", + "container": "qdrant", + "endpoint": "tcp-qdrant", + "instance": "10.244.0.62:6333", + "job": "kubeblocks", + "namespace": "demo", + "pod": "qdrant-cluster-qdrant-0" + }, + "value": [ + 1747583924.040, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "qdrant-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "qdrant", + "apps_kubeblocks_io_pod_name": "qdrant-cluster-qdrant-2", + "container": "qdrant", + "endpoint": "tcp-qdrant", + "instance": "10.244.0.60:6333", + "job": "kubeblocks", + "namespace": "demo", + "pod": "qdrant-cluster-qdrant-2" + }, + "value": [ + 1747583924.040, + "1" + ] + } + ] + } +} +``` +## Grafana 可视化 + +### 1. 访问 Grafana +端口转发并登录: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +浏览器访问 http://localhost:3000,使用默认凭证登录: +- 用户名: 'admin' +- 密码: 'prom-operator' (默认) + +### 2. 导入仪表板 +导入 KubeBlocks Qdrant 仪表板: + +1. 在 Grafana 中导航至 "+" → "Import" +2. 
选择以下任一方式: + - 粘贴仪表板 URL: + `https://raw.githubusercontent.com/apecloud/kubeblocks-addons/main/addons/qdrant/dashboards/qdrant-overview.json` + - 或直接上传 JSON 文件 + +![qdrant-monitoring-grafana-dashboard.png](/img/docs/en/qdrant-monitoring-grafana-dashboard.png) + + +## 清理资源 +执行以下命令删除所有创建的资源: +```bash +kubectl delete cluster qdrant-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor qdrant-cluster-pod-monitor -n demo +``` + +## 总结 +本教程演示了如何在 KubeBlocks 中使用 Prometheus Operator 为 Qdrant 集群建立可观测性方案。通过配置 `PodMonitor`,我们实现了 Prometheus 对 Qdrant 导出器指标的采集,最终在 Grafana 中实现了指标可视化。该方案为监控 Qdrant 数据库的健康状态和性能表现提供了重要洞察。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/_category_.yml new file mode 100644 index 00000000..02550e32 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 监控 +position: 8 diff --git a/docs/zh/preview/kubeblocks-for-qdrant/_category_.yml b/docs/zh/preview/kubeblocks-for-qdrant/_category_.yml new file mode 100644 index 00000000..a1e0d1fd --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for Qdrant 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_category_.yml new file mode 100644 index 00000000..82d8374c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..8e8950d5 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_create-cluster.mdx @@ -0,0 +1,42 @@ +KubeBlocks 采用声明式方法来管理 Qdrant 集群。 +以下是部署一个包含 3 个副本的 Qdrant 集群的示例配置。 + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: qdrant-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: qdrant + topology: cluster + componentSpecs: + - name: qdrant + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +注: +1. 配置中 `terminationPolicy: Delete` 表示删除集群时会同时删除相关资源 +2. `topology: cluster` 指定了集群拓扑模式 +3. 每个 Qdrant 副本申请了 0.5 核 CPU 和 0.5Gi 内存资源 +4. 
通过 `volumeClaimTemplates` 为每个 Pod 声明了 20Gi 的持久卷存储 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..03c78b3c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +在继续之前,请确保满足以下条件: +- 环境准备: + - 已有一个运行中的 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处提供的安装指南操作。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..21c12ded --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-qdrant/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +监控集群状态直至其转为 Running(运行中)状态: +```bash +kubectl get cluster qdrant-cluster -n demo -w +``` + +预期输出: + +```bash +kubectl get cluster qdrant-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +qdrant-cluster qdrant Delete Creating 49s +qdrant-cluster qdrant Delete Running 62s +``` + +检查 Pod 状态及其角色: +```bash +kubectl get pods -l app.kubernetes.io/instance=qdrant-cluster -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +qdrant-cluster-qdrant-0 2/2 Running 0 1m43s +qdrant-cluster-qdrant-1 2/2 Running 0 1m28s +qdrant-cluster-qdrant-2 2/2 Running 0 1m14s +``` + +当集群状态显示为 Running 时,您的 Qdrant 集群即准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/01-overview.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/01-overview.mdx new file mode 100644 index 00000000..0b6b21d4 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/01-overview.mdx @@ -0,0 +1,57 @@ +--- +description: 了解KubeBlocks RabbitMQ插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本。 +keywords: +- RabbitMQ +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks RabbitMQ 插件概述 +--- +# KubeBlocks RabbitMQ 插件概述 + +RabbitMQ 是一款开源轻量级的消息代理,支持多种消息协议。 + +## 核心特性 + +### 生命周期管理 + +KubeBlocks 通过全面的生命周期管理简化 RabbitMQ 运维: + +| 特性 | 描述 | +|------------------------------|-----------------------------------------------------------------------------| +| **水平扩展** | 增减副本来调整容量 | +| **垂直扩展** | 调整 RabbitMQ 实例的 CPU/内存资源 | +| **存储卷扩容** | 动态增加存储容量而无需停机 | +| **重启操作** | 以最小影响进行受控的集群重启 | +| **启动/停止** | 临时暂停/恢复集群操作 | +| **密码管理** | 可在创建时为 RabbitMQ 集群设置和管理自定义 root 密码 | +| **自定义服务** | 暴露专用的数据库端点 | +| **副本管理** | 安全地停用或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 放置和资源分配 | +| **监控** | 集成的 Prometheus 指标收集 | +| **日志** | 通过 Loki Stack 实现集中式日志收集 | + +### 支持版本 + +KubeBlocks RabbitMQ 插件支持以下 RabbitMQ 版本: + +| 主版本 | 支持的次版本 | +|---------------|--------------------------------| +| 3.8 | 3.8.14| +| 3.9 | 3.9.29| +| 3.10 | 3.10.25| +| 3.11 | 3.11.28| +| 3.12 | 3.12.14| +| 3.13 | 3.13.2, 3.13.7| +| 4.0 | 4.0.9| + +可通过以下命令查看支持的版本列表: +```bash +kubectl get cmpv rabbitmq +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/02-quickstart.mdx new file mode 100644 index 00000000..309574da --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/02-quickstart.mdx @@ -0,0 +1,481 @@ +--- +description: 
使用KubeBlocks部署和管理RabbitMQ副本集集群的完整指南,涵盖安装、配置及运维最佳实践。 +keywords: +- Kubernetes +- RabbitMQ +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: RabbitMQ 快速入门 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# RabbitMQ 快速入门 + +本指南提供了使用 **KubeBlocks RabbitMQ 插件** 部署和管理 RabbitMQ 副本集集群的完整指引,内容包括: +- 系统前提条件与插件安装 +- 集群创建与配置 +- 运维管理(包括启停流程) +- 连接方式与集群监控 + +## 前提条件 + +### 系统要求 + +开始前请确保您的环境满足以下要求: + +- 可用的 Kubernetes 集群(推荐 v1.21+ 版本) +- 已安装并配置好集群访问权限的 `kubectl` v1.21+ +- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/)) +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +### 验证 RabbitMQ 插件 + +RabbitMQ 插件默认随 KubeBlocks 一同安装。检查其状态: + +```bash +helm list -n kb-system | grep rabbitmq +``` + +
+示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-rabbitmq kb-system 1 2025-05-21 deployed rabbitmq-1.0.0 +``` +
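若已安装 `kbcli`,也可以通过插件列表进行交叉确认(示例命令,输出格式可能随 kbcli 版本略有差异):

```bash
kbcli addon list | grep rabbitmq
```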
+ +若插件未启用,请选择以下安装方式: + + + + + ```bash + # 添加 Helm 仓库 + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # 中国大陆用户若 GitHub 访问困难,可使用以下镜像仓库: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # 更新 Helm 仓库 + helm repo update + # 查询可用插件版本 + helm search repo kubeblocks/rabbitmq --versions + # 安装指定版本(将替换为目标版本号) + helm upgrade -i kb-addon-rabbitmq kubeblocks-addons/rabbitmq --version -n kb-system + ``` + + + + + ```bash + # 添加索引(kubeblocks 索引默认已添加) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # 更新索引 + kbcli addon index update kubeblocks + # 更新所有索引 + kbcli addon index update --all + ``` + + 插件搜索与安装: + + ```bash + # 搜索插件 + kbcli addon search rabbitmq + # 安装指定版本插件(将替换为目标版本号) + kbcli addon install rabbitmq --version + ``` + **示例输出:** + ```bash + ADDON VERSION INDEX + rabbitmq 0.9.0 kubeblocks + rabbitmq 0.9.1 kubeblocks + rabbitmq 1.0.0 kubeblocks + ``` + 插件启用/停用: + + ```bash + # 启用插件 + kbcli addon enable rabbitmq + # 停用插件 + kbcli addon disable rabbitmq + ``` + + + + +:::note +**版本兼容性说明** + +请始终确保 RabbitMQ 插件版本与 KubeBlocks 主版本相匹配,以避免兼容性问题。 + +::: + +### 验证支持的 RabbitMQ 版本 + +**列出可用 RabbitMQ 版本:** + +```bash +kubectl get cmpv rabbitmq +``` +
+示例输出 +```text +NAME VERSIONS STATUS AGE +rabbitmq 4.0.9,3.13.7,3.13.2,3.12.14,3.11.28,3.10.25,3.9.29,3.8.14 Available 26d +``` +
+ +**检查 ComponentDefinitions 的版本兼容性** + +**步骤 1.** 获取与指定 `ComponentVersion` 关联的 `ComponentDefinition` 列表 + +```bash +kubectl get cmpv rabbitmq -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+示例输出 +```text +rabbitmq-1.0.0 +``` +
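如需查看当前环境中所有与 RabbitMQ 相关的 `ComponentDefinition` 资源,也可以按名称筛选(示例命令):

```bash
kubectl get componentdefinition | grep rabbitmq
```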
+ +**步骤 2.** 获取与指定 `ComponentDefinition` 兼容的版本列表 + +```bash +kubectl get cmpv rabbitmq -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("rabbitmq"))) | .releases[]' +``` + +该命令返回与名为 `rabbitmq` 的 `ComponentDefinition` 兼容的版本: + +
+示例输出 +```text +4.0.9 +3.13.7 +3.13.2 +3.12.14 +3.11.28 +3.10.25 +3.9.29 +3.8.14 +``` +
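上述兼容版本号可直接填入集群定义中的 `spec.componentSpecs.serviceVersion` 字段,例如以下片段(对应本文后续部署示例中的写法):

```yaml
componentSpecs:
  - name: rabbitmq
    serviceVersion: 3.13.7   # 必须是上述兼容版本列表中的版本
```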
+ +### 存储配置 + +RabbitMQ 需要持久化存储。请检查可用存储选项: + +```bash +kubectl get storageclass +``` + +推荐存储特性: +- 最小 20Gi 容量 +- ReadWriteOnce 访问模式 +- 支持存储卷扩容 +- 满足工作负载的性能需求 + +## 部署 RabbitMQ 集群 + +使用默认配置部署基础版 RabbitMQ 集群: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/rabbitmq/cluster.yaml +``` + +该操作将创建: +- 包含 3 个副本的 RabbitMQ 集群 +- 默认资源分配(0.5 CPU,0.5Gi 内存) +- 20Gi 持久化存储 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: rabbitmq-cluster + namespace: demo +spec: + # 定义集群删除时的行为策略 + # 可选值:[DoNotTerminate, Delete, WipeOut](KB 0.9 起弃用 `Halt`) + # - `DoNotTerminate`:阻止集群删除,确保所有资源保留完整 + # - `Delete`:在 `Halt` 策略基础上增加 PVC 清理,实现包括持久化数据在内的彻底清除 + # - `WipeOut`:激进策略,将删除包括外部存储中的卷快照和备份在内的所有集群资源,导致数据完全不可恢复。应谨慎使用,建议仅在非生产环境执行 + terminationPolicy: Delete + # 指定创建集群时使用的 ClusterDefinition 名称 + # 注意:禁止修改此字段 + # 必须设置为 `rabbitmq` 才能创建 RabbitMQ 集群 + clusterDef: rabbitmq + # 指定创建集群时采用的 ClusterTopology + # 可选值:[clustermode] + topology: clustermode + componentSpecs: + - name: rabbitmq + # 指定该组件需要部署的服务版本 + # 可选版本:[3.10.25,3.11.28,3.12.14,3.13.2,3.13.7,3.8.14,3.9.29] + serviceVersion: 3.13.7 + # 按需调整副本数 + # 推荐值:[3,5,7] + replicas: 3 + # 定义组件所需的计算资源 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + # 定义组件存储需求的持久卷声明模板列表 + volumeClaimTemplates: + # 对应 componentDefinition.spec.runtime.containers[*].volumeMounts 中定义的挂载卷名称 + - name: data + spec: + # 声明所需的存储类名称 + # 若未指定,默认使用标注了 storageclass.kubernetes.io/is-default-class=true 的存储类 + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # 按需设置存储容量 + storage: 20Gi +``` + +更多 API 字段说明请参阅 [API 参考文档](../user_docs/references/api-reference/cluster)。 + +### 创建指定版本的 RabbitMQ 集群 + +在应用配置前,通过设置 `spec.componentSpecs.serviceVersion`(主版本.次版本)字段可创建特定版本的集群: + + + + ```yaml + componentSpecs: + - name: rabbitmq + serviceVersion: 4.0.9 + ``` + + + ```yaml + componentSpecs: + - name: rabbitmq + serviceVersion: 3.13.7 # 可选版本:[3.13.7,3.13.2,3.12.14,3.11.28,3.10.25,3.9.29,3.8.14] + ``` + + + +:::重要说明 + +RabbitMQ 需要具备 `peer discovery` 角色来创建事件和获取端点信息,这是发现其他节点并组建集群的关键机制。 + +KubeBlocks 在部署 RabbitMQ 集群时会自动创建具有相应权限(Roles)的服务账户(SA)。 + +::: + +## 验证集群状态 + +当部署一个包含3个副本的RabbitMQ集群时,请通过以下方式确认部署成功: + +1. 集群状态为`Running` +2. 
所有Pod均正常运行 + +可通过以下任一方式检查状态: + + + +```bash +kubectl get cluster rabbitmq-cluster -n demo -w +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +rabbitmq-cluster rabbitmq Delete Creating 27s +rabbitmq-cluster rabbitmq Delete Running 64s + +kubectl get pods -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 92s +rabbitmq-cluster-rabbitmq-1 2/2 Running 0 77s +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 63s +``` + + + + + 若已安装`kbcli`,可查看完整的集群信息: + +```bash +kbcli cluster describe rabbitmq-cluster -n demo + +名称: rabbitmq-cluster 创建时间: 2025年5月18日 23:05 UTC+0800 +命名空间 集群定义 拓扑模式 状态 终止策略 +demo rabbitmq clustermode Running Delete + +访问端点: +组件 内部地址 外部地址 +rabbitmq rabbitmq-cluster-rabbitmq.demo.svc.cluster.local:5672 <无> + rabbitmq-cluster-rabbitmq.demo.svc.cluster.local:15672 + +拓扑结构: +组件 服务版本 实例名称 角色 状态 可用区 节点 创建时间 +rabbitmq 3.17.7 rabbitmq-cluster-rabbitmq-0 <无> Running zone-x x.y.z 2025年5月18日 23:05 UTC+0800 +rabbitmq 3.17.7 rabbitmq-cluster-rabbitmq-1 <无> Running zone-x x.y.z 2025年5月18日 23:06 UTC+0800 +rabbitmq 3.17.7 rabbitmq-cluster-rabbitmq-2 <无> Running zone-x x.y.z 2025年5月18日 23:06 UTC+0800 + +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +rabbitmq 500m / 500m 512Mi / 512Mi data:20Gi <无> + +镜像信息: +组件 组件定义 镜像 +rabbitmq rabbitmq-1.0.0 docker.io/library/rabbitmq:3.13.7-management + +数据保护: +备份仓库 自动备份 备份计划 备份方法 备份保留期 可恢复时间 + +查看集群事件: kbcli cluster list-events -n demo rabbitmq-cluster +``` + + + + +## 访问 RabbitMQ 管理控制台 + +**获取凭证** +用户名和密码存储在名为 `--account-` 的集群 Secret 中。本例中 Secret 名称为 `rabbitmq-cluster-rabbitmq-account-root`。 + +```bash +# 获取用户名 +NAME=$(kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.username}' | base64 -d) +# 获取密码 +PASSWD=$(kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.password}' | base64 -d) +``` + +**端口转发服务** + +```bash +kubectl port-forward svc/rabbitmq-cluster-rabbitmq -ndemo 15672:15672 +``` + +**访问管理控制台** +使用获取的用户名和密码,通过 `http://:/` 地址登录 RabbitMQ 管理控制台。 + +## 停止 RabbitMQ 集群 + +停止集群会暂时暂停运行,同时保留所有数据和配置: + +**关键影响:** +- 计算资源(Pod)将被释放 +- 持久化存储(PVC)保持完整 +- 服务定义得以保留 +- 集群配置不会丢失 +- 运营成本降低 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/rabbitmq/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-stop + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: Stop + ``` + + + + 也可以通过设置 `spec.componentSpecs.stop` 为 true 来停止集群: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + } + ]' + ``` + + ```yaml + spec: + componentSpecs: + - name: rabbitmq + stop: true # 设置为 true 以停止组件 + replicas: 3 + ``` + + + +## 启动 RabbitMQ 集群 + +重启已停止的集群将恢复所有数据和配置,继续正常运行。 + +**关键影响:** +- 计算资源(Pod)会被重新创建 +- 服务将再次可用 +- 集群恢复到之前的状态 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/rabbitmq/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-start + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: Start + ``` + + + + 通过将 `spec.componentSpecs.stop` 设置为 false 来重启集群: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + +## 删除 RabbitMQ 集群 
+ +请根据数据保留需求谨慎选择删除策略: + +| 策略类型 | 删除的资源 | 数据清除情况 | 适用场景 | +|-----------------|---------------------|--------------------|------------------------| +| DoNotTerminate | 无 | 无 | 关键生产集群 | +| Delete | 所有资源 | PVC存储卷被删除 | 非关键环境 | +| WipeOut | 所有资源 | 全部数据* | 仅限测试环境 | + +*包含外部存储中的快照和备份 + +**删除前检查清单:** +1. 确认没有应用正在使用该集群 +2. 确保已存在必要的备份 +3. 验证terminationPolicy配置正确 +4. 检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster rabbitmq-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster rabbitmq-cluster -n demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..686c63c4 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,286 @@ +--- +description: 了解如何在KubeBlocks中管理RabbitMQ集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- RabbitMQ +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: RabbitMQ 集群生命周期管理(停止、启动、重启) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# RabbitMQ 集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 RabbitMQ 集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运维成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|--------------------------| +| 停止 | 暂停集群,保留存储 | 成本节约、维护窗口 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重建组件 Pod | 配置变更、故障排查 | + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 RabbitMQ 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 RabbitMQ 集群将: + +1. 终止所有运行中的 Pod +2. 保留持久化存储(PVC) +3. 维持集群配置 + +此操作适用于: +- 临时节省成本 +- 维护窗口期 +- 开发环境暂停 + + + + + +选项 1:使用 OpsRequest API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: rabbitmq-cluster-stop-ops + namespace: demo +spec: + clusterName: rabbitmq-cluster + type: Stop +``` + + + + +选项 2:使用 Cluster API 补丁 + +通过修改 stop 字段直接调整集群规格: + +```bash +kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +} +]' +``` + + + + + +### 验证集群停止 + +确认停止操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Stopping 6m3s + rabbitmq-cluster rabbitmq Delete Stopped 6m55s + ``` + +2. 验证无运行中的 Pod: + ```bash + kubectl get pods -l app.kubernetes.io/instance=rabbitmq-cluster -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +3. 确认持久卷仍然存在: + ```bash + kubectl get pvc -l app.kubernetes.io/instance=rabbitmq-cluster -n demo + ``` + 示例输出: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE + data-rabbitmq-cluster-rabbitmq-0 Bound pvc-uuid 20Gi RWO 22m + data-rabbitmq-cluster-rabbitmq-1 Bound pvc-uuid 20Gi RWO 21m + data-rabbitmq-cluster-rabbitmq-2 Bound pvc-uuid 20Gi RWO 21m + ``` + +### 启动集群 + +启动已停止的 RabbitMQ 集群将: +1. 重新创建所有 Pod +2. 重新挂载持久化存储 +3. 
恢复服务端点 + +预期行为: +- 集群恢复到之前状态 +- 不会发生数据丢失 +- 服务自动恢复 + + + + +发起启动操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: rabbitmq-cluster-start-ops + namespace: demo +spec: + # 指定此操作目标集群资源的名称 + clusterName: rabbitmq-cluster + type: Start +``` + + + + + +修改集群规格以恢复运行: +1. 设置 stop: false,或 +2. 完全移除 stop 字段 + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + } + ]' + ``` + + + + + +### 验证集群启动 + +确认启动操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Updating 24m + rabbitmq-cluster rabbitmq Delete Running 24m + rabbitmq-cluster rabbitmq Delete Running 24m + ``` + +2. 验证 Pod 重建: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster + ``` + 示例输出: + ```bash + NAME READY STATUS RESTARTS AGE + rabbitmq-cluster-rabbitmq-0 2/2 Running 0 55s + rabbitmq-cluster-rabbitmq-1 2/2 Running 0 44s + rabbitmq-cluster-rabbitmq-2 2/2 Running 0 33s + ``` + +### 重启集群 + +重启操作提供: +- 无需完全停止集群即可重建 Pod +- 组件级粒度控制 +- 最小化服务中断 + +适用场景: +- 需要重启的配置变更 +- 资源刷新 +- 故障排查 + +**使用 OpsRequest API** + +针对特定组件 `rabbitmq` 进行重启: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: rabbitmq-cluster-restart-ops + namespace: demo +spec: + clusterName: rabbitmq-cluster + type: Restart + restart: + - componentName: rabbitmq +``` + +**验证重启完成** + +确认组件重启成功: + +1. 跟踪 OpsRequest 进度: + ```bash + kubectl get opsrequest rabbitmq-cluster-restart-ops -n demo -w + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 0/3 4s + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 1/3 28s + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 2/3 56s + rabbitmq-cluster-restart-ops Restart rabbitmq-cluster Running 2/3 109s + ``` + +2. 检查 Pod 状态: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster + ``` + 注意:重启后 Pod 将显示新的创建时间戳 + +3. 验证组件健康状态: + ```bash + kbcli cluster describe rabbitmq-cluster -n demo + ``` + +操作完成后,集群将返回 Running 状态。 + +## 总结 +在本指南中,您学会了如何: +1. 停止 RabbitMQ 集群以暂停运行,同时保留持久化存储 +2. 启动已停止的集群使其重新上线 +3. 
重启特定集群组件以重建其 Pod,而无需停止整个集群 + +通过管理 RabbitMQ 集群的生命周期,您可以优化资源利用率、降低成本并在 Kubernetes 环境中保持灵活性。KubeBlocks 提供了执行这些操作的无缝方式,确保高可用性和最小化中断。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..9f0f9b6d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,177 @@ +--- +description: 了解如何在KubeBlocks管理的RabbitMQ集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- RabbitMQ +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: RabbitMQ 集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks对RabbitMQ集群进行垂直扩缩容 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的RabbitMQ集群进行垂直扩缩容,同时保持副本数量不变。 + +垂直扩缩容会修改RabbitMQ实例的计算资源(CPU和内存)但保持副本数不变。主要特点: + +- **无中断性**:正确配置时可在扩缩容期间保持可用性 +- **精细化**:可独立调整CPU、内存或两者 +- **可逆性**:可根据需求进行扩容或缩容 + +KubeBlocks通过遵循受控的、角色感知的更新策略确保扩缩容操作影响最小: +**角色感知副本(主/从副本)** +- 从副本优先更新 - 非主节点Pod先升级以最小化影响 +- 主副本最后更新 - 仅当所有从副本健康后才重启主Pod +- 集群状态在所有副本稳定后从"更新中"转为"运行中" + +**无角色副本(基于序号的扩缩容)** +若副本未定义角色,更新遵循Kubernete Pod序号顺序: +- 最高序号优先(如pod-2 → pod-1 → pod-0)以确保确定性滚动更新 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署RabbitMQ集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 垂直扩缩容 + +**预期工作流程**: + +1. Pod按序号从高到低顺序更新(如pod-2 → pod-1 → pod-0) +1. 集群状态从"更新中"转为"运行中" + + + + 选项1:使用VerticalScaling OpsRequest + + 应用以下YAML为rabbitmq组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-vscale-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: VerticalScaling + verticalScaling: + - componentName: rabbitmq + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + + 可通过以下命令查看扩缩容操作进度: + + ```bash + kubectl -n demo get ops rabbitmq-cluster-vscale-ops -w + ``` + + 预期结果: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 0/3 32s + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 1/3 55s + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 2/3 82s + rabbitmq-cluster-vscale-ops VerticalScaling rabbitmq-cluster Running 3/3 2m13s + ``` + + + + + + 选项2:直接更新Cluster API + + 也可通过更新`spec.componentSpecs.resources`字段来调整垂直扩缩容资源。 + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 3 + resources: + requests: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + limits: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + ... + ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 在维护窗口或低流量时段进行扩缩容 +- 确认Kubernetes集群有足够资源 +- 开始前检查是否有其他操作在进行 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证QoS + +**扩缩容后:** +- 监控资源利用率和应用性能 +- 根据需要调整RabbitMQ参数 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe rabbitmq-cluster -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +rabbitmq 1 / 1 1Gi / 1Gi data:20Gi +``` + +## KubeBlocks垂直扩缩容的核心优势 +- 无缝扩缩容:按特定顺序重建Pod确保最小影响 +- 动态资源调整:根据工作负载需求轻松调整CPU和内存 +- 灵活性:可选择OpsRequest动态扩缩容或直接API更新精确控制 +- 提升可用性:扩缩过程中集群保持运行,维持高可用性 + +## 清理 +删除所有创建的资源,包括RabbitMQ集群及其命名空间: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +本指南中您学习了如何: +1. 
部署KubeBlocks管理的RabbitMQ集群 +2. 通过增减rabbitmq组件资源进行垂直扩缩容 +3. 使用OpsRequest和直接Cluster API更新两种方式调整资源分配 + +垂直扩缩容是优化资源利用率和适应工作负载变化的强大工具,可确保RabbitMQ集群保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..a9ad376b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,240 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的RabbitMQ集群执行水平扩缩容(扩容与缩容)。 +keywords: +- KubeBlocks +- RabbitMQ +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks实现RabbitMQ集群的水平扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks实现RabbitMQ集群水平扩缩容 + +本指南介绍如何对KubeBlocks管理的RabbitMQ集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用**OpsRequest**和直接修改**Cluster API**两种方式实现这一目标。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署RabbitMQ集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + +## 扩容(增加副本数) + +**预期工作流程**: + +1. 新Pod被创建,状态从`Pending`转变为`Running` +2. 集群状态从`Updating`转变为`Running` + +:::note + +RabbitMQ的仲裁队列基于**Raft共识算法**设计。 +建议保持奇数个副本(如3、5、7个),以避免在集群扩缩容后出现脑裂情况。 + +::: + + + + + + + 选项1:使用水平扩容OpsRequest + + 为rabbitmq组件增加1个副本实现集群扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-scale-out-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: rabbitmq + # 指定组件扩容的副本变化 + scaleOut: + # 指定组件的副本变化 + # 当前组件增加1个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops rabbitmq-cluster-scale-out-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-scale-out-ops HorizontalScaling rabbitmq-cluster Running 0/1 9s + rabbitmq-cluster-scale-out-ops HorizontalScaling rabbitmq-cluster Running 1/1 16s + rabbitmq-cluster-scale-out-ops HorizontalScaling rabbitmq-cluster Succeed 1/1 16s + ``` + + + + + 选项2:直接修改Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 4 # 增加副本数实现扩容 + ... + ``` + + 或者使用命令修补集群CR: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 4}]' + ``` + + + +### 验证扩容结果 + +操作完成后,您将看到新Pod被创建,RabbitMQ集群状态从`Updating`转变为`Running`,且新创建的Pod具有`secondary`角色。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 6m24s +rabbitmq-cluster-rabbitmq-1 2/2 Running 0 7m19s +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 5m57s +rabbitmq-cluster-rabbitmq-3 2/2 Running 0 3m54s +``` + +## 缩容(减少副本数) + +**预期工作流程**: + +1. 选择序号最大的副本进行移除 +3. Pod被优雅终止 +4. 
集群状态从`Updating`转变为`Running` + + + + + + 选项1:使用水平缩容OpsRequest + + 为rabbitmq组件减少1个副本实现集群缩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-scale-in-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: rabbitmq + # 指定组件缩容的副本变化 + scaleIn: + # 指定组件的副本变化 + # 当前组件减少1个副本 + replicaChanges: 1 + ``` + + 监控操作进度: + ```bash + kubectl get ops rabbitmq-cluster-scale-in-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-scale-in-ops HorizontalScaling rabbitmq-cluster Running 0/1 8s + rabbitmq-cluster-scale-in-ops HorizontalScaling rabbitmq-cluster Running 1/1 24s + rabbitmq-cluster-scale-in-ops HorizontalScaling rabbitmq-cluster Succeed 1/1 24s + ``` + + + + + 选项2:直接修改Cluster API + + 您也可以直接修改Cluster资源中的`replicas`字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 2 # 减少副本数实现缩容 + ``` + + 或者使用命令修补集群CR: + + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 2}]' + ``` + + + + +### 验证缩容结果 + +示例输出(保留1个Pod): +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 18m +``` + +## 最佳实践 + +执行水平扩缩容时: +- 尽可能选择业务低峰期进行操作 +- 扩缩容过程中监控集群健康状态 +- 扩容前确保有足够的资源 +- 考虑新副本的存储需求 + +## 清理资源 +删除RabbitMQ集群及其命名空间以清除所有创建的资源: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中您学会了: +- 执行扩容操作为RabbitMQ集群增加副本 +- 执行缩容操作为RabbitMQ集群减少副本 +- 使用OpsRequest和直接修改Cluster API两种方式进行水平扩缩容 + +KubeBlocks确保您的数据库操作在最小影响下实现无缝扩缩容。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/04-volume-expansion.mdx new file mode 100644 index 00000000..6be8f194 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/04-volume-expansion.mdx @@ -0,0 +1,219 @@ +--- +description: 了解如何在KubeBlocks管理的RabbitMQ集群中无停机扩展持久卷声明(PVC)。 +keywords: +- KubeBlocks +- RabbitMQ +- Volume Expansion +- Kubernetes +- PVC +sidebar_label: 存储卷扩容 +sidebar_position: 4 +title: 扩展 RabbitMQ 集群的存储卷 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# RabbitMQ 集群存储卷扩容指南 + +本文档介绍如何在 **KubeBlocks** 管理的 RabbitMQ 集群中扩展持久卷声明(PVC)。存储卷扩容功能允许动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持时,此操作可在不中断服务的情况下执行。 + +存储卷扩容允许您在创建持久卷声明(PVC)后增加其容量大小。该功能在 Kubernetes v1.11 中引入,并在 v1.24 版本正式发布(GA)。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### 检查存储类是否支持扩容 + +列出所有可用存储类,通过检查 `ALLOWVOLUMEEXPANSION` 字段确认是否支持卷扩容: +```bash +kubectl get storageclass +``` + +示例输出: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +请确保您使用的存储类 `ALLOWVOLUMEEXPANSION` 值为 true。若为 false,则表示该存储类不支持卷扩容。 + +## 使用支持扩容的存储类部署 RabbitMQ 集群 + +KubeBlocks 采用声明式方式管理 RabbitMQ 集群。以下是部署 3 副本 RabbitMQ 集群的配置示例。 + +应用以下 YAML 配置部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: rabbitmq-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: rabbitmq + topology: cluster + componentSpecs: + - 
name: rabbitmq + serviceVersion: 1.10.0 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩容的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `storageClassName`: 指定支持卷扩容的存储类名称。若未设置,将使用标记为 default 的 StorageClass。 + +:::note +**ALLOWVOLUMEEXPANSION 注意事项** + +创建集群时,请确保所用存储类支持卷扩容(检查 `ALLOWVOLUMEEXPANSION` 字段)。 + +::: + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 执行存储卷扩容 + +:::note +1. 确保存储类支持卷扩容(检查 `ALLOWVOLUMEEXPANSION`) +2. 新容量必须大于当前容量 +3. 根据存储提供商不同,卷扩容可能需要额外配置 +::: + +可通过以下两种方式扩容存储卷: + + + + + 方法一:使用 VolumeExpansion OpsRequest + + 应用以下 YAML 为 rabbitmq 组件扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-expand-volume-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: VolumeExpansion + volumeExpansion: + - componentName: rabbitmq + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 通过以下命令监控扩容进度: + + ```bash + kubectl describe ops rabbitmq-cluster-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 操作完成后,PVC 容量将更新。 + + :::note + 若使用的存储类不支持扩容,OpsRequest 将快速失败并提示类似信息: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 方法二:直接修改 Cluster API + + 您也可以直接更新 `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` 字段: + + ```yaml + componentSpecs: + - name: rabbitmq + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量(必须大于当前值) + storage: 30Gi + ``` + KubeBlocks 将根据新配置自动更新 PVC 容量。 + + + +## 验证扩容结果 + +检查更新后的集群配置: +```bash +kbcli cluster describe rabbitmq-cluster -n demo +``` +预期输出: +```bash +资源分配情况: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +rabbitmq 500m / 500m 512Mi / 512Mi data:30Gi +``` +data PVC 的存储容量已更新为指定值(本例中为 30Gi)。 + +确认 PVC 扩容完成: +```bash +kubectl get pvc -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` +预期输出: +```bash +名称 状态 卷名 容量 访问模式 存储类 创建时间 +rabbitmq-cluster-rabbitmq-data-0 Bound pvc-uuid 30Gi RWO 33m +rabbitmq-cluster-rabbitmq-data-1 Bound pvc-uuid 30Gi RWO 33m +rabbitmq-cluster-rabbitmq-data-2 Bound pvc-uuid 30Gi RWO 33m +``` + +## 清理资源 +删除 RabbitMQ 集群及相关命名空间以释放所有资源: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## 总结 + +通过本指南您已学习: +1. 如何验证存储类对卷扩容的支持情况 +2. 两种扩容方法: + - 使用 OpsRequest 进行动态更新 + - 通过 Cluster API 手动更新 +3. 
如何验证 PVC 新容量及确认扩容操作完成 + +借助存储卷扩容功能,您可以高效扩展 RabbitMQ 集群的存储容量而无需服务中断,确保数据库能够随应用需求同步增长。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..3e5640bc --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,330 @@ +--- +description: 了解如何在KubeBlocks中通过负载均衡器(LoadBalancer)及其他服务类型配置和管理RabbitMQ服务,实现内外部访问。 +keywords: +- KubeBlocks +- RabbitMQ +- LoadBalancer +- External Service +- Expose +- Kubernetes +sidebar_label: 管理RabbitMQ服务 +sidebar_position: 5 +title: 使用KubeBlocks声明式集群API创建与销毁RabbitMQ服务 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用 KubeBlocks 声明式集群 API 管理 RabbitMQ 服务 + +本指南提供了逐步操作说明,指导如何对外部和内部暴露由 KubeBlocks 管理的 RabbitMQ 服务。您将学习如何: +- 使用云服务提供商的 LoadBalancer 服务配置外部访问 +- 管理内部服务 +- 在不再需要时正确禁用外部暴露功能 + + + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 部署 RabbitMQ 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + + + +## 查看网络服务 +列出为 RabbitMQ 集群创建的服务: +```bash +kubectl get service -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +示例服务输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +rabbitmq-cluster-rabbitmq ClusterIP 10.96.6.67 5672/TCP,15672/TCP 33m +``` + +## 暴露 RabbitMQ 服务 + +外部服务地址允许公网访问 RabbitMQ,而内部服务地址将访问限制在用户的 VPC 内。 + +### 服务类型对比 + +| 类型 | 使用场景 | 云成本 | 安全性 | +|------|----------|------------|----------| +| ClusterIP | 内部服务通信 | 免费 | 最高 | +| NodePort | 开发测试 | 低 | 中等 | +| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 | + + + + + + 选项1:使用 OpsRequest + + 要通过 LoadBalancer 将 RabbitMQ 服务暴露到外部,创建一个 OpsRequest 资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: rabbitmq-cluster + expose: + - componentName: rabbitmq + services: + - name: internet + # 决定服务如何暴露。默认为 'ClusterIP'。 + # 有效选项为 'ClusterIP'、'NodePort' 和 'LoadBalancer'。 + serviceType: LoadBalancer + ports: + - name: managment + port: 15672 + targetPort: management + # 如果 ServiceType 是 LoadBalancer,则包含云提供商相关参数。 + # 以下是 AWS EKS 的示例 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 或 "true" 表示内部 VPC IP + switch: Enable + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops rabbitmq-cluster-expose-enable-ops -n demo + ``` + + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-expose-enable-ops Expose rabbitmq-cluster Succeed 1/1 31s + ``` + + + + + + 选项2:使用 Cluster API + + 或者,更新 Cluster 资源中的 `spec.services` 部分以包含 LoadBalancer 服务: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: rabbitmq-cluster + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: rabbitmq + # 暴露一个外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 或 "true" 表示内部 VPC IP + componentSelector: rabbitmq + name: rabbitmq-internet + serviceName: rabbitmq-internet + spec: # 定义 K8s 服务的行为。 + ipFamilyPolicy: PreferDualStack + ports: + - name: tcp-rabbitmq + # 暴露的端口 + port: 15672 # 端口 15672 用于 rabbitmq 管理控制台 + protocol: TCP + targetPort: management + type: 
LoadBalancer + componentSpecs: + ... + ``` + 上述 YAML 配置在 services 部分添加了一个新的外部服务。此 LoadBalancer 服务包含 AWS 网络负载均衡器 (NLB) 的注解。 + + :::note + 云提供商注解 + + 使用 LoadBalancer 服务时,必须包含特定于云提供商的适当注解。以下是不同云提供商的常用注解列表: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 使用 "false" 表示面向互联网的 LoadBalancer + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 使用 "false" 表示面向互联网的 LoadBalancer + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # 将 LoadBalancer 限制为仅内部 VPC 访问。默认情况下,如果未指定,则为面向互联网。 + cloud.google.com/l4-rbs: "enabled" # 面向互联网的 LoadBalancer 的优化 + ``` + + - 阿里云 + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 使用 "intranet" 表示内部面向的 LoadBalancer + ``` + ::: + + + :::note + `service.beta.kubernetes.io/aws-load-balancer-internal` 注解控制 LoadBalancer 是内部还是面向互联网的。请注意,此注解在服务创建后无法动态修改。 + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 使用 "true" 表示内部 VPC IP + ``` + 如果在服务创建后将此注解从 "false" 更改为 "true",注解可能会在服务对象中更新,但 LoadBalancer 仍将保留其公共 IP。 + + 要正确修改此行为: + - 首先,删除现有的 LoadBalancer 服务。 + - 使用更新的注解重新创建服务 (`service.beta.kubernetes.io/aws-load-balancer-internal`: "true")。 + - 等待新的 LoadBalancer 配置正确的内部或外部 IP。 + ::: + + + 使用以下命令等待 Cluster 状态变为 Running: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Running 18m + ``` + + + + +### 验证暴露的服务 +检查服务详情以确认 LoadBalancer 服务已创建: + +```bash +kubectl get service -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +示例输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +rabbitmq-cluster-rabbitmq-internet LoadBalancer 172.20.60.24 15672:31243/TCP 1m +``` + +## 访问 RabbitMQ 管理控制台 + +**获取凭证** + +KubeBlocks 会自动创建一个包含 RabbitMQ 管理员凭证的 Secret。通过以下命令获取凭证: +```bash +NAME=`kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo rabbitmq-cluster-rabbitmq-account-root -o jsonpath='{.data.password}' | base64 -d` +``` + +**访问管理控制台** + +使用获取的用户名和密码,通过 `http://:/` 地址登录 RabbitMQ 管理控制台。 + +## 禁用外部暴露 + + + + + + 选项一:使用 OpsRequest + + 要禁用外部访问,创建一个 OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-expose-disable-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + expose: + - componentName: rabbitmq + services: + - name: internet + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops rabbitmq-cluster-expose-disable-ops -n demo + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + rabbitmq-cluster-expose-disable-ops Expose rabbitmq-cluster Succeed 1/1 24s + ``` + + + + + + 选项二:使用 Cluster API + + 或者,从 Cluster 资源中移除 `spec.services` 字段: + ```bash + kubectl patch cluster rabbitmq-cluster -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + 监控集群状态直到变为 Running: + ```bash + kubectl get cluster rabbitmq-cluster -n demo -w + ``` + + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + rabbitmq-cluster rabbitmq Delete Running 44m + ``` + + + +### 验证服务移除 + +确保 'rabbitmq-cluster-rabbitmq-internet' 服务已被移除: + +```bash +kubectl get service -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + 
+预期结果:'rabbitmq-cluster-rabbitmq-internet' 服务应被移除。 + +## 清理资源 +要删除所有已创建的资源,请执行以下命令删除RabbitMQ集群及其所在的命名空间: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +``` + +## 概述 +本指南演示了如何: +- 使用 KubeBlocks 对外或对内暴露 RabbitMQ 服务 +- 通过云服务商特定注解配置负载均衡器服务 +- 通过 OpsRequest 或直接更新 Cluster API 来管理外部访问,实现服务的启用或禁用 + +KubeBlocks 为 Kubernetes 环境中的 RabbitMQ 服务管理提供了灵活且简化的解决方案。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..711b18f0 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,149 @@ +--- +description: 了解如何对由KubeBlocks管理的RabbitMQ集群中特定Pod执行下线(停用)操作。 +keywords: +- KubeBlocks +- RabbitMQ +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 下线 RabbitMQ 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的RabbitMQ集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 下线 KubeBlocks 管理的 RabbitMQ 集群中的特定 Pod + +本指南介绍如何在 KubeBlocks 管理的 RabbitMQ 集群中下线(停用)特定 Pod。下线操作可在保持可用性的同时实现对集群资源的精确控制,适用于工作负载再平衡、节点维护或故障处理等场景。 + +## 为什么选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法下线特定 Pod。StatefulSet 会确保 Pod 的顺序和身份标识,缩容操作总是移除序号最高的 Pod(例如从 3 个副本缩容时,会优先移除 `Pod-2`)。这一限制导致无法精确控制要下线的 Pod,使得维护、工作负载分配或故障处理变得复杂。 + +KubeBlocks 通过允许管理员直接下线特定 Pod 来突破这一限制。这种细粒度控制既能确保高可用性,又能在不中断整个集群的情况下实现更好的资源管理。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 RabbitMQ 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 下线 Pod + +**预期工作流程**: +1. 从 `onlineInstancesToOffline` 指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转为 `Running` + +在下线组件中的特定 Pod 前,请确保该组件拥有多个副本。若不符合条件,请先进行扩容操作。 + +例如,可以通过以下命令修改集群 CR,将 querynode 组件的副本数声明为 3: + +```bash +kubectl patch cluster milvus-cluster -n demo --type='json' -p='[ + { + "op": "replace", + "path": "/spec/componentSpecs/2/replicas", + "value": 3 + } +]' +``` + +要下线特定 Pod(如 'rabbitmq-cluster-rabbitmq-1'),可采用以下任一方法: + + + + + + 方法一:使用 OpsRequest + + 创建 OpsRequest 将 Pod 标记为下线状态: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: rabbitmq-cluster-decommission-ops + namespace: demo + spec: + clusterName: rabbitmq-cluster + type: HorizontalScaling + horizontalScaling: + - componentName: rabbitmq + scaleIn: + onlineInstancesToOffline: + - 'rabbitmq-cluster-rabbitmq-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 查看下线操作的执行状态: + + ```bash + kubectl get ops rabbitmq-cluster-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +rabbitmq-cluster-decommission-ops HorizontalScaling rabbitmq-cluster Running 0/1 8s +rabbitmq-cluster-decommission-ops HorizontalScaling rabbitmq-cluster Running 1/1 31s +rabbitmq-cluster-decommission-ops HorizontalScaling rabbitmq-cluster Succeed 1/1 31s + ``` + + + + + + 方法二:使用 Cluster API + + 也可直接更新 Cluster 资源来下线指定 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: rabbitmq + replicas: 2 # 下线后的预期副本数 + offlineInstances: + - rabbitmq-cluster-rabbitmq-1 # <----- 指定待下线的 Pod + ... 
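        # 注:本示例假设集群原有 3 个副本;将 rabbitmq-cluster-rabbitmq-1
        # 列入 offlineInstances 后,replicas 需同步调整为下线后的期望值 2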
+ ``` + + + + +### 验证下线结果 + +应用更新配置后,检查集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=rabbitmq-cluster +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 25m +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 24m +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制 +- KubeBlocks 支持定向 Pod 下线 +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保持可用性的同时,提供了细粒度的集群管理能力。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..17aee427 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,226 @@ +--- +description: 了解如何在KubeBlocks中通过Prometheus Operator为RabbitMQ集群配置可观测性。设置监控并通过Grafana实现指标可视化。 +keywords: +- KubeBlocks +- RabbitMQ +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: RabbitMQ 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 RabbitMQ 集群的可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 RabbitMQ + +本指南演示如何在 KubeBlocks 中为 RabbitMQ 集群配置全面的监控方案: + +1. 使用 Prometheus Operator 进行指标采集 +2. 通过内置 RabbitMQ exporter 暴露指标 +3. 使用 Grafana 实现可视化监控 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 安装监控组件栈 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 RabbitMQ 集群 + +import CreateCluster from '../_tpl/_create-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-cluster.mdx' + + + +## 配置指标采集 + +### 1. 验证 Exporter 端点 + +```bash +# 端口转发 +kubectl -n demo port-forward pods/rabbitmq-cluster-rabbitmq-0 15692:15692 +# 检查指标 +curl -s http://127.0.0.1:15692/metrics | head -n 50 +``` + +### 2. 
创建 PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: rabbitmq-cluster-pod-monitor + namespace: demo + labels: # 必须与 'prometheus.spec.podMonitorSelector' 中的设置匹配 + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # 定义从关联的 Kubernetes Pod 对象传输到采集指标的标签 + # 根据实际需求设置标签 + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: prometheus # 必须与 exporter 端口名称匹配 + scheme: http + namespaceSelector: + matchNames: + - demo # 目标命名空间 + selector: + matchLabels: + app.kubernetes.io/instance: rabbitmq-cluster +``` +**PodMonitor 配置指南** + +| 参数 | 必填 | 说明 | +|-----------|----------|-------------| +| `port` | 是 | 必须与 exporter 端口名称 ('http-metrics') 匹配 | +| `namespaceSelector` | 是 | 指定 RabbitMQ 运行的命名空间 | +| `labels` | 是 | 必须与 Prometheus 的 podMonitorSelector 匹配 | +| `path` | 否 | 指标端点路径 (默认: /metrics) | +| `interval` | 否 | 采集间隔 (默认: 30s) | + +## 验证监控配置 + +### 1. 检查 Prometheus 采集目标 +端口转发并访问 Prometheus UI: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +浏览器访问: +http://localhost:9090/targets + +检查是否存在与 PodMonitor 对应的采集任务(任务名应为 'demo/rabbitmq-cluster-pod-monitor')。 + +预期状态: +- 目标状态应为 UP +- 目标标签应包含 podTargetLabels 中定义的标签(如 'app_kubernetes_io_instance') + +### 2. 测试指标采集 +验证指标是否被正确采集: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=up{app_kubernetes_io_instance="rabbitmq-cluster"}' | jq +``` + +示例输出: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "rabbitmq-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "rabbitmq", + "apps_kubeblocks_io_pod_name": "rabbitmq-cluster-rabbitmq-0", + "container": "rabbitmq", + "endpoint": "prometheus", + "instance": "10.244.0.78:15692", + "job": "kubeblocks", + "namespace": "demo", + "pod": "rabbitmq-cluster-rabbitmq-0" + }, + "value": [ + 1747622160.396, + "1" + ] + }, + { + "metric": { + "__name__": "up", + "app_kubernetes_io_instance": "rabbitmq-cluster", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "rabbitmq", + "apps_kubeblocks_io_pod_name": "rabbitmq-cluster-rabbitmq-1", + "container": "rabbitmq", + "endpoint": "prometheus", + "instance": "10.244.0.80:15692", + "job": "kubeblocks", + "namespace": "demo", + "pod": "rabbitmq-cluster-rabbitmq-1" + }, + "value": [ + 1747622160.396, + "1" + ] + } + ] + } +} +``` +## Grafana 可视化 + +### 1. 访问 Grafana +端口转发并登录: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +浏览器访问 http://localhost:3000,使用默认凭据登录: +- 用户名: 'admin' +- 密码: 'prom-operator' (默认) + +### 2. 导入仪表板 +导入 KubeBlocks RabbitMQ 仪表板: + +1. 在 Grafana 中导航至 "+" → "Import" +2. 
从 [Grafana RabbitMQ-Overview](https://grafana.com/grafana/dashboards/10991-rabbitmq-overview/) 导入仪表板 + +![rabbitmq-monitoring-grafana-dashboard.png](/img/docs/en/rabbitmq-monitoring-grafana-dashboard.png) + +## 清理资源 +执行以下命令删除所有创建的资源: +```bash +kubectl delete cluster rabbitmq-cluster -n demo +kubectl delete ns demo +kubectl delete podmonitor rabbitmq-cluster-pod-monitor -n demo +``` + +## 总结 +本教程演示了如何在 KubeBlocks 中使用 Prometheus Operator 为 RabbitMQ 集群建立可观测性方案。通过配置 `PodMonitor`,我们实现了 Prometheus 对 RabbitMQ exporter 指标的采集,最终在 Grafana 中实现了指标可视化。该方案为监控 RabbitMQ 数据库的健康状态和性能表现提供了重要洞察。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml new file mode 100644 index 00000000..02550e32 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 监控 +position: 8 diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/_category_.yml b/docs/zh/preview/kubeblocks-for-rabbitmq/_category_.yml new file mode 100644 index 00000000..b3485d4e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for RabbitMQ 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_category_.yml new file mode 100644 index 00000000..82d8374c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx new file mode 100644 index 00000000..c00fbadd --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_create-cluster.mdx @@ -0,0 +1,36 @@ +KubeBlocks 采用声明式方式管理 RabbitMQ 集群。以下是一个部署包含 3 个副本的 RabbitMQ 集群的配置示例。 + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: rabbitmq-cluster + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: rabbitmq + topology: clustermode + componentSpecs: + - name: rabbitmq + serviceVersion: 3.13.7 + replicas: 3 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..69249862 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +在继续之前,请确保满足以下条件: +- 环境准备: + - 已有一个运行中的 Kubernetes 集群。 + - 已配置 kubectl CLI 工具与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。具体安装步骤请参考链接指引。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx new file mode 100644 index 00000000..be87eac7 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-rabbitmq/_tpl/_verify-cluster.mdx @@ -0,0 +1,33 @@ +监控集群状态直至其转为 
Running(运行中)状态: +```bash +kubectl get cluster rabbitmq-cluster -n demo -w +``` + +预期输出: + +```bash +kubectl get cluster rabbitmq-cluster -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +rabbitmq-cluster rabbitmq Delete Creating 15s +rabbitmq-cluster rabbitmq Delete Running 83s +``` + +检查 Pod 状态及其角色: +```bash +kubectl get pods -l app.kubernetes.io/instance=rabbitmq-cluster -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +rabbitmq-cluster-rabbitmq-0 2/2 Running 0 106s +rabbitmq-cluster-rabbitmq-1 2/2 Running 0 82s +rabbitmq-cluster-rabbitmq-2 2/2 Running 0 47s +``` + +当集群状态显示为 Running 时,表示您的 RabbitMQ 集群已准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/01-overview.mdx b/docs/zh/preview/kubeblocks-for-redis/01-overview.mdx new file mode 100644 index 00000000..ca4fa377 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/01-overview.mdx @@ -0,0 +1,72 @@ +--- +description: 了解KubeBlocks Redis插件的功能特性,包括部署拓扑、生命周期管理、备份恢复以及支持的版本。 +keywords: +- Redis +- KubeBlocks +- database +- features +- lifecycle management +- backup +- restore +sidebar_label: 概述 +sidebar_position: 1 +title: KubeBlocks Redis 插件概述 +--- +# KubeBlocks Redis 插件概述 + +Redis 是一款开源(BSD 许可)的内存数据结构存储系统,可用作数据库、缓存和消息代理。本文展示如何通过 KubeBlocks 在 Kubernetes 中管理 Redis。 + +## 核心特性 + +### 支持拓扑架构 + +| 拓扑类型 | 数据分布 | 扩展性 | 高可用性 | 适用场景 | +|----------------|------------------|--------------|---------------|-----------------------------| +| **单机模式** | 单节点存储 | 不支持 | 不支持 | 开发测试环境、小型数据集 | +| **主从复制**(含哨兵) | 主从节点数据复制 | 读扩展 | 支持 | 读密集型场景、需要数据冗余 | +| **集群模式** | 分片存储 | 读写扩展 | 支持 | 大型数据集、高并发生产环境 | + +### 生命周期管理 + +KubeBlocks 提供完善的 Redis 生命周期管理能力: + +| 功能 | 说明 | +|---------------------------|---------------------------------------------------------------------| +| **水平扩展** | 动态增减副本数量以调整容量 | +| **垂直扩展** | 调整 Redis 实例的 CPU/内存资源 | +| **存储卷扩容** | 无需停机动态扩展存储容量 | +| **重启操作** | 以最小影响执行受控集群重启 | +| **启停控制** | 临时暂停/恢复集群运行 | +| **密码管理** | 创建时可设置并管理 Redis 集群的 root 密码 | +| **动态配置** | 无需重启即可修改 Redis 参数 | +| **自定义服务** | 暴露专用数据库访问端点 | +| **主从切换** | 计划内的主从角色变更 | +| **副本管理** | 安全下线或重建特定副本 | +| **版本升级** | 无缝执行次版本升级 | +| **高级调度** | 自定义 Pod 部署位置和资源分配 | +| **TLS 加密** | 启用/禁用传输层安全协议 | +| **监控指标** | 集成 Prometheus 指标采集 | +| **日志收集** | 通过 Loki Stack 实现集中式日志管理 | + +### 备份与恢复 + +KubeBlocks 支持多种 Redis 备份策略: + +| 功能 | 方式 | 说明 | +|-------------|--------|------------| +| 全量备份 | 数据文件 | 使用 `redis-cli BGSAVE` 命令备份数据 | +| 持续备份 | AOF 文件 | 通过归档 Append-Only 文件实现持续增量备份 | + +### 支持版本 + +KubeBlocks Redis 插件支持以下版本: + +| 主版本 | 支持的次版本 | +|--------|--------------------| +| 7.0 | 7.0.6 | +| 7.2 | 7.2.4, 7.2.7 | + +可通过以下命令查询支持的完整版本列表: +```bash +kubectl get cmpv redis +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/02-quickstart.mdx b/docs/zh/preview/kubeblocks-for-redis/02-quickstart.mdx new file mode 100644 index 00000000..da34a98e --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/02-quickstart.mdx @@ -0,0 +1,561 @@ +--- +description: KubeBlocks 部署与管理 Redis 复制集群的完整指南,涵盖安装、配置及运维最佳实践。 +keywords: +- Kubernetes +- Redis +- KubeBlocks +- Helm +- Cluster Management +- QuickStart +sidebar_label: 快速入门 +sidebar_position: 2 +title: Redis 快速入门 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis 快速入门 + +本指南将全面介绍如何使用 **KubeBlocks Redis 插件** 部署和管理 Redis 复制集群,内容包括: +- 系统前提条件与插件安装 +- 集群创建与配置 +- 启停操作管理 +- 连接方式与集群监控 + +## 前提条件 + +### 系统要求 + +开始前请确保环境满足以下要求: + +- 可用的 Kubernetes 集群(推荐 v1.21+ 版本) +- 已安装配置 `kubectl` v1.21+ 并具备集群访问权限 +- 
已安装 Helm([安装指南](https://helm.sh/docs/intro/install/)) +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks)) + +### 验证 Redis 插件 + +Redis 插件默认随 KubeBlocks 安装。检查其状态: + +```bash +helm list -n kb-system | grep redis +``` + +
+示例输出: + +```bash +NAME NAMESPACE REVISION UPDATED STATUS CHART +kb-addon-redis kb-system 1 2025-05-21 deployed redis-1.0.0 +``` +
+ +若插件未启用,可选择安装方式: + + + + + ```bash + # 添加 Helm 仓库 + helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + # 中国大陆用户若 GitHub 访问困难或缓慢,可使用此替代仓库: + #helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + + # 更新 Helm 仓库 + helm repo update + # 搜索可用插件版本 + helm search repo kubeblocks/redis --versions + # 安装指定版本(将 替换为所选版本) + helm upgrade -i kb-addon-redis kubeblocks-addons/redis --version -n kb-system + ``` + + + + + ```bash + # 添加索引(kubeblocks 默认已添加) + kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git + # 更新索引 + kbcli addon index update kubeblocks + # 更新所有索引 + kbcli addon index update --all + ``` + + 搜索并安装插件: + + ```bash + # 搜索插件 + kbcli addon search redis + # 安装指定版本插件(将 替换为所选版本) + kbcli addon install redis --version + ``` + **示例输出:** + ```bash + ADDON VERSION INDEX + redis 0.9.0 kubeblocks + redis 0.9.1 kubeblocks + redis 1.0.0 kubeblocks + ``` + 启用或禁用插件: + + ```bash + # 启用插件 + kbcli addon enable redis + # 禁用插件 + kbcli addon disable redis + ``` + + + + +:::note +**版本兼容性** + +请始终确保 Redis 插件版本与 KubeBlocks 主版本匹配以避免兼容性问题。 + +::: + +### 验证支持的 Redis 版本 + +**列出可用 Redis 版本:** + +```bash +kubectl get cmpv redis +``` +
+示例输出 +```text +NAME VERSIONS STATUS AGE +redis 7.2.7,7.2.4,7.0.6 Available 33d +``` +
+ +**检查 ComponentDefinitions 的版本兼容性** + +**步骤 1.** 获取与给定 `ComponentVersion` 关联的 `ComponentDefinition` 列表 + +```bash +kubectl get cmpv redis -ojson | jq -r '.metadata.annotations."componentversion.kubeblocks.io/compatible-definitions"' | tr ',' '\n' +``` + +
+示例输出 +```text +redis-7-1.0.0 +``` +

**步骤 2.** 获取与给定 `ComponentDefinition` 兼容的服务版本列表

```bash
kubectl get cmpv redis -o json | jq -r '.spec.compatibilityRules[] | select(.compDefs | any(startswith("redis-7"))) | .releases[]'
```

这将返回与名称以 `redis-7` 开头的 `ComponentDefinition`(例如 `redis-7-1.0.0`)兼容的版本:
+示例输出 +```text +7.2.7 +7.2.4 +7.0.6 +``` +
+ +### 存储配置 + +Redis 需要持久化存储。验证可用选项: + +```bash +kubectl get storageclass +``` + +推荐存储特性: +- 最小 20Gi 容量 +- ReadWriteOnce 访问模式 +- 支持存储卷扩展 +- 满足工作负载的性能需求 + + +## 部署 Redis 复制集群 + +使用默认配置部署基础 Redis 复制集群: + +```bash +kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/redis/cluster.yaml +``` + +此操作将创建: +- 包含两个组件的 Redis 复制集群:Redis(2 副本)和 Redis Sentinel(3 副本) +- 默认资源分配(0.5 CPU,0.5Gi 内存) +- 20Gi 持久化存储 +- 自动主从配置 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + # 指定删除 Cluster 时的行为 + # 有效选项:[DoNotTerminate, Delete, WipeOut](KB 0.9 起弃用 `Halt`) + # - `DoNotTerminate`:阻止删除 Cluster。此策略确保所有资源保持完整 + # - `Delete`:扩展 `Halt` 策略,同时移除 PVC,实现彻底清理并删除所有持久化数据 + # - `WipeOut`:激进策略,删除所有 Cluster 资源,包括外部存储中的卷快照和备份 + # 这将导致数据完全删除,应谨慎使用,主要在非生产环境以避免不可逆数据丢失 + terminationPolicy: Delete + # 指定创建 Cluster 时使用的 ClusterDefinition 名称 + # 注意:请勿更新此字段 + # 值必须为 `redis` 才能创建 Redis 集群 + clusterDef: redis + # 指定创建 Cluster 时使用的 ClusterTopology 名称 + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + # 决定是否在 Component 的无头服务上注解指标导出器信息 + # 有效选项:[true, false] + disableExporter: false + # 指定 Component 中期望的副本数 + replicas: 2 + # 指定 Component 所需的资源 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # 按需设置存储大小 + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + # 指定 Component 所需的资源 + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + # 按需设置存储大小 + storage: 20Gi +``` + +更多 API 字段和描述,请参阅 [API 参考](../user_docs/references/api-reference/cluster)。 + +### 创建特定版本的 Redis 复制集群 + +要创建指定版本的集群,在应用前配置 `spec.componentSpecs.serviceVersion`(主.次版本)字段: + + + + ```yaml + componentSpecs: + - name: redis + serviceVersion: 7.2.4 # 有效选项:[7.0.6, 7.2.4, 7.2.7] + ``` + + + +## 验证集群状态 + +当部署一个包含5个副本(2个Redis实例和3个Redis Sentinel实例)的Redis复制集群时: +- Redis运行时会包含1个主副本(支持读写操作)和1个从副本(仅支持读操作) + +通过以下检查确认部署成功: +1. 集群状态为`Running` +2. 所有Pod正常运行 +3. 
副本角色分配正确 + +可通过以下任一方式检查状态: + + + +```bash +kubectl get cluster redis-replication -n demo +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Running 3m49s + +kubectl get pods -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role -n demo +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 3m38s primary +redis-replication-redis-1 3/3 Running 0 3m16s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 4m35s +redis-replication-redis-sentinel-1 2/2 Running 0 4m17s +redis-replication-redis-sentinel-2 2/2 Running 0 3m59s +``` + + + + + 安装`kbcli`后,可查看完整的集群信息: + +```bash +kbcli cluster describe redis-replication -n demo + +名称: redis-replication 创建时间: 2025年5月17日 15:45 UTC+0800 +命名空间 集群定义 拓扑结构 状态 终止策略 +demo redis replication Running Delete + +端点: +组件 内部地址 外部地址 +redis redis-replication-redis-redis.demo.svc.cluster.local:6379 +redis-sentinel redis-replication-redis-sentinel-redis-sentinel.demo.svc.cluster.local:26379 + +拓扑结构: +组件 服务版本 实例名称 角色 状态 可用区 节点 创建时间 +redis 7.2.4 redis-replication-redis-0 primary Running zone-x x.y.z MM/DD +redis 7.2.4 redis-replication-redis-1 secondary Running zone-x x.y.z MM/DD +redis-sentinel 7.2.7 redis-replication-redis-sentinel-0 Running zone-x x.y.z MM/DD +redis-sentinel 7.2.7 redis-replication-redis-sentinel-1 Running zone-x x.y.z MM/DD +redis-sentinel 7.2.7 redis-replication-redis-sentinel-2 Running zone-x x.y.z MM/DD + +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +redis 500m / 500m 512Mi / 512Mi data:20Gi +redis-sentinel 500m / 500m 512Mi / 512Mi data:20Gi + +镜像: +组件 组件定义 镜像 +redis redis-7-1.0.0 docker.io/redis/redis-stack-server:7.2.0-v10 + docker.io/apecloud/agamotto:0.1.2-beta.1 + docker.io/redis/redis-stack-server:7.2.0-v14 +redis-sentinel redis-sentinel-7-1.0.0 docker.io/redis/redis-stack-server:7.2.0-v14 + +数据保护: +备份仓库 自动备份 备份计划 备份方法 备份保留期 可恢复时间 + +查看集群事件: kbcli cluster list-events -n demo redis-replication +``` + + + + + +## 访问Redis复制集群 + +KubeBlocks自动提供: +1. 凭证存储在Secret `redis-replication-redis-account-default`中 +2. ClusterIP服务 `redis-replication-redis-redis` + +### 获取凭证 +```bash +# 获取用户名 +NAME=$(kubectl get secret -n demo redis-replication-redis-account-default -o jsonpath='{.data.username}' | base64 --decode) +# 获取密码 +PASSWD=$(kubectl get secret -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 --decode) +``` + +### 连接方式 + + + + + 直接连接到Pod: + ```bash + kubectl exec -ti -n demo redis-replication-redis-0 -- \ + redis-cli -h redis-replication-redis-redis -a ${PASSWD} + ``` + + + + + 1. 转发服务端口: + ```bash + kubectl port-forward svc/redis-replication-redis-redis 6379:6379 -n demo + ``` + + 2. 
通过本地连接: + ```bash + redis-cli -h 127.0.0.1 -a ${PASSWD} + ``` + + + +:::note +**生产环境注意事项** + +生产环境中应避免使用`kubectl exec`和`port-forward`,建议采用: +- 通过LoadBalancer或NodePort服务提供外部访问 +- 使用网络策略限制访问 +- 启用TLS加密确保连接安全 +- 使用连接池提升性能 +::: + + +## 停止Redis复制集群 + +停止集群会暂时暂停操作,同时保留所有数据和配置: + +**关键影响:** +- 释放计算资源(Pod) +- 持久化存储(PVC)保持完整 +- 服务定义保留 +- 集群配置保留 +- 降低运营成本 + + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/redis/stop.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-stop + namespace: demo + spec: + clusterName: redis-replication + type: Stop + ``` + + + + 也可以通过设置`spec.componentSpecs.stop`为true来停止: + + ```bash + kubectl patch cluster redis-replication -n demo --type='json' -p='[ + { + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true + }, + { + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true + } + ]' + ``` + + ```yaml + spec: + componentSpecs: + - name: redis + stop: true # 设置为停止组件 + replicas: 2 + ``` + + + + +## 启动Redis复制集群 + +重启已停止的集群会恢复操作,所有数据和配置保持不变。 + +**关键影响:** +- 重新创建计算资源(Pod) +- 服务重新可用 +- 集群恢复到之前状态 + + + + ```bash + kubectl apply -f https://raw.githubusercontent.com/apecloud/kubeblocks-addons/refs/heads/main/examples/redis/start.yaml + ``` + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-start + namespace: demo + spec: + clusterName: redis-replication + type: Start + ``` + + + + 通过将`spec.componentSpecs.stop`设置为false来重启: + + ```bash + kubectl patch cluster redis-replication -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + } + ]' + ``` + + + + +## 删除 Redis 复制集群 + +请根据数据保留需求谨慎选择删除策略: + +| 策略 | 删除的资源 | 删除的数据 | 适用场景 | +|-----------------|---------------------|--------------------------|-----------------------------| +| DoNotTerminate | 无 | 无 | 关键生产集群 | +| Delete | 所有资源 | 删除PVC数据 | 非关键环境 | +| WipeOut | 所有资源 | 全部数据* | 仅限测试环境 | + +*包含外部存储中的快照和备份 + +**删除前检查清单:** +1. 确认没有应用正在使用该集群 +2. 确保已存在必要的备份 +3. 验证terminationPolicy设置正确 +4. 检查是否存在依赖资源 + +对于测试环境,可使用以下命令进行完整清理: + +```bash +kubectl patch cluster redis-replication -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -n demo +kubectl delete cluster redis-replication -n demo +``` + +## 为什么Redis Sentinel先于Redis启动 + +Redis Sentinel是Redis的高可用性解决方案,为Redis实例提供监控、通知和自动故障转移功能。 + +Redis组件中的每个副本在启动时,都会连接Redis Sentinel实例以获取当前的主从节点信息。它需要确定: +- 自身是否应作为主节点运行 +- 如果不是主节点,当前主节点是哪个以便进行复制 + +具体来说,每个Redis副本会执行以下操作: + +1. 检查现有主节点 + - 查询Redis Sentinel确认是否已选举出主节点 + - 获取主节点的地址和端口信息 +2. 必要时初始化为主节点 + - 如果未找到主节点(例如集群初始部署时),将当前Redis实例配置为主节点 + - 更新Redis配置禁用复制功能 +3. 
配置为从节点(如果主节点存在) + - 如果存在主节点,则将当前Redis实例设置为从节点 + - 在Redis配置中添加`replicaof`指令指向主节点地址和端口 + - 启动数据复制以从主节点同步数据 + +KubeBlocks确保Redis Sentinel优先启动,以便为Redis副本提供正确的初始化信息。这种依赖关系在KubeBlocks的CRD `ClusterDefinition`中有明确表达,确保了正确的启动顺序。 + +有关`replication`拓扑结构组件的启动和升级顺序的更多详情,可通过以下命令查看: + +```bash +kubectl get cd redis -oyaml | yq '.spec.topologies[] | select(.name=="replication") | .orders' +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/03-topologies/01-standlone.mdx b/docs/zh/preview/kubeblocks-for-redis/03-topologies/01-standlone.mdx new file mode 100644 index 00000000..db56fc94 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/03-topologies/01-standlone.mdx @@ -0,0 +1,95 @@ +--- +description: 了解如何使用KubeBlocks部署Redis单机模式集群。本指南涵盖配置、验证、故障转移测试及超时设置等内容。 +keywords: +- KubeBlocks +- Redis +- Kubernetes +- High Availability +sidebar_label: Redis 单机模式集群 +sidebar_position: 1 +title: 使用KubeBlocks部署Redis单机集群 +--- +# 使用 KubeBlocks 部署 Redis 单机集群 + +Redis 单机部署模式由独立运行的单个 Redis 服务器实例组成,不涉及任何复制或集群功能。这是最简单且最轻量级的部署模型。 + +**适用场景** +- 开发与测试环境 +- 低流量的小型应用 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 单机集群 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-standalone + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis # 设置为 redis + topology: standalone # 设置拓扑为 standalone + componentSpecs: + - name: redis + replicas: 1 # 设置副本数为 1 + serviceVersion: 7.2.4 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**关键配置说明**: +- `clusterDef: redis`:指定集群使用的 ClusterDefinition CR +- `topology: standalone`:配置集群使用单机拓扑 +- `componentSpecs`:定义集群中的组件: + - 'redis' 组件: + - `serviceVersion: 7.2.4`:指定要部署的 Redis 服务版本 + +## 验证部署 + +### 检查集群状态 +集群部署完成后,检查其状态: +```bash +kubectl get cluster redis-standalone -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-standalone redis Delete Running 34s +``` + +### 验证组件状态 +```bash +kubectl get component redis-standalone-redis -n demo +``` +预期输出: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +redis-standalone-redis redis-7-1.0.0 7.2.4 Running 90s +``` + +## 清理资源 +删除本教程中创建的所有资源: + +```bash +kubectl delete cluster redis-standalone -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/03-topologies/02-replication.mdx b/docs/zh/preview/kubeblocks-for-redis/03-topologies/02-replication.mdx new file mode 100644 index 00000000..c77995eb --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/03-topologies/02-replication.mdx @@ -0,0 +1,133 @@ +--- +description: 了解如何使用KubeBlocks部署Redis复制集群。本指南涵盖配置、验证、故障转移测试及超时设置等内容。 +keywords: +- KubeBlocks +- Redis +- Kubernetes +- High Availability +sidebar_label: Redis 复制集群 +sidebar_position: 1 +title: 使用KubeBlocks部署Redis复制集群 +--- +# 使用 KubeBlocks 部署 Redis 复制集群 + +Redis 复制(Replication)集群由一个处理写入的主节点(primary/master)和一个或多个从节点(replica/slave)组成,从节点通过复制主节点数据来实现读取扩展和故障转移。 + +**适用场景** +- 读密集型应用(如分析类工作负载) +- 需要高可用性的场景(配合 Redis Sentinel 实现自动故障转移) + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 复制集群 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: 
"7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +**关键配置说明**: +- `clusterDef: redis`:指定集群使用的 ClusterDefinition CR +- `topology: replication`:配置集群使用复制拓扑结构 +- `componentSpecs`:定义集群中的组件: + - 'redis' 组件: + - `serviceVersion: 7.2.4`:指定要部署的 Redis 服务版本 + - 'redis-sentinel' 组件: + - Redis Sentinel 是 Redis 的高可用性解决方案,建议部署 3 个副本以确保高可用性 + +## 验证部署 + +### 检查集群状态 +集群部署完成后,检查其状态: +```bash +kubectl get cluster redis-replication -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Running 66s +``` + +### 验证组件和 Pod 状态 +```bash +kubectl get component redis-replication-redis -n demo +``` +预期输出: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +redis-replication-redis redis-7-1.0.0 7.2.4 Running 90s +``` + +检查 Pod 及其角色 + +```bash +kubectl get pods -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 3m38s primary +redis-replication-redis-1 3/3 Running 0 3m16s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 4m35s +redis-replication-redis-sentinel-1 2/2 Running 0 4m17s +redis-replication-redis-sentinel-2 2/2 Running 0 3m59s +``` + +## 清理资源 +删除本教程中创建的所有资源: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/03-topologies/03-sharding.mdx b/docs/zh/preview/kubeblocks-for-redis/03-topologies/03-sharding.mdx new file mode 100644 index 00000000..592685f9 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/03-topologies/03-sharding.mdx @@ -0,0 +1,239 @@ +--- +description: 了解如何使用KubeBlocks部署Redis分片集群。本指南涵盖配置、验证、故障转移测试及超时设置等内容。 +keywords: +- KubeBlocks +- Redis +- Kubernetes +- High Availability +sidebar_label: Redis 分片集群 +sidebar_position: 1 +title: 使用KubeBlocks部署Redis分片集群 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 KubeBlocks 部署 Redis 分片集群(集群模式) + +Redis 集群通过基于哈希的分区机制将数据分布到多个节点(分片)上,实现读写能力的水平扩展。 + +**适用场景** +- 需要高吞吐量的大规模应用 +- 分布式缓存和会话存储 +- 写密集型工作负载(如实时分析) + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 分片集群 + +创建一个包含 3 个分片、每个分片 2 个副本的 Redis 分片集群(集群模式): + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-sharding + namespace: demo +spec: + terminationPolicy: Delete + shardings: + - name: shard # 分片名称 + shards: 3 # 为集群创建的分片数量 + template: + name: redis + componentDef: redis-cluster-7 # 每个分片使用的组件定义名称 + replicas: 2 # 每个分片的副本数量 + resources: + limits: + cpu: '1' + memory: 1Gi + requests: + cpu: '1' + memory: 1Gi + serviceVersion: 7.2.4 + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + services: + # 服务 `redis-advertised` 在 `ComponentDefinition` 中定义 + # 用于解析 Redis Pod 的广播端点 + - name: redis-advertised # 这是一个每 Pod 服务,用于解析广播端点 + podService: true + # - NodePort + # - LoadBalancer + serviceType: NodePort +``` + 
+**关键配置说明**: +- `shardings`: 指定 ShardingSpec 对象列表,用于配置集群组件的分片拓扑 +- `shards`: 指定为集群创建的分片数量 +- `serviceType`: 指定 `redis-advertised` 服务的类型,该服务用于解析 Redis Pod 的广播端点 + 默认服务类型为 `NodePort`。如需将服务暴露到集群外部,可根据需求将服务类型覆盖为 `NodePort` 或 `LoadBalancer` + +:::tip + +Redis 集群至少需要 **三个** 主节点来确保高可用性并防止数据不一致。 + +生产环境推荐的 Redis 集群通常至少包含六个节点:三个主节点用于分片和故障转移共识,三个副本节点作为每个主节点的备份。 + +创建或缩容 Redis 集群时,请确保 `shards` 参数值大于等于 **3**。 +::: + +## 验证部署 + +### 检查集群状态 +集群部署完成后,检查其状态: +```bash +kubectl get cluster redis-sharding -n demo -w +``` +预期输出: +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-sharding Delete Running 103s +``` + +### 验证组件和 Pod 状态 + +获取该集群的所有工作组件: +```bash +kubectl get cmp -l app.kubernetes.io/instance=redis-sharding -n demo +``` + +预期输出: +```bash +NAME DEFINITION SERVICE-VERSION STATUS AGE +redis-sharding-shard-5cd redis-cluster-7-1.0.0 7.2.4 Running 2m34s +redis-sharding-shard-drg redis-cluster-7-1.0.0 7.2.4 Running 2m34s +redis-sharding-shard-tgf redis-cluster-7-1.0.0 7.2.4 Running 2m34s +``` +每个组件代表一个分片,后缀为哈希 ID。 + +检查 Pod 及其角色: + +```bash +kubectl get pods -l app.kubernetes.io/instance=redis-sharding -L kubeblocks.io/role -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-sharding-shard-5cd-0 2/2 Running 0 3m55s primary +redis-sharding-shard-5cd-1 2/2 Running 0 3m35s secondary +redis-sharding-shard-drg-0 2/2 Running 0 3m53s primary +redis-sharding-shard-drg-1 2/2 Running 0 3m35s secondary +redis-sharding-shard-tgf-0 2/2 Running 0 3m54s primary +redis-sharding-shard-tgf-1 2/2 Running 0 3m36s secondary +``` +集群中共有六个副本,每个组件两个(一个主节点和一个从节点)。 + +## 分片扩缩容 + +### 扩展分片(增加分片) +**预期工作流程**: + +1. 新组件被创建,包含两个副本(一个主节点和一个从节点) +2. 当所有组件就绪(状态为 `Running`)时,集群状态从 `Updating` 变为 `Running` + + + + + + 选项 1:使用水平扩缩容 OpsRequest + + 将分片数量增加到 `4`,可使用以下 OpsRequest: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-sharding-scale-out-ops + namespace: demo + spec: + clusterName: redis-sharding + type: HorizontalScaling + horizontalScaling: + - componentName: shard + shards: 4 + ``` + + 监控扩缩容操作进度: + + ```bash + kubectl get ops redis-sharding-scale-out-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-sharding-scale-out-ops HorizontalScaling redis-sharding Running 0/1 35s + redis-sharding-scale-out-ops HorizontalScaling redis-sharding Succeed 1/1 2m35s + ``` + + + + + 选项 2:直接更新 Cluster API + + 也可以直接更新 Cluster 资源中的 `shards` 字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: shard + shards: 4 + # 其余字段与原集群 CR 相同,为简洁起见省略 + ... 
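        # 注:shards 字段定义在 spec.shardings 列表项之下,
        # 与下方 patch 命令中的路径 /spec/shardings/0/shards 相对应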
+ ``` + + 或使用命令修补集群 CR: + + ```bash + kubectl patch cluster redis-sharding -n demo --type=json -p='[{"op": "replace", "path": "/spec/shardings/0/shards", "value": 4}]' + ``` + + + + +与扩展类似,也可以通过减少 Cluster 资源中的 `shards` 字段来缩容集群。但请确保 `shards` 值大于等于 3。 + +## 主从切换 + +要对名为 `redis-sharding-shard-5cd` 的分片执行主从切换,可使用以下 OpsRequest: + +```yaml +kind: OpsRequest +metadata: + name: redis-sharding-switchover-ops + namespace: demo +spec: + clusterName: redis-sharding # redis-sharding 是集群名称 + switchover: + - componentObjectName: redis-sharding-shard-5cd # componentObjectName 是某个分片的名称 + candidateName: redis-sharding-shard-5cd-0 # candidateName 是候选实例名称 + instanceName: redis-sharding-shard-5cd-1 # instanceName 是主实例名称 + type: Switchover +``` + +:::note + +`componentObjectName` 是某个分片的名称,即组件对象的完整名称。 + +::: + +## 清理资源 +删除本教程创建的所有资源: + +```bash +kubectl delete cluster redis-sharding -n demo +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/03-topologies/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/03-topologies/_category_.yml new file mode 100644 index 00000000..8b10f9b7 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/03-topologies/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 拓扑结构 +position: 3 diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx new file mode 100644 index 00000000..ab8776a3 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/01-stop-start-restart.mdx @@ -0,0 +1,316 @@ +--- +description: 了解如何在KubeBlocks中管理Redis复制集群状态,包括停止、启动和重启操作,以优化资源使用。 +keywords: +- KubeBlocks +- Redis +- Cluster Management +- Stop +- Start +- Restart +sidebar_label: 生命周期管理 +sidebar_position: 1 +title: "Redis 复制集群生命周期管理(停止、启动、重启)" +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis 复制集群生命周期管理 + +本指南演示如何在 **KubeBlocks** 中管理 Redis 复制集群的运行状态,包括: + +- 停止集群以节省资源 +- 启动已停止的集群 +- 重启集群组件 + +这些操作有助于优化 Kubernetes 环境中的资源使用并降低运营成本。 + +KubeBlocks 中的生命周期管理操作: + +| 操作 | 效果 | 使用场景 | +|------------|--------------------------|--------------------------| +| 停止 | 暂停集群,保留存储 | 成本节约、维护窗口 | +| 启动 | 恢复集群运行 | 暂停后恢复服务 | +| 重启 | 重建组件 Pod | 配置变更、故障排查 | + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 复制集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 集群生命周期操作 + +### 停止集群 + +在 KubeBlocks 中停止 Redis 复制集群将: + +1. 终止所有运行中的 Pod +2. 保留持久化存储(PVC) +3. 保持集群配置 + +此操作适用于: +- 临时成本节约 +- 维护窗口 +- 开发环境暂停 + + + + + +选项 1:使用 OpsRequest API + +创建停止操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-stop-ops + namespace: demo +spec: + clusterName: redis-replication + type: Stop +``` + + + + +选项 2:使用 Cluster API 补丁 + +通过修改 stop 字段直接调整集群规格: + +```bash +kubectl patch cluster redis-replication -n demo --type='json' -p='[ +{ + "op": "add", + "path": "/spec/componentSpecs/0/stop", + "value": true +}, +{ + "op": "add", + "path": "/spec/componentSpecs/1/stop", + "value": true +} +]' +``` + + + + + +### 验证集群停止 + +确认停止操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Stopping 6m3s + redis-replication redis Delete Stopped 6m55s + ``` + +2. 
验证无运行中的 Pod: + ```bash + kubectl get pods -n demo + ``` + 示例输出: + ```bash + No resources found in demo namespace. + ``` + +3. 确认持久卷仍然存在: + ```bash + kubectl get pvc -n demo + ``` + 示例输出: + ```bash + NAME STATUS VOLUME CAPACITY ACCESS MODES + data-redis-replication-redis-0 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-1 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-sentinel-0 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-sentinel-1 Bound pvc-uuid 20Gi RWO + data-redis-replication-redis-sentinel-2 Bound pvc-uuid 20Gi RWO + ``` + +### 启动集群 + +启动已停止的 Redis 复制集群将: +1. 重新创建所有 Pod +2. 重新挂载持久化存储 +3. 恢复服务端点 + +预期行为: +- 集群恢复到之前状态 +- 不会发生数据丢失 +- 服务自动恢复 + + + + +发起启动操作请求: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-start-ops + namespace: demo +spec: + # 指定此操作目标集群资源的名称 + clusterName: redis-replication + type: Start +``` + + + + + +修改集群规格以恢复运行: +1. 设置 stop: false,或 +2. 完全移除 stop 字段 + + ```bash + kubectl patch cluster redis-replication -n demo --type='json' -p='[ + { + "op": "remove", + "path": "/spec/componentSpecs/0/stop" + }, + { + "op": "remove", + "path": "/spec/componentSpecs/1/stop" + } + ]' + ``` + + + + + +### 验证集群启动 + +确认启动操作成功: + +1. 检查集群状态转换: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + 示例输出: + ```bash + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Updating 22m + redis-replication redis Delete Running 22m + ``` + +2. 验证 Pod 重建: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication + ``` + 示例输出: + ```bash + NAME READY STATUS RESTARTS AGE + redis-replication-redis-0 1/1 Running 0 2m + redis-replication-redis-1 1/1 Running 0 1m + ``` + +3. 检查服务端点: + ```bash + kubectl get endpoints redis-replication-redis-redis -n demo + ``` + +### 重启集群 + +重启操作提供: +- 无需完全停止集群即可重建 Pod +- 组件级粒度控制 +- 最小化服务中断 + +使用场景: +- 需要重启的配置变更 +- 资源刷新 +- 故障排查 + +**使用 OpsRequest API** + +针对特定组件 `redis` 和 `redis-sentinel` 进行重启: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-restart-ops + namespace: demo +spec: + clusterName: redis-replication + type: Restart + restart: + - componentName: redis + - componentName: redis-sentinel +``` + +**验证重启完成** + +确认组件重启成功: + +1. 跟踪 OpsRequest 进度: + ```bash + kubectl get opsrequest redis-replication-restart-ops -n demo -w + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-restart-ops Restart redis-replication Running 0/2 10s + redis-replication-restart-ops Restart redis-replication Running 1/2 65s + redis-replication-restart-ops Restart redis-replication Running 2/2 2m5s + redis-replication-restart-ops Restart redis-replication Succeed 2/2 2m5s + ``` + +2. 检查 Pod 状态: + ```bash + kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication + ``` + 注意:重启后 Pod 将显示新的创建时间戳 + +3. 验证组件健康状态: + ```bash + kbcli cluster describe redis-replication -n demo + ``` + +操作完成后,集群将返回 Running 状态。 + +若只需重启 Redis 组件的 Pod,可以使用: +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-restart-redis + namespace: demo +spec: + clusterName: redis-replication + type: Restart + restart: + - componentName: redis +``` + +## 总结 +在本指南中,您学会了如何: +1. 停止 Redis 复制集群以暂停操作,同时保留持久化存储 +2. 启动已停止的集群使其重新上线 +3. 
重启特定集群组件以重建其 Pod 而无需停止整个集群 + +通过管理 Redis 复制集群的生命周期,您可以优化资源利用率、降低成本并在 Kubernetes 环境中保持灵活性。KubeBlocks 提供了执行这些操作的无缝方式,确保高可用性和最小化中断。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx new file mode 100644 index 00000000..e3bacdb3 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/02-vertical-scaling.mdx @@ -0,0 +1,181 @@ +--- +description: 了解如何在KubeBlocks管理的Redis复制集群中执行垂直扩展,以优化资源利用率并提升性能。 +keywords: +- KubeBlocks +- Redis +- Vertical Scaling +- Kubernetes +- Resources +sidebar_label: 垂直扩展 +sidebar_position: 2 +title: Redis 复制集群中的垂直扩展 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks对Redis复制集群进行垂直扩缩容 + +本指南演示如何通过调整计算资源(CPU和内存)对KubeBlocks管理的Redis复制集群进行垂直扩缩容,同时保持副本数量不变。 + +垂直扩缩容会修改Redis实例的计算资源(CPU和内存)同时维持副本数不变。主要特点: + +- **无中断性**:正确配置时可在扩缩容期间保持可用性 +- **精细化**:可独立调整CPU、内存或两者 +- **可逆性**:可根据需求进行扩容或缩容 + +KubeBlocks以最小影响协调扩缩容过程: +1. 从节点副本优先更新 +2. 主节点在所有从节点健康后最后更新 +3. 集群状态从`更新中`过渡到`运行中` + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Redis复制集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 垂直扩缩容 + +**预期工作流程**: + +1. 从节点副本优先更新(每次更新一个) +2. 主节点在所有从节点健康后最后更新 +3. 集群状态从`更新中`过渡到`运行中` + + + + 选项1:使用VerticalScaling OpsRequest + + 应用以下YAML为redis组件扩容资源: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-vscale-ops + namespace: demo + spec: + clusterName: redis-replication + type: VerticalScaling + verticalScaling: + - componentName: redis + requests: + cpu: '1' + memory: 1Gi + limits: + cpu: '1' + memory: 1Gi + ``` + 垂直扩缩容期间会发生什么? + - 从节点Pod优先重建以确保主节点Pod保持可用 + - 所有从节点Pod更新完成后,主节点Pod将以新资源配置重启 + + + 可通过以下命令查看扩缩容操作进度: + + ```bash + kubectl -n demo get ops redis-replication-vscale-ops -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-vscale-ops VerticalScaling redis-replication Running 0/2 11s + redis-replication-vscale-ops VerticalScaling redis-replication Running 1/2 36s + redis-replication-vscale-ops VerticalScaling redis-replication Running 2/2 52s + redis-replication-vscale-ops VerticalScaling redis-replication Running 2/2 52s + redis-replication-vscale-ops VerticalScaling redis-replication Succeed 2/2 52s + ``` + + + + + 选项2:直接更新Cluster API + + 也可通过更新`spec.componentSpecs.resources`字段实现垂直扩缩容。 + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + requests: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + limits: + cpu: "1" # 按需更新资源 + memory: "1Gi" # 按需更新资源 + ... 
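        # 补充注释(非原始配置的一部分):requests 与 limits 设置为相同值时,
        # Pod 可获得 Guaranteed QoS,有助于在节点资源紧张时降低被驱逐的风险
        # (参见下文"最佳实践与注意事项")。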
+ ``` + + + +## 最佳实践与注意事项 + +**规划阶段:** +- 在维护窗口或低流量时段进行扩缩容 +- 确认Kubernetes集群有足够资源 +- 开始前检查是否有其他进行中的操作 + +**执行阶段:** +- 保持CPU与内存的平衡比例 +- 设置相同的requests/limits以保证QoS + +**扩缩容后:** +- 监控资源利用率和应用性能 +- 根据需要调整Redis参数 + +## 验证 +通过检查集群配置或Pod详情验证更新后的资源: +```bash +kbcli cluster describe redis-replication -n demo +``` + +预期输出: +```bash +资源分配: +组件 实例模板 CPU(请求/限制) 内存(请求/限制) 存储大小 存储类 +redis 1 / 1 1Gi / 1Gi data:20Gi +redis-sentinel 500m / 500m 512Mi / 512Mi data:20Gi +``` + +仅Redis组件的资源被更新,redis-sentinel组件保持不变。 + +## KubeBlocks垂直扩缩容的核心优势 +- 无缝扩缩容:按特定顺序重建Pod确保最小中断 +- 动态资源调整:根据工作负载需求轻松调整CPU和内存 +- 灵活性:可选择动态扩缩容的OpsRequest或精确控制的直接API更新 +- 高可用性:扩缩容过程中集群保持运行状态 + +## 清理 +删除Redis复制集群及其命名空间以释放所有资源: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## 总结 +本指南中您学会了如何: +1. 部署KubeBlocks管理的Redis复制集群 +2. 通过增减redis组件的资源进行垂直扩缩容 +3. 使用OpsRequest和直接Cluster API更新两种方式调整资源分配 + +垂直扩缩容是优化资源利用率和适应工作负载变化的强大工具,可确保Redis复制集群始终保持高性能和弹性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx new file mode 100644 index 00000000..24652602 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/03-horizontal-scaling.mdx @@ -0,0 +1,289 @@ +--- +description: 了解如何通过OpsRequest和直接Cluster API更新,对KubeBlocks管理的Redis集群执行水平扩缩容(扩容与缩容)。 +keywords: +- KubeBlocks +- Redis +- Horizontal Scaling +- Scale-Out +- Scale-In +- Kubernetes +sidebar_label: 水平扩展 +sidebar_position: 3 +title: 使用KubeBlocks水平扩展Redis集群 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用 KubeBlocks 实现 Redis 集群的水平扩缩容 + +本指南介绍如何对 KubeBlocks 管理的 Redis 集群执行水平扩缩容(扩容和缩容)操作。您将学习如何使用 **OpsRequest** 和直接修改 **Cluster API** 两种方式来实现这一目标。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + +## 部署 Redis 复制集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + + +## 扩容(增加副本) + +**预期工作流程**: + +1. 新 Pod 被创建,状态从 `Pending` 转为 `Running`,角色为 `secondary` +2. 数据从主节点同步到新副本 +3. 集群状态从 `Updating` 变为 `Running` + + + + + + 选项一:使用水平扩容 OpsRequest + + 通过为 redis 组件增加 1 个副本来扩容 Redis 集群: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-scale-out-ops + namespace: demo + spec: + clusterName: redis-replication + type: HorizontalScaling + horizontalScaling: + - componentName: redis + # 指定组件扩容的副本变更数量 + scaleOut: + # 指定组件的副本变更数量 + # 为当前组件增加 1 个副本 + replicaChanges: 1 + ``` + + 监控扩容操作进度: + + ```bash + kubectl get ops redis-replication-scale-out-ops -n demo -w + ``` + + 预期结果: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-scale-out-ops HorizontalScaling redis-replication Running 0/1 9s + redis-replication-scale-out-ops HorizontalScaling redis-replication Running 1/1 20s + redis-replication-scale-out-ops HorizontalScaling redis-replication Succeed 1/1 20s + ``` + + + + + 选项二:直接更新 Cluster API + + 您也可以直接修改 Cluster 资源中的 `replicas` 字段: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 3 # 增加副本数量实现扩容 + ... 
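        # 补充注释(非原始配置的一部分):仅修改 replicas 即可触发扩容;
        # KubeBlocks 会以 secondary 角色创建新副本并自动从主节点同步数据
        # (见下文"验证扩容结果")。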
+ ```

  或者使用命令修补集群 CR:

  ```bash
  kubectl patch cluster redis-replication -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 3}]'
  ```



### 验证扩容结果

操作完成后,您将看到新 Pod 被创建,Redis 集群状态从 `Updating` 变为 `Running`,新建 Pod 的角色为 `secondary`。

```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication
```

示例输出(3 个 Pod):
```bash
NAME                                 READY   STATUS    RESTARTS   AGE
redis-replication-redis-0            3/3     Running   0          9m47s
redis-replication-redis-1            3/3     Running   0          10m
redis-replication-redis-2            3/3     Running   0          4m48s
redis-replication-redis-sentinel-0   2/2     Running   0          16m
redis-replication-redis-sentinel-1   2/2     Running   0          16m
redis-replication-redis-sentinel-2   2/2     Running   0          17m
```

新副本会自动作为从节点加入集群。
```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role
```

示例输出:
```bash
NAME                                 READY   STATUS    RESTARTS   AGE     ROLE
redis-replication-redis-0            3/3     Running   0          10m     secondary
redis-replication-redis-1            3/3     Running   0          11m     primary
redis-replication-redis-2            3/3     Running   0          5m27s   secondary
redis-replication-redis-sentinel-0   2/2     Running   0          17m
redis-replication-redis-sentinel-1   2/2     Running   0          17m
redis-replication-redis-sentinel-2   2/2     Running   0          17m
```


## 缩容(减少副本)

**预期工作流程**:

1. 移除序号最大的副本
2. 如果移除的是主副本,会先触发自动故障转移
3. Pod 被优雅终止
4. 集群状态从 `Updating` 变为 `Running`

:::note
如果被缩容的副本恰好是主副本,KubeBlocks 会触发故障转移操作。在该操作成功前,该 Pod 不会被终止。
:::



  选项一:使用水平缩容 OpsRequest

  通过减少 1 个副本来缩容 Redis 集群:

  ```yaml
  apiVersion: operations.kubeblocks.io/v1alpha1
  kind: OpsRequest
  metadata:
    name: redis-replication-scale-in-ops
    namespace: demo
  spec:
    clusterName: redis-replication
    type: HorizontalScaling
    horizontalScaling:
    - componentName: redis
      # 指定组件缩容的副本变更数量
      scaleIn:
        # 指定组件的副本变更数量
        # 从当前组件移除 1 个副本
        replicaChanges: 1
  ```

  监控进度:
  ```bash
  kubectl get ops redis-replication-scale-in-ops -n demo -w
  ```

  预期结果:
  ```bash
  NAME                             TYPE                CLUSTER             STATUS    PROGRESS   AGE
  redis-replication-scale-in-ops   HorizontalScaling   redis-replication   Running   0/1        8s
  redis-replication-scale-in-ops   HorizontalScaling   redis-replication   Running   1/1        24s
  redis-replication-scale-in-ops   HorizontalScaling   redis-replication   Succeed   1/1        24s
  ```



  选项二:直接更新 Cluster API

  您也可以直接修改 Cluster 资源中的 `replicas` 字段:

  ```yaml
  apiVersion: apps.kubeblocks.io/v1
  kind: Cluster
  spec:
    componentSpecs:
      - name: redis
        serviceVersion: "7.2.4"
        disableExporter: false
        replicas: 1 # 减少副本数量实现缩容
  ```

  或者使用命令修补集群 CR:

  ```bash
  kubectl patch cluster redis-replication -n demo --type=json -p='[{"op": "replace", "path": "/spec/componentSpecs/0/replicas", "value": 1}]'
  ```



### 验证缩容结果

示例输出(1 个 Pod):
```bash
kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication,apps.kubeblocks.io/component-name=redis
NAME                        READY   STATUS    RESTARTS   AGE
redis-replication-redis-0   3/3     Running   0          16m
```


## 故障排查
如果缩容操作长时间卡住,请检查以下资源:

```bash
# 检查当前主节点和候选节点的 agent 日志(将 <primary-pod> 与 <candidate-pod> 替换为实际 Pod 名称)
kubectl logs -n demo <primary-pod> -c kbagent
kubectl logs -n demo <candidate-pod> -c kbagent

# 检查集群事件中的错误
kubectl get events -n demo --field-selector involvedObject.name=redis-replication

# 检查 kubeblocks 日志
kubectl -n kb-system logs deploy/kubeblocks
```

如果从主副本收到如下错误:
```text
INFO Action Executed {"action": "switchover", "result": "exit code: 1: failed"}
INFO HTTP API Called {"user-agent": "Go-http-client/1.1", "method": "POST", "path": "/v1.0/action", "status code": 200, "cost": 7}
```

可能是故障转移错误,请检查 KubeBlocks 日志获取更多详情。

## 
最佳实践 + +进行水平扩展时: +- 尽可能选择低流量时段执行扩缩容操作 +- 扩缩容过程中持续监控集群健康状态 +- 扩容前确保有足够的资源供给新副本 +- 考虑新增副本的存储需求 + +## 清理资源 +要删除所有已创建的资源,请执行以下命令删除Redis集群及其命名空间: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## 总结 +本指南中您已学习到: +- 通过扩容操作为Redis集群添加副本 +- 通过缩容操作从Redis集群移除副本 +- 使用OpsRequest和直接Cluster API更新两种方式进行水平扩展 + +KubeBlocks能确保在最小化影响数据库服务的情况下实现无缝扩缩容。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx new file mode 100644 index 00000000..e1b0f042 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/04-volume-expansion.mdx @@ -0,0 +1,238 @@ +--- +description: 了解如何在KubeBlocks管理的Redis集群中无停机扩展持久卷声明(PVC)。 +keywords: +- KubeBlocks +- Redis +- Volume Expansion +- Kubernetes +- PVC +sidebar_label: 存储卷扩容 +sidebar_position: 4 +title: Redis 集群中扩展存储卷 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# Redis集群存储卷扩容指南 + +本文档详细说明如何在**KubeBlocks**管理的Redis集群中扩展持久卷声明(PVC)。存储卷扩容功能允许动态增加存储容量,使您的数据库能够随着数据增长无缝扩展。当底层存储类支持此功能时,该操作可在不中断服务的情况下执行。 + +存储卷扩容允许您在创建持久卷声明(PVC)后增加其容量大小。该功能在Kubernetes v1.11中引入,并于Kubernetes v1.24版本正式发布(GA)。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +### 检查存储类是否支持扩容 + +列出所有可用存储类,通过检查`ALLOWVOLUMEEXPANSION`字段确认是否支持卷扩容: +```bash +kubectl get storageclass +``` + +示例输出: +```bash +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 4d10h +kb-default-sc ebs.csi.aws.com Delete WaitForFirstConsumer true 3d7h +sc-s3-repo-2qsxfh ru.yandex.s3.csi Retain Immediate false 3d7h +``` +请确保您使用的存储类将`ALLOWVOLUMEEXPANSION`设置为true。若为false,则表示该存储类不支持卷扩容。 + +## 使用支持扩容的StorageClass部署Redis复制集群 + +KubeBlocks采用声明式方式管理Redis集群。以下是部署包含2个副本(1主1从)的Redis集群配置示例。 + +应用以下YAML配置部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩容的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + # 指定支持卷扩容的存储类名称 + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `storageClassName`: 指定支持卷扩容的`StorageClass`名称。若未设置,将使用标注为`default`的StorageClass。 + +:::note +**ALLOWVOLUMEEXPANSION** + +创建集群时请确保存储类支持卷扩容(检查`ALLOWVOLUMEEXPANSION`字段)。 + +::: + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 执行存储卷扩容 + +:::note +1. 确保存储类支持卷扩容(检查`ALLOWVOLUMEEXPANSION`) +2. 新容量必须大于当前容量 +3. 
根据存储提供商不同,卷扩容可能需要额外配置 +::: + +可通过以下两种方式执行扩容: + + + + + 方式一:使用VolumeExpansion运维请求 + + 应用以下YAML为redis组件扩容: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-expand-volume-ops + namespace: demo + spec: + clusterName: redis-replication + type: VolumeExpansion + volumeExpansion: + - componentName: redis + volumeClaimTemplates: + - name: data + storage: 30Gi + ``` + + 通过以下命令监控扩容进度: + + ```bash + kubectl describe ops redis-replication-expand-volume-ops -n demo + ``` + + 预期结果: + ```bash + Status: + Phase: Succeed + ``` + 操作完成后,PVC容量将更新。 + + :::note + 若使用的存储类不支持扩容,此OpsRequest将快速失败并提示类似信息: + `storageClass: [STORAGE_CLASS_NAME] of volumeClaimTemplate: [VOLUME_NAME]] not support volume expansion in component [COMPONENT_NAME]` + ::: + + + + + + 方式二:直接更新Cluster API + + 您也可以直接修改`spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage`字段为期望容量。 + + ```yaml + componentSpecs: + - name: redis + volumeClaimTemplates: + - name: data + spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + # 指定新容量,确保大于当前容量 + storage: 30Gi + ``` + KubeBlocks将根据新配置自动更新PVC容量。 + + + +## 验证扩容结果 + +检查更新后的集群配置: +```bash +kbcli cluster describe redis-replication -n demo +``` +预期输出: +```bash +Resources Allocation: +COMPONENT INSTANCE-TEMPLATE CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS +redis 500m / 500m 512Mi / 512Mi data:30Gi +``` +数据PVC的容量已更新为指定值(本例中为30Gi)。 + +确认PVC扩容完成: +```bash +kubectl get pvc -l app.kubernetes.io/instance=redis-replication -n demo +``` +预期输出: +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +redis-replication-redis-data-0 Bound pvc-xxxxxxxx 30Gi RWO 33m +redis-replication-redis-data-1 Bound pvc-xxxxxxxx 30Gi RWO 33m +``` + +## 清理资源 +删除Redis集群及其命名空间以释放所有资源: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南中您已学习: +1. 验证存储类对卷扩容的支持情况 +2. 通过两种方式执行扩容: + - 使用OpsRequest进行动态更新 + - 通过Cluster API手动更新 +3. 
验证更新后的PVC容量并确认扩容操作完成 + +通过存储卷扩容功能,您可以高效扩展Redis集群的存储容量而无需服务中断,确保数据库能够随着应用需求同步增长。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx new file mode 100644 index 00000000..841e91bf --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/05-manage-loadbalancer.mdx @@ -0,0 +1,341 @@ +--- +description: 了解如何通过负载均衡器及其他服务类型,在KubeBlocks中配置和管理Redis服务以实现内外网访问。 +keywords: +- KubeBlocks +- Redis +- LoadBalancer +- External Service +- Expose +- Kubernetes +sidebar_label: 管理Redis服务 +sidebar_position: 5 +title: 使用KubeBlocks声明式集群API创建和销毁Redis服务 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 使用KubeBlocks声明式集群API管理Redis服务 + +本指南将逐步指导您如何通过KubeBlocks暴露Redis服务(包括外部和内部访问)。您将学习如何配置云服务商负载均衡器实现外部访问、管理内部服务,以及在不需要时正确关闭外部暴露。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + +## 部署Redis复制集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + + +## 查看网络服务 +列出为Redis集群创建的服务: +```bash +kubectl get service -l app.kubernetes.io/instance=redis-replication -n demo +``` + +示例服务输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +redis-replication-redis-redis ClusterIP 10.96.102.140 6379/TCP 31s +redis-replication-redis-sentinel-redis-sentinel ClusterIP 10.96.157.4 26379/TCP 51s +``` + + +## 暴露Redis服务 + +外部服务地址允许公网访问Redis,而内部服务地址仅限用户VPC内访问。 + +### 服务类型对比 + +| 类型 | 使用场景 | 云成本 | 安全性 | +|------|----------|------------|----------| +| ClusterIP | 内部服务通信 | 免费 | 最高 | +| NodePort | 开发测试 | 低 | 中等 | +| LoadBalancer | 生产环境外部访问 | 高 | 通过安全组管理 | + + + + + + + 方案一:使用OpsRequest + + 通过创建OpsRequest资源使用负载均衡器暴露Redis服务: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-expose-enable-ops + namespace: demo + spec: + type: Expose + clusterName: redis-replication + expose: + - componentName: redis + services: + - name: internet + # 决定服务暴露方式,默认为'ClusterIP' + # 可选值:'ClusterIP'、'NodePort'和'LoadBalancer' + serviceType: LoadBalancer + # 当ServiceType为LoadBalancer时,包含云服务商相关参数 + # 以下是AWS EKS的配置示例 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为"true"表示使用内部VPC IP + # 指定服务目标角色 + # 若指定,则服务仅暴露给具有匹配角色的Pod + roleSelector: primary + switch: Enable + ``` + + 等待OpsRequest完成: + ```bash + kubectl get ops redis-replication-expose-enable-ops -n demo + ``` + + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-expose-enable-ops Expose redis-replication Succeed 1/1 31s + ``` + + + + + + 方案二:使用Cluster API + + 另一种方式是在Cluster资源的`spec.services`部分添加负载均衡器服务: + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + metadata: + name: redis-replication + namespace: demo + spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + # 暴露外部服务 + services: + - annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为"true"表示使用内部VPC IP + componentSelector: redis + name: redis-internet + serviceName: redis-internet + roleSelector: primary + spec: + ipFamilyPolicy: PreferDualStack + ports: + - name: redis + port: 6379 + protocol: TCP + targetPort: redis + type: LoadBalancer + componentSpecs: + ... 
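    # 补充注释(非原始配置的一部分):componentSpecs 与现有集群保持一致即可;
    # KubeBlocks 会根据 services 列表中的条目创建或删除对应的 Kubernetes Service。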
+ ``` + 上述YAML配置在services部分新增了一个外部服务。该负载均衡器服务包含了AWS网络负载均衡器(NLB)的注解。 + + :::note + 云服务商注解说明 + + 使用负载均衡器服务时,必须包含对应云服务商的特定注解。以下是常用云服务商的注解示例: + + - AWS + ```yaml + service.beta.kubernetes.io/aws-load-balancer-type: nlb # 使用网络负载均衡器 + service.beta.kubernetes.io/aws-load-balancer-internal: "true" # 设为"false"表示面向互联网的负载均衡器 + ``` + + - Azure + ```yaml + service.beta.kubernetes.io/azure-load-balancer-internal: "true" # 设为"false"表示面向互联网的负载均衡器 + ``` + + - GCP + ```yaml + networking.gke.io/load-balancer-type: "Internal" # 限制负载均衡器仅限内部VPC访问。默认不指定时为面向互联网。 + cloud.google.com/l4-rbs: "enabled" # 面向互联网负载均衡器的优化配置 + ``` + + - 阿里云 + ```yaml + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # 设为"intranet"表示内部负载均衡器 + ``` + ::: + + + :::note + `service.beta.kubernetes.io/aws-load-balancer-internal`注解控制负载均衡器是内部还是面向互联网。注意该注解在服务创建后不能动态修改。 + ```yaml + service.beta.kubernetes.io/aws-load-balancer-internal: "false" # 设为"true"表示使用内部VPC IP + ``` + 如果在服务创建后将该注解从"false"改为"true",虽然Service对象中的注解会更新,但负载均衡器仍会保留其公网IP。 + + 正确修改方式: + - 首先删除现有的负载均衡器服务 + - 使用更新后的注解重新创建服务(`service.beta.kubernetes.io/aws-load-balancer-internal`: "true") + - 等待新负载均衡器分配正确的内部/外部IP + ::: + + + 使用以下命令等待集群状态变为Running: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Running 18m + ``` + + + + +### 验证暴露的服务 +检查服务详情确认负载均衡器服务已创建: + +```bash +kubectl get service -l app.kubernetes.io/instance=redis-replication -n demo +``` + +示例输出: +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +redis-replication-redis-internet LoadBalancer 172.20.60.24 6379:31243/TCP 1m +``` + +### 等待DNS解析 + +负载均衡器DNS名称可能需要2-5分钟才能解析。验证解析状态: + +```bash +nslookup # 将替换为实际输出的IP地址 +``` + +## 外部连接 Redis + +### 获取凭据 + +KubeBlocks 会自动创建一个包含 Redis 默认凭据的 Secret。获取 Redis 默认凭据: +```bash +NAME=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d` +``` + +### 使用 Redis 客户端连接 + +现在可以从外部(例如您的笔记本电脑或 EC2)连接到 Redis 数据库: +```bash +redis-cli -h -a ${PASSWD} +``` + + +## 禁用外部暴露 + + + + + + 方法一:使用 OpsRequest + + 要禁用外部访问,创建一个 OpsRequest: + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-expose-disable-ops + namespace: demo + spec: + clusterName: redis-replication + expose: + - componentName: redis + services: + - name: internet + roleSelector: primary + serviceType: LoadBalancer + switch: Disable + preConditionDeadlineSeconds: 0 + type: Expose + ``` + + 等待 OpsRequest 完成: + ```bash + kubectl get ops redis-replication-expose-disable-ops -n demo + ``` + 示例输出: + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-expose-disable-ops Expose redis-replication Succeed 1/1 12s + ``` + + + + + + 方法二:使用 Cluster API + + 或者,从 Cluster 资源中移除 `spec.services` 字段: + ```bash + kubectl patch cluster redis-replication -n demo --type=json -p='[ + { + "op": "remove", + "path": "/spec/services" + } + ]' + ``` + + 监控集群状态直到变为 Running: + ```bash + kubectl get cluster redis-replication -n demo -w + ``` + + ``` + NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE + redis-replication redis Delete Running 23m + ``` + + + +### 验证服务移除 + +确保 'redis-replication-redis-internet' 服务已被移除: + +```bash +kubectl get service -l app.kubernetes.io/instance=redis-replication -n demo +``` + +预期结果:'redis-replication-redis-internet' 服务应被移除。 + + +## 清理 
+要删除所有创建的资源,删除 Redis 集群及其命名空间: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + + +## 总结 +本指南演示了如何: +- 使用 KubeBlocks 将 Redis 服务暴露到外部或内部。 +- 使用云提供商特定的注解配置 LoadBalancer 服务。 +- 通过 OpsRequest 或直接更新 Cluster API 来管理外部访问的启用或禁用。 + +KubeBlocks 为在 Kubernetes 环境中管理 Redis 服务提供了灵活性和简便性。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx new file mode 100644 index 00000000..d80e63f2 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/07-modify-parameters.mdx @@ -0,0 +1,131 @@ +--- +description: 了解如何通过Reconfiguring OpsRequest在KubeBlocks中修改Redis的动态与静态参数,以优化数据库性能与可用性。 +keywords: +- Redis +- KubeBlocks +- OpsRequest +- dynamic parameters +- static parameters +- database configuration +sidebar_label: 修改Redis参数 +sidebar_position: 7 +title: 修改Redis参数 +--- +# 修改 Redis 参数 + +数据库重新配置涉及修改参数、设置或配置以优化性能、安全性或可用性。参数变更分为两种类型: + +| 类型 | 需要重启 | 生效范围 | 示例参数 | +|------|------------------|-------|--------------------| +| **动态参数** | 否 | 立即生效 | `max_connections` | +| **静态参数** | 是 | 重启后生效 | `shared_buffers` | + +对于静态参数,KubeBlocks 通过以下方式最小化停机时间: +1. 先修改并重启副本节点 +2. 执行切换操作,将更新后的副本提升为主节点(通常可在毫秒级完成) +3. 重启原主节点 + +:::note + +KubeBlocks Redis 插件未实现任何针对动态参数的重新加载操作,因此任何参数变更都将导致重启。 + +::: + +本指南演示如何使用重新配置操作请求(Reconfiguring OpsRequest)修改 KubeBlocks 管理的 Redis 集群的静态参数。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 集群 + +import CreatePGCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyPGCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 检查参数值 + +### 获取凭证 +KubeBlocks 会自动创建包含 Redis root 凭证的 Secret。通过以下命令获取凭证: +```bash +NAME=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.username}' | base64 -d` +PASSWD=`kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d` +``` + +### 访问 Redis 集群 +使用 Redis 客户端连接集群主节点: +```bash +kubectl exec -it -n demo redis-replication-redis-0 -c redis -- redis-cli -a ${PASSWD} +``` + +### 查询参数值 + +连接成功后,可以查询 'aof-timestamp-enabled' 的当前值: +```sql +127.0.0.1:6379> CONFIG GET aof-timestamp-enabled +1) "aof-timestamp-enabled" +2) "no" +``` + +## 静态参数示例:修改 aof-timestamp-enabled + +创建重新配置操作请求。应用以下 OpsRequest YAML 将 'aof-timestamp-enabled' 设置为 'yes': + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-reconfigure-static + namespace: demo +spec: + clusterName: redis-replication + reconfigures: + - componentName: redis + parameters: + - key: aof-timestamp-enabled + value: 'yes' + type: Reconfiguring +``` + +检查操作请求状态直至完成: + +```bash +kubectl get ops redis-reconfigure-static -n demo -w +``` + +示例输出: +```bash +redis-reconfigure-static Reconfiguring redis-replication Running -/- 5s +redis-reconfigure-static Reconfiguring redis-replication Succeed -/- 33s +``` + +**验证配置变更** + +登录 Redis 实例确认 `aof-timestamp-enabled` 参数已更新: + +```sql +127.0.0.1:6379> CONFIG GET aof-timestamp-enabled +1) "aof-timestamp-enabled" +2) "yes" +``` + +## 清理资源 +删除 Redis 集群及其命名空间以移除所有创建的资源: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## 总结 +本指南介绍了通过 KubeBlocks 修改 Redis 参数的方法: +- 静态变更需要重启但停机时间极短 +- 所有变更在应用前都会经过验证 +- 配置遵循声明式管理原则 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/08-switchover.mdx 
b/docs/zh/preview/kubeblocks-for-redis/04-operations/08-switchover.mdx new file mode 100644 index 00000000..9b91d0a6 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/08-switchover.mdx @@ -0,0 +1,184 @@ +--- +description: 使用KubeBlocks在Redis集群中执行计划内角色切换,实现最短停机时间和可控维护 +keywords: +- Redis +- KubeBlocks +- Switchover +- High Availability +- Role Transition +- Kubernetes +sidebar_label: Redis 切换 +sidebar_position: 8 +title: Redis 集群切换 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Redis 集群切换 + +**切换(Switchover)** 是一种有计划的操作,用于将主节点角色从一个 Redis 实例转移到另一个实例。与故障转移(failover)不同,切换操作具有以下特点: +- 可控的角色转换 +- 极短的中断时间(通常仅数百毫秒) +- 可预测的维护窗口 + +切换操作适用于以下场景: +- 节点维护/升级 +- 工作负载重新平衡 +- 测试高可用性 +- 有计划的基础设施变更 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 检查角色 +列出 Pod 及其角色(主节点或从节点): + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication,apps.kubeblocks.io/component-name=redis -L kubeblocks.io/role +``` + +示例输出: + +```text +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 4/4 Running 0 9m59s primary +redis-replication-redis-1 4/4 Running 0 11m secondary +``` + +## 执行计划内切换 + +要发起计划内切换,请创建如下所示的 OpsRequest 资源: + + + + 选项1:自动切换(无指定候选节点) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-switchover-ops + namespace: demo + spec: + clusterName: redis-replication + type: Switchover + switchover: + - componentName: redis + instanceName: redis-replication-redis-0 + ``` + **关键参数:** + - `instanceName`:指定切换操作前作为主节点(leader)的实例(Pod)。 + + + + 选项2:定向切换(指定候选节点) + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-switchover-targeted + namespace: demo + spec: + clusterName: redis-replication + type: Switchover + switchover: + - componentName: redis + # 指定需要转移角色的实例 + # 典型用法是在共识系统中转移 leader 角色 + instanceName: redis-replication-redis-0 + # 如果指定 candidateName,角色将转移到该实例 + # 名称必须匹配组件中的某个 Pod + # 详情请参考 ComponentDefinition 的 Switchover 生命周期操作 + candidateName: redis-replication-redis-1 + ``` + + **关键参数:** + - `instanceName`:指定切换操作前作为主节点(leader)的实例(Pod)。 + - `candidateName`:如果指定候选节点名称,角色将转移到该实例。 + + + +## 监控切换过程 + +监控切换进度: + +```bash +kubectl get ops redis-switchover-ops -n demo -w +``` + +预期结果: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +redis-switchover-ops Switchover redis-replication Succeed 1/1 33s +``` + +## 验证切换结果 + +切换操作执行后,指定的实例将被提升为主节点角色,而原先的主节点实例将转为从节点角色。 + +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication,apps.kubeblocks.io/component-name=redis -L kubeblocks.io/role +``` + +预期输出: + +```text +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 4/4 Running 0 19m59s secondary +redis-replication-redis-1 4/4 Running 0 21m primary +``` + +在本示例中: +- Pod 'redis-replication-redis-1' 已被提升为主节点角色 +- Pod 'redis-replication-redis-0' 已转为从节点角色 + +## 故障排查 + +### 常见切换问题 + +如果切换操作卡住,请检查以下资源: +```bash +# 检查当前主节点和候选节点的 agent 日志 +kubectl logs -n demo -c kbagent +kubectl logs -n demo -c kbagent + +# 检查集群事件中的错误信息 +kubectl get events -n demo --field-selector involvedObject.name=redis-replication + +# 检查 kubeblocks 日志 +kubectl -n kb-system logs deploy/kubeblocks +``` + +## 总结 + +本指南演示了如何: +1. 部署 Redis 高可用集群 +2. 执行自动和定向两种切换操作 +3. 
验证角色转换 + +**关键要点:** +- 切换操作可实现可控维护,中断时间极短(约100-500毫秒) +- KubeBlocks 提供声明式操作实现可靠的角色转换 +- 切换后务必验证: + - 集群状态 + - 应用连接性 + - 复制健康状况 +- 排查问题时检查以下日志: + - KubeBlocks operator(位于 kb-system 命名空间) + - 数据库 Pod 上的 kbagent \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx b/docs/zh/preview/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx new file mode 100644 index 00000000..31575fac --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/09-decommission-a-specific-replica.mdx @@ -0,0 +1,132 @@ +--- +description: 了解如何在KubeBlocks管理的Redis集群中下线(停用)特定Pod。 +keywords: +- KubeBlocks +- Redis +- Decommission Pod +- Horizontal Scaling +- Kubernetes +sidebar_label: 下线 Redis 副本 +sidebar_position: 9 +title: 在KubeBlocks管理的Redis集群中下线特定Pod +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 下线 KubeBlocks 托管的 Redis 集群中特定 Pod + +本文档介绍如何在 KubeBlocks 托管的 Redis 集群中下线(停用)特定 Pod。通过精确控制集群资源的下线操作,可以在保持服务可用性的同时实现工作负载重平衡、节点维护或故障处理。 + +## 为什么选择 KubeBlocks 下线 Pod? + +在传统的基于 StatefulSet 的部署中,Kubernetes 无法直接下线特定 Pod。StatefulSet 会严格保证 Pod 的顺序和身份标识,缩容操作总是优先移除序号最大的 Pod(例如从 3 个副本缩容时,会先移除 `Pod-2`)。这种限制使得运维人员无法精确控制需要下线的目标 Pod,给维护工作、负载均衡或故障处理带来不便。 + +KubeBlocks 突破了这一限制,允许管理员直接指定需要下线的 Pod。这种细粒度控制能力既能保障集群高可用性,又能实现更精细的资源管理。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署状态 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 下线指定 Pod + +**预期工作流程**: +1. `onlineInstancesToOffline` 中指定的副本被移除 +2. Pod 优雅终止 +3. 集群状态从 `Updating` 转变为 `Running` + +以下两种方法均可用于下线特定 Pod(例如 'redis-replication-redis-1'): + + + + + + 方法一:使用 OpsRequest + + 创建运维请求标记需要下线的 Pod: + + ```yaml + apiVersion: operations.kubeblocks.io/v1alpha1 + kind: OpsRequest + metadata: + name: redis-replication-decommission-ops + namespace: demo + spec: + clusterName: redis-replication + type: HorizontalScaling + horizontalScaling: + - componentName: redis + scaleIn: + onlineInstancesToOffline: + - 'redis-replication-redis-1' # 指定需要下线的实例名称 + ``` + + #### 监控下线进度 + 查看下线操作执行状态: + + ```bash + kubectl get ops redis-replication-decommission-ops -n demo -w + ``` + 示例输出: + + ```bash + NAME TYPE CLUSTER STATUS PROGRESS AGE + redis-replication-decommission-ops HorizontalScaling redis-replication Succeed 1/1 71s + ``` + + + + + + 方法二:使用 Cluster API + + 也可以直接修改 Cluster 资源来下线 Pod: + + ```yaml + apiVersion: apps.kubeblocks.io/v1 + kind: Cluster + spec: + componentSpecs: + - name: redis + replicas: 1 # 下线后期望的副本数 + offlineInstances: + - redis-replication-redis-1 # <----- 指定需要下线的 Pod + ... 
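        # 补充注释(非原始配置的一部分):replicas 应等于下线后期望保留的副本数;
        # 列入 offlineInstances 的 Pod 会被优雅终止,且不会被重新调度。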
+ ``` + + + + +### 验证下线结果 + +应用更新配置后,检查集群中剩余的 Pod: +```bash +kubectl get pods -n demo -l app.kubernetes.io/instance=redis-replication +``` + +示例输出: +```bash +NAME READY STATUS RESTARTS AGE +redis-replication-redis-0 3/3 Running 0 33m33s +``` + +## 总结 +核心要点: +- 传统 StatefulSet 缺乏精确的 Pod 移除控制能力 +- KubeBlocks 支持定向下线特定 Pod +- 两种实现方式:OpsRequest 或 Cluster API + +该功能在保障服务可用性的同时,为集群管理提供了更精细的控制维度。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/04-operations/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/04-operations/_category_.yml new file mode 100644 index 00000000..a7461723 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/04-operations/_category_.yml @@ -0,0 +1,4 @@ +collapsed: false +collapsible: true +label: 操作 +position: 4 diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx new file mode 100644 index 00000000..6426bd61 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/01-create-backuprepo.mdx @@ -0,0 +1,129 @@ +--- +description: 了解如何创建并配置一个使用S3存储桶保存备份数据的KubeBlocks备份仓库(BackupRepo)。 +keywords: +- KubeBlocks +- Backup +- BackupRepo +- S3 +- Kubernetes +sidebar_label: 创建备份仓库 +sidebar_position: 1 +title: 为KubeBlocks创建备份仓库 +--- +# 为 KubeBlocks 创建备份存储库 + +本指南将引导您通过使用 S3 存储桶来创建和配置 KubeBlocks 中的备份存储库(BackupRepo),用于存储备份数据。 + +## 前提条件 +- 已配置具有创建 S3 存储桶权限的 AWS CLI +- 拥有 Kubernetes 集群的 kubectl 访问权限 +- 已安装 KubeBlocks([安装指南](../user_docs/overview/install-kubeblocks))并在 kb-system 命名空间中运行 + +## 步骤 1:创建 S3 存储桶 + +使用 AWS CLI 在目标区域创建 S3 存储桶。将 `` 替换为您所需的 AWS 区域(例如 `us-east-1`、`ap-southeast-1`)。 + +```bash + aws s3api create-bucket --bucket kubeblocks-backup-repo --region --create-bucket-configuration LocationConstraint= +``` + +示例(us-west-1 区域): +```bash +aws s3api create-bucket \ + --bucket kubeblocks-backup-repo \ + --region us-west-1 \ + --create-bucket-configuration LocationConstraint=us-west-1 +``` + +示例输出: + +```json +{ +"Location": "http://kubeblocks-backup-repo.s3.amazonaws.com/" +} +``` + +验证: +通过列出存储桶内容确认创建成功(初始应为空): + +```bash +aws s3 ls s3://kubeblocks-backup-repo +``` + +## 步骤 2:创建 Kubernetes Secret 存储 AWS 凭证 + +将 AWS 凭证安全地存储在 Kubernetes Secret 中。将 `` 和 `` 替换为实际的 AWS 凭证: + +```bash +# 创建 secret 保存访问密钥 +kubectl create secret generic s3-credential-for-backuprepo \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= \ + -n kb-system +``` + +## 步骤 3:配置备份存储库 + +BackupRepo 是定义备份存储位置的自定义资源。本步骤将通过创建 BackupRepo 资源将您的 S3 存储桶与 KubeBlocks 集成。 + +应用以下 YAML 创建 BackupRepo。请替换字段(如存储桶名称、区域)为您的具体配置。 + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupRepo +metadata: + name: s3-repo + annotations: + # 将此备份存储库标记为默认存储库 + dataprotection.kubeblocks.io/is-default-repo: 'true' +spec: + # 当前 KubeBlocks 支持配置多种对象存储服务作为备份存储库 + # - s3 (Amazon Simple Storage Service) + # - oss (阿里云对象存储服务) + # - cos (腾讯云对象存储) + # - gcs (Google 云存储) + # - obs (华为云对象存储) + # - minio 及其他 S3 兼容服务 + storageProviderRef: s3 + # 指定备份存储库的访问方式 + # - Tool + # - Mount + accessMethod: Tool + # 指定此备份存储库创建的 PV 回收策略 + pvReclaimPolicy: Retain + # 指定此备份存储库创建的 PVC 容量 + volumeCapacity: 100Gi + # 存储 StorageProvider 的非敏感配置参数 + config: + bucket: kubeblocks-backup-repo + endpoint: '' + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: us-west-1 + # 引用存储 StorageProvider 凭证的 secret + credential: + # name 是在命名空间内引用 secret 资源的唯一标识 + name: s3-credential-for-backuprepo + # namespace 定义了 secret 名称必须唯一的空间范围 + namespace: 
kb-system +``` + +## 步骤 4:验证备份存储库状态 + +检查 BackupRepo 状态以确保其正确初始化: + +```bash +kubectl get backuprepo s3-repo -w +``` + +预期状态变化: +```bash +NAME STATUS STORAGEPROVIDER ACCESSMETHOD DEFAULT AGE +s3-repo PreChecking s3 Tool true 5s +s3-repo Ready s3 Tool true 35s +``` + +故障排除: + - 如果状态变为 Failed: + - 确认存储桶名称和区域与 S3 配置匹配 + - 检查 Secret 中的 AWS 凭证是否正确 + - 验证 KubeBlocks 与 AWS S3 之间的网络连接 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx new file mode 100644 index 00000000..79bae197 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/02-create-full-backup.mdx @@ -0,0 +1,220 @@ +--- +description: 使用KubeBlocks中的Backup API和OpsRequest API为Redis集群创建及验证完整备份的逐步指南 +keywords: +- Redis +- Full Backup +- KubeBlocks +- Kubernetes +- Database Backup +- XtraBackup +sidebar_label: 创建完整备份 +sidebar_position: 2 +title: 在KubeBlocks上为Redis集群创建完整备份 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 在KubeBlocks上为Redis创建全量备份 + +本指南演示如何通过以下两种方式在KubeBlocks上为Redis集群创建和验证全量备份: +- Backup API(直接备份操作) +- OpsRequest API(带增强监控的托管备份操作) + +我们将在[从全量备份恢复](./05-restoring-from-full-backup)指南中介绍如何从备份恢复数据。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署Redis集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 备份前提条件 + +创建备份前请确保: +1. 备份仓库已配置: + - 存在`BackupRepo`资源 + - 集群与仓库之间的网络连通性 + - `BackupRepo`状态显示"Ready" + +2. 集群准备就绪: + - 集群状态为"Running" + - 没有正在进行的操作(扩缩容、升级等) + +## 查看备份配置 + +检查可用的备份策略和计划: + +```bash +# 列出备份策略 +kubectl get backuppolicy -n demo -l app.kubernetes.io/instance=redis-replication + +# 列出备份计划 +kubectl get backupschedule -n demo -l app.kubernetes.io/instance=redis-replication +``` + +预期输出: +```bash +NAME BACKUP-REPO STATUS AGE +redis-replication-redis-backup-policy Available 17m + +NAME STATUS AGE +redis-replication-redis-backup-schedule Available 60m +``` + +查看BackupPolicy CR 'redis-replication-redis-backup-policy'中支持的备份方法: + +```bash +kubectl get backuppolicy redis-replication-redis-backup-policy -n demo -oyaml | yq '.spec.backupMethods[].name' +``` +**备份方法列表** + +KubeBlocks Redis支持以下备份方法: + +| 功能 | 方法 | 描述 | +|-------------|--------|------------| +| 全量备份 | datafile | 使用`redis-cli BGSAVE`命令备份数据 | +| 持续备份 | aof | 通过归档AOF文件(Append-Only Files)持续执行增量备份 | + +## 通过Backup API备份 + +### 1. 创建按需备份 + +`datafile`方法使用redis `BGSAVE`命令执行全量备份,并通过`datasafed push`上传备份文件。 + +应用以下清单创建备份: + +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: Backup +metadata: + name: redis-backup-datafile + namespace: demo +spec: + backupMethod: datafile + backupPolicyName: redis-replication-redis-backup-policy + # 决定当备份自定义资源(CR)被删除时,是否应删除备份仓库中的备份内容。 + # 支持的值为`Retain`和`Delete`。 + # - `Retain`表示保留备份内容及其在备份仓库中的物理快照。 + # - `Delete`表示删除备份内容及其在备份仓库中的物理快照。 + deletionPolicy: Delete +``` + +### 2. 监控备份并验证完成 + +跟踪进度直到状态显示"Completed": + +```bash +kubectl get backup redis-backup-datafile -n demo -w +``` + +示例输出: + +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +redis-backup-datafile redis-replication-redis-backup-policy datafile Completed 3412 10s Delete 2025-05-17T09:24:59Z 2025-05-17T09:25:08Z +``` + +### 3. 
验证备份 + +通过以下方式确认备份成功完成: +- 备份状态显示"Completed" +- 备份大小符合预期 +- 检查BackupRepo中的文件 + +`Backup`资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 备份文件大小 + + +## 通过OpsRequest API备份 + +### 1. 创建按需备份 + +使用OpsRequest API执行备份: + +```yaml +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: redis-replication-backup + namespace: demo +spec: + clusterName: redis-replication + force: false + backup: + backupPolicyName: redis-replication-redis-backup-policy + backupMethod: datafile + deletionPolicy: Delete + retentionPeriod: 1mo + type: Backup +``` + +### 2. 监控备份进度 + +#### 1. 监控操作状态 + +实时跟踪备份进度: +```bash +kubectl get ops redis-replication-backup -n demo -w +``` + +预期输出: +```bash +NAME TYPE CLUSTER STATUS PROGRESS AGE +redis-replication-backup Backup redis-replication Succeed -/- 35s +``` + +- 状态为'Succeed'表示备份操作成功完成。 + +#### 2. 验证完成 + +检查最终备份状态: + +```bash +kubectl get backup -n demo -l operations.kubeblocks.io/ops-name=redis-replication-backup +``` + +示例输出: +```bash +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +backup-demo-redis-replication-20250517092706 redis-replication-redis-backup-policy datafile Completed 3458 10s Delete 2025-05-17T09:27:06Z 2025-05-17T09:27:16Z 2025-06-16T09:27:16Z +``` + +- 备份状态应显示'Completed'。 + +### 3. 验证备份 + +通过以下方式确认备份成功完成: +- 备份状态显示"Completed" +- 备份大小符合预期 +- 检查BackupRepo中的文件 + +`Backup`资源记录以下详细信息: +- 存储路径 +- 时间范围 +- 其他元数据 + +## 总结 + +本指南涵盖: +1. 部署Redis复制集群 +2. 使用以下方式创建全量备份: + - 直接Backup API + - 托管OpsRequest API +3. 监控和验证备份 + +您的Redis数据现已安全备份,可在需要时进行恢复。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx new file mode 100644 index 00000000..b956817a --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/03-scheduled-full-backup.mdx @@ -0,0 +1,153 @@ +--- +description: 了解如何使用KubeBlocks部署Redis集群,并配置在S3存储库中保留自动定时备份。 +keywords: +- Redis +- Backup +- KubeBlocks +- Scheduled Backup +- Kubernetes +sidebar_label: 定时备份 +sidebar_position: 3 +title: 在KubeBlocks中设置带定时备份的Redis集群 +--- +# 在 KubeBlocks 中设置带定时备份的 Redis 集群 + +本指南演示如何使用 KubeBlocks 部署 Redis 集群,并配置定时备份到 S3 存储库的保留策略。 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 备份前提条件 + +1. 已配置备份存储库: + - 配置好 `BackupRepo` + - 集群与存储库之间网络连通,`BackupRepo` 状态为 `Ready` + +2. 集群运行正常: + - 集群必须处于 `Running` 状态 + - 没有正在进行的操作(扩缩容、升级等) + +## 配置定时备份 + +KubeBlocks 在创建集群时会自动创建 `BackupSchedule` 资源。按照以下步骤启用和配置定时备份: + +1. 验证默认备份计划配置: + +```bash +kubectl get backupschedule redis-replication-redis-backup-schedule -n demo -oyaml +``` + +示例输出: +```yaml +apiVersion: dataprotection.kubeblocks.io/v1alpha1 +kind: BackupSchedule +spec: + backupPolicyName: redis-replication-redis-backup-policy + schedules: + - backupMethod: datafile + # ┌───────────── 分钟 (0-59) + # │ ┌───────────── 小时 (0-23) + # │ │ ┌───────────── 月份中的天 (1-31) + # │ │ │ ┌───────────── 月份 (1-12) + # │ │ │ │ ┌───────────── 星期中的天 (0-6) (周日=0) + # │ │ │ │ │ + # 0 18 * * * + # 每天下午6点(18:00)执行此任务 + cronExpression: 0 18 * * * # 根据需要更新cron表达式 + enabled: false # 设置为`true`以定期执行基础备份 + retentionPeriod: 7d # 根据需要设置保留期限 +``` + +2. 
启用并自定义备份计划: +```bash +kubectl edit backupschedule redis-replication-redis-backup-schedule -n demo +``` + +更新以下关键参数: +- `enabled`:设置为 `true` 以激活定时备份 +- `cronExpression`:使用 cron 语法配置备份频率 +- `retentionPeriod`:设置备份保留时长(如 `7d`、`1mo`) + +每天 UTC 时间 18:00 执行备份并保留 7 天的示例配置: +```yaml +schedules: +- backupMethod: datafile + enabled: true + cronExpression: "0 18 * * *" + retentionPeriod: 7d +``` + +3. 验证计划配置: +```bash +# 检查计划状态 +kubectl get backupschedule redis-replication-redis-backup-schedule -n demo -w + +# 查看详细配置 +kubectl describe backupschedule redis-replication-redis-backup-schedule -n demo +``` + +## 监控和管理备份 + +启用定时备份后,监控其执行情况并管理备份保留: + +1. 查看所有备份: +```bash +kubectl get backup -n demo -l app.kubernetes.io/instance=redis-replication +``` + +2. 检查备份详情: +```bash +kubectl describe backup -n demo +``` + +3. 验证备份文件: +- 状态应显示"Completed" +- 检查备份大小是否符合预期 +- 确认保留策略正在应用 +- 验证存储库中存在备份文件 + +4. 管理备份保留: +- 手动删除旧备份: +```bash +kubectl delete backup -n demo +``` +- 修改保留期限: +```bash +kubectl edit backupschedule redis-replication-redis-backup-schedule -n demo +``` + +## 清理 +要删除所有创建的资源,请删除 Redis 集群及其命名空间: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +``` + +## 总结 + +本指南演示了: +1. Redis 自动备份配置 +2. 使用 cron 语法自定义计划 +3. 保留策略管理 +4. 备份验证流程 + +您的 Redis 集群现在具备: +- 定期自动备份 +- 可配置的保留策略 +- 完整的备份历史记录跟踪 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx new file mode 100644 index 00000000..f2150f4b --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/04-scheduled-continuous-backup.mdx @@ -0,0 +1,163 @@ +--- +description: 了解如何在KubeBlocks中设置Redis集群,并启用定时全量备份与持续增量备份功能。 +keywords: +- Redis +- Backup +- PITR +- KubeBlocks +- Kubernetes +sidebar_label: 定时持续备份 +sidebar_position: 4 +title: 在KubeBlocks中设置支持定时持续备份的Redis集群 +--- +# 在KubeBlocks中配置支持定时持续备份的Redis集群 + +本指南将演示如何在KubeBlocks上配置Redis集群,实现以下功能: +- 定时全量备份(基础备份) +- 持续WAL(预写日志)归档 +- 时间点恢复(PITR)能力 + +这种组合方案能提供全面的数据保护,并实现最小的恢复点目标(RPO)。 + +## 什么是PITR? +时间点恢复(PITR)允许您通过结合全量备份和持续的binlog/wal/归档日志备份,将数据库恢复到特定时间点。 + +有关从全量备份和持续binlog备份恢复数据的详细信息,请参阅[从PITR恢复](restore-with-pitr.mdx)指南。 + +## 前提条件 + +开始前请确保: +- 环境准备: + - Kubernetes集群已启动并运行 + - kubectl CLI工具已配置可连接集群 + - 已安装[KubeBlocks CLI](../../user_docs/references/install-kbcli)和[KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。请按照此处说明进行安装。 +- 命名空间准备:为保持资源隔离,请为本教程创建专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` + +## 备份前提条件 + +1. 备份仓库配置: + - 已配置`BackupRepo` + - 集群与仓库间网络连通,`BackupRepo`状态为`Ready` + +2. 
集群运行状态:
   - 集群必须处于`Running`状态
   - 无进行中的操作(扩缩容、升级等)

## 备份方法列表

KubeBlocks Redis支持以下备份方法:

| 功能 | 方法 | 描述 |
|-------------|--------|------------|
| 全量备份 | datafile | 使用`redis-cli BGSAVE`命令备份数据 |
| 持续备份 | aof | 通过归档仅追加文件(AOF)实现持续增量备份 |

## 部署Redis集群

import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx'



## 验证部署

import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx'



## 启用持续备份

### 准备工作:设置`aof-timestamp-enabled`为`yes`
Redis仅追加文件(AOF)按处理顺序记录服务器接收的每个写操作,使Redis能通过重放这些命令重建数据集。
KubeBlocks通过归档仅追加文件(AOF)支持Redis组件的持续备份。它将处理增量AOF文件,更新基础AOF文件,清理过期文件并保存备份状态(将备份过程的元数据如总大小和时间戳记录到`Backup`资源中)。

启用持续备份前,必须将参数`aof-timestamp-enabled`设为`yes`。

```yaml
# cat examples/redis/reconfigure-aof.yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: redis-reconfigure-aof
  namespace: demo
spec:
  clusterName: redis-replication
  reconfigures:
  - componentName: redis
    parameters:
    # 表示要更新的参数名称
    - key: aof-timestamp-enabled
      value: 'yes'
  type: Reconfiguring
```

:::note
一旦启用`aof-timestamp-enabled`,Redis将在AOF文件中包含时间戳。
可能产生以下副作用:存储开销、性能开销(写入延迟)。
当您有高写入吞吐量或存储空间有限时,不建议启用此功能。
:::

### 更新BackupSchedule

更新`BackupSchedule`以计划启用(`enabled`)备份方法并按需设置时间(`cronExpression`):

```yaml
apiVersion: dataprotection.kubeblocks.io/v1alpha1
kind: BackupSchedule
spec:
  backupPolicyName: redis-replication-redis-backup-policy
  schedules:
  - backupMethod: datafile
    # ┌───────────── 分钟 (0-59)
    # │ ┌───────────── 小时 (0-23)
    # │ │ ┌───────────── 月份中的天 (1-31)
    # │ │ │ ┌───────────── 月份 (1-12)
    # │ │ │ │ ┌───────────── 星期中的天 (0-6) (周日=0)
    # │ │ │ │ │
    # 0 18 * * *
    # 每天下午6点(18:00)执行此任务
    cronExpression: 0 18 * * * # 根据需要更新cronExpression
    enabled: true # 设为`true`以定期调度基础备份
    retentionPeriod: 7d # 根据需要设置保留期
  - backupMethod: aof
    cronExpression: '*/30 * * * *'
    enabled: true # 设为`true`启用持续备份
    name: aof
    retentionPeriod: 8d # 默认情况下,持续备份的保留期比全量备份多1天
```

1. **全量备份** (datafile):
   - 使用redis `BGSAVE`命令执行全量备份
   - 按配置的计划运行(默认每天)
   - 作为PITR的基础

2. **持续备份** (aof):
   - 持续处理增量AOF文件,更新基础AOF文件,清理过期文件
   - 维护备份元数据包括大小和时间范围

## 监控持续备份

使用以下命令验证持续备份操作:
```bash
# 获取持续备份
kubectl get backup -l app.kubernetes.io/instance=redis-replication,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
# 获取处理持续备份的pod
kubectl get pod -l app.kubernetes.io/instance=redis-replication,dataprotection.kubeblocks.io/backup-type=Continuous -n demo
```

## 总结

本指南涵盖:
1. 使用 datafile 方法配置定时全量备份
2. 使用 aof 方法启用持续 AOF 归档
3. 设置时间点恢复(PITR)能力
4. 监控备份操作

关键优势:
- 定时全量备份确保定期恢复点
- 持续 AOF 归档最小化潜在数据丢失
- PITR支持恢复到任意时间点
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx
new file mode 100644
index 00000000..e20f79cf
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/05-restoring-from-full-backup.mdx
@@ -0,0 +1,183 @@
+---
+description: 了解如何通过集群注解(Cluster Annotation)或运维请求API(OpsRequest API)在KubeBlocks中从现有备份恢复一个新的Redis集群。
+keywords:
+- Redis
+- Restore
+- Backup
+- KubeBlocks
+- Kubernetes
+sidebar_label: 恢复 Redis 集群
+sidebar_position: 5
+title: 从备份恢复 Redis 集群
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';

# 从备份恢复Redis集群

本指南演示了在KubeBlocks中从备份恢复Redis集群的两种方法:

1. **集群注解法** - 使用YAML注解的简单声明式方法
2. 
**OpsRequest API法** - 支持进度监控的增强型操作控制

## 前提条件

import Prerequisites from '../_tpl/_prerequisites.mdx'



## 恢复准备:定位完整备份
在开始恢复前,请确保存在可用的完整备份。恢复过程将使用此备份创建新的Redis集群。

- 新集群可访问的备份仓库
- 状态为`Completed`的有效完整备份
- 充足的CPU/内存资源
- 足够的存储容量

查找可用的完整备份:

```bash
kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=redis-replication # 获取完整备份列表
```

选择状态为`Completed`的任意一个备份。

## 方案一:集群注解恢复法

### 步骤1:创建恢复集群
创建包含恢复配置的新集群:

关键参数:
- `kubeblocks.io/restore-from-backup`注解
- 从上一步骤获取的备份名称和命名空间(将 `<FULL_BACKUP_NAME>` 替换为实际备份名称)


```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: redis-replication-restored
  namespace: demo
  annotations:
    kubeblocks.io/restore-from-backup: '{"redis":{"name":"<FULL_BACKUP_NAME>","namespace":"demo","volumeRestorePolicy":"Parallel"}}'
spec:
  terminationPolicy: Delete
  clusterDef: redis
  topology: replication
  componentSpecs:
    - name: redis
      serviceVersion: "7.2.4"
      disableExporter: false
      replicas: 2
      resources:
        limits:
          cpu: '0.5'
          memory: 0.5Gi
        requests:
          cpu: '0.5'
          memory: 0.5Gi
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: ""
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
    - name: redis-sentinel
      replicas: 3
      resources:
        limits:
          cpu: '0.5'
          memory: 0.5Gi
        requests:
          cpu: '0.5'
          memory: 0.5Gi
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: ""
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```

### 步骤2:监控恢复进度
通过以下命令跟踪恢复进度:

```bash
# 查看恢复状态
kubectl get restore -n demo -w

# 查看集群状态
kubectl get cluster -n demo -w
```

## 方案二:OpsRequest API恢复法

### 步骤1:发起恢复操作
通过OpsRequest API创建恢复请求(将 `<FULL_BACKUP_NAME>` 替换为实际备份名称):

```yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: redis-replication-restore
  namespace: demo
spec:
  clusterName: redis-replication-restored
  force: false
  restore:
    backupName: <FULL_BACKUP_NAME>
    backupNamespace: demo
  type: Restore
```

### 步骤2:跟踪操作进度
监控恢复状态:

```bash
# 查看恢复状态
kubectl get restore -n demo -w

# 查看集群状态
kubectl get cluster -n demo -w
```

### 步骤3:验证恢复集群
确认恢复成功:
```bash
kubectl get cluster redis-replication-restored -n demo
```
示例输出:
```bash
NAME                         CLUSTER-DEFINITION   TERMINATION-POLICY   STATUS    AGE
redis-replication-restored   redis                Delete               Running   3m2s
```


## 清理资源
删除Redis集群及其命名空间以移除所有创建的资源:

```bash
kubectl delete cluster redis-replication -n demo
kubectl delete cluster redis-replication-restored -n demo
kubectl delete ns demo
```

## 总结

本指南涵盖两种恢复方法:

1. **集群注解法** - 基于YAML的简单方法
   - 获取系统凭证
   - 创建带恢复注解的集群
   - 监控进度

2. **OpsRequest API法** - 增强的操作控制
   - 创建恢复请求
   - 跟踪操作状态
   - 验证完成情况
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx
new file mode 100644
index 00000000..c0604e27
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/06-restore-with-pitr.mdx
@@ -0,0 +1,186 @@
+---
+description: 了解如何在KubeBlocks上使用完整备份和持续AOF备份实现Redis集群的时间点恢复(PITR)。
+keywords:
+- Redis
+- Full Backup
+- PITR
+- KubeBlocks
+sidebar_label: 使用 PITR 恢复
+sidebar_position: 6
+title: 在KubeBlocks上通过时间点恢复(PITR)从备份还原Redis集群
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';

# 在KubeBlocks中使用时间点恢复(PITR)从备份还原Redis集群

本指南演示如何在KubeBlocks中对Redis集群执行时间点恢复(PITR),使用以下要素:
1. 完整基础备份
2. 持续的 AOF(Append-Only File)备份
3. 
两种恢复方法:
   - 集群注解(声明式方法)
   - OpsRequest API(操作控制)

PITR支持恢复到指定`timeRange`时间范围内的任意时间点。

## 先决条件

import Prerequisites from '../_tpl/_prerequisites.mdx'



## 准备PITR恢复
执行PITR恢复需要同时具备完整备份和持续备份。如果尚未配置,请参考相关文档进行设置。

- 已完成的完整备份
- 活跃的持续 AOF 备份
- 可访问的备份存储库
- 新集群的充足资源

可通过以下步骤确认备份列表:

### 1. 验证持续备份
确认存在正在运行或已完成的持续 AOF 备份:

```bash
# 每个集群应有且仅有一个持续备份
kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Continuous,app.kubernetes.io/instance=redis-replication
```

### 2. 检查备份时间范围
获取有效恢复时间窗口(将 `<CONTINUOUS_BACKUP_NAME>` 替换为上一步获取的持续备份名称):

```bash
kubectl get backup <CONTINUOUS_BACKUP_NAME> -n demo -o yaml | yq '.status.timeRange'
```

预期输出:
```text
start: "2025-05-07T09:12:47Z"
end: "2025-05-07T09:22:50Z"
```

### 3. 识别完整备份
查找符合条件的完整备份:
- 状态:已完成
- 完成时间晚于持续备份开始时间

```bash
# 应存在一个或多个完整备份
kubectl get backup -n demo -l dataprotection.kubeblocks.io/backup-type=Full,app.kubernetes.io/instance=redis-replication
```

:::tip
KubeBlocks会自动选择符合条件的最近完整备份作为基础。
确保存在满足条件的完整备份:其`stopTime`/`completionTimestamp`必须**晚于**持续备份的`startTime`,否则PITR恢复将失败。
:::

## 方案一:集群注解恢复

### 步骤1:创建恢复集群
在集群注解中配置PITR参数:

关键参数:
- `name`: 持续备份名称
- `restoreTime`: 目标恢复时间(需在备份`timeRange`范围内)

应用以下YAML配置:
```yaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: redis-replication-pitr
  namespace: demo
  annotations:
    # 注意:将 <CONTINUOUS_BACKUP_NAME> 替换为持续备份名称
    # 注意:将 <RESTORE_TIME> 替换为备份时间范围内的有效时间
    kubeblocks.io/restore-from-backup: '{"redis":{"name":"<CONTINUOUS_BACKUP_NAME>","namespace":"demo","restoreTime":"<RESTORE_TIME>","volumeRestorePolicy":"Parallel"}}'
spec:
  terminationPolicy: Delete
  clusterDef: redis
  topology: replication
  componentSpecs:
    - name: redis
      serviceVersion: "7.2.4"
      disableExporter: true
      replicas: 1
      resources:
        limits:
          cpu: "0.5"
          memory: "0.5Gi"
        requests:
          cpu: "0.5"
          memory: "0.5Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: ""
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 20Gi
```

### 步骤2:监控恢复过程
通过以下命令跟踪恢复进度:

```bash
# 查看恢复状态
kubectl get restore -n demo -w

# 查看集群状态
kubectl get cluster -n demo -w
```

## 方案二:OpsRequest API恢复

如需操作控制和监控,可使用OpsRequest API(将 `<CONTINUOUS_BACKUP_NAME>` 与 `<RESTORE_TIME>` 替换为实际值):

```yaml
apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: redis-replication-restore
  namespace: demo
spec:
  clusterName: redis-replication-restore
  force: false
  restore:
    backupName: <CONTINUOUS_BACKUP_NAME>
    backupNamespace: demo
    restorePointInTime: <RESTORE_TIME>
  type: Restore
```

### 监控恢复过程
通过以下命令跟踪进度:

```bash
# 查看恢复操作
kubectl get restore -n demo -w

# 验证集群状态
kubectl get cluster -n demo -w
```

## 清理资源
删除Redis集群及其命名空间以移除所有创建的资源:

```bash
kubectl delete cluster redis-replication -n demo
kubectl delete cluster redis-replication-restore -n demo
kubectl delete ns demo
```

## 总结
本指南演示了如何在KubeBlocks中使用完整备份和持续备份对Redis集群执行时间点恢复(PITR)。关键步骤包括:
- 验证可用备份
- 创建带有恢复配置的新Redis集群
- 监控恢复过程

通过此方法,您可以将Redis集群恢复到特定时间点,确保数据损失最小化和业务连续性。
\ No newline at end of file
diff --git a/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/_category_.yml
new file mode 100644
index 00000000..09845f2d
--- /dev/null
+++ b/docs/zh/preview/kubeblocks-for-redis/05-backup-restore/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: true
+collapsible: true
+label: 备份与恢复
+position: 5
diff --git a/docs/zh/preview/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx b/docs/zh/preview/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx
new file mode 100644
index 
00000000..20033cad --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/06-custom-secret/01-custom-secret.mdx @@ -0,0 +1,157 @@ +--- +description: 了解如何在KubeBlocks上部署Redis集群,并通过Kubernetes Secrets安全配置自定义密码。 +keywords: +- Redis +- KubeBlocks +- Custom Password +- Kubernetes +- Secrets +sidebar_label: 自定义密码 +sidebar_position: 1 +title: 在KubeBlocks上创建带自定义密码的Redis集群 +--- +# 在 KubeBlocks 上创建带自定义密码的 Redis 集群 + +本指南演示如何在 KubeBlocks 中部署 Redis 集群,并将默认账户的自定义密码存储在 Kubernetes Secret 中。 + +## 先决条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + +## 部署 Redis 复制集群 + +KubeBlocks 采用声明式方法管理 Redis 集群。以下是一个配置示例,用于部署包含 2 个节点(1 主 1 从)且带有自定义密码的 Redis 集群。 + +### 步骤 1:为默认账户创建 Secret + +自定义密码存储在 Kubernetes Secret 中。通过应用以下 YAML 创建 Secret: + +```yaml +apiVersion: v1 +data: + password: Y3VzdG9tcGFzc3dvcmQ= # custompassword + username: ZGVmYXVsdA== # default +immutable: true +kind: Secret +metadata: + name: custom-secret + namespace: demo +``` +- password: 将 custompassword 替换为您想要的密码,并使用 Base64 编码(`echo -n "custompassword" | base64`)。 +- username: Redis 的默认账户是 'default',其 Base64 编码为 'ZGVmYXVsdA=='(`echo -n "default" | base64`)。 + +### 步骤 2:部署 Redis 集群 + +应用以下清单部署 Redis 集群,并引用步骤 1 中创建的 Secret 作为默认账户凭据: +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + systemAccounts: # 覆盖系统账户密码 + - name: default + secretRef: + name: custom-secret + namespace: demo + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` +**关键字段说明** +- `systemAccounts`: 覆盖引用 `ComponentDefinition` 中定义的系统账户。 + +:::tip + +在 KubeBlocks Redis 插件中,预定义了一组系统账户。只有这些账户可以通过新 Secret 进行自定义。 + +::: + +获取账户列表: +```bash +kubectl get cmpd redis-7-1.0.0 -oyaml | yq '.spec.systemAccounts[].name' +``` + +预期输出: +```bash +default +``` + +## 验证部署 + +import VerifyCluster from '../_tpl/_verify-redis-replication-cluster.mdx' + + + +## 连接 Redis 集群 + +KubeBlocks 会自动创建包含 Redis 默认账户凭据的 Secret。使用以下命令获取凭据: + +```bash +kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d +custompassword +``` + +使用 Redis 客户端和自定义密码连接到集群主节点: +```bash +PASSWD=$(kubectl get secrets -n demo redis-replication-redis-account-default -o jsonpath='{.data.password}' | base64 -d) +kubectl exec -it -n demo redis-replication-redis-0 -c redis -- redis-cli -a ${PASSWD} +``` + +## 清理资源 +删除 Redis 集群及其命名空间以移除所有创建的资源: + +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete secret custom-secret -n demo +kubectl delete ns demo +``` + +## 总结 +在本指南中,您完成了以下操作: +- 创建 Kubernetes Secret 安全存储自定义的 Redis 默认账户密码 +- 在 KubeBlocks 中部署带有自定义密码的 Redis 集群 +- 验证部署并使用 Redis 客户端连接到集群主节点 + +使用 Kubernetes Secret 可以确保 Redis 集群凭据的安全管理,而 KubeBlocks 简化了部署和管理流程。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/06-custom-secret/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/06-custom-secret/_category_.yml new file mode 100644 index 00000000..76712392 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/06-custom-secret/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true
+collapsible: true +label: 自定义 Secret +position: 6 diff --git a/docs/zh/preview/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx b/docs/zh/preview/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx new file mode 100644 index 00000000..0e6f7bc9 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/08-monitoring/01-integrate-with-prometheus-operator.mdx @@ -0,0 +1,267 @@ +--- +description: 了解如何通过Prometheus Operator为KubeBlocks中的Redis集群配置可观测性。设置监控并通过Grafana可视化指标数据。 +keywords: +- KubeBlocks +- Redis +- Prometheus +- Grafana +- Observability +- Metrics +sidebar_label: Redis 集群可观测性 +sidebar_position: 2 +title: 使用 Prometheus Operator 实现 Redis 集群可观测性 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 使用 Prometheus Operator 监控 Redis + +本指南演示如何在 KubeBlocks 中为 Redis 集群配置全面的监控方案,方案包含: + +1. Prometheus Operator 用于指标采集 +2. 内置 Redis Exporter 用于指标暴露 +3. Grafana 用于可视化展示 + +## 前提条件 + +import Prerequisites from '../_tpl/_prerequisites.mdx' + + + + + +## 安装监控套件 + +### 1. 安装 Prometheus Operator +使用 Helm 部署 kube-prometheus-stack: + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus prometheus-community/kube-prometheus-stack \ + -n monitoring \ + --create-namespace +``` + +### 2. 验证安装 +检查所有组件是否正常运行: +```bash +kubectl get pods -n monitoring +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE +alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 114s +prometheus-grafana-75bb7d6986-9zfkx 3/3 Running 0 2m +prometheus-kube-prometheus-operator-7986c9475-wkvlk 1/1 Running 0 2m +prometheus-kube-state-metrics-645c667b6-2s4qx 1/1 Running 0 2m +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 114s +prometheus-prometheus-node-exporter-47kf6 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-6ntsl 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-gvtxs 1/1 Running 0 2m1s +prometheus-prometheus-node-exporter-jmxg8 1/1 Running 0 2m1s +``` + +## 部署 Redis 集群 + +import CreateCluster from '../_tpl/_create-redis-replication-cluster.mdx' + + + +**关键监控配置** +- `disableExporter: false` 启用内置指标导出器 +- 导出器以边车容器形式运行在每个 Redis Pod 中 +- 通过 9121 端口采集 Redis 指标 + +## 验证部署 +监控集群状态直至其转为 Running(运行中)状态: +```bash +kubectl get cluster redis-replication -n demo -w +``` + +示例输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Creating 50s +redis-replication redis Delete Running 4m2s +``` +当集群状态显示为 Running 时,表示您的 Redis 集群已准备就绪可供使用。 + +## 配置指标收集 + +### 1. 验证Exporter端点 +确认指标已暴露: + +```bash +kubectl get po redis-replication-redis-0 -n demo -oyaml | \ + yq '.spec.containers[] | select(.name=="metrics") | .ports' +``` + +示例输出: +```yaml +- containerPort: 9121 + name: http-metrics # 用于PodMonitor + protocol: TCP +``` + +测试指标端点: + +```bash +kubectl -n demo exec -it pods/redis-replication-redis-0 -c metrics -- \ + curl -s http://127.0.0.1:9121/metrics | head -n 50 +``` + +### 2.
创建PodMonitor +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: redis-replication-pod-monitor + namespace: demo + labels: # 必须与'prometheus.spec.podMonitorSelector'中的设置匹配 + release: prometheus +spec: + jobLabel: app.kubernetes.io/managed-by + # 定义从关联的Kubernetes 'Pod'对象传输到采集指标的标签 + # 根据实际需求设置标签 + podTargetLabels: + - app.kubernetes.io/instance + - app.kubernetes.io/managed-by + - apps.kubeblocks.io/component-name + - apps.kubeblocks.io/pod-name + podMetricsEndpoints: + - path: /metrics + port: http-metrics # 必须与exporter端口名称匹配 + scheme: http + namespaceSelector: + matchNames: + - demo # 目标命名空间 + selector: + matchLabels: + app.kubernetes.io/instance: redis-replication + apps.kubeblocks.io/component-name: redis +``` + +**PodMonitor配置指南** + +| 参数 | 必填 | 说明 | +|-----------|----------|-------------| +| `port` | 是 | 必须与exporter端口名称('http-metrics')匹配 | +| `namespaceSelector` | 是 | 指定Redis运行的命名空间 | +| `labels` | 是 | 必须与Prometheus的podMonitorSelector匹配 | +| `path` | 否 | 指标端点路径(默认:/metrics) | +| `interval` | 否 | 采集间隔(默认:30s) | + +## 验证监控配置 + +### 1. 检查 Prometheus 监控目标 +转发并访问 Prometheus 用户界面: + +```bash +kubectl port-forward svc/prometheus-kube-prometheus-prometheus -n monitoring 9090:9090 +``` +在浏览器中打开: +http://localhost:9090/targets + +检查是否存在与 PodMonitor 对应的抓取任务(任务名称为 'demo/redis-replication-pod-monitor')。 + +预期状态: +- 目标状态应为 UP(正常) +- 目标标签应包含 podTargetLabels 中定义的标签(例如 'app_kubernetes_io_instance') + +### 2. 测试指标收集 +验证指标是否被正确抓取: +```bash +curl -sG "http://localhost:9090/api/v1/query" --data-urlencode 'query=redis_up{app_kubernetes_io_instance="redis-replication"}' | jq +``` + +示例输出: +```json +{ + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "redis_up", + "app_kubernetes_io_instance": "redis-replication", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "redis", + "apps_kubeblocks_io_pod_name": "redis-replication-redis-1", + "container": "metrics", + "endpoint": "http-metrics", + "instance": "10.244.0.233:9121", + "job": "kubeblocks", + "namespace": "demo", + "pod": "redis-replication-redis-1" + }, + "value": [ + 1747475968.165, + "1" + ] + }, + { + "metric": { + "__name__": "redis_up", + "app_kubernetes_io_instance": "redis-replication", + "app_kubernetes_io_managed_by": "kubeblocks", + "apps_kubeblocks_io_component_name": "redis", + "apps_kubeblocks_io_pod_name": "redis-replication-redis-0", + "container": "metrics", + "endpoint": "http-metrics", + "instance": "10.244.0.231:9121", + "job": "kubeblocks", + "namespace": "demo", + "pod": "redis-replication-redis-0" + }, + "value": [ + 1747475968.165, + "1" + ] + } + ] + } +} +``` + +## 在 Grafana 中可视化监控数据 + +### 1. 访问 Grafana +通过端口转发登录 Grafana: + +```bash +kubectl port-forward svc/prometheus-grafana -n monitoring 3000:80 +``` +在浏览器中访问 http://localhost:3000,使用默认凭证登录: +- 用户名:'admin' +- 密码:'prom-operator'(默认值) + +### 2. 导入仪表板 +导入 KubeBlocks Redis 监控仪表板: + +1. 在 Grafana 中导航至 "+" → "导入" +2. 
选择以下任一方式导入: + - 粘贴仪表板 URL: + `https://raw.githubusercontent.com/apecloud/kubeblocks-addons/main/addons/redis/dashboards/redis.json` + - 或直接上传 JSON 文件 + +**仪表板包含:** +- 集群状态概览 +- 查询性能指标 +- 连接数统计 +- 副本同步健康状态 + +![redis-monitoring-grafana-dashboard.png](/img/docs/en/redis-monitoring-grafana-dashboard.png) + +## 删除 +要删除所有已创建的资源,请运行以下命令: +```bash +kubectl delete cluster redis-replication -n demo +kubectl delete ns demo +kubectl delete podmonitor redis-replication-pod-monitor -n demo +``` + +## 概述 +在本教程中,我们使用 Prometheus Operator 为 KubeBlocks 中的 Redis 集群配置了可观测性方案。通过设置 `PodMonitor`,我们实现了 Prometheus 对 Redis exporter 指标的自动抓取,并最终在 Grafana 中完成了指标可视化。这套监控体系能为 Redis 数据库的健康状态和性能表现提供关键洞察。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/08-monitoring/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/08-monitoring/_category_.yml new file mode 100644 index 00000000..02550e32 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/08-monitoring/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 监控 +position: 8 diff --git a/docs/zh/preview/kubeblocks-for-redis/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/_category_.yml new file mode 100644 index 00000000..36f9abda --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: KubeBlocks for Redis 社区版 +position: 12 diff --git a/docs/zh/preview/kubeblocks-for-redis/_tpl/_category_.yml b/docs/zh/preview/kubeblocks-for-redis/_tpl/_category_.yml new file mode 100644 index 00000000..82d8374c --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/_tpl/_category_.yml @@ -0,0 +1,5 @@ +collapsed: false +collapsible: true +hidden: true +label: 模板 +position: 100 diff --git a/docs/zh/preview/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx b/docs/zh/preview/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx new file mode 100644 index 00000000..9efd8931 --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/_tpl/_create-redis-replication-cluster.mdx @@ -0,0 +1,76 @@ +KubeBlocks 采用声明式方法来管理 Redis 复制集群。 +以下是一个部署包含两个组件(redis 和 redis sentinel)的 Redis 复制集群的配置示例。 + +应用以下 YAML 配置来部署集群: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: redis-replication + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +### 关键配置说明: +1. **集群定义** + - `clusterDef: redis` 指定使用 Redis 集群定义 + - `topology: replication` 设置为复制模式拓扑 + +2. **Redis 组件** + - 版本 `7.2.4` + - 2 个副本(1 主 1 从) + - 资源限制:0.5 CPU 核 + 0.5Gi 内存 + - 数据存储卷:20Gi RWO(ReadWriteOnce)存储 + +3. **Sentinel 组件** + - 3 个哨兵节点(满足高可用法定人数) + - 相同资源配置 + - 独立的数据存储卷配置 + +4. 
**终止策略** + - `terminationPolicy: Delete` 表示删除集群时会清理所有资源 + +注:`storageClassName: ""` 表示使用默认存储类,实际部署时应根据 Kubernetes 环境配置具体存储类名称。 \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/_tpl/_prerequisites.mdx b/docs/zh/preview/kubeblocks-for-redis/_tpl/_prerequisites.mdx new file mode 100644 index 00000000..87bfd77d --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/_tpl/_prerequisites.mdx @@ -0,0 +1,11 @@ +在继续之前,请确保满足以下条件: +- 环境准备: + - 已有一个运行中的 Kubernetes 集群。 + - 已配置 kubectl CLI 工具以与集群通信。 + - 已安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli) 和 [KubeBlocks Operator](../../user_docs/overview/install-kubeblocks)。安装指引请参考此处。 +- 命名空间准备:为保持资源隔离,请为本教程创建一个专用命名空间: + +```bash +kubectl create ns demo +namespace/demo created +``` \ No newline at end of file diff --git a/docs/zh/preview/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx b/docs/zh/preview/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx new file mode 100644 index 00000000..d53fa5ed --- /dev/null +++ b/docs/zh/preview/kubeblocks-for-redis/_tpl/_verify-redis-replication-cluster.mdx @@ -0,0 +1,33 @@ +监控集群状态直至其转为运行中(Running)状态: +```bash +kubectl get cluster redis-replication -n demo -w +``` + +预期输出: + +```bash +NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE +redis-replication redis Delete Running 3m49s +``` + +检查Pod状态与角色: +```bash +kubectl get pods -l app.kubernetes.io/instance=redis-replication -L kubeblocks.io/role -n demo +``` + +预期输出: +```bash +NAME READY STATUS RESTARTS AGE ROLE +redis-replication-redis-0 3/3 Running 0 3m38s primary +redis-replication-redis-1 3/3 Running 0 3m16s secondary +redis-replication-redis-sentinel-0 2/2 Running 0 4m35s +redis-replication-redis-sentinel-1 2/2 Running 0 4m17s +redis-replication-redis-sentinel-2 2/2 Running 0 3m59s +``` + +当集群状态显示为Running时,表示您的Redis集群已准备就绪可供使用。 + +:::tip +如果是首次创建集群,可能需要一定时间拉取镜像后才能正常运行。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/_category_.yml b/docs/zh/preview/user_docs/concepts/_category_.yml new file mode 100644 index 00000000..f7a99ce0 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 概念与特性 +position: 2 diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/_category_.yaml b/docs/zh/preview/user_docs/concepts/backup-and-restore/_category_.yaml new file mode 100644 index 00000000..701c5678 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/_category_.yaml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 备份与恢复 +position: 3 diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/_category_.yaml b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/_category_.yaml new file mode 100644 index 00000000..082f1b6d --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/_category_.yaml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 备份 +position: 3 diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx new file mode 100644 index 00000000..a81abba8 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/backup-repo.mdx @@ -0,0 +1,651 @@ +--- +description: 如何配置 BackupRepo +keywords: +- introduction +- backup +- restore +sidebar_label: 配置备份仓库 +sidebar_position: 1 +title: 配置备份仓库 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + 
+# 简介 + +BackupRepo 是备份数据的存储仓库。目前,KubeBlocks 支持配置多种对象存储服务作为备份仓库,包括 OSS(阿里云对象存储服务)、S3(亚马逊简单存储服务)、COS(腾讯云对象存储)、GCS(谷歌云存储)、OBS(华为云对象存储)、Azure Blob Storage、MinIO 以及其他兼容 S3 协议的服务。 + +您可以根据不同业务场景创建多个 BackupRepo。例如:按业务划分,将业务 A 的数据存储在仓库 A 中,业务 B 的数据存储在仓库 B 中;或者按地域配置多个仓库,实现异地容灾。但在创建备份时必须指定备份仓库。您也可以创建一个默认备份仓库,当未指定具体仓库时,KubeBlocks 会使用该默认仓库存储备份数据。 + +## 准备工作 + +在开始之前,请确保已完成以下所有准备工作。 + +* [安装 kbcli](./../../../references/install-kbcli) +* [安装 kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) +* [安装 Helm](https://helm.sh/docs/intro/install/) +* [安装 KubeBlocks](./../../../overview/install-kubeblocks) + + + +## 配置备份仓库 + +准备好对象存储服务后,即可配置备份仓库(BackupRepo)。KubeBlocks 提供两种配置方式: + +* 在安装 KubeBlocks 时自动配置备份仓库; +* 按需手动配置备份仓库。 + +:::tip + +如果没有云提供商的对象存储服务,可以按照[安装 MinIO](../../../references/install-minio)指南在 Kubernetes 中部署开源服务 MinIO。 + +::: + +### 访问备份仓库的方式 + +访问远程对象存储有两种方法: + +| 方法 | 描述 | 要求 | 安全考量 | +|------|------|------|----------| +| 工具(Tool) | 使用命令行工具直接访问远程存储 | 无需额外驱动 | 凭据会作为 Secret 跨命名空间同步 | +| 挂载(Mount) | 使用 CSI 驱动将远程存储挂载到本地 | 需要安装 CSI 驱动 | 命名空间间不共享凭据 | + +访问方式在创建 BackupRepo 时通过 `accessMethod` 字段指定,后续不可更改。 + +**推荐方案**: +- 在可信环境中使用"工具"方法简化部署 +- 在多租户场景中使用"挂载"方法增强安全性 + +### 手动配置备份仓库 + +如果在安装 KubeBlocks 时未配置备份仓库信息,可按照以下说明手动配置。 + + + + + +1. 安装 S3 CSI 驱动(仅用于挂载方式)。 + + ```bash + helm repo add yandex-s3 https://yandex-cloud.github.io/k8s-csi-s3/charts + + helm install csi-s3 yandex-s3/csi-s3 -n kb-system + ``` + 更多信息请参考[Yandex Cloud CSI S3 驱动](https://github.com/yandex-cloud/k8s-csi-s3)。 + +2. 创建备份仓库。 + + + + + + ```bash + # 创建保存 S3 访问密钥的 Secret + kubectl create secret generic s3-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: s3 + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + endpoint: "" + mountOptions: --memory-limit 1000 --dir-mode 0777 --file-mode 0666 + region: cn-northwest-1 + credential: + name: s3-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 OSS 访问密钥的 Secret + kubectl create secret generic oss-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: oss + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: cn-zhangjiakou + credential: + name: oss-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 OBS 访问密钥的 Secret + kubectl create secret generic obs-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: obs + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + 
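+      # 将 bucket 替换为已创建的 OBS 存储桶名称(test-kb-backup 仅为示例),region 需与存储桶所在区域保持一致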
mountOptions: "" + endpoint: "" + region: cn-north-4 + credential: + name: obs-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 COS 访问密钥的 Secret + kubectl create secret generic cos-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: cos + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: ap-guangzhou + credential: + name: cos-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 GCS 访问密钥的 Secret + kubectl create secret generic gcs-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: gcs-s3comp + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: "" + region: auto + credential: + name: gcs-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 MinIO 访问密钥的 Secret + kubectl create secret generic minio-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: minio + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + mountOptions: "" + endpoint: + credential: + name: minio-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 S3 兼容存储访问密钥的 Secret + kubectl create secret generic s3-comp-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accessKeyId= \ + --from-literal=secretAccessKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: s3-compatible + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + bucket: test-kb-backup + endpoint: + forcePathStyle: true + credential: + name: s3-comp-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + + ```bash + # 创建保存 Azure Blob 存储访问密钥的 Secret + kubectl create secret generic azureblob-credential-for-backuprepo \ + -n kb-system \ + --from-literal=accountName= \ + --from-literal=accountKey= + + # 创建 BackupRepo 资源 + kubectl apply -f - <<-'EOF' + apiVersion: dataprotection.kubeblocks.io/v1alpha1 + kind: BackupRepo + metadata: + name: my-repo + annotations: + dataprotection.kubeblocks.io/is-default-repo: "true" + spec: + storageProviderRef: azureblob + accessMethod: Tool + pvReclaimPolicy: Retain + volumeCapacity: 100Gi + config: + 
container: test-kb-backup + credential: + name: azureblob-credential-for-backuprepo + namespace: kb-system + pathPrefix: "" + EOF + ``` + + + + + +3. 查看备份仓库及其状态。如果状态为 `Ready`,则表示备份仓库已就绪。 + + ```bash + kubectl get backuprepo + ``` + + + + + +1. 安装 S3 CSI 驱动(仅用于挂载方式)。 + + ```bash + # 启用 CSI-S3 插件 + kbcli addon enable csi-s3 + + # 可添加标志自定义插件安装 + # CSI-S3 默认在所有节点上安装 daemonSet Pod,可通过设置容忍度指定安装节点 + kbcli addon enable csi-s3 \ + --tolerations '[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' \ + --tolerations 'daemonset:[{"key":"taintkey","operator":"Equal","effect":"NoSchedule","value":"true"}]' + + # 查看 CSI-S3 驱动状态,确保为 Enabled + kbcli addon list csi-s3 + ``` + +2. 创建备份仓库。 + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider s3 \ + --region cn-northwest-1 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + 也可指定 `--access-method` 为 `Mount`。 + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider oss \ + --region cn-zhangjiakou \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + 也可使用 `--endpoint` 标志显式指定 OSS 端点。例如: + + ```bash + kbcli backuprepo create my-repo \ + --provider oss \ + --region cn-zhangjiakou \ + --bucket test-kb-backup \ + --endpoint https://oss-cn-zhangjiakou-internal.aliyuncs.com \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider obs \ + --region cn-north-4 \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + + + + + 对于 COS,存储桶的命名格式为 ``,其中 APPID 由腾讯云自动生成。设置 `--bucket` 时,请先在腾讯云控制台创建存储桶并获取存储桶名称。 + + ```bash + kbcli backuprepo create my-repo \ + --provider cos \ + --region ap-guangzhou \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider gcs-s3comp \ + --region auto \ + --bucket test-kb-backup \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + KubeBlocks 支持的 GCS 是 Google Cloud 提供的 S3 兼容版本。 + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider minio \ + --endpoint \ + --bucket test-minio \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --default + ``` + + 部署的 MinIO 地址为 http://minio.kb-system.svc.cluster.local:9000。 + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider s3-compatible \ + --endpoint \ + --bucket test-minio \ + --access-key-id \ + --secret-access-key \ + --access-method Tool \ + --force-path-style=true \ + --default + ``` + + + + + + ```bash + kbcli backuprepo create my-repo \ + --provider azureblob \ + --container test-kb-backup \ + --azure-account-name \ + --azure-account-key \ + --access-method Tool \ + --default + ``` + + + + + + 以上命令会创建一个默认备份仓库 `my-repo`。 + + * `my-repo` 是创建的备份仓库名称。如果不指定名称,系统会生成随机名称,格式为 `backuprepo-xxxxx`。 + * `--default` 表示将此仓库设为默认仓库。注意全局默认仓库只能有一个。如果存在多个默认仓库,KubeBlocks 无法决定使用哪个(类似于 Kubernetes 的默认 StorageClass),进而导致备份失败。使用 kbcli 创建 BackupRepo 可避免此问题,因为 kbcli 会在创建新仓库前检查是否存在其他默认仓库。 + * `--provider` 指定存储类型(即 `storageProvider`),是创建 BackupRepo 的必填项。可选值包括 `s3`、`cos`、`gcs-s3comp`、`obs`、`oss`、`azureblob`、`minio`、`s3-compatible`、`ftp` 和 `nfs`。不同存储提供商的参数各异,可运行 `kbcli backuprepo create --provider STORAGE-PROVIDER-NAME -h` 查看不同存储提供商的标志。请注意 `--provider` 是配置中的必填项。 + + `kbcli 
backuprepo create` 执行成功后,系统会创建类型为 `BackupRepo` 的 K8s 资源。可通过修改此资源的注解来调整默认仓库。 + + ```bash + # 取消默认仓库 + kubectl annotate backuprepo old-default-repo \ + --overwrite=true \ + dataprotection.kubeblocks.io/is-default-repo=false + ``` + + ```bash + # 设置新的默认仓库 + kubectl annotate backuprepo backuprepo-4qms6 \ + --overwrite=true \ + dataprotection.kubeblocks.io/is-default-repo=true + ``` + +3. 查看备份仓库及其状态。如果状态为 `Ready`,则表示备份仓库已就绪。 + + ```bash + kbcli backuprepo list + ``` + + + + + +:::note + +如果备份仓库状态显示 Failed 或长时间处于 PreChecking 状态,可运行 `kubectl describe backuprepo my-repo` 或 `kbcli backuprepo describe my-repo` 查看 `status.conditions` 获取详情。 + +排查建议: + +* 检查配置参数(如 `endpoint`、`accessKeyId` 和 `secretAccessKey`)是否正确指定。 +* 对于自托管对象存储(如 Ceph 对象存储),可尝试使用 `s3-compatible` 作为 StorageProvider。默认的 `s3` StorageProvider 使用虚拟托管 URL 样式,某些自托管存储可能不支持。 +* 如果出现 `InvalidLocationConstraint` 错误,检查其参数是否配置正确。如问题持续,可尝试将 `region` 参数留空后重试。 +* 如果状态长时间处于 `PreChecking`,请检查网络连接。确保从 Kubernetes 集群内部可访问存储服务。可通过运行 Pod 并使用相应客户端连接存储服务进行测试。 +* KubeBlocks 内部使用 [rclone](https://rclone.org/) 进行数据传输。检查 rclone 是否能成功访问存储服务。 + +::: + +### 自动配置备份仓库 + +安装 KubeBlocks 时可在 YAML 配置文件中指定备份仓库信息,KubeBlocks 将据此创建备份仓库。 + +1. 准备配置文件。 + + 以 AWS S3 为例,配置文件 `backuprepo.yaml` 内容如下: + + ```yaml + backupRepo: + create: true + storageProvider: s3 + config: + region: cn-northwest-1 + bucket: test-kb-backup + secrets: + accessKeyId: + secretAccessKey: + ``` + + * `region`: 指定 S3 所在区域。 + * `bucket`: 指定 S3 的存储桶名称。 + * `accessKeyId`: 指定 AWS 的访问密钥。 + * `secretAccessKey`: 指定 AWS 的密钥。 + * `storageProvider`:指定对象存储提供商,本例为 S3。 + +:::note + +* KubeBlocks 中可用的 `storageProvider` 选项包括 `s3`、`cos`、`gcs-s3comp`、`obs`、`oss`、`azureblob`、`minio`、`s3-compatible`、`ftp` 和 `nfs`。 +* 不同 `storageProvider` 的配置可能不同。上例中的 `config` 和 `secrets` 适用于 S3。 +* 执行命令 `kubectl get storageproviders.dataprotection.kubeblocks.io` 可查看支持的 `storageProvider` 选项。 + +::: + +2. 
安装 KubeBlocks 时指定配置文件。 + + + + + + ```bash + kubectl create -f backuprepo.yaml + ``` + + 安装后使用以下命令检查备份仓库。 + + ```bash + kubectl get backuprepo + ``` + + + + + + ```bash + kbcli kubeblocks install -f backuprepo.yaml + ``` + + 安装后使用以下命令检查备份仓库。 + + ```bash + kbcli backuprepo list + ``` + + + + \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx new file mode 100644 index 00000000..3a55ed97 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/configure-backuppolicy.mdx @@ -0,0 +1,173 @@ +--- +description: 如何配置备份策略 +keywords: +- backup +- backup policy +sidebar_label: 配置备份策略 +sidebar_position: 2 +title: 配置备份策略 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 配置备份策略 + +## 配置加密密钥 + +为确保恢复后的集群能正常访问数据,KubeBlocks在备份过程中会对集群凭证进行加密,并将其安全存储在Backup对象的Annotation中。因此,为保障数据安全,强烈建议在安装或升级KubeBlocks时谨慎分配备份对象的Get/List权限,并指定加密密钥。这些措施有助于确保数据得到妥善保护。 + +自v0.9.0版本起,KubeBlocks已为datasafed集成数据加密功能。当前支持的加密算法包括`AES-128-CFB`、`AES-192-CFB`和`AES-256-CFB`。该功能允许备份数据在写入存储前进行加密,加密密钥将用于加密连接密码及备份数据。您可以根据实际需求引用现有密钥或为数据库集群创建不同的密钥。 + +### 引用现有密钥 + +若密钥已存在,您可以选择直接引用而无需设置`dataProtection.encryptionKey`。KubeBlocks提供了快速引用现有加密密钥的方式。 + +假设已有名为`dp-encryption-key`的预定义Secret,其中包含`encryptionKey`键值。例如通过以下命令创建的Secret: + +```bash +kubectl create secret generic dp-encryption-key \ + --from-literal=encryptionKey='S!B\*d$zDsb=' +``` + +随后在安装或升级KubeBlocks时可引用该密钥: + +```bash +kbcli kubeblocks install \ + --set dataProtection.encryptionKeySecretKeyRef.name="dp-encryption-key" \ + --set dataProtection.encryptionKeySecretKeyRef.key="encryptionKey" +# 上述命令等效于: +# kbcli kubeblocks install --set dataProtection.encryptionKey='S!B\*d$zDsb=' +``` + +### 创建新密钥 + +若不需要默认启用备份加密,或需使用独立的`encryptionKey`,只需创建Secret并按以下步骤手动启用备份加密: + +1. 创建存储加密密钥的Secret: + + ```bash + kubectl create secret generic backup-encryption \ + --from-literal=secretKey='your secret key' + ``` + +2. 启用加密: + + 请确保引用先前创建的密钥: + + ```bash + kubectl --type merge patch backuppolicy mysqlcluster-mysql-backup-policy \ + -p '{"spec":{"encryptionConfig":{"algorithm":"AES-256-CFB","passPhraseSecretKeyRef":{"name":"backup-encryption","key":"secretKey"}}}}' + ``` + +:::note + +也可使用`kbcli`简化流程: + +```bash +# 启用加密 +kbcli cluster edit-backup-policy --set encryption.algorithm=AES-256-CFB --set encryption.passPhrase="SECRET!" 
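+ +# (可选)验证加密配置是否已写入 BackupPolicy(策略名 mycluster-mysql-backup-policy 仅为示例,请替换为实际名称) +kubectl get backuppolicy mycluster-mysql-backup-policy -o jsonpath='{.spec.encryptionConfig}'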
+ +# 禁用加密 +kbcli cluster edit-backup-policy --set encryption.disabled=true +``` + +::: + +现在可照常执行备份和恢复操作。 + +:::note + +步骤1中创建的Secret不可修改或删除,否则可能导致备份解密失败。 + +::: + +默认情况下`encrytpionKey`仅用于加密连接密码,若需同时加密备份数据,请在上述命令中添加`--set dataProtection.enableBackupEncryption=true`。此后所有新建集群将默认启用备份加密。 + +## 创建集群 + +准备用于测试备份恢复功能的集群。以下示例使用默认命名空间中的MySQL集群`mycluster`: + +```shell +# 创建MySQL集群 +kbcli cluster create mysql mycluster + +# 查看备份策略 +kbcli cluster list-backup-policies mycluster +> +名称 命名空间 默认 集群 组件 创建时间 状态 +mycluster-mysql-backup-policy default true mycluster mysql 2025-05-26 18:11 UTC+0800 可用 +``` + +默认所有备份存储在全局仓库中。执行以下命令查看所有备份仓库,当`DEFAULT`字段为`true`时表示该仓库为默认仓库: + +```bash +# 查看备份仓库 +kbcli backuprepo list +``` + +## 查看备份策略 + +创建数据库集群后,支持备份的数据库会自动生成备份策略。执行以下命令查看集群备份策略: + + + + + +```bash +kubectl get backuppolicy -l app.kubernetes.io/instance=mycluster +> +名称 备份仓库 状态 创建时间 +mycluster-mysql-backup-policy 可用 83秒 +``` + + + + + +```bash +kbcli cluster list-backup-policies mycluster +> +名称 命名空间 默认 集群 组件 创建时间 状态 +mycluster-mysql-backup-policy default true mycluster mysql 2025-05-26 18:11 UTC+0800 可用 +``` + + + + + +备份策略包含集群支持的备份方法。执行以下命令查看备份方法: + + + + + +```bash +kubectl get backuppolicy mycluster-mysql-backup-policy -o yaml +``` + + + + + +```bash +kbcli cluster describe-backup-policy mycluster +> +概览: + 名称: mycluster-mysql-backup-policy + 集群: mycluster + 组件: mysql + 命名空间: default + 默认: true + +备份方法: +名称 动作集 快照卷 +xtrabackup mysql-xtrabackup-br false +volume-snapshot mysql-volume-snapshot true +archive-binlog mysql-pitr false +``` + + + + + +对于MySQL集群,默认支持两种备份方法:`xtrabackup`和`volume-snapshot`。前者使用备份工具`xtrabackup`将MySQL数据备份至对象存储,后者利用云存储的快照能力通过快照备份数据。创建备份时可指定使用的备份方法。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx new file mode 100644 index 00000000..8d725cd2 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/on-demand-backup.mdx @@ -0,0 +1,142 @@ +--- +description: 如何通过快照和备份工具按需备份数据库 +keywords: +- backup +- on-demand backup +- snapshot backup +- backup tool +sidebar_label: 按需备份 +sidebar_position: 4 +title: 按需备份 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 按需备份 + +KubeBlocks 支持按需备份。您可以通过指定 `--method` 来自定义备份方式。以下示例分别展示了使用备份工具和存储卷快照两种方法。 + +## 备份工具 + +以下命令使用 `xtrabackup` 备份方式创建一个名为 `mybackup` 的备份。 + + + + + +创建备份: +```bash +kubectl apply -f - < +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +mybackup mycluster-mysql-backup-policy xtrabackup kb-oss Completed 1632402 10s Delete 2025-05-26T10:14:33Z 2025-05-26T10:14:42Z +``` + + + + +创建备份 +```bash +kbcli cluster backup mycluster --name mybackup --method xtrabackup +> +Backup mybackup created successfully, you can view the progress: + kbcli cluster list-backups --names=mybackup -n default +``` + +查看备份 +```bash +kbcli cluster list-backups --names mybackup +> +NAME NAMESPACE SOURCE-CLUSTER METHOD STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATE-TIME COMPLETION-TIME EXPIRATION +mybackup default mycluster xtrabackup Completed 1632402 10s Delete May 26,2025 18:14 UTC+0800 May 26,2025 18:14 UTC+0800 +``` + + + + + +## 存储卷快照备份 + +:::note +**前提条件** +使用存储卷快照备份需要: +- StorageClass 必须支持存储卷快照功能 + +请查阅 CSI 驱动及其功能支持列表: +https://kubernetes-csi.github.io/docs/drivers.html + +::: + +要使用快照创建备份,需在 YAML 配置文件中将 `backupMethod` 或在 kbcli 命令中将 `--method` 参数设置为 `volume-snapshot`。 + + + + + +```bash +# 
创建备份 +kubectl apply -f - < +NAME POLICY METHOD REPO STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATION-TIME COMPLETION-TIME EXPIRATION-TIME +my-snapshot-backup mycluster2-mysql-backup-policy volume-snapshot Running Delete 2025-05-26T10:30:10Z +``` + + + + + +```bash +# 创建备份 +kbcli cluster backup mycluster --name my-snapshot-backup --method volume-snapshot +> +Backup mybackup created successfully, you can view the progress: + kbcli cluster list-backups --names=mybackup -n default + +# 查看备份 +kbcli cluster list-backups --names=my-snapshot-backup -n default +> +NAME NAMESPACE SOURCE-CLUSTER METHOD STATUS TOTAL-SIZE DURATION DELETION-POLICY CREATE-TIME COMPLETION-TIME EXPIRATION +my-snapshot-backup default mycluster volume-snapshot Running Delete May 26,2025 18:30 UTC+0800 +``` + + + + + +:::caution + +1. 使用快照创建备份时,请确保所用存储支持快照功能,否则备份可能失败。 + +2. 通过 `kbcli` 手动创建的备份不会自动删除,需要您手动清理。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx new file mode 100644 index 00000000..c734e814 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/backup/scheduled-backup.mdx @@ -0,0 +1,89 @@ +--- +description: 如何按计划备份数据库 +keywords: +- backup and restore +- schedule +- automatic backup +- scheduled backup +sidebar_label: 定时备份 +sidebar_position: 3 +title: 定时备份 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 定时备份 + +KubeBlocks 支持为集群配置定时备份功能。 + + + + + +通过 kubectl 修改备份配置字段如下。 + +```bash +kubectl edit cluster -n default mycluster +``` + +编辑集群 YAML 文件。 + +```yaml +spec: + ... + backup: + # 是否启用自动备份 + enabled: true + # UTC 时区,以下示例表示每周一凌晨2点 + cronExpression: 0 18 * * * + # 使用 xtrabackup 进行备份。若存储支持快照,可改为 volume-snapshot + method: xtrabackup + # 是否启用 PITR(时间点恢复) + pitrEnabled: false + # 备份集的保留期限 + retentionPeriod: 7d + # 备份仓库名称 + repoName: my-repo +``` + +在上述 YAML 文件中,您可以根据需要设置是否启用自动备份和 PITR,同时指定备份方法、仓库名称、保留期限等参数。 + + + + + +```bash +kbcli cluster update mycluster --backup-enabled=true \ +--backup-method=xtrabackup --backup-repo-name=my-repo \ +--backup-retention-period=7d --backup-cron-expression="0 18 * * *" +``` + +- `--backup-enabled` 表示是否启用定时备份 +- `--backup-method` 指定备份方法,可通过 `kbcli cluster describe-backup-policy mycluster` 命令查看支持的备份方法 +- `--backup-repo-name` 指定备份仓库名称 +- `--backup-retention-period` 指定备份保留期限,示例中为7天 +- `--backup-cron-expression` 使用 UTC 时区的 cron 表达式指定备份计划,表达式格式参考 [cron](https://en.wikipedia.org/wiki/Cron) + + + + + +启用定时备份后,执行以下命令检查是否已创建 CronJob 对象: + +```bash +kubectl get cronjob +> +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +96523399-mycluster-default-xtrabackup 0 18 * * * False 0 57m +``` + +也可执行以下命令查看集群信息,其中 `Data Protection:` 部分会显示自动备份的配置详情。 + +```bash +kbcli cluster describe mycluster +> +... 
+数据保护: +备份仓库 自动备份 备份计划 备份方法 保留期限 +my-repo 已启用 0 18 * * * xtrabackup 7d +``` \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/introduction.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/introduction.mdx new file mode 100644 index 00000000..9b346faf --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/introduction.mdx @@ -0,0 +1,33 @@ +--- +description: KubeBlocks 备份与恢复功能简介 +keywords: +- introduction +- backup +- restore +sidebar_label: 简介 +sidebar_position: 1 +title: 简介 +--- +# 简介 + +KubeBlocks 提供数据备份与恢复功能,确保您的数据安全可靠。KubeBlocks 的备份恢复功能依赖于 BackupRepo,在使用全量备份恢复功能前,您需要[先配置 BackupRepo](./backup/backup-repo.md)。 + +KubeBlocks 采用物理备份方式,以数据库中的物理文件作为备份对象。您可以根据需求选择以下备份方案,按需或定时备份集群数据: + +* [按需备份](./backup/on-demand-backup.md):根据备份方式不同,可分为备份工具和快照备份两种类型。 + * 备份工具:您可以使用对应数据产品的专用备份工具,例如 MySQL 的 XtraBackup 和 PostgreSQL 的 pg_basebackup。KubeBlocks 支持为不同数据产品配置专属备份工具。 + * 快照备份:若您的数据存储在支持快照的云盘上,可通过创建快照实现数据备份。快照备份通常比备份工具更快速,因此推荐使用。 + +* [定时备份](./backup/scheduled-backup.md):您可以指定保留时间、备份方法、执行时间等参数来自定义备份计划。 + +在数据恢复方面,KubeBlocks 支持从备份集中恢复数据: + +* 数据恢复 + * [从备份集恢复数据](./restore/restore-data-from-backup-set.md) + +请按以下步骤操作实现集群备份与恢复: + +1. [配置 BackupRepo](./backup/backup-repo.md) +2. [配置 BackupPolicy](./backup/configure-backuppolicy.md) +3. 选择[按需](./backup/on-demand-backup.md)或[定时](./backup/scheduled-backup.md)方式备份集群 +4. 通过[时间点恢复(PITR)](./restore/pitr.md)或从[备份集](./restore/restore-data-from-backup-set.md)恢复数据 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/_category_.yaml b/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/_category_.yaml new file mode 100644 index 00000000..6a8e63f5 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/_category_.yaml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 恢复 +position: 3 diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/pitr.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/pitr.mdx new file mode 100644 index 00000000..80ffc956 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/pitr.mdx @@ -0,0 +1,118 @@ +--- +description: 如何执行时间点恢复 (PITR) +keywords: +- backup and restore +- restore +- PITR +- postgresql +sidebar_label: 时间点恢复 +sidebar_position: 2 +title: PITR -> 时间点恢复 (Point-in-Time Recovery) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 时间点恢复 (PITR) + +## 什么是时间点恢复 (PITR) + +时间点恢复 (Point-in-Time Recovery) 是关系型数据库管理系统 (RDBMS) 中常用的备份恢复技术,它允许将数据变更恢复到特定时间点,使数据库回退到该时间点之前的状态。在 PITR 中,数据库系统会定期创建完整备份,之后记录所有事务日志,包括插入、更新和删除操作。恢复时,系统先还原最近的完整备份,再应用备份后记录的事务日志,使数据库回到所需状态。 + +KubeBlocks 支持 MySQL 和 PostgreSQL 等数据库的 PITR 功能。本文档以 PostgreSQL 的 PITR 为例,更多细节请参考 [PostgreSQL 备份与恢复](../../../../kubeblocks-for-postgresql/05-backup-restore/06-restore-with-pitr)。 + +## 如何执行时间点恢复? + +**步骤 1. 查看集群可恢复的时间点** + + + + + +```bash +# 获取持续备份的时间范围 +kubectl get backup -l app.kubernetes.io/instance=pg-cluster -l dataprotection.kubeblocks.io/backup-type=Continuous -oyaml +... +status: + timeRange: + end: "2024-05-07T10:47:14Z" + start: "2024-05-07T10:07:45Z" +``` + +可见当前备份时间范围为 `2024-05-07T10:07:45Z ~2024-05-07T10:47:14Z`。但数据恢复仍需依赖完整备份,且该完整备份必须完成于日志备份的时间范围内。 + + + + + +```bash +kbcli cluster describe pg-cluster +> +... 
+数据保护: +备份仓库 自动备份 备份计划 备份方法 备份保留期 可恢复时间范围 +minio 已启用 */5 * * * * archive-wal 8d May 07,2024 15:29:46 UTC+0800 ~ May 07,2024 15:48:47 UTC+0800 +``` + +`可恢复时间范围` 表示集群可恢复的时间区间。 + +可见当前备份时间范围为 `May 07,2024 15:29:46 UTC+0800 ~ May 07,2024 15:48:47 UTC+0800`。但数据恢复仍需依赖完整备份,且该完整备份必须完成于日志备份的时间范围内。 + + + + + +**步骤 2. 将集群恢复到指定时间点** + + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: +name: pg-cluster-pitr +spec: + clusterName: pg-cluster-pitr + restore: + backupName: 818aa0e0-pg-kubeblocks-cloud-n-archive-wal + restorePointInTime: "2024-05-07T10:07:45Z" + volumeRestorePolicy: Parallel + type: Restore +``` + + + + + +```bash +kbcli cluster restore pg-cluster-pitr --restore-to-time 'May 07,2024 15:48:47 UTC+0800' --backup +``` + + + + + +**步骤 3. 检查新集群状态** + + + + + +```bash +kubectl get cluster pg-cluster-pitr +``` + + + + + +```bash +kbcli cluster list pg-cluster-pitr +``` + + + + + +当状态变为 `Running` 时,表示操作成功。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx b/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx new file mode 100644 index 00000000..849108eb --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/backup-and-restore/restore/restore-data-from-backup-set.mdx @@ -0,0 +1,61 @@ +--- +description: 如何从备份集恢复数据 +keywords: +- backup and restore +- restore +- backup set +sidebar_label: 从备份集恢复 +sidebar_position: 1 +title: 从备份集恢复数据 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 从备份集恢复数据 + +KubeBlocks 支持从备份中恢复集群。本文档以 MySQL 为例,更多详细信息请参考 [MySQL 备份与恢复](../../../../kubeblocks-for-mysql/05-backup-restore/05-restoring-from-full-backup)。 + +**步骤 1. 查看备份集。** + + + + + +```bash +kubectl get backups +``` + + + + + +对于现有集群,执行: + +```bash +kbcli cluster list-backups mycluster +``` + +如果集群已被删除,执行: + +```bash +kbcli dataprotection list-backups +``` + + + + + +**步骤 2. 
从指定备份恢复集群。** + +```bash +# 恢复为新集群 +kbcli cluster restore myrestore --backup mybackup +> +集群 myrestore 已创建 + +# 查看恢复集群的状态 +kbcli cluster list myrestore +> +名称 命名空间 集群定义 终止策略 状态 创建时间 +myrestore default mysql Delete 运行中 2025年5月26日 18:42 UTC+0800 +``` \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/concept.mdx b/docs/zh/preview/user_docs/concepts/concept.mdx new file mode 100644 index 00000000..f30d5f14 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/concept.mdx @@ -0,0 +1,159 @@ +--- +description: KubeBlocks, CRD +keywords: +- kubeblocks +- concepts +sidebar_position: 1 +title: 概念 +--- +# 概念 + +在["统一API如何降低学习曲线"](./../overview/introduction)章节中,您已经看到了使用统一API表示各类数据库的优势。如果仔细观察这些示例,您会发现示例YAML文件中存在两个核心概念:**Cluster(集群)**和**Component(组件)**。例如,`test-mysql`是一个Cluster,它包含一个名为`mysql`的Component(其componentDef为`apecloud-mysql`);同样,`test-redis`也是一个Cluster,它包含两个Component:一个名为`redis`(componentDef为`redis-7`)的组件包含两个副本,另一个名为`redis-sentinel`(componentDef为`redis-sentinel`)的组件包含三个副本。 + +本文将解释这两个概念的设计初衷,并简要介绍其底层API(即CRD)。 + +## KubeBlocks 分层 API 的设计动机 + +在 KubeBlocks 中,为了通过统一 API 管理各类数据库,我们需要对不同数据库的拓扑结构和特性进行抽象。 + +我们观察到,生产环境中部署的数据库系统通常采用多组件构成的拓扑结构。例如,一个生产级 MySQL 集群可能包含若干 Proxy 节点(如 ProxySQL、MaxScale、Vitess、WeScale 等)和多个 MySQL 服务器节点(如 MySQL 社区版、Percona、MariaDB、ApeCloud MySQL 等),以实现更高的可用性和读写分离能力。类似地,Redis 部署通常由主节点和多个只读副本组成,通过 Sentinel 管理实现高可用。部分用户还会使用 twemproxy 进行水平分片,以获得更大的容量和吞吐量。 + +这种模块化设计在分布式数据库系统中更为显著——整个系统被划分为职责明确且单一的独立组件,例如数据存储、查询处理、事务管理、日志记录和元数据管理等。这些组件通过网络交互,在提供与单机数据库相当的强一致性和事务保证的同时,还能实现负载均衡、分布式事务和具备故障转移能力的灾难恢复等复杂功能。 + +因此,KubeBlocks 采用了分层 API(即 CRD)设计,由 **Cluster** 和 **Component** 构成,以适配数据库系统多组件、高可变的部署拓扑。这些抽象层使我们能够灵活表示和管理数据库系统在 Kubernetes 上的多样化动态拓扑,并轻松将组件按选定拓扑组装成集群。 + +组件(Component)是构成集群的基本构建块。实际上,Addon 开发者可以在 ClusterDefinition 中定义多个组件如何组装成不同的拓扑结构(不过等等,这听起来是否很复杂?如果您不是 Addon 开发者,其实无需关心 ClusterDefinition 的细节,只需知道 Addon 可以提供多种拓扑供您选择即可)。例如 Redis Addon 就提供了三种拓扑:"standalone"(单机)、"replication"(主从复制)和 "replication-twemproxy"(主从分片)。用户在创建集群时可以指定所需拓扑。 + +以下是通过 `clusterDef` 和 `topology` 创建 Redis 集群的示例: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-redis-use-topology + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: redis + topology: replication + componentSpecs: + - name: redis + serviceVersion: "7.2.4" + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: redis-sentinel + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +细心的读者会发现:通过在 Cluster 中指定 `clusterDef` 和 `topology`,就不再需要为每个 Component 单独指定 `componentDef` 了。 + +最后分享一个有趣的事实:您知道这个项目为何命名为 KubeBlocks 吗?通过 Component API,我们将数据库容器打包成标准化的构建块,这些构建块可以按照指定拓扑组装成数据库集群并在 Kubernetes 上运行——整个过程就像用乐高积木搭建模型一样充满模块化的乐趣。 + +## 深入解析KubeBlocks API + +KubeBlocks的核心CRD架构如下图所示。我们特别突出了API的分层结构,而其他重要API(如OpsRequest、Backup和Restore)未在此图中展示。这些省略是为了聚焦层次关系,使图表更清晰。我们将在其他文档中阐述这些附加API。 + +![KubeBlocks API层级结构](/img/docs/en/kubeblocks_api_layers.png) + +KubeBlocks的CRD可分为两大类别:面向用户的CRD和面向扩展组件的CRD。 + +**面向用户的CRD** + +这类CRD包括Cluster(集群)、Component(组件)和InstanceSet(实例集)。当使用KubeBlocks创建数据库集群时,这些自定义资源将被生成。具体而言: +- Cluster对象由用户直接创建 +- Component对象是KubeBlocks集群控制器在感知到Cluster对象后递归创建的子资源 +- 
InstanceSet对象是组件控制器在感知到Component对象后递归创建的子资源,而实例集控制器会进一步递归创建Pod和PVC对象 + +**面向扩展组件的CRD** + +这类CRD包括ClusterDefinition(集群定义)、ComponentDefinition(组件定义)和ComponentVersion(组件版本)。这些CR由扩展组件开发者编写,并打包在扩展组件的Helm Chart中。 + +:::重要说明 +虽然用户不需要编写ClusterDefinition和ComponentDefinition的CR,但仍需使用这些CR。如先前创建Redis集群的示例所示,当用户创建Cluster时,需要在每个组件的`componentDef`中指定对应ComponentDefinition CR的名称,或在`clusterDef`中指定对应ClusterDefinition CR的名称及所需拓扑结构。 +::: + +### 面向用户的KubeBlocks API + +#### Cluster(集群) +Cluster对象代表由KubeBlocks管理的完整数据库集群。一个Cluster可包含多个Component。用户在此指定每个组件的配置,集群控制器将生成并协调对应的Component对象。此外,集群控制器还管理所有在集群级别暴露的服务地址。 + +对于采用无共享架构的分布式数据库(如Redis Cluster),Cluster支持管理多个分片,每个分片由独立的Component管理。该架构还支持动态分片:如需横向扩展新增分片,只需添加新Component;反之如需收缩减少分片,则移除对应Component。 + +#### Component(组件) +Component是Cluster对象的基础构建单元。例如,Redis集群可包含`redis`、`sentinel`等组件,以及可能的代理组件如`twemproxy`。 + +Component对象负责管理组件内所有副本的生命周期,支持广泛的操作包括:供应、停止、重启、终止、升级、配置变更、垂直与水平扩缩容、故障转移、主从切换、调度配置、服务暴露、系统账户管理等。 + +Component是从用户提交的Cluster对象派生的内部子对象,主要供KubeBlocks控制器使用。不建议用户直接修改Component对象,而应仅将其用于监控组件状态。 + +#### InstanceSet(实例集) +自KubeBlocks v0.9起,我们使用InstanceSet替代了StatefulSet。 + +数据库实例(或称副本)由一个Pod和若干辅助对象(PVC、Service、ConfigMap、Secret)组成。InstanceSet是负责管理一组实例的工作负载CRD。在KubeBlocks中,所有工作负载最终都通过InstanceSet管理。相较于Kubernetes原生的工作负载CRD(如StatefulSet和Deployment),InstanceSet融入了更多数据库领域的特殊考量与设计,例如每个副本的角色分配、更高的可用性要求,以及特定节点下线等运维需求。 + +### 面向扩展组件的KubeBlocks API + +:::开发须知 +仅扩展组件开发者需要理解ClusterDefinition和ComponentDefinition API。因此KubeBlocks用户可以轻松绕过这两个API。 +::: + +#### ClusterDefinition(集群定义) +ClusterDefinition是用于定义数据库集群所有可用拓扑结构的API,提供多样化的拓扑配置以满足不同部署需求和场景。 + +每个拓扑结构包含组件列表,每个组件都关联到一个ComponentDefinition,这种设计增强了可重用性并减少冗余。例如,etcd、Zookeeper等通用组件的ComponentDefinition只需定义一次,即可在多个ClusterDefinition中复用,简化新系统的搭建流程。 + +此外,ClusterDefinition还规定了组件的启动、升级和关闭顺序,确保对组件生命周期的管控具有可预测性。 + +#### ComponentDefinition(组件定义) +ComponentDefinition是创建Component的可复用蓝图或模板,封装了关键的静态设置,包括:组件描述、Pod模板、配置文件模板、脚本、参数列表、注入的环境变量及其来源、事件处理器等。ComponentDefinition与Component中的动态设置协同工作,在集群创建时实例化组件。 + +ComponentDefinition中可定义的关键要素包括: +- PodSpec模板:指定组件使用的PodSpec模板 +- 配置模板:声明组件所需的配置文件模板 +- 脚本:提供组件管理和操作所需的脚本 +- 存储卷:配置组件所需的存储卷及其规格 +- Pod角色:定义组件内Pod的各种角色及其能力 +- 暴露的Kubernetes服务:声明组件需要暴露的服务 +- 系统账户:配置组件所需的系统账户 + +ComponentDefinition还支持定义组件对事件的响应行为,例如成员加入/退出、组件增删、角色变更、主从切换等。这使得组件能自动处理事件,从而将复杂行为封装在组件内部。 + +## 什么是 Addon + +KubeBlocks 通过 Addon 机制扩展对各类数据库引擎的支持。一个 Addon 代表对特定数据库引擎的扩展支持,例如 MySQL Addon、PostgreSQL Addon、Redis Addon、MongoDB Addon 和 Kafka Addon 等。目前 KubeBlocks 仓库中已提供超过 30 种 Addon。 + +每个 Addon 包含基于 ClusterDefinition、ComponentDefinition 和 ComponentVersion 等 CRD 的 CR(自定义资源),以及部分 ConfigMap(用作配置模板或脚本文件模板)、脚本文件、定义备份恢复操作方式的 CR,以及 Grafana 仪表板的 JSON 对象。 + +Addon 会以 Helm Chart 的形式打包安装。用户安装某个数据库引擎的 Addon 后,在创建 Cluster 时即可引用该 Addon 中包含的 ClusterDefinition CR 和 ComponentDefinition CR,从而创建对应数据库引擎的 Cluster。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/in-place-update/_category_.yaml b/docs/zh/preview/user_docs/concepts/in-place-update/_category_.yaml new file mode 100644 index 00000000..dd446eaa --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/in-place-update/_category_.yaml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 原地更新 +position: 4 diff --git a/docs/zh/preview/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx b/docs/zh/preview/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx new file mode 100644 index 00000000..6e488277 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/in-place-update/ignore-vertical-scale.mdx @@ -0,0 +1,13 @@ +--- +description: 启用原地更新 +keywords: +- in-place update 
+sidebar_label: 启用原地更新 +sidebar_position: 2 +title: 启用原地更新 +--- +# 启用原地更新功能 + +在 Kubernetes 1.27 之前的版本中,我们已经看到许多 Kubernetes 发行版支持对 Resources 进行原地更新。不同的发行版可能采用不同的方式来实现这一特性。 + +为了适配这些 Kubernetes 发行版,KubeBlocks 引入了 `IgnorePodVerticalScaling` 功能开关。当启用此功能时,KubeBlocks 会在实例更新过程中忽略对 Resources 中 CPU 和 Memory 的修改,确保最终渲染出的 Pod 的 Resources 配置与当前运行中的 Pod 的 Resources 配置保持一致。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/in-place-update/overview.mdx b/docs/zh/preview/user_docs/concepts/in-place-update/overview.mdx new file mode 100644 index 00000000..a5113d84 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/in-place-update/overview.mdx @@ -0,0 +1,56 @@ +--- +description: 简介 +keywords: +- in-place update +- overview +sidebar_label: 简介 +sidebar_position: 1 +title: 简介 +--- +# 概述 + +在早期版本中,KubeBlocks 最终生成的 Workload 都是 StatefulSet。对于 StatefulSet 而言,PodTemplate 部分的任何变更都可能导致所有 Pod 的更新,且更新方式为 `Recreate`,即删除当前所有 Pod 并创建新的 Pod。这对于可用性要求极高的数据库管理显然不是最佳实践。 + +为解决这个问题,KubeBlocks 从 0.9 版本开始引入了实例原地更新特性,降低实例更新对系统可用性的影响。 + +## 实例的哪些字段支持原地更新? + +原则上,KubeBlocks 实例原地更新利用了 [Kubernetes Pod API 的原地更新能力](https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement)。因此具体支持的字段如下: + +* `annotations` +* `labels` +* `spec.activeDeadlineSeconds` +* `spec.initContainers[*].image` +* `spec.containers[*].image` +* `spec.tolerations`(仅支持添加 Toleration) + +从 Kubernetes 1.27 版本开始,可以通过 `InPlacePodVerticalScaling` 特性开关进一步支持 CPU 和 Memory 的原地更新。KubeBlocks 也支持 `InPlacePodVerticalScaling` 特性开关,从而进一步支持以下能力: + +对于 Kubernetes 版本 >= 1.27 且启用了 InPlacePodVerticalScaling 的情况,支持以下字段的原地更新: + +* `spec.containers[*].resources.requests["cpu"]` +* `spec.containers[*].resources.requests["memory"]` +* `spec.containers[*].resources.limits["cpu"]` +* `spec.containers[*].resources.limits["memory"]` + +需要注意的是,资源调整成功后,部分应用可能需要重启才能识别新的资源配置。此时需要在 ClusterDefinition 或 ComponentDefinition 中进一步配置容器的 `restartPolicy`。 + +对于 PVC,KubeBlocks 同样利用了 PVC API 的能力,仅支持存储卷扩容。如果扩容因某些原因失败,支持回退到原容量。但 StatefulSet 中的 VolumeClaimTemplate 一旦声明就无法修改。目前 Kubernetes 社区正在[开发该能力](https://github.com/kubernetes/enhancements/pull/4651),但至少要到 Kubernetes 1.32 版本才会提供。 + +## 从上层 API 角度看,哪些字段变更后会利用原地更新? + +KubeBlocks 上层与实例相关的 API 包括 Cluster、ClusterDefinition、ClusterVersion、ComponentDefinition 和 ComponentVersion。这些 API 中有若干字段最终会被直接或间接用于渲染实例对象,从而可能触发实例的原地更新。 + +这些 API 中的字段众多。下表简要说明。 + +:::note + +API 中标记为 deprecated 或 immutable 的字段未包含在列表中。 + +::: + +| API | 字段 | 描述 | +|:-----|:-------|:-----------| +|Cluster| `annotations`,

`labels`,

`spec.tolerations`,

`spec.componentSpecs[*].serviceVersion`,

`spec.componentSpecs[*].tolerations`,

`spec.componentSpecs[*].resources`,

`spec.componentSpecs[*].volumeClaimTemplates`,

`spec.componentSpecs[*].instances[*].annotations`,

`spec.componentSpecs[*].instances[*].labels`,

`spec.componentSpecs[*].instances[*].image`,

`spec.componentSpecs[*].instances[*].tolerations`,

`spec.componentSpecs[*].instances[*].resources`,

`spec.componentSpecs[*].instances[*].volumeClaimTemplates`,

`spec.shardingSpecs[*].template.serviceVersion`,

`spec.shardingSpecs[*].template.tolerations`,

`spec.shardingSpecs[*].template.resources`,

`spec.shardingSpecs[*].template.volumeClaimTemplates`

| 资源相关字段指:

`requests["cpu"]`,

`requests["memory"]`,

`limits["cpu"]`,

`limits["memory"]` | +| ComponentVersion | `spec.releases[*].images` | 是否触发原地更新取决于对应镜像是否变更 | +| KubeBlocks 内置 | `annotations`, `labels` | | \ No newline at end of file diff --git a/docs/zh/preview/user_docs/concepts/instance-template/_category_.yml b/docs/zh/preview/user_docs/concepts/instance-template/_category_.yml new file mode 100644 index 00000000..9555d19a --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/instance-template/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 实例模板 +position: 5 diff --git a/docs/zh/preview/user_docs/concepts/instance-template/how-to-use-instance-template.mdx b/docs/zh/preview/user_docs/concepts/instance-template/how-to-use-instance-template.mdx new file mode 100644 index 00000000..340f9109 --- /dev/null +++ b/docs/zh/preview/user_docs/concepts/instance-template/how-to-use-instance-template.mdx @@ -0,0 +1,207 @@ +--- +description: 应用实例模板 +keywords: +- apply instance template +- instance template +sidebar_label: 应用实例模板 +sidebar_position: 2 +title: 应用实例模板 +--- +# 应用实例模板 + +实例模板可应用于多种场景。本节以RisingWave集群为例进行说明。 + +KubeBlocks支持管理RisingWave集群。RisingWave插件由RisingWave官方团队贡献。为使RisingWave达到最佳运行状态,它需要依赖外部存储解决方案(如AWS S3或阿里云OSS)作为其状态后端。在创建RisingWave集群时,必须为外部存储配置凭证等相关信息以确保正常运行,且这些信息可能因集群而异。 + +在RisingWave的官方镜像中,这些信息可以通过环境变量注入。因此,在KubeBlocks 0.9版本中,我们可以在实例模板中配置相应的环境变量,并在每次创建集群时设置这些环境变量的值,从而将凭证信息注入到RisingWave的容器中。 + +## 示例说明 + +在 RisingWave 插件的默认模板中,[环境变量配置](https://github.com/apecloud/kubeblocks-addons/blob/main/addons/risingwave/templates/cmpd-compute.yaml#L26)如下所示: + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ComponentDefinition +metadata: + name: risingwave +# ... +spec: +#... + runtime: + containers: + - name: compute + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + command: + - /risingwave/bin/risingwave + - compute-node + env: + - name: RUST_BACKTRACE + value: "1" + - name: RW_CONFIG_PATH + value: /risingwave/config/risingwave.toml + - name: RW_LISTEN_ADDR + value: 0.0.0.0:5688 + - name: RW_ADVERTISE_ADDR + value: $(KB_POD_FQDN):5688 + - name: RW_META_ADDR + value: load-balance+http://$(metaSvc)-headless:5690 + - name: RW_METRICS_LEVEL + value: "1" + - name: RW_CONNECTOR_RPC_ENDPOINT + value: $(connectorSvc):50051 + - name: RW_PROMETHEUS_LISTENER_ADDR + value: 0.0.0.0:1222 +# ... +``` + +当在[集群资源](https://github.com/apecloud/kubeblocks-addons/blob/main/addons-cluster/risingwave/templates/cluster.yaml)中添加实例模板后: + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: {{ include "risingwave-cluster.name" . }} + namespace: {{ .Release.Namespace }} +# ... 
+spec: + componentSpecs: + - componentDef: compute + name: compute + replicas: {{ .Values.risingwave.compute.replicas }} + instances: + - name: instance + replicas: {{ .Values.risingwave.compute.replicas }} + env: + - name: RW_STATE_STORE + value: "hummock+s3://{{ .Values.risingwave.stateStore.s3.bucket }}" + - name: AWS_REGION + value: "{{ .Values.risingwave.stateStore.s3.region }}" + {{- if eq .Values.risingwave.stateStore.s3.authentication.serviceAccountName "" }} + - name: AWS_ACCESS_KEY_ID + value: "{{ .Values.risingwave.stateStore.s3.authentication.accessKey }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ .Values.risingwave.stateStore.s3.authentication.secretAccessKey }}" + {{- end }} + - name: RW_DATA_DIRECTORY + value: "{{ .Values.risingwave.stateStore.dataDirectory }}" + {{- if .Values.risingwave.stateStore.s3.endpoint }} + - name: RW_S3_ENDPOINT + value: "{{ .Values.risingwave.stateStore.s3.endpoint }}" + {{- end }} + {{- if .Values.risingwave.metaStore.etcd.authentication.enabled }} + - name: RW_ETCD_USERNAME + value: "{{ .Values.risingwave.metaStore.etcd.authentication.username }}" + - name: RW_ETCD_PASSWORD + value: "{{ .Values.risingwave.metaStore.etcd.authentication.password }}" + {{- end }} + - name: RW_ETCD_ENDPOINTS + value: "{{ .Values.risingwave.metaStore.etcd.endpoints }}" + - name: RW_ETCD_AUTH + value: "{{ .Values.risingwave.metaStore.etcd.authentication.enabled}}" +# ... +``` + +在上述示例中,我们通过 `instances` 字段添加了一个名为 `instance` 的实例模板。该模板定义了若干环境变量,例如 `RW_STATE_STORE` 和 `AWS_REGION`。KubeBlocks 会将这些环境变量追加到默认模板定义的环境变量列表中。因此,最终渲染出的实例将同时包含默认模板和该实例模板中定义的所有环境变量。 + +此外,实例模板中的 `replicas` 字段与 `componentSpec` 中的设置完全一致(均为 `{{ .Values.risingwave.compute.replicas }}`),这意味着在覆盖默认模板后,该实例模板将用于渲染该组件内的所有实例。 + +## 实例模板详细信息 + +- `Name`字段:每个组件可以定义多个实例模板,通过`Name`字段配置模板名称,同一组件内必须保持唯一。 +- `Replica`字段:每个模板可通过`Replicas`字段设置基于该模板渲染的实例数量,默认值为1。同一组件内所有实例模板的`Replicas`之和必须小于等于该组件的`Replicas`值。若基于实例模板渲染的实例数量小于组件所需总数,剩余实例将使用默认模板渲染。 + +基于实例模板渲染的实例命名模式为`$(集群名称)-$(组件名称)-$(实例模板名称)-序号`。例如上述RisingWave集群中,集群名称为`risingwave`,组件名称为`compute`,实例模板名称为`instance`,`Replicas`数量为3,因此渲染出的实例名称为risingwave-compute-instance-0、risingwave-compute-instance-1和risingwave-compute-instance-2。 + +实例模板可在集群创建时使用,也可在运营期间更新,具体包括添加、删除或更新实例模板。更新实例模板可能会更新、删除或重建实例,建议在执行更新前仔细评估最终变更是否符合预期。 + +### 注解(Annotations) + +实例模板中的`Annotations`用于覆盖默认模板中的`Annotations`字段。若实例模板`Annotations`中的Key已存在于默认模板中,则该Key对应的`value`将采用实例模板中的值;若默认模板中不存在该Key,则将该Key和Value添加到最终`Annotations`中。 + +***示例:*** + +默认模板中的`annotations`为: + +```yaml +annotations: + "foo0": "bar0" + "foo1": "bar" +``` + +实例模板中的`annotations`为: + +```yaml +annotations: + "foo1": "bar1" + "foo2": "bar2" +``` + +则渲染后的实际注解为: + +```yaml +annotations: + "foo0": "bar0" + "foo1": "bar1" + "foo2": "bar2" +``` + +:::注意 + +KubeBlocks会添加系统`Annotations`,且不会覆盖这些注解。 + +::: + +### 标签(Labels) + +您也可以通过实例模板设置`Labels`。 + +与`Annotations`类似,实例模板中的`Labels`遵循相同的覆盖逻辑应用于现有标签。 + +:::注意 + +KubeBlocks会添加系统`Labels`,且不会覆盖这些标签。 + +::: + +### 镜像(Image) + +实例模板中的`Image`字段用于覆盖默认模板中第一个容器的`Image`字段。 + +:::注意 + +使用`Image`字段需谨慎:对于类似StatefulSet的数据库,更改`Image`通常涉及数据格式兼容性问题。更改此字段时,请确保实例模板中的镜像版本与默认模板中的版本完全兼容。 + +::: + +KubeBlocks 0.9及以上版本通过`ComponentVersion`提供详细的镜像版本设计,建议使用`ComponentVersion`进行版本管理。 + +### 节点名称(NodeName) + +实例模板中的`NodeName`将覆盖默认模板中的同名字段。 + +### 节点选择器(NodeSelector) + +实例模板中的`NodeSelector`将覆盖默认模板中的同名字段。 + +### 容忍度(Tolerations) + +实例模板中的`Tolerations`将覆盖默认模板中的同名字段。 + +若实例模板中的`Toleration`与默认模板中的某个`Toleration`完全相同(具有相同的`Key`、`Operator`、`Value`、`Effect`和`TolerationSeconds`),则该`Toleration`将被忽略;否则会添加到默认模板的`Tolerations`列表中。 + +### 
+### 注解(Annotations)
+
+实例模板中的 `Annotations` 用于覆盖默认模板中的 `Annotations` 字段。若实例模板 `Annotations` 中的 Key 已存在于默认模板中,则该 Key 对应的 `value` 将采用实例模板中的值;若默认模板中不存在该 Key,则将该 Key 和 Value 添加到最终 `Annotations` 中。
+
+***示例:***
+
+默认模板中的 `annotations` 为:
+
+```yaml
+annotations:
+  "foo0": "bar0"
+  "foo1": "bar"
+```
+
+实例模板中的 `annotations` 为:
+
+```yaml
+annotations:
+  "foo1": "bar1"
+  "foo2": "bar2"
+```
+
+则渲染后的实际注解为:
+
+```yaml
+annotations:
+  "foo0": "bar0"
+  "foo1": "bar1"
+  "foo2": "bar2"
+```
+
+:::note
+
+KubeBlocks 会添加系统 `Annotations`,且不会覆盖这些注解。
+
+:::
+
+### 标签(Labels)
+
+您也可以通过实例模板设置 `Labels`。
+
+与 `Annotations` 类似,实例模板中的 `Labels` 遵循相同的覆盖逻辑应用于现有标签。
+
+:::note
+
+KubeBlocks 会添加系统 `Labels`,且不会覆盖这些标签。
+
+:::
+
+### 镜像(Image)
+
+实例模板中的 `Image` 字段用于覆盖默认模板中第一个容器的 `Image` 字段。
+
+:::note
+
+使用 `Image` 字段需谨慎:对于类似 StatefulSet 的数据库,更改 `Image` 通常涉及数据格式兼容性问题。更改此字段时,请确保实例模板中的镜像版本与默认模板中的版本完全兼容。
+
+:::
+
+KubeBlocks 0.9 及以上版本通过 `ComponentVersion` 提供详细的镜像版本设计,建议使用 `ComponentVersion` 进行版本管理。
+
+### 节点名称(NodeName)
+
+实例模板中的 `NodeName` 将覆盖默认模板中的同名字段。
+
+### 节点选择器(NodeSelector)
+
+实例模板中的 `NodeSelector` 将覆盖默认模板中的同名字段。
+
+### 容忍度(Tolerations)
+
+实例模板中的 `Tolerations` 将覆盖默认模板中的同名字段。
+
+若实例模板中的 `Toleration` 与默认模板中的某个 `Toleration` 完全相同(具有相同的 `Key`、`Operator`、`Value`、`Effect` 和 `TolerationSeconds`),则该 `Toleration` 将被忽略;否则会添加到默认模板的 `Tolerations` 列表中。
+
+### 运行时类名(RuntimeClassName)
+
+实例模板中的 `RuntimeClassName` 将覆盖默认模板中的同名字段。
+
+### 资源(Resources)
+
+实例模板中的 `Resources` 将覆盖默认模板中的同名字段,并具有最高优先级。
+
+### 环境变量(Env)
+
+实例模板中定义的环境变量(`Env`)将覆盖除 KubeBlocks 设置的默认 `Env` 外的其他所有环境变量。覆盖逻辑与 `Annotations` 和 `Labels` 类似:若环境变量名称相同,则使用实例模板中的值或值来源;若不同,则添加为新环境变量。
\ No newline at end of file
diff --git a/docs/zh/preview/user_docs/concepts/instance-template/introduction.mdx b/docs/zh/preview/user_docs/concepts/instance-template/introduction.mdx
new file mode 100644
index 00000000..28329507
--- /dev/null
+++ b/docs/zh/preview/user_docs/concepts/instance-template/introduction.mdx
@@ -0,0 +1,27 @@
+---
+description: 简介
+keywords:
+- instance template
+sidebar_label: 简介
+sidebar_position: 1
+title: 简介
+---
+# 简介
+
+## 什么是实例模板
+
+在 KubeBlocks 中,*实例*(instance)是最基础的运行单元,它由一个 Pod 和若干辅助对象组成。简单来说,你可以先将其理解为一个 Pod,后文我们将统一称之为"实例"。
+
+从 0.9 版本开始,我们能够为集群中的特定组件定义多个实例模板。这些模板包含名称(Name)、副本数(Replicas)、注解(Annotations)、标签(Labels)、环境变量(Env)、容忍度(Tolerations)、节点选择器(NodeSelector)等多个字段。这些字段最终会覆盖默认模板(源自 ClusterDefinition 和 ComponentDefinition)中的对应字段,生成用于渲染实例的最终模板。
+
+## 为何引入实例模板
+
+在 KubeBlocks 架构中,一个*集群*(Cluster)由多个*组件*(Component)构成,而每个*组件*最终会管理多个 *Pod* 及其辅助对象。
+
+在 0.9 版本之前,这些 Pod 都是通过共享的 PodTemplate 渲染生成的,该模板定义在 ClusterDefinition 或 ComponentDefinition 中。但这种设计无法满足以下需求:
+
+ - 对于通过相同插件渲染的集群,需要设置独立的调度配置,如*节点名称*(NodeName)、*节点选择器*(NodeSelector)或*容忍度*(Tolerations)
+ - 对于通过相同插件渲染的组件,需要为其管理的 Pod 添加自定义*注解*(Annotations)、*标签*(Labels)或环境变量(Env)
+ - 对于同一组件管理的 Pod,需要配置不同的 *CPU*、*内存*等*资源请求*(Resource Requests)与*限制*(Limits)
+
+随着各类相似需求的涌现,Cluster API 从 0.9 版本开始引入了实例模板功能来满足这些需求。
\ No newline at end of file
diff --git a/docs/zh/preview/user_docs/overview/_category_.yml b/docs/zh/preview/user_docs/overview/_category_.yml
new file mode 100644
index 00000000..eb2bb227
--- /dev/null
+++ b/docs/zh/preview/user_docs/overview/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: true
+collapsible: true
+label: 入门指南
+position: 1
diff --git a/docs/zh/preview/user_docs/overview/install-kubeblocks.mdx b/docs/zh/preview/user_docs/overview/install-kubeblocks.mdx
new file mode 100644
index 00000000..9fa0bc55
--- /dev/null
+++ b/docs/zh/preview/user_docs/overview/install-kubeblocks.mdx
@@ -0,0 +1,543 @@
+---
+description: 使用 Helm 在现有 Kubernetes 集群上安装 KubeBlocks
+keywords:
+- taints
+- affinity
+- tolerance
+- install
+- kbcli
+- KubeBlocks
+- helm
+sidebar_label: 安装
+sidebar_position: 4
+title: 安装
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import { VersionProvider, Version } from '@/components/VersionContext';
+
+
+
+# 安装 KubeBlocks
+
+本指南介绍在现有 Kubernetes 集群上部署 KubeBlocks 的方法。请选择您偏好的安装方式:
+
+- **Helm**(生产环境推荐)
+- **kbcli**(简化的 CLI 体验)
+
+## 先决条件
+
+### 资源需求
+
+| 组件 | 数据库 | 推荐配置 |
+|--------------|------------|-----------------------|
+| **控制平面** | - | 1 个节点(4 核,4GB 内存,50GB 存储) |
+| **数据平面** | MySQL | 2 个节点(2 核,4GB 内存,50GB 存储) |
+| | PostgreSQL | 2 个节点(2 核,4GB 内存,50GB 存储) |
+| | Redis | 2 个节点(2 核,4GB 内存,50GB 存储) |
+| | MongoDB | 3 个节点(2 核,4GB 内存,50GB 存储) |
+
+- **控制平面**:运行 KubeBlocks 组件的节点
+- **数据平面**:托管数据库实例的节点
+
+### 系统要求
+
+安装前请确认您的环境满足以下要求:
+
+- Kubernetes 集群(推荐 v1.21+ 版本),如需创建测试集群请参考[准备本地 K8s 集群](../references/prepare-a-local-k8s-cluster)
+- 已安装并配置 `kubectl` v1.21+ 版本,且具有集群访问权限
+- 已安装 Helm([安装指南](https://helm.sh/docs/intro/install/))
+- 已安装 Snapshot Controller([安装指南](../references/install-snapshot-controller))
+
+## 安装 KubeBlocks
+
+
+
+
+
+```bash
+# 步骤 1:安装 CRDs
+kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/{{VERSION}}/kubeblocks_crds.yaml
+
+# 步骤 2:配置 Helm 仓库
+helm repo add kubeblocks https://apecloud.github.io/helm-charts
+helm repo update
+
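+# 提示(可选,非必需步骤):可先查看仓库中可用的 chart 版本,再决定 --version 取值,
+# 例如执行:helm search repo kubeblocks/kubeblocks --versions
+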
+# 步骤 3:部署 KubeBlocks
+helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace --version={{VERSION}}
+```
+
+
+:::note
+
+如果您使用的 Kubernetes 版本 ≤ 1.23,在安装 CRDs 时可能会遇到以下错误:
+
+```bash
+unknown field "x-kubernetes-validations".... if you choose to ignore these errors, turn validation off with --validate=false
+```
+
+可以通过以下命令解决:
+
+```bash
+kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/{{VERSION}}/kubeblocks_crds.yaml --validate=false
+```
+
+:::
+
+
+**需要其他版本?**
+
+您可以在 [KubeBlocks 发布页面](https://github.com/apecloud/kubeblocks/releases/) 查找可用版本,或使用以下命令查询:
+
+```bash
+# 获取最新稳定版
+curl -s https://api.github.com/repos/apecloud/kubeblocks/releases/latest | jq -r '.tag_name'
+
+# 获取最新标签(可能为预发布版;该命令只返回最新的一个标签)
+curl -s https://api.github.com/repos/apecloud/kubeblocks/tags | jq -r '.[0].name'
+```
+
+
+
+
+
+**准备工作**:
+- 安装 [KubeBlocks CLI](../../user_docs/references/install-kbcli)
+- 确保 kubectl 已配置集群访问权限
+
+```bash
+kbcli kubeblocks install --version={{VERSION}} --create-namespace
+```
+
+**需要其他版本?**
+
+列出可用版本或查找其他发布:
+
+```bash
+# 列出稳定版
+kbcli kubeblocks list-versions
+
+# 列出所有版本(包括预发布版)
+kbcli kb list-versions --devel --limit=100
+```
+
+或浏览 [KubeBlocks 发布页面](https://github.com/apecloud/kubeblocks/releases/) 上的所有版本。
+
+:::note
+
+**版本兼容性说明**
+
+KubeBlocks 要求 `kbcli` 与安装的发行版主版本号匹配:
+- 兼容示例:kbcli v1.0.0 与 KubeBlocks v1.0.0
+- 不兼容示例:kbcli v0.9.0 与 KubeBlocks v1.0.0
+
+主版本号不匹配可能导致意外行为或错误。
+
+:::
+
+默认情况下,KubeBlocks 会安装在 `kb-system` 命名空间。如需指定其他命名空间:
+
+```bash
+kbcli kubeblocks install --version={{VERSION}} --create-namespace --namespace my-namespace
+```
+
+💡 *请将 `my-namespace` 替换为您期望的命名空间名称。*
+
+
+
+
+
+## 验证安装
+
+执行以下命令检查 KubeBlocks 是否安装成功。
+
+
+
+
+
+```bash
+kubectl -n kb-system get pods
+```
+ 预期输出:
+
+如果所有 KubeBlocks 工作负载均处于就绪状态,则表示 KubeBlocks 已成功安装。
+
+```bash
+NAME                                        READY   STATUS    RESTARTS   AGE
+kubeblocks-7cf7745685-ddlwk                 1/1     Running   0          4m39s
+kubeblocks-dataprotection-95fbc79cc-b544l   1/1     Running   0          4m39s
+```
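+
+除了人工查看 Pod 状态,也可以让 kubectl 直接等待部署就绪,便于在脚本中使用(示例;假设安装在默认的 kb-system 命名空间,部署名与上方输出一致):
+
+```bash
+# 等待两个 KubeBlocks 部署进入 Available 状态(最长等待 5 分钟)
+kubectl -n kb-system wait --for=condition=Available deployment/kubeblocks --timeout=300s
+kubectl -n kb-system wait --for=condition=Available deployment/kubeblocks-dataprotection --timeout=300s
+```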
+
+
+
+```bash
+kbcli kubeblocks status
+```
+
+ 预期输出:
+
+如果所有 KubeBlocks 工作负载均处于就绪状态,则表示 KubeBlocks 已成功安装。
+
+```bash
+KubeBlocks is deployed in namespace: kb-system, version: {{VERSION}}
+
+Kubernetes Cluster:
+VERSION   PROVIDER   REGION   AVAILABLE ZONES
+v1.29.2   Kind
+
+KubeBlocks Workloads:
+NAMESPACE   KIND         NAME                        READY PODS   CPU(CORES)   MEMORY(BYTES)   CREATED-AT
+kb-system   Deployment   kubeblocks                  1/1          N/A          N/A             May 26,2025 13:53 UTC+0800
+kb-system   Deployment   kubeblocks-dataprotection   1/1          N/A          N/A             May 26,2025 13:53 UTC+0800
+
+KubeBlocks Addons:
+NAME             STATUS    TYPE   PROVIDER
+apecloud-mysql   Enabled   Helm   N/A
+etcd             Enabled   Helm   N/A
+kafka            Enabled   Helm   N/A
+```
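+
+如需进一步确认各插件的启用情况,也可以列出当前插件:
+
+```bash
+kbcli addon list
+```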
+ +
+ +
+ +## 高级配置 + +以下列出 KubeBlocks 的一些常用配置。有关 KubeBlocks 选项的更多信息,请参考 [KubeBlocks 选项](../references/kubeblocks_options)。 + +### 自定义镜像仓库 + +通过指定以下参数来配置镜像仓库。 + + + + + +```bash +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace \ +--version {{VERSION}} \ +--set image.registry=docker.io \ +--set dataProtection.image.registry=docker.io \ +--set addonChartsImage.registry=docker.io +``` + + + + + +```bash +kbcli kubeblocks upgrade --version {{VERSION}} \ +--set image.registry=docker.io \ +--set dataProtection.image.registry=docker.io \ +--set addonChartsImage.registry=docker.io +``` + + + + + +上述命令中参数说明如下: + +- `--set image.registry` 指定 KubeBlocks 镜像仓库地址 +- `--set dataProtection.image.registry` 指定 KubeBlocks-DataProtection 镜像仓库地址 +- `--set addonChartsImage.registry` 指定 Addon Charts 镜像仓库地址 + +若无法访问 `docker.io`,请使用以下镜像仓库和命名空间: +- 仓库地址:apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com +- 命名空间:apecloud + +### 指定容忍度 + +如需为 KubeBlocks 安装配置自定义容忍度,可使用以下命令: + + + + + +```bash +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace \ +--version {{VERSION}} \ +--set-json 'tolerations=[ { "key": "control-plane-taint", "operator": "Equal", "effect": "NoSchedule", "value": "true" } ]' \ +--set-json 'dataPlane.tolerations=[{ "key": "data-plane-taint", "operator": "Equal", "effect": "NoSchedule", "value": "true"}]' +``` + + + + + +```bash +kbcli kubeblocks install --version {{VERSION}} --create-namespace \ +--set image.registry=docker.io \ +--set dataProtection.image.registry=docker.io \ +--set addonChartsImage.registry=docker.io +``` + + + + + +### 跳过 Addon 自动安装 + +```bash +helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace \ +--version {{VERSION}} \ +--set autoInstalledAddons="{}" +``` + +### 启用原地垂直扩缩容 + +如需启用 KubeBlocks 的原地垂直扩缩容功能,在安装或升级时设置特性门控参数: + +```bash +featureGates.inPlacePodVerticalScaling.enabled=true +``` + + + + +1. 安装 +```bash +helm install kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --create-namespace \ + --version {{VERSION}} \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + +2. 升级 +```bash +helm upgrade kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --version {{VERSION}} \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + + + + + +1. 安装 +```bash +kbcli kubeblocks install \ + --version={{VERSION}} \ + --create-namespace \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + +2. 升级 +```bash +kbcli kubeblocks upgrade \ + --version={{VERSION}} \ + --set featureGates.inPlacePodVerticalScaling.enabled=true +``` + + + + +**验证** + +安装或升级后,可通过以下命令验证特性门控是否启用: + +```bash +kubectl -n kb-system get deployments.apps kubeblocks -oyaml | \ + yq '.spec' | \ + grep IN_PLACE_POD_VERTICAL_SCALING -A 1 +``` + +输出应显示: +```text +- name: IN_PLACE_POD_VERTICAL_SCALING + value: "true" +``` + +## 卸载 KubeBlocks + +:::note + +在卸载 KubeBlocks 和 kbcli 之前,请删除所有集群和备份。 +::: + +```bash +# 获取集群和备份信息 +kubectl get cluster -A +kubectl get backup -A + +# 按命名空间删除集群和备份 +kubectl delete cluster -n +kubectl delete backup -n +``` + + + + + +1. 列出所有插件 +```bash +# 列出所有插件 +helm list -n kb-system | grep kb-addon +``` + +2. 
卸载所有插件 +```bash +helm list -n kb-system | grep kb-addon | awk '{print $1}' | xargs -I {} helm -n kb-system uninstall {} +``` + +卸载过程中会看到如下提示信息: +``` +Release "kb-addon-etcd" uninstalled +These resources were kept due to the resource policy: +[ConfigMap] kafka27-configuration-tpl-1.0.0 +[ComponentDefinition] kafka-combine-1.0.0 +[ComponentDefinition] kafka-controller-1.0.0 +[ComponentDefinition] kafka-exporter-1.0.0 +[ComponentDefinition] kafka27-broker-1.0.0 +[ComponentDefinition] kafka-broker-1.0.0 +``` + +由于资源策略保留了一些资源,需要检查并全部删除 + +3. 检查剩余资源(如 ComponentDefinition、配置相关的 ConfigMap 等)并全部删除 +```bash +kubectl get componentdefinitions.apps.kubeblocks.io +kubectl get parametersdefinitions.parameters.kubeblocks.io +kubectl get configmap -n kb-system | grep configuration +kubectl get configmap -n kb-system | grep template +``` + +示例删除命令: +```bash +kubectl delete componentdefinitions.apps.kubeblocks.io --all +kubectl delete parametersdefinitions.parameters.kubeblocks.io --all +kubectl get configmap -n kb-system | grep configuration | awk '{print $1}' | xargs -I {} kubectl delete -n kb-system cm {} +kubectl get configmap -n kb-system | grep template| awk '{print $1}' | xargs -I {} kubectl delete -n kb-system cm {} +``` + +4. 删除 Addon 自定义资源 + +```bash +kubectl delete addon.extensions.kubeblocks.io --all +``` + +5. 验证所有 KubeBlocks 资源是否已删除 + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` + +如果输出显示仍有自定义资源残留,请全部删除。 + +6. 卸载 KubeBlocks + +```bash +helm uninstall kubeblocks --namespace kb-system +``` + +Helm 不会删除 CRD 对象。可以通过以下命令删除 KubeBlocks 创建的 CRD: + +```bash +kubectl get crd -o name | grep kubeblocks.io | xargs kubectl delete +``` + +7. 再次验证所有 KubeBlocks 资源是否已删除 + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` +此时输出应为空。 + + + + + +1. 检查插件列表 +```bash +kbcli addon list | grep Enabled +``` + +2. 为所有插件设置 `keepResource=false` +```bash +# 更新插件配置,移除 ComponentDefinition/ConfigMaps 中的 'helm.sh/resource-policy: keep' 注解 +kbcli addon enable --set extra.keepResource=false +``` + +示例: +```bash +kbcli addon enable apecloud-mysql --set extra.keepResource=false +kbcli addon enable etcd --set extra.keepResource=false +kbcli addon enable kafka --set extra.keepResource=false +kbcli addon enable mongodb --set extra.keepResource=false +kbcli addon enable mysql --set extra.keepResource=false +kbcli addon enable postgresql --set extra.keepResource=false +kbcli addon enable redis --set extra.keepResource=false +``` + +3. 禁用所有插件 + +```bash +kbcli addon disable +``` + +示例: +```bash +kbcli addon disable apecloud-mysql +kbcli addon disable etcd +kbcli addon disable kafka +kbcli addon disable mongodb +kbcli addon disable mysql +kbcli addon disable postgresql +kbcli addon disable redis +``` + +4. 
验证所有 KubeBlocks 资源是否已删除 + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` + +如果输出显示仍有自定义资源残留,请全部删除。 + +5. 卸载 KubeBlocks +```bash +kbcli kubeblocks uninstall +``` + +6. 再次验证所有 KubeBlocks 资源是否已删除 + +```bash +kubectl get crd | grep kubeblocks.io | awk '{print $1}' | while read crd; do + echo "Processing CRD: $crd" + kubectl get "$crd" -o json | jq '.items[] | select(.metadata.finalizers != null) | .metadata.name' -r | while read resource; do + echo "Custom Resource left: $resource in CRD: $crd" + done +done +``` +检查是否还有 ConfigMap 残留: + +```bash +kubectl get configmap -n kb-system +``` + + + + + +
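+
+确认上述资源均已清理完毕后,如不再需要,可以一并删除安装时创建的命名空间(可选步骤;假设使用默认的 kb-system 命名空间,请先确认其中没有其他工作负载):
+
+```bash
+kubectl delete namespace kb-system
+```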
\ No newline at end of file diff --git a/docs/zh/preview/user_docs/overview/introduction.mdx b/docs/zh/preview/user_docs/overview/introduction.mdx new file mode 100644 index 00000000..33261edc --- /dev/null +++ b/docs/zh/preview/user_docs/overview/introduction.mdx @@ -0,0 +1,258 @@ +--- +description: KubeBlocks 简介 +keywords: +- kubeblocks +- overview +- introduction +sidebar_position: 1 +title: 简介 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 简介 + +## 什么是 KubeBlocks + +KubeBlocks 是一个开源的 Kubernetes Operator,专为数据库(更准确地说,是为包括数据库和消息队列等中间件在内的有状态应用)设计,使用户能够在 Kubernetes 上运行和管理多种类型的数据库。据我们所知,大多数数据库 Operator 通常只管理特定类型的数据库。例如: +- CloudNativePG、Zalando、CrunchyData 和 StackGres 的 Operator 可以管理 PostgreSQL +- Strimzi 管理 Kafka +- Oracle 和 Percona 的 MySQL Operator 管理 MySQL + +相比之下,KubeBlocks 被设计为一个**通用数据库 Operator**。这意味着在设计 KubeBlocks 的 API 时,我们并未将其绑定到任何特定数据库,而是抽象了各类数据库的共性,形成了一套与引擎无关的通用 API。因此,围绕这套抽象 API 实现的 Operator 也同样不依赖于具体的数据库引擎。 + +![KubeBlocks 的设计架构,一个通用数据库 Operator](/img/docs/en/kubeblocks_general_purpose_arch.png) + +在上图中,Cluster、Component 和 InstanceSet 都是 KubeBlocks 提供的 CRD。如果您想了解更多关于它们的信息,请参考[概念](../concepts/concept)部分。 + +KubeBlocks 提供了一个 Addon API 来支持各类数据库的集成。例如,我们目前为以下主流开源数据库引擎提供了 KubeBlocks Addon: +- MySQL +- PostgreSQL +- Redis +- MongoDB +- Kafka +- RabbitMQ +- Minio +- Elasticsearch +- StarRocks +- Qdrant +- Milvus +- ZooKeeper +- etcd +- ... + +有关 Addon 的详细列表及其功能,请参阅[支持的 Addon](supported-addons.md)。 + +统一的 API 使得 KubeBlocks 成为在 Kubernetes 上运行多种类型数据库的理想选择。它可以显著降低学习多个 Operator 的复杂度。 + +## 统一API如何降低学习成本 + +以下示例展示了如何使用KubeBlocks的Cluster API编写YAML文件来创建一个包含两个副本的MySQL集群。 + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-mysql + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: + - name: mysql + componentDef: "mysql-8.0" + serviceVersion: 8.0.35 + disableExporter: false + replicas: 2 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + +接下来就是神奇之处:只需修改少量字段,您就能创建一个包含两个副本的PostgreSQL集群!同样的方法也适用于MongoDB和Redis(Redis示例稍长是因为它创建了两个组件:redis-server和sentinel),这种模式可以应用于一系列数据库引擎。 + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-pg + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: postgresql + topology: replication + componentSpecs: + - name: postgresql + serviceVersion: 16.4.0 + labels: + apps.kubeblocks.postgres.patroni/scope: test-pg-postgresql + disableExporter: true + replicas: 2 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: test-mongo + namespace: demo +spec: + terminationPolicy: Delete + clusterDef: mongodb + topology: replicaset + componentSpecs: + - name: mongodb + serviceVersion: "6.0.16" + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 0.5Gi + requests: + cpu: '0.5' + memory: 0.5Gi + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi +``` + + + + +```yaml +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: Cluster +metadata: + name: test-redis + namespace: demo +spec: + terminationPolicy: Delete + componentSpecs: 
+    - name: redis
+      componentDef: redis-7
+      replicas: 2
+      resources:
+        limits:
+          cpu: '0.5'
+          memory: 0.5Gi
+        requests:
+          cpu: '0.5'
+          memory: 0.5Gi
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+    - name: redis-sentinel
+      componentDef: redis-sentinel
+      replicas: 3
+      resources:
+        limits:
+          cpu: '0.5'
+          memory: 0.5Gi
+        requests:
+          cpu: '0.5'
+          memory: 0.5Gi
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+```
+
+
+
+这意味着在 Kubernetes 上管理多种数据库变得简单、高效且标准化,为您节省了大量原本需要查阅手册和 API 文档的时间。
+
+## 核心特性
+
+- **数据库集群的创建与销毁**:支持快速部署和清理数据库集群。
+- **集群启停控制**:可对数据库集群执行启动、停止及重启操作。
+- **多样化拓扑部署**:创建集群时支持选择引擎插件(Addon)提供的部署拓扑方案。例如:
+  - Redis 可选基于 Sentinel 的读写分离架构或 Redis Cluster 模式
+  - MySQL 可选搭配 Proxy 实现读写分离,并支持多种高可用方案(内置 Raft 共识插件、外部 etcd 协调器或 Orchestrator)
+- **差异化副本配置**:支持单个数据库集群内不同副本采用不同配置。典型场景如 MySQL 集群中主实例使用 8 核 CPU 而只读副本使用 4 核 CPU——此功能是 Kubernetes StatefulSet 原生不具备的。
+- **灵活网络管理**:
+  - **动态服务暴露**:可将数据库访问端点动态发布为多种 Service 类型(ClusterIP、LoadBalancer、NodePort)
+  - **HostNetwork 支持**:支持主机网络模式
+  - **智能客户端访问**:
+    - 部分数据库(如 Redis、MongoDB、Kafka)支持通过智能客户端(Smart Client)访问,该客户端能根据服务端返回的节点地址自动重定向请求或处理读写分离
+    - 对于 etcd 等具备客户端自动故障转移能力的数据库,KubeBlocks 支持为每个 Pod 分配独立服务地址(Pod Service)
+- **全面的 Day-2 运维支持**:
+  - **水平扩缩容**(增减副本数量)
+  - **垂直扩缩容**(调整单副本 CPU/内存资源)
+  - **PVC 存储卷扩容**
+  - **备份与恢复功能**
+  - **配置变更**(支持热加载时自动生效)
+  - **参数修改**
+  - **主从切换**
+  - **滚动升级**
+  - **指定副本下线**
+  - **次版本升级**
+- **双模式 API**:
+  - 除声明式 API 外,还提供专用于执行一次性运维任务的 OpsRequest API
+  - OpsRequest API 额外支持队列管理、并发控制、进度跟踪和操作回滚等高级特性
+- **可观测性**:支持与 Prometheus 和 Grafana 集成实现监控可视化
+- **高效 CLI 工具**:
+  - 提供功能强大且符合直觉的命令行工具 `kbcli`
+  - 显著简化 Kubernetes 上 KubeBlocks 自定义资源的操作流程,减少输入量
+  - 对 Kubernetes 资深用户,kbcli 可与 kubectl 配合使用,形成更高效的操作组合
+
+## 部署架构
+
+下图展示了 KubeBlocks 在云环境中的典型部署示意图。
+
+Kubernetes 应部署在节点间可通过网络互相通信的环境中(例如 VPC 内部)。KubeBlocks Operator 部署在专用命名空间(kb-system)中,而数据库实例则部署在用户指定的命名空间内。
+
+在生产环境中,我们建议将 KubeBlocks Operator(以及安装的 Prometheus 和 Grafana)与数据库部署在不同的节点上。默认情况下,数据库集群的多个副本会通过反亲和性规则调度到不同节点运行,以确保高可用性。用户还可以配置可用区(AZ)级别的反亲和性,将数据库副本分布到不同的可用区,从而增强灾难恢复能力。
+
+每个数据库副本运行在独立的 Pod 中。除了运行数据库进程的主容器外,Pod 还包含若干边车容器:一个名为 `lorry` 的容器(从 KubeBlocks v1.0 开始将更名为 kbagent),用于执行来自 KubeBlocks 控制器的 Action 命令;另一个名为 `config-manager` 的容器,负责管理数据库配置文件并支持热更新。此外,引擎的 Addon 可能包含一个 exporter 容器,用于收集 Prometheus 监控所需的指标。
+
+![KubeBlocks Architecture](/img/docs/en/kubeblocks-architecture-ha.png)
\ No newline at end of file
diff --git a/docs/zh/preview/user_docs/overview/supported-addons.mdx b/docs/zh/preview/user_docs/overview/supported-addons.mdx
new file mode 100644
index 00000000..23d5755f
--- /dev/null
+++ b/docs/zh/preview/user_docs/overview/supported-addons.mdx
@@ -0,0 +1,217 @@
+---
+description: KubeBlocks 支持的插件
+keywords:
+- addons
+- enable
+- KubeBlocks
+- prometheus
+- s3
+- alertmanager
+sidebar_label: 支持的插件
+sidebar_position: 3
+title: 支持的插件
+---
+# 支持的插件
+
+KubeBlocks 通过插件(Addons)扩展对各种数据库引擎的支持。目前 KubeBlocks 代码仓库中提供了超过 30 个插件,这些插件可进一步分类如下。
+
+关于插件的安装与启用方法,请参阅[插件安装教程](./../references/install-addons)。
+
+## 关系型数据库
+
+MySQL 和 PostgreSQL 是全球最流行的两款开源关系型数据库,它们各自拥有分支/变种版本。
+
+### MySQL及其变种
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:----------------|:---------------|
+| mysql | 该插件使用 Oracle 官方发布的社区版 MySQL 镜像。 |
+| mariadb | MariaDB 是一款高性能开源关系型数据库管理系统,广泛应用于 Web 和应用服务器场景。 |
+
+**支持功能**
+
+:::note
+
+以下列出的版本可能不是最新状态,且可能遗漏部分支持版本。如需获取最新插件版本,请参考 [KubeBlocks 插件 GitHub 仓库](https://github.com/apecloud/kubeblocks-addons)。
+
+:::
+
+| 插件(v0.9.0) | 支持版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级(数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| mysql | • 5.7.44<br/> • 8.0.33<br/> • 8.4.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
+| mariadb | 10.6.15 | ✔️ | 不支持 | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+
+### PostgreSQL及其变种
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:----------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| postgresql | 该插件使用集成 Patroni 的 Spilo 镜像提供高可用(HA)PostgreSQL 服务。 |
+| vanilla-postgresql | 本插件基于原生 PostgreSQL 构建,为 PostgreSQL 及其变种提供高可用能力。 |
+| orioledb | OrioleDB 是 PostgreSQL 的新型存储引擎,为这个全球最受欢迎的数据库平台带来了现代化的数据库容量、功能和性能解决方案。 |
+
+**支持功能**
+
+| 插件(v0.9.0) | 支持版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级(数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| postgresql | • 12.14.0<br/> • 12.14.1<br/> • 12.15.0<br/> • 14.7.2<br/> • 14.8.0<br/> • 15.7.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
+| vanilla-postgresql | • 12.15.0<br/> • 14.7.0<br/> • 15.6.1138<br/> • 15.7.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
+| orioledb | 14.7.2-beta1 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+
+## NoSQL
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:--------------|:-----------------------------|
+| mongodb | MongoDB 是一个面向文档的 NoSQL 数据库,适用于海量数据存储场景。 |
+| redis | Redis 是一个快速、开源的内存键值数据存储系统。 |
+| etcd | etcd 是一个强一致性的分布式键值存储,为分布式系统或机器集群提供可靠的数据存储访问方式。 |
+| zookeeper | Apache ZooKeeper 是一个集中式服务,用于维护配置信息、命名服务、提供分布式同步以及集群管理服务。 |
+
+**支持功能**
+
+| 插件 (v0.9.0) | 支持版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| mongodb | • 4.0.28<br/> • 4.2.24<br/> • 4.4.29<br/> • 5.0.28<br/> • 6.0.16<br/> • 7.0.12 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | ✔️ | ✔️ |
+| redis | • 7.0.6<br/> • 7.2.4 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | ✔️ | ✔️ | 不支持 |
+| etcd | • 3.5.15<br/> • 3.5.6 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| zookeeper | • 3.4.14<br/> • 3.6.4<br/> • 3.7.2<br/> • 3.8.4<br/> • 3.9.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 |
+
+## OLAP 系统
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:--------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| elasticsearch | Elasticsearch 是一个分布式、RESTful 风格的搜索引擎,专为生产级工作负载的速度和相关性优化设计。 |
+| starrocks-ce | StarRocks 是新一代高性能分析型数据仓库,支持实时、多维度和高并发的数据分析。 |
+| clickhouse | ClickHouse 是一个列式数据库,用户可以通过 SQL 查询实时生成强大的分析结果。 |
+| opensearch | 开源的分布式 RESTful 搜索引擎。 |
+
+**支持功能**
+
+| 插件 (v0.9.0) | 支持版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:-------------:|:----------------------------:|:--------:|:--------:|:----------:|:---------:|:----:|:--------:|:---------:|:----:|:----:|:---------------------:|:--------:|:--------:|:--------:|
+| elasticsearch | • 7.10.1<br/> • 7.7.1<br/> • 7.8.1<br/> • 8.1.3<br/> • 8.8.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| starrocks-ce | • 3.2.2<br/> • 3.3.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| clickhouse | 22.9.4 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| opensearch | 2.7.0 | ✔️ | 不支持 | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+
+## 分布式 SQL 数据库
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:----------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| tidb | TiDB 是一款兼容 MySQL 的分布式数据库,其 SQL 层采用 Go 语言开发,存储层基于 RocksDB,事务模型采用 Percolator。由 PingCap 提供。 |
+| oceanbase-ce | OceanBase 社区版是一款采用 C++ 开发的 MySQL 兼容分布式数据库。 |
+| polardb-x | PolarDB-X 社区版是一款兼容 MySQL 的分布式数据库,支持基于 MySQL 的水平扩展。由阿里云提供并开源。 |
+
+**支持功能**
+
+| 插件 (v0.9.0) | 支持版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主备切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| tidb | • 6.5.10<br/> • 7.1.5<br/> • 7.5.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A |
+| oceanbase | 4.3.0 | N/A | ✔️ | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A |
+| polardb-x | 2.3 | ✔️ | ✔️ | N/A | ✔️ | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A |
+
+## 消息队列
+
+**插件列表**
+
+| 插件 | 描述 |
+|:----------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| kafka | Apache Kafka 是一个开源的分布式事件流平台,被数千家企业用于构建高性能数据管道、流分析、数据集成和关键任务应用。 |
+| rabbitmq | RabbitMQ 是一个可靠且成熟的消息和流处理代理。 |
+| pulsar | Apache Pulsar 是一个开源的分布式消息和流处理平台。 |
+
+## 向量数据库
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:--------------|:----------------------------------------------------------------------------------------|
+| qdrant | Qdrant 是一个向量数据库及向量相似性搜索引擎。 |
+| weaviate | Weaviate 是一个开源的向量数据库。 |
+| milvus | Milvus 是一个灵活、可靠且极速的云原生开源向量数据库。 |
+
+**支持的功能**
+
+| 插件 (v0.9.0) | 支持的版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:-------------:|:--------------------------------------:|:--------:|:--------:|:----------:|:---------:|:----:|:-------:|:---------:|:----:|:----:|:----------------------:|:--------:|:--------:|:--------:|
+| qdrant | • 1.10.0<br/> • 1.5.0<br/> • 1.7.3<br/> • 1.8.1<br/> • 1.8.4 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| weaviate | 1.23.1 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 |
+| milvus | 2.3.2 | ✔️ | 不支持 | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+
+## 时序数据库
+
+**插件列表**
+
+| 插件名称 | 描述 |
+|:----------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| influxdb | InfluxDB 是一款专为时序数据优化的数据库,能够高效处理和扩展大规模时序数据工作负载,实现实时分析功能。 |
+| victoria-metrics | VictoriaMetrics 是一个快速、经济高效且可扩展的监控解决方案及时序数据库。 |
+| greptimedb | GreptimeDB 是一款开源时序数据库,特别专注于可扩展性、分析能力和效率。 |
+| tdengine | TDengine™ 是专为工业物联网设计的工业数据平台,集成了时序数据库功能,并具备流处理、数据订阅和缓存等核心特性。 |
+
+**支持功能**
+
+| 插件 (v0.9.0) | 支持版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| influxdb | 2.7.4 | ✔️ | 不支持 | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| victoria-metrics | 1.0.0 | | | | | | | | | | | | | |
+| greptimedb | 0.3.2 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| tdengine | 3.0.5 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+
+## 图数据库
+
+**插件列表**
+
+| 插件 | 描述 |
+|:----------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| nebula | NebulaGraph 是一款开源的图数据库,能够存储和处理包含万亿级边和顶点的图数据。 |
+
+**支持的功能**
+
+| 插件 (v0.9.0) | 支持的版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主从切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| nebula | 3.5.0 | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 |
+
+## 存储系统
+
+**插件列表**
+
+| 插件 | 描述 |
+|:----------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| minio | MinIO 是一个对象存储解决方案,提供与亚马逊网络服务 S3 兼容的 API,并支持所有核心 S3 功能。 |
+
+**支持的功能**
+
+| 插件 (v0.9.0) | 支持的版本 | 垂直扩展 | 水平扩展 | 存储卷扩容 | 停止/启动 | 重启 | 暴露服务 | 备份/恢复 | 日志 | 配置 | 升级 (数据库引擎版本) | 账户管理 | 故障转移 | 主备切换 |
+|:------------------:|:--------------------------------------:|:------:|:------:|:------------:|:----------:|:-------:|:------:|:--------------:|:----:|:------:|:---------------------------:|:-------:|:--------:|:----------:|
+| minio | RELEASE.2024-06-29T01-20-47Z | ✔️ | ✔️ | 不适用 | ✔️ | ✔️ | ✔️ | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 | 不适用 |
+
+
diff --git a/docs/zh/preview/user_docs/references/_category_.yml b/docs/zh/preview/user_docs/references/_category_.yml
new file mode 100644
index 00000000..65807990
--- /dev/null
+++ b/docs/zh/preview/user_docs/references/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: true
+collapsible: true
+label: 参考资料
+position: 81
diff --git
a/docs/zh/preview/user_docs/references/api-reference/_category_.yml b/docs/zh/preview/user_docs/references/api-reference/_category_.yml new file mode 100644 index 00000000..7b0acb21 --- /dev/null +++ b/docs/zh/preview/user_docs/references/api-reference/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: API 参考 +position: 1 diff --git a/docs/zh/preview/user_docs/references/api-reference/add-on.mdx b/docs/zh/preview/user_docs/references/api-reference/add-on.mdx new file mode 100644 index 00000000..eeb6961c --- /dev/null +++ b/docs/zh/preview/user_docs/references/api-reference/add-on.mdx @@ -0,0 +1,2545 @@ +--- +title: Add-On API Reference +description: Add-On API Reference +keywords: [add-on, api] +sidebar_position: 5 +sidebar_label: Add-On +--- +
+ +

+Packages: +

+ +

extensions.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+Addon + +

+
+ +

+Addon is the Schema for the add-ons API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`extensions.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Addon` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +AddonSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Specifies the description of the add-on. +

+ +
+ +`type`
+ + +AddonType + + + +
+ + +

+Defines the type of the add-on. The only valid value is ‘helm’. +

+ +
+ +`version`
+ +string + + +
+ +(Optional) + +

+Indicates the version of the add-on. +

+ +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the provider of the add-on. +

+ +
+ +`helm`
+ + +HelmTypeInstallSpec + + + +
+ +(Optional) + +

+Represents the Helm installation specifications. This is only processed +when the type is set to ‘helm’. +

+ +
+ +`defaultInstallValues`
+ + +[]AddonDefaultInstallSpecItem + + + +
+ + +

+Specifies the default installation parameters. +

+ +
+ +`install`
+ + +AddonInstallSpec + + + +
+ +(Optional) + +

+Defines the installation parameters. +

+ +
+ +`installable`
+ + +InstallableSpec + + + +
+ +(Optional) + +

+Represents the installable specifications of the add-on. This includes +the selector and auto-install settings. +

+ +
+ +`cliPlugins`
+ + +[]CliPlugin + + + +
+ +(Optional) + +

+Specifies the CLI plugin installation specifications. +

+ +
+ +
+ +`status`
+ + +AddonStatus + + + +
+ + +
+

+AddonDefaultInstallSpecItem + +

+ +

+ +(Appears on:AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`AddonInstallSpec`
+ + +AddonInstallSpec + + + +
+ + +

+ +(Members of `AddonInstallSpec` are embedded into this type.) + +

+ +
+ +`selectors`
+ + +[]SelectorRequirement + + + +
+ +(Optional) + +

+Indicates the default selectors for add-on installations. If multiple selectors are provided, +all selectors must evaluate to true. +

+ +
+

+AddonInstallExtraItem + +

+ +

+ +(Appears on:AddonInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`AddonInstallSpecItem`
+ + +AddonInstallSpecItem + + + +
+ + +

+ +(Members of `AddonInstallSpecItem` are embedded into this type.) + +

+ +
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the item. +

+ +
+

+AddonInstallSpec + +

+ +

+ +(Appears on:AddonDefaultInstallSpecItem, AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`AddonInstallSpecItem`
+ + +AddonInstallSpecItem + + + +
+ + +

+ +(Members of `AddonInstallSpecItem` are embedded into this type.) + +

+ +
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Can be set to true if there are no specific installation attributes to be set. +

+ +
+ +`extras`
+ + +[]AddonInstallExtraItem + + + +
+ +(Optional) + +

+Specifies the installation specifications for extra items. +

+ +
+

+AddonInstallSpecItem + +

+ +

+ +(Appears on:AddonInstallExtraItem, AddonInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of replicas. +

+ +
+ +`persistentVolumeEnabled`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the Persistent Volume is enabled or not. +

+ +
+ +`storageClass`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the storage class. +

+ +
+ +`tolerations`
+ +string + + +
+ +(Optional) + +

+Specifies the tolerations in a JSON array string format. +

+ +
+ +`resources`
+ + +ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resource requirements. +

+ +
+

+AddonPhase +(`string` alias) +

+ +

+ +(Appears on:AddonStatus) + +

+
+ +

+AddonPhase defines addon phases. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Disabled" +

+
+ +
+ +

+"Disabling" +

+
+ +
+ +

+"Enabled" +

+
+ +
+ +

+"Enabling" +

+
+ +
+ +

+"Failed" +

+
+ +
+

+AddonSelectorKey +(`string` alias) +

+ +

+ +(Appears on:SelectorRequirement) + +

+
+ +

+AddonSelectorKey are selector requirement key types. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"KubeGitVersion" +

+
+ +
+ +

+"KubeProvider" +

+
+ +
+ +

+"KubeVersion" +

+
+ +
+

+AddonSpec + +

+ +

+ +(Appears on:Addon) + +

+
+ +

+AddonSpec defines the desired state of an add-on. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`description`
+ +string + + +
+ +(Optional) + +

+Specifies the description of the add-on. +

+ +
+ +`type`
+ + +AddonType + + + +
+ + +

+Defines the type of the add-on. The only valid value is ‘helm’. +

+ +
+ +`version`
+ +string + + +
+ +(Optional) + +

+Indicates the version of the add-on. +

+ +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the provider of the add-on. +

+ +
+ +`helm`
+ + +HelmTypeInstallSpec + + + +
+ +(Optional) + +

+Represents the Helm installation specifications. This is only processed +when the type is set to ‘helm’. +

+ +
+ +`defaultInstallValues`
+ + +[]AddonDefaultInstallSpecItem + + + +
+ + +

+Specifies the default installation parameters. +

+ +
+ +`install`
+ + +AddonInstallSpec + + + +
+ +(Optional) + +

+Defines the installation parameters. +

+ +
+ +`installable`
+ + +InstallableSpec + + + +
+ +(Optional) + +

+Represents the installable specifications of the add-on. This includes +the selector and auto-install settings. +

+ +
+ +`cliPlugins`
+ + +[]CliPlugin + + + +
+ +(Optional) + +

+Specifies the CLI plugin installation specifications. +

+ +
+

+AddonStatus + +

+ +

+ +(Appears on:Addon) + +

+
+ +

+AddonStatus defines the observed state of an add-on. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +AddonPhase + + + +
+ + +

+Defines the current installation phase of the add-on. It can take one of +the following values: `Disabled`, `Enabled`, `Failed`, `Enabling`, `Disabling`. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides a detailed description of the current state of add-on API installation. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this add-on. It corresponds +to the add-on’s generation, which is updated on mutation by the API Server. +

+ +
+

+AddonType +(`string` alias) +

+ +

+ +(Appears on:AddonSpec) + +

+
+ +

+AddonType defines the addon types. +

+
+ + + + + + + + + + + + + + +
ValueDescription
+ +

+"Helm" +

+
+ +
+

+CliPlugin + +

+ +

+ +(Appears on:AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the plugin. +

+ +
+ +`indexRepository`
+ +string + + +
+ + +

+Defines the index repository of the plugin. +

+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief description of the plugin. +

+ +
+

+DataObjectKeySelector + +

+ +

+ +(Appears on:HelmInstallValues) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the name of the object being referred to. +

+ +
+ +`key`
+ +string + + +
+ + +

+Specifies the key to be selected. +

+ +
+

+HelmInstallOptions +(`map[string]string` alias) +

+ +

+ +(Appears on:HelmTypeInstallSpec) + +

+
+
+

+HelmInstallValues + +

+ +

+ +(Appears on:HelmTypeInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`urls`
+ +[]string + + +
+ +(Optional) + +

+Specifies the URL location of the values file. +

+ +
+ +`configMapRefs`
+ + +[]DataObjectKeySelector + + + +
+ +(Optional) + +

+Selects a key from a ConfigMap item list. The value can be +a JSON or YAML string content. Use a key name with “.json”, “.yaml”, or “.yml” +extension to specify a content type. +

+ +
+ +`secretRefs`
+ + +[]DataObjectKeySelector + + + +
+ +(Optional) + +

+Selects a key from a Secrets item list. The value can be +a JSON or YAML string content. Use a key name with “.json”, “.yaml”, or “.yml” +extension to specify a content type. +

+ +
+ +`setValues`
+ +[]string + + +
+ +(Optional) + +

+Values set during Helm installation. Multiple or separate values can be specified with commas (key1=val1,key2=val2). +

+ +
+ +`setJSONValues`
+ +[]string + + +
+ +(Optional) + +

+JSON values set during Helm installation. Multiple or separate values can be specified with commas (key1=jsonval1,key2=jsonval2). +

+ +
+

+HelmJSONValueMapType + +

+ +

+ +(Appears on:HelmValuesMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`tolerations`
+ +string + + +
+ +(Optional) + +

+Specifies the toleration mapping key. +

+ +
+

+HelmTypeInstallSpec + +

+ +

+ +(Appears on:AddonSpec) + +

+
+ +

+HelmTypeInstallSpec defines the Helm installation spec. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`chartLocationURL`
+ +string + + +
+ + +

+Specifies the URL location of the Helm Chart. +

+ +
+ +`installOptions`
+ + +HelmInstallOptions + + + +
+ +(Optional) + +

+Defines the options for Helm release installation. +

+ +
+ +`installValues`
+ + +HelmInstallValues + + + +
+ +(Optional) + +

+Defines the set values for Helm release installation. +

+ +
+ +`valuesMapping`
+ + +HelmValuesMapping + + + +
+ +(Optional) + +

+Defines the mapping of add-on normalized resources parameters to Helm values’ keys. +

+ +
+ +`chartsImage`
+ +string + + +
+ +(Optional) + +

+Defines the image of Helm charts. +

+ +
+ +`chartsPathInImage`
+ +string + + +
+ +(Optional) + +

+Defines the path of Helm charts in the image. This path is used to copy +Helm charts from the image to the shared volume. The default path is “/charts”. +

+ +
+

+HelmValueMapType + +

+ +

+ +(Appears on:HelmValuesMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicaCount`
+ +string + + +
+ +(Optional) + +

+Defines the key for setting the replica count in the Helm values map. +

+ +
+ +`persistentVolumeEnabled`
+ +string + + +
+ +(Optional) + +

+Indicates whether the persistent volume is enabled in the Helm values map. +

+ +
+ +`storageClass`
+ +string + + +
+ +(Optional) + +

+Specifies the key for setting the storage class in the Helm values map. +

+ +
+

+HelmValuesMapping + +

+ +

+ +(Appears on:HelmTypeInstallSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`HelmValuesMappingItem`
+ + +HelmValuesMappingItem + + + +
+ + +

+ +(Members of `HelmValuesMappingItem` are embedded into this type.) + +

+ +
+ +`extras`
+ + +[]HelmValuesMappingExtraItem + + + +
+ +(Optional) + +

+Helm value mapping items for extra items. +

+ +
+

+HelmValuesMappingExtraItem + +

+ +

+ +(Appears on:HelmValuesMapping) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`HelmValuesMappingItem`
+ + +HelmValuesMappingItem + + + +
+ + +

+ +(Members of `HelmValuesMappingItem` are embedded into this type.) + +

+ +
+ +`name`
+ +string + + +
+ + +

+Name of the item. +

+ +
+

+HelmValuesMappingItem + +

+ +

+ +(Appears on:HelmValuesMapping, HelmValuesMappingExtraItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`valueMap`
+ + +HelmValueMapType + + + +
+ +(Optional) + +

+Defines the “key” mapping values. Valid keys include `replicaCount`, +`persistentVolumeEnabled`, and `storageClass`. +Enum values explained: +

+
    +
  • +`replicaCount` sets the replicaCount value mapping key. +
  • +
  • +`persistentVolumeEnabled` sets the persistent volume enabled mapping key. +
  • +
  • +`storageClass` sets the storageClass mapping key. +
  • +
+ +
+ +`jsonMap`
+ + +HelmJSONValueMapType + + + +
+ +(Optional) + +

+Defines the “key” mapping values. The valid key is tolerations. +Enum values explained: +

+
    +
  • +`tolerations` sets the toleration mapping key. +
  • +
+ +
+ +`resources`
+ + +ResourceMappingItem + + + +
+ +(Optional) + +

+Sets resources related mapping keys. +

+ +
+

+InstallableSpec + +

+ +

+ +(Appears on:AddonSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`selectors`
+ + +[]SelectorRequirement + + + +
+ +(Optional) + +

+Specifies the selectors for add-on installation. If multiple selectors are provided, +they must all evaluate to true for the add-on to be installed. +

+ +
+ +`autoInstall`
+ +bool + + +
+ + +

+Indicates whether an add-on should be installed automatically. +

+ +
+

+LineSelectorOperator +(`string` alias) +

+ +

+ +(Appears on:SelectorRequirement) + +

+
+ +

+LineSelectorOperator defines line selector operators. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Contains" +

+
+ +
+ +

+"DoesNotContain" +

+
+ +
+ +

+"DoesNotMatchRegex" +

+
+ +
+ +

+"MatchRegex" +

+
+ +
+

+ResourceMappingItem + +

+ +

+ +(Appears on:HelmValuesMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`storage`
+ +string + + +
+ +(Optional) + +

+Specifies the key used for mapping the storage size value. +

+ +
+ +`cpu`
+ + +ResourceReqLimItem + + + +
+ +(Optional) + +

+Specifies the key used for mapping both CPU requests and limits. +

+ +
+ +`memory`
+ + +ResourceReqLimItem + + + +
+ +(Optional) + +

+Specifies the key used for mapping both Memory requests and limits. +

+ +
+

+ResourceReqLimItem + +

+ +

+ +(Appears on:ResourceMappingItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requests`
+ +string + + +
+ +(Optional) + +

+Specifies the mapping key for the request value. +

+ +
+ +`limits`
+ +string + + +
+ +(Optional) + +

+Specifies the mapping key for the limit value. +

+ +
+

+ResourceRequirements + +

+ +

+ +(Appears on:AddonInstallSpecItem) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`limits`
+ + +Kubernetes core/v1.ResourceList + + + +
+ +(Optional) + +

+Limits describes the maximum amount of compute resources allowed. +More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. +

+ +
+ +`requests`
+ + +Kubernetes core/v1.ResourceList + + + +
+ +(Optional) + +

+Requests describes the minimum amount of compute resources required. +If Requests is omitted for a container, it defaults to Limits if that is explicitly specified; +otherwise, it defaults to an implementation-defined value. +More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. +

+ +
+

+SelectorRequirement + +

+ +

+ +(Appears on:AddonDefaultInstallSpecItem, InstallableSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`key`
+ + +AddonSelectorKey + + + +
+ + +

+The selector key. Valid values are KubeVersion, KubeGitVersion and KubeProvider. +

+
    +
  • +`KubeVersion` the semver expression of Kubernetes versions, i.e., v1.24. +
  • +
  • +`KubeGitVersion` may contain distro. info., i.e., v1.24.4+eks. +
  • +
  • +`KubeProvider` the Kubernetes provider, i.e., aws, gcp, azure, huaweiCloud, tencentCloud etc. +
  • +
+ +
+ +`operator`
+ + +LineSelectorOperator + + + +
+ + +

+Represents a key’s relationship to a set of values. +Valid operators are Contains, NotIn, DoesNotContain, MatchRegex, and DoesNoteMatchRegex. +

+ +

+Possible enum values: +

+
    +
  • +`Contains` line contains a string. +
  • +
  • +`DoesNotContain` line does not contain a string. +
  • +
  • +`MatchRegex` line contains a match to the regular expression. +
  • +
  • +`DoesNotMatchRegex` line does not contain a match to the regular expression. +
  • +
+ +
+ +`values`
+ +[]string + + +
+ +(Optional) + +

+Represents an array of string values. This serves as an “OR” expression to the operator. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/api-reference/cluster.mdx b/docs/zh/preview/user_docs/references/api-reference/cluster.mdx new file mode 100644 index 00000000..3a99c520 --- /dev/null +++ b/docs/zh/preview/user_docs/references/api-reference/cluster.mdx @@ -0,0 +1,57618 @@ +--- +title: Cluster API Reference +description: Cluster API Reference +keywords: [cluster, api] +sidebar_position: 1 +sidebar_label: Cluster +--- +
+ +

+Packages: +

+ +

apps.kubeblocks.io/v1

+
+
+Resource Types: + +

+Cluster + +

+
+ +

+Cluster offers a unified management interface for a wide variety of database and storage systems: +

+
    +
  • +Relational databases: MySQL, PostgreSQL, MariaDB +
  • +
  • +NoSQL databases: Redis, MongoDB +
  • +
  • +KV stores: ZooKeeper, etcd +
  • +
  • +Analytics systems: ElasticSearch, OpenSearch, ClickHouse, Doris, StarRocks, Solr +
  • +
  • +Message queues: Kafka, Pulsar +
  • +
  • +Distributed SQL: TiDB, OceanBase +
  • +
  • +Vector databases: Qdrant, Milvus, Weaviate +
  • +
  • +Object storage: Minio +
  • +
+ +

+KubeBlocks utilizes an abstraction layer to encapsulate the characteristics of these diverse systems. +A Cluster is composed of multiple Components, each defined by vendors or KubeBlocks Addon developers via ComponentDefinition, +arranged in Directed Acyclic Graph (DAG) topologies. +The topologies, defined in a ClusterDefinition, coordinate reconciliation across Cluster’s lifecycle phases: +Creating, Running, Updating, Stopping, Stopped, Deleting. +Lifecycle management ensures that each Component operates in harmony, executing appropriate actions at each lifecycle stage. +

+ +

+For sharded-nothing architecture, the Cluster supports managing multiple shards, +each shard managed by a separate Component, supporting dynamic resharding. +

+ +

+The Cluster object is aimed to maintain the overall integrity and availability of a database cluster, +serves as the central control point, abstracting the complexity of multiple-component management, +and providing a unified interface for cluster-wide operations. +
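+
+For orientation, a minimal Cluster manifest can be sketched as follows. This is illustrative only: it mirrors the MySQL example from the user docs, and the object names and field values are assumptions rather than defaults.
+
+```bash
+# Sketch: create a minimal single-component Cluster (adjust names/values to your environment).
+kubectl apply -f - <<EOF
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: test-mysql
+  namespace: demo
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: mysql
+      componentDef: "mysql-8.0"
+      replicas: 2
+EOF
+```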

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`Cluster` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`clusterDef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition. +Which, in conjunction with the `topology` field, determine: +

+
    +
  • +The Components to be included in the Cluster. +
  • +
  • +The sequences in which the Components are created, updated, and terminate. +
  • +
+ +

+This facilitates multiple-components management with predefined ClusterDefinition. +

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Delete`: Deletes all runtime resources belong to the Cluster. +
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`shardings`
+ + +[]ClusterSharding + + + +
+ +(Optional) + +

+Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. +Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ClusterSharding. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster. +This field allows Services of selected Components, either from `componentSpecs` or `shardings` to be exposed, +alongside Services defined with ComponentService. +

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+ +
+ +`status`
+ + +ClusterStatus + + + +
+ + +
+

+ClusterDefinition + +

+
+ +

+ClusterDefinition defines the topology for databases or storage systems, +offering a variety of topological configurations to meet diverse deployment needs and scenarios. +

+ +

+It includes a list of Components and/or Shardings, each linked to a ComponentDefinition or a ShardingDefinition, +which enhances reusability and reduce redundancy. +For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, +simplifying the setup of new systems. +

+ +

+Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown between Components and/or Shardings, +ensuring a controlled and predictable management of cluster lifecycles. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ClusterDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterDefinitionSpec + + + +
+ +
+
+ + + + + + + + + +
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+ +
+ +`status`
+ + +ClusterDefinitionStatus + + + +
+ + +
+

+Component + +

+
+ +

+Component is a fundamental building block of a Cluster object. +For example, a Redis Cluster can include Components like ‘redis’, ‘sentinel’, and potentially a proxy like ‘twemproxy’. +

+ +

+The Component object is responsible for managing the lifecycle of all replicas within a Cluster component, +It supports a wide range of operations including provisioning, stopping, restarting, termination, upgrading, +configuration changes, vertical and horizontal scaling, failover, switchover, cross-node migration, +scheduling configuration, exposing Services, managing system accounts, enabling/disabling exporter, +and configuring log collection. +

+ +

+Component is an internal sub-object derived from the user-submitted Cluster object. +It is designed primarily to be used by the KubeBlocks controllers; +users are discouraged from modifying Component objects directly and should use them only for monitoring Component statuses.

+
Field | Description
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`Component` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentSpec + + + +
+ +
+
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ +(Optional) + +

+Specifies the behavior when a Component is deleted. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+If not specified, KubeBlocks automatically creates a default ServiceAccount named +“kb-{componentdefinition.name}”, bound to a role with rules defined in ComponentDefinition’s +`policyRules` field. If needed (currently this means if any lifecycleAction is enabled), +it will also be bound to a default role named +“kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If multiple components use the same ComponentDefinition, they will share one ServiceAccount. +

+ +

+If the field is not empty, the specified ServiceAccount will be used, and KubeBlocks will not +create a ServiceAccount. But KubeBlocks does create RoleBindings for the specified ServiceAccount. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated.

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that an in-place upgrade of the Pod is attempted first. +If that fails, it falls back to recreating the Pod. +The default value is “PreferInPlace”. +
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An Instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios:

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +
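+
+As a sketch, assume a Component with 3 replicas where one instance needs more memory than the others; all names and sizes are illustrative:
+
+replicas: 3
+instances:
+  - name: primary              # pods: $(component.name)-primary-0
+    replicas: 1
+    resources:
+      limits:
+        memory: 8Gi
+# the remaining 2 replicas come from the default template and naming rules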

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +
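+
+A sketch of a controlled scale-in that retires one specific instance; the instance name follows the default naming rules and is illustrative:
+
+replicas: 2                    # scaled down from 3
+offlineInstances:
+  - mycomponent-1              # pod stopped, PVC retained, ordinal 1 reserved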

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+ +`sidecars`
+ + +[]Sidecar + + + +
+ +(Optional) + +

+Specifies the sidecars to be injected into the Component. +

+ +
+ +
+ +`status`
+ + +ComponentStatus + + + +
+ + +
+

+ComponentDefinition + +

+
+ +

+ComponentDefinition serves as a reusable blueprint for creating Components, +encapsulating essential static settings such as Component description, +Pod templates, configuration file templates, scripts, parameter lists, +injected environment variables and their sources, and event handlers. +ComponentDefinition works in conjunction with dynamic settings from the ClusterComponentSpec, +to instantiate Components during Cluster creation. +

+ +

+Key aspects that can be defined in a ComponentDefinition include: +

+
    +
  • +PodSpec template: Specifies the PodSpec template used by the Component. +
  • +
  • +Configuration templates: Specify the configuration file templates required by the Component. +
  • +
  • +Scripts: Provide the necessary scripts for Component management and operations. +
  • +
  • +Storage volumes: Specify the storage volumes and their configurations for the Component. +
  • +
  • +Pod roles: Outlines various roles of Pods within the Component along with their capabilities. +
  • +
  • +Exposed Kubernetes Services: Specify the Services that need to be exposed by the Component. +
  • +
  • +System accounts: Define the system accounts required for the Component. +
  • +
  • +Monitoring and logging: Configure the exporter and logging settings for the Component. +
  • +
+ +

+ComponentDefinitions also enable defining reactive behaviors of the Component in response to events, +such as member join/leave, Component addition/deletion, role changes, switchover, and more. +This allows for automatic event handling, thus encapsulating complex behaviors within the Component.

+ +

+Referencing a ComponentDefinition when creating individual Components ensures inheritance of predefined configurations, +promoting reusability and consistency across different deployments and cluster topologies. +

+
Field | Description
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ComponentDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentDefinitionSpec + + + +
+ +
+
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +
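+
+A minimal sketch of a `runtime` template; the image and port are assumptions for illustration only:
+
+runtime:
+  containers:
+    - name: mysql
+      image: docker.io/mysql:8.0   # static default; instances may override it
+      ports:
+        - name: mysql
+          containerPort: 3306
+      volumeMounts:
+        - name: data
+          mountPath: /var/lib/mysql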

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +
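+
+A sketch of two vars, one resolved from a ComponentService and one from a system account; the referenced names are assumptions, and the exact selector fields should be verified against the EnvVar API:
+
+vars:
+  - name: MYSQL_HOST
+    valueFrom:
+      serviceVarRef:
+        name: mysql-server     # a ComponentService declared in `services`
+        host: Required
+  - name: MYSQL_PASSWORD
+    valueFrom:
+      credentialVarRef:
+        name: root             # a SystemAccount declared in `systemAccounts`
+        password: Required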

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, user can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, +and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`. +
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the config file templates and volume mount parameters used by the Component. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ config files. +Each template is represented as a ConfigMap and may contain multiple config files, with each file being a key in the ConfigMap. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
  • +User Accounts: Managed by users or administrator. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +
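+
+A sketch of a system account whose password is generated by the operator; the account name and policy values are illustrative:
+
+systemAccounts:
+  - name: root
+    initAccount: true          # the account created during initialization
+    passwordGenerationPolicy:
+      length: 16
+      numDigits: 4
+      numSymbols: 0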

+ +
+ +`tls`
+ + +TLS + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`available`
+ + +ComponentAvailable + + + +
+ +(Optional) + +

+Specifies the strategies for determining the available status of the Component. +

+ +

+This field is immutable. +

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerate all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero or one role. +The KubeBlocks operator determines the role of each replica by invoking the `lifecycleActions.roleProbe` method. +This action returns the role for each replica, and the returned role must be predefined here.

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how the underlying InstanceSet creates pods during initial scale up, replaces pods on nodes, and deletes pods when scaling down.

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles +for the Component based on the specified policy rules. +This ensures that the Pods in the Component have appropriate permissions to function.

+ +

+To prevent privilege escalation, only permissions already owned by KubeBlocks can be added here. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of a role to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with a new configuration file. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +
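+
+A sketch of a `roleProbe` lifecycle action implemented as an ExecAction; the script path is an assumption, and real definitions ship their own probe commands:
+
+lifecycleActions:
+  roleProbe:
+    exec:
+      command:
+        - /bin/sh
+        - -c
+        - /scripts/get-role.sh   # must print the replica role (e.g. "leader") to stdout
+    timeoutSeconds: 5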

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+ +
+ +`status`
+ + +ComponentDefinitionStatus + + + +
+ + +
+

+ComponentVersion + +

+
+ +

+ComponentVersion is the Schema for the componentversions API +

+
Field | Description
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ComponentVersion` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentVersionSpec + + + +
+ +
+
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+ +
+ +`status`
+ + +ComponentVersionStatus + + + +
+ + +
+

+ServiceDescriptor + +

+
+ +

+ServiceDescriptor describes a service provided by external sources. +It contains the necessary details such as the service’s address and connection credentials. +To enable a Cluster to access this service, the ServiceDescriptor’s name should be specified +in the Cluster configuration under `clusterComponent.serviceRefs[*].serviceDescriptor`. +

+
Field | Description
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ServiceDescriptor` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ServiceDescriptorSpec + + + +
+ +
+
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`podFQDNs`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the pod FQDNs of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+ +
+ +`status`
+ + +ServiceDescriptorStatus + + + +
+ + +
+

+ShardingDefinition + +

+
+ +

+ShardingDefinition is the Schema for the shardingdefinitions API +

+
Field | Description
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`ShardingDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ShardingDefinitionSpec + + + +
+ +
+
+ +`template`
+ + +ShardingTemplate + + + +
+ + +

+This field is immutable. +

+ +
+ +`shardsLimit`
+ + +ShardsLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of shards supported by the sharding. +

+ +

+This field is immutable. +

+ +
+ +`provisionStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for provisioning shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for updating shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ShardingLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a sharding throughout its lifecycle. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]ShardingSystemAccount + + + +
+ +(Optional) + +

+Defines the system accounts for the sharding. +

+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +ShardingTLS + + + +
+ +(Optional) + +

+Defines the TLS for the sharding. +

+ +

+This field is immutable. +

+ +
+ +
+ +`status`
+ + +ShardingDefinitionStatus + + + +
+ + +
+

+SidecarDefinition + +

+
+ +

+SidecarDefinition is the Schema for the sidecardefinitions API +

+
Field | Description
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`SidecarDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +SidecarDefinitionSpec + + + +
+ +
+
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the sidecar. +

+ +
+ +`owner`
+ +string + + +
+ + +

+Specifies the component definition that the sidecar belongs to. +

+ +

+For a specific Cluster object, if there are any components provided by the component definition named in @owner, +the sidecar will automatically be created and injected into the components provided by +the component definitions listed in @selectors.

+ +

+This field is immutable. +

+ +
+ +`selectors`
+ +[]string + + +
+ + +

+Specifies the component definitions of the components that the sidecar is injected alongside.

+ +

+This field is immutable. +

+ +
+ +`containers`
+ + +[]Kubernetes core/v1.Container + + + +
+ + +

+List of containers for the sidecar. +

+ +

+Cannot be updated. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are needed by the sidecar. +

+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the configuration file templates used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the scripts used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+ +
+ +`status`
+ + +SidecarDefinitionStatus + + + +
+ + +
+

+Action + +

+ +

+ +(Appears on:ClusterComponentConfig, ComponentLifecycleActions, Probe, ShardingLifecycleActions, ConfigTemplate, MembershipReconfiguration) + +

+
+ +

+Action defines a customizable hook or procedure tailored for different database engines, +designed to be invoked at predetermined points within the lifecycle of a Component instance. +It provides a modular and extensible way to customize a Component’s behavior through the execution of defined actions. +

+ +

+Available Action triggers include: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of a role to a new replica. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with a new configuration. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+Actions can be executed in different ways: +

+
    +
  • +ExecAction: Executes a command inside a container. +A set of predefined environment variables are available and can be leveraged within the `exec.command` +to access context information such as details about pods, components, the overall cluster state, +or database connection credentials. +These variables provide a dynamic and context-aware mechanism for script execution. +
  • +
  • +HTTPAction: Performs an HTTP request. +HTTPAction is to be implemented in future version. +
  • +
  • +GRPCAction: In future version, Actions will support initiating gRPC calls. +This allows developers to implement Actions using plugins written in programming language like Go, +providing greater flexibility and extensibility. +
  • +
+ +

+An action is considered successful if it returns 0 (or HTTP 200 for HTTP(s) Actions). +Any other return value or HTTP status code indicates failure, +and the action may be retried based on the configured retry policy.

+
    +
  • +If an action exceeds the specified timeout duration, it will be terminated, and the action is considered failed. +
  • +
  • +If an action produces any data as output, it should be written to stdout, +or included in the HTTP response payload for HTTP(s) actions. +
  • +
  • +If an action encounters any errors, error messages should be written to stderr, +or detailed in the HTTP response with the appropriate non-200 status code. +
  • +
+
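+
+Putting these pieces together, a sketch of a complete Action; the command, timing, and retry values are illustrative:
+
+exec:
+  command:
+    - /bin/sh
+    - -c
+    - /scripts/post-provision.sh   # success: exit 0, output written to stdout
+timeoutSeconds: 30                 # terminated and marked failed beyond this
+retryPolicy:
+  maxRetries: 3
+  retryInterval: 10
+preCondition: ComponentReady       # only meaningful for postProvision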
Field | Description
+ +`exec`
+ + +ExecAction + + + +
+ +(Optional) + +

+Defines the command to run. +

+ +

+This field cannot be updated. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration in seconds that the Action is allowed to run. +

+ +

+If the Action does not complete within this time frame, it will be terminated. +

+ +

+This field cannot be updated. +

+ +
+ +`retryPolicy`
+ + +RetryPolicy + + + +
+ +(Optional) + +

+Defines the strategy to be taken when retrying the Action after a failure. +

+ +

+It specifies the conditions under which the Action should be retried and the limits to apply, +such as the maximum number of retries and backoff strategy. +

+ +

+This field cannot be updated. +

+ +
+ +`preCondition`
+ + +PreConditionType + + + +
+ +(Optional) + +

+Specifies the state that the cluster must reach before the Action is executed. +Currently, this is only applicable to the `postProvision` action. +

+ +

+The conditions are as follows: +

+
    +
  • +`Immediately`: Executed right after the Component object is created. +The readiness of the Component and its resources is not guaranteed at this stage. +
  • +
  • +`RuntimeReady`: The Action is triggered after the Component object has been created and all associated +runtime resources (e.g. Pods) are in a ready state. +
  • +
  • +`ComponentReady`: The Action is triggered after the Component itself is in a ready state. +This process does not affect the readiness state of the Component or the Cluster. +
  • +
  • +`ClusterReady`: The Action is executed after the Cluster is in a ready state. +This execution does not alter the Component or the Cluster’s state of readiness. +
  • +
+ +

+This field cannot be updated. +

+ +
+

+ActionAssertion + +

+ +

+ +(Appears on:ComponentAvailableProbeAssertion) + +

+
+ +

+ActionAssertion defines the custom assertions for evaluating the success or failure of an action. +

+
Field | Description
+ +`succeed`
+ +bool + + +
+ +(Optional) + +

+Whether the action should succeed or fail. +

+ +

+This field is immutable once set. +

+ +
+ +`stdout`
+ + +ActionOutputMatcher + + + +
+ +(Optional) + +

+Specifies the stdout matcher for the action. +

+ +

+This field is immutable once set. +

+ +
+ +`stderr`
+ + +ActionOutputMatcher + + + +
+ +(Optional) + +

+Specifies the stderr matcher for the action. +

+ +

+This field is immutable once set. +

+ +
+

+ActionOutputMatcher + +

+ +

+ +(Appears on:ActionAssertion) + +

+
+ +

+ActionOutputMatcher defines the matcher for the output of an action. +

+
Field | Description
+ +`equalTo`
+ +string + + +
+ +(Optional) + +

+The output of the action should be equal to the specified value. +

+ +

+This field is immutable once set. +

+ +
+ +`contains`
+ +string + + +
+ +(Optional) + +

+The output of the action should contain the specified value. +

+ +

+This field is immutable once set. +
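+
+As a sketch, an assertion that considers the action successful only if it exits cleanly and prints a marker string (the marker is illustrative):
+
+succeed: true
+stdout:
+  contains: "ready"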

+ +
+

+ClusterBackup + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+
Field | Description
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether automated backup is enabled for the Cluster. +

+ +
+ +`retentionPeriod`
+ +github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.RetentionPeriod + + +
+ +(Optional) + +

+Determines the duration to retain backups. Backups older than this period are automatically removed. +

+ +

+For example, a RetentionPeriod of `30d` will keep only the backups of the last 30 days. +Sample duration format:

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +Default value is 7d. +

+ +
+ +`method`
+ +string + + +
+ + +

+Specifies the backup method to use, as defined in backupPolicy. +

+ +
+ +`cronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Specifies the maximum time in minutes that the system will wait to start a missed backup job. +If the scheduled backup time is missed for any reason, the backup job must start within this deadline. +Values must be between 0 (immediate execution) and 1440 (one day). +

+ +
+ +`repoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the backupRepo. If not set, the default backupRepo will be used. +

+ +
+ +`pitrEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable point-in-time recovery. +

+ +
+ +`continuousMethod`
+ +string + + +
+ +(Optional) + +

+Specifies the continuous backup method to use; if not set, the first continuous method is used.

+ +
+ +`incrementalBackupEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable incremental backup. +

+ +
+ +`incrementalCronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the incremental backup schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +
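+
+Taken together, a sketch of a Cluster `backup` section; the method and repo names depend on the installed backupPolicy and backupRepo and are illustrative:
+
+backup:
+  enabled: true
+  method: xtrabackup             # a method defined in the backupPolicy
+  cronExpression: "0 18 * * *"   # daily at 18:00 UTC
+  retentionPeriod: 7d
+  repoName: my-repo
+  pitrEnabled: false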

+ +
+

+ClusterComponentConfig + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+ClusterComponentConfig represents a configuration for a component. +

+
Field | Description
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the config. +

+ +
+ +`variables`
+ +map[string]string + + +
+ +(Optional) + +

+Variables are key-value pairs for dynamic configuration values that can be provided by the user. +

+ +
+ +`ClusterComponentConfigSource`
+ + +ClusterComponentConfigSource + + + +
+ + +

+ +(Members of `ClusterComponentConfigSource` are embedded into this type.) + +

+ +

+The external source for the configuration. +

+ +
+ +`reconfigure`
+ + +Action + + + +
+ +(Optional) + +

+The custom reconfigure action to reload the service configuration whenever changes to this config are detected. +

+ +

+The container executing this action has access to following variables: +

+
    +
  • +KB_CONFIG_FILES_CREATED: file1,file2… +
  • +
  • +KB_CONFIG_FILES_REMOVED: file1,file2… +
  • +
  • +KB_CONFIG_FILES_UPDATED: file1:checksum1,file2:checksum2… +
  • +
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`externalManaged`
+ +bool + + +
+ +(Optional) + +

+ExternalManaged indicates whether the configuration is managed by an external system. +When set to true, the controller will use the user-provided template and reconfigure action, +ignoring the default template and update behavior. +
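+
+A sketch of an externally managed config with a custom reload action; the ConfigMap name and script are assumptions:
+
+configs:
+  - name: server-conf
+    externalManaged: true
+    configMap:
+      name: my-server-conf       # user-provided template
+    reconfigure:
+      exec:
+        command:
+          - /bin/sh
+          - -c
+          - /scripts/reload.sh "$KB_CONFIG_FILES_UPDATED"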

+ +
+

+ClusterComponentConfigSource + +

+ +

+ +(Appears on:ClusterComponentConfig) + +

+
+ +

+ClusterComponentConfigSource represents the source of a configuration for a component. +

+
Field | Description
+ +`configMap`
+ + +Kubernetes core/v1.ConfigMapVolumeSource + + + +
+ +(Optional) + +

+ConfigMap source for the config. +

+ +
+

+ClusterComponentService + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+
Field | Description
+ +`name`
+ +string + + +
+ + +

+References the ComponentService name defined in the `componentDefinition.spec.services[*].name`. +

+ +
+ +`serviceType`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+Determines how the Service is exposed. Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. +

+
    +
  • +`ClusterIP` allocates a Cluster-internal IP address for load-balancing to endpoints. +Endpoints are determined by the selector or if that is not specified, +they are determined by manual construction of an Endpoints object or EndpointSlice objects. +
  • +
  • +`NodePort` builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the ClusterIP. +
  • +
  • +`LoadBalancer` builds on NodePort and creates an external load-balancer (if supported in the current cloud) +which routes to the same endpoints as the ClusterIP. +
  • +
+ +

+Note: although K8s Service type allows the ‘ExternalName’ type, it is not a valid option for ClusterComponentService. +

+ +

+For more info, see: +https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to generate individual Services for each Pod. +If set to true, a separate Service will be created for each Pod in the Cluster. +
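+
+A sketch that exposes one NodePort Service per Pod; the service name must match one declared in the ComponentDefinition and is illustrative here:
+
+services:
+  - name: mysql
+    serviceType: NodePort
+    podService: true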

+ +
+

+ClusterComponentSpec + +

+ +

+ +(Appears on:ClusterSharding, ClusterSpec) + +

+
+ +

+ClusterComponentSpec defines the specification of a Component within a Cluster. +

+
Field | Description
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the Component’s name. +It’s part of the Service DNS name and must comply with the IANA service naming rule. +The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), +but required otherwise. +

+ +
+ +`componentDef`
+ +string + + +
+ +(Optional) + +

+Specifies the ComponentDefinition custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+Supports three different ways to specify the ComponentDefinition: +

+
    +
  • +the regular expression - recommended +
  • +
  • +the full name - recommended +
  • +
  • +the name prefix +
  • +
+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +These environment variables will be placed after the environment variables declared in the Pod. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +If defined, it will overwrite the scheduling policy defined in ClusterSpec. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +
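+
+This mirrors the StatefulSet policy of the same name, so it is expected to take the following shape (`whenDeleted` and `whenScaled` are assumptions based on that type; see PersistentVolumeClaimRetentionPolicy for the authoritative fields):
+
+persistentVolumeClaimRetentionPolicy:
+  whenDeleted: Retain
+  whenScaled: Delete
+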

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ClusterComponentService + + + +
+ +(Optional) + +

+Overrides services defined in referenced ComponentDefinition. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`tls`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys, +to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +
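+
+A sketch of enabling TLS with user-provided certificates (the `secretRef` layout shown here is an assumption; consult the Issuer type for the exact fields):
+
+tls: true
+issuer:
+  name: UserProvided
+  secretRef:
+    name: my-tls-secret
+    ca: ca.crt
+    cert: tls.crt
+    key: tls.key
+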

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+If not specified, KubeBlocks automatically creates a default ServiceAccount named +“kb-{componentdefinition.name}”, bound to a role with rules defined in ComponentDefinition’s +`policyRules` field. If needed (currently this means if any lifecycleAction is enabled), +it will also be bound to a default role named +“kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If multiple components use the same ComponentDefinition, they will share one ServiceAccount. +

+ +

+If the field is not empty, the specified ServiceAccount will be used, and KubeBlocks will not
create a ServiceAccount. However, KubeBlocks will still create RoleBindings for the specified ServiceAccount.

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
The default concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed.
Any attempt to modify other fields will be rejected.
  • +
  • +`PreferInPlace` indicates that an in-place upgrade of the Pod will be attempted first.
If that fails, it will fall back to `ReCreate`, where the Pod is recreated.
The default value is “PreferInPlace”.
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component.
An instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps).
While instances typically share a common configuration as defined in the ClusterComponentSpec,
they can require unique settings in various scenarios:

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +
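+
+A sketch of a heterogeneous Component, assuming InstanceTemplate exposes `name`, `replicas`, and `resources` (see the InstanceTemplate type for the authoritative field list):
+
+replicas: 3
+instances:
+  - name: primary
+    replicas: 1
+    resources:
+      limits:
+        cpu: "2"
+        memory: 4Gi
+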

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +
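+
+For example, a controlled scale-in that takes one specific replica offline (the instance name is illustrative and must match an existing instance):
+
+replicas: 2
+offlineInstances:
+  - mycluster-postgresql-1
+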

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+

+ClusterComponentStatus + +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterComponentStatus records Component status. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ComponentPhase + + + +
+ + +

+Specifies the current state of the Component. +

+ +
+ +`message`
+ +map[string]string + + +
+ +(Optional) + +

+Records detailed information about the Component in its current phase. +The keys are either podName, deployName, or statefulSetName, formatted as ‘ObjectKind/Name’. +

+ +
+

+ClusterComponentVolumeClaimTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refers to the name of a volumeMount defined in either: +

+
    +
  • +`componentDefinition.spec.runtime.containers[*].volumeMounts` +
  • +
  • +`clusterDefinition.spec.componentDefs[*].podSpec.containers[*].volumeMounts` (deprecated) +
  • +
+ +

+The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies the labels for the PVC of the volume. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies the annotations for the PVC of the volume. +

+ +
+ +`spec`
+ + +PersistentVolumeClaimSpec + + + +
+ +(Optional) + +

+Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume +with the mount name specified in the `name` field. +

+ +

+When a Pod is created for this ClusterComponent, a new PVC will be created based on the specification +defined in the `spec` field. The PVC will be associated with the volume mount specified by the `name` field. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+ +`volumeAttributesClassName`
+ +string + + +
+ +(Optional) + +

+volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. +

+ +

+More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass +

+ +
+ +
+

+ClusterDefinitionSpec + +

+ +

+ +(Appears on:ClusterDefinition) + +

+
+ +

+ClusterDefinitionSpec defines the desired state of ClusterDefinition. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+

+ClusterDefinitionStatus + +

+ +

+ +(Appears on:ClusterDefinition) + +

+
+ +

+ClusterDefinitionStatus defines the observed state of ClusterDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this ClusterDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ + +

+Specifies the current phase of the ClusterDefinition. Valid values are an empty value, `Available`, and `Unavailable`.
When `Available`, the ClusterDefinition is ready and can be referenced by related objects.

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`topologies`
+ +string + + +
+ +(Optional) + +

+Topologies this ClusterDefinition supported. +

+ +
+

+ClusterObjectReference + +

+ +

+ +(Appears on:ComponentVarSelector, CredentialVarSelector, HostNetworkVarSelector, ResourceVarSelector, ServiceRefVarSelector, ServiceVarSelector, TLSVarSelector) + +

+
+ +

+ClusterObjectReference defines information to let you locate the referenced object inside the same Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ +(Optional) + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition
custom resource (CR) used by the component that the referent object resides in.

+ +

+If not specified, the component itself will be used. +

+ +
+ +`name`
+ +string + + +
+ +(Optional) + +

+Name of the referent object. +

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specify whether the object must be defined. +

+ +
+ +`multipleClusterObjectOption`
+ + +MultipleClusterObjectOption + + + +
+ +(Optional) + +

+This option defines the behavior when multiple component objects match the specified @CompDef. +If not provided, an error will be raised when handling multiple matches. +

+ +
+

+ClusterPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterPhase defines the phase of the Cluster within the .status.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Abnormal" +

+
+ +

+AbnormalClusterPhase represents that some components are in the `Failed` phase, indicating that the cluster is in
a fragile state and troubleshooting is required.

+ +
+ +

+"Creating" +

+
+ +

+CreatingClusterPhase represents that all components are in the `Creating` phase.

+ +
+ +

+"Deleting" +

+
+ +

+DeletingClusterPhase indicates the cluster is being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedClusterPhase represents that all components are in the `Failed` phase, indicating that the cluster is unavailable.

+ +
+ +

+"Running" +

+
+ +

+RunningClusterPhase represents that all components are in the `Running` phase, indicating that the cluster is functioning properly.

+ +
+ +

+"Stopped" +

+
+ +

+StoppedClusterPhase represents that all components are in the `Stopped` phase, indicating that the cluster has stopped and
is not providing any functionality.

+ +
+ +

+"Stopping" +

+
+ +

+StoppingClusterPhase represents that at least one component is in the `Stopping` phase, indicating that the cluster is in
the process of stopping.

+ +
+ +

+"Updating" +

+
+ +

+UpdatingClusterPhase represents that all components are in the `Creating`, `Running`, or `Updating` phase, with at least one
component in the `Creating` or `Updating` phase, indicating that the cluster is undergoing an update.

+ +
+

+ClusterService + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterService defines a service that is exposed externally, allowing entities outside the cluster to access it. +For example, external applications, or other Clusters. +And another Cluster managed by the same KubeBlocks operator can resolve the address exposed by a ClusterService +using the `serviceRef` field. +

+ +

+When a Component needs to access another Cluster’s ClusterService using the `serviceRef` field, +it must also define the service type and version information in the `componentDefinition.spec.serviceRefDeclarations` +section. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`componentSelector`
+ +string + + +
+ +(Optional) + +

+Extends ServiceSpec.Selector by allowing a Component to be specified as a selector for the Service.

+ +

+If the `componentSelector` is set as the name of a sharding, the service will be exposed to all components in the sharding. +
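+
+A sketch of a ClusterService exposing one Component externally (the embedded Service fields follow the Service type referenced above; names and ports are placeholders):
+
+services:
+  - name: proxy-lb
+    componentSelector: proxy
+    spec:
+      type: LoadBalancer
+      ports:
+        - port: 6379
+          targetPort: 6379
+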

+ +
+

+ClusterSharding + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ClusterSharding defines how KubeBlocks manages dynamically provisioned shards.
A typical design pattern for distributed databases is to distribute data across multiple shards,
with each shard consisting of multiple replicas.
Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components
using a template when shards are added.
When shards are removed, the corresponding Components are also deleted.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the common parent part of all shard names. +

+ +

+This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. +It is used to generate the names of underlying Components following the pattern `$(clusterSharding.name)-$(ShardID)`. +ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. +For example, if the sharding specification name is “my-shard” and the ShardID is “abc”, the resulting Component name +would be “my-shard-abc”. +

+ +

+Note that the name defined in the Component template (`clusterSharding.template.name`) will be disregarded
when generating the Component names of the shards. The `clusterSharding.name` field takes precedence.

+ +
+ +`shardingDef`
+ +string + + +
+ +(Optional) + +

+Specifies the ShardingDefinition custom resource (CR) that defines the sharding’s characteristics and behavior. +

+ +

+The full name or regular expression is supported to match the ShardingDefinition. +

+ +
+ +`template`
+ + +ClusterComponentSpec + + + +
+ + +

+The template for generating Components for shards, where each shard consists of one Component. +

+ +

+This field is of type ClusterComponentSpec, which encapsulates all the required details and +definitions for creating and managing the Components. +KubeBlocks uses this template to generate a set of identical Components of shards. +All the generated Components will have the same specifications and definitions as specified in the `template` field. +

+ +

+This allows for the creation of multiple Components with consistent configurations, +enabling sharding and distribution of workloads across Components. +
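+
+A minimal sharding sketch combining the fields of this type (names and counts are placeholders):
+
+shardings:
+  - name: shard
+    shards: 3
+    template:
+      name: redis
+      componentDef: redis
+      replicas: 2
+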

+ +
+ +`shards`
+ +int32 + + +
+ + +

+Specifies the desired number of shards. +

+ +

+Users can declare the desired number of shards through this field. +KubeBlocks dynamically creates and deletes Components based on the difference +between the desired and actual number of shards. +KubeBlocks provides lifecycle management for sharding, including: +

+
    +
  • +Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. +This allows for custom actions to be performed after a new shard is provisioned. +
  • +
  • +Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. +This enables custom cleanup or data migration tasks to be executed before a shard is terminated. +Resources and data associated with the corresponding Component will also be deleted. +
  • +
+ +
+

+ClusterSpec + +

+ +

+ +(Appears on:Cluster) + +

+
+ +

+ClusterSpec defines the desired state of Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterDef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition,
which, in conjunction with the `topology` field, determines:

+
    +
  • +The Components to be included in the Cluster. +
  • +
  • +The sequence in which the Components are created, updated, and terminated.
  • +
+ +

+This facilitates the management of multiple Components using a predefined ClusterDefinition.

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Delete`: Deletes all runtime resources belonging to the Cluster.
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +
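+
+Putting the fields above together, a minimal Cluster might look like this (names are placeholders; `clusterDef` and `topology` must match an installed ClusterDefinition):
+
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: mycluster
+spec:
+  clusterDef: postgresql
+  topology: replication
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: postgresql
+      replicas: 2
+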

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`shardings`
+ + +[]ClusterSharding + + + +
+ +(Optional) + +

+Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. +Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ClusterSharding. +

+ +

+Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster.
This field allows Services of selected Components, either from `componentSpecs` or `shardings`, to be exposed,
alongside Services defined with ComponentService.

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+

+ClusterStatus + +

+ +

+ +(Appears on:Cluster) + +

+
+ +

+ClusterStatus defines the observed state of the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the Cluster object that has been observed by the controller. +

+ +
+ +`phase`
+ + +ClusterPhase + + + +
+ +(Optional) + +

+The current phase of the Cluster includes: +`Creating`, `Running`, `Updating`, `Stopping`, `Stopped`, `Deleting`, `Failed`, `Abnormal`. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentStatus + + + +
+ +(Optional) + +

+Records the current status information of all Components within the Cluster. +

+ +
+ +`shardings`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentStatus + + + +
+ +(Optional) + +

+Records the current status information of all shardings within the Cluster. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Cluster object. +Each condition in the list provides real-time information about certain aspect of the Cluster object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Cluster. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+

+ClusterTopology + +

+ +

+ +(Appears on:ClusterDefinitionSpec) + +

+
+ +

+ClusterTopology represents the definition for a specific cluster topology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is the unique identifier for the cluster topology. +Cannot be updated. +

+ +
+ +`components`
+ + +[]ClusterTopologyComponent + + + +
+ +(Optional) + +

+Components specifies the components in the topology. +

+ +
+ +`shardings`
+ + +[]ClusterTopologySharding + + + +
+ +(Optional) + +

+Shardings specifies the shardings in the topology. +

+ +
+ +`orders`
+ + +ClusterTopologyOrders + + + +
+ +(Optional) + +

+Specifies the sequence in which components within a cluster topology are +started, stopped, and upgraded. +This ordering is crucial for maintaining the correct dependencies and operational flow across components. +

+ +
+ +`default`
+ +bool + + +
+ +(Optional) + +

+Default indicates whether this topology serves as the default configuration. +When set to true, this topology is automatically used unless another is explicitly specified. +
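+
+A sketch of a topology entry in a ClusterDefinition, using only the fields above (names are placeholders):
+
+topologies:
+  - name: replication
+    default: true
+    components:
+      - name: postgresql
+        compDef: postgresql
+      - name: proxy
+        compDef: proxy
+    orders:
+      provision:
+        - postgresql
+        - proxy
+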

+ +
+

+ClusterTopologyComponent + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologyComponent defines a Component within a ClusterTopology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the component within the cluster topology. +

+ +

+It follows IANA Service naming rules and is used as part of the Service’s DNS name. +The name must start with a lowercase letter, can contain lowercase letters, numbers, +and hyphens, and must end with a lowercase letter or number. +

+ +

+If the @template field is set to true, the name will be used as a prefix to match the specific components dynamically created. +

+ +

+Cannot be updated once set. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+The system selects the ComponentDefinition CR with the latest version that matches the pattern. +This approach allows: +

+
    +
  1. +Precise selection by providing the exact name of a ComponentDefinition CR. +
  2. +
  3. +Flexible and automatic selection of the most up-to-date ComponentDefinition CR +by specifying a name prefix or regular expression pattern. +
  4. +
+ +

+Cannot be updated once set. +

+ +
+ +`template`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the topology component serves as a template for dynamically instantiating components upon user request.

+ +

+Cannot be updated once set. +

+ +
+

+ClusterTopologyOrders + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologyOrders manages the lifecycle of components within a cluster by defining their provisioning, +terminating, and updating sequences. +It organizes components into stages or groups, where each group indicates a set of components +that can be managed concurrently. +These groups are processed sequentially, allowing precise control based on component dependencies and requirements. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`provision`
+ +[]string + + +
+ +(Optional) + +

+Specifies the order for creating and initializing entities. +This is designed for entities that depend on one another. Entities without dependencies can be grouped together. +

+ +

+Entities that can be provisioned independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+ +`terminate`
+ +[]string + + +
+ +(Optional) + +

+Outlines the order for stopping and deleting entities. +This sequence is designed for entities that require a graceful shutdown or have interdependencies. +

+ +

+Entities that can be terminated independently or have no dependencies can be listed together in the same stage, +separated by commas. +

+ +
+ +`update`
+ +[]string + + +
+ +(Optional) + +

+Update determines the order for updating entities’ specifications, such as image upgrades or resource scaling. +This sequence is designed for entities that have dependencies or require specific update procedures. +

+ +

+Entities that can be updated independently or have no dependencies can be listed together in the same stage, +separated by commas. +
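+
+For example, with three illustrative components where `etcd` must exist before the others, independent entities share a stage separated by commas:
+
+orders:
+  provision:
+    - etcd
+    - server,proxy
+  terminate:
+    - server,proxy
+    - etcd
+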

+ +
+

+ClusterTopologySharding + +

+ +

+ +(Appears on:ClusterTopology) + +

+
+ +

+ClusterTopologySharding defines a sharding within a ClusterTopology. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the sharding within the cluster topology. +It follows IANA Service naming rules and is used as part of the Service’s DNS name. +The name must start with a lowercase letter, can contain lowercase letters, numbers, +and hyphens, and must end with a lowercase letter or number. +

+ +

+Cannot be updated once set. +

+ +
+ +`shardingDef`
+ +string + + +
+ + +

+Specifies the sharding definition that defines the characteristics and behavior of the sharding. +

+ +

+The system selects the ShardingDefinition CR with the latest version that matches the pattern. +This approach allows: +

+
    +
  1. +Precise selection by providing the exact name of a ShardingDefinition CR. +
  2. +
  3. +Flexible and automatic selection of the most up-to-date ShardingDefinition CR +by specifying a regular expression pattern. +
  4. +
+ +

+Once set, this field cannot be updated. +

+ +
+

+ClusterVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ClusterVarSelector selects a var from a Cluster. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterVars`
+ + +ClusterVars + + + +
+ + +

+ +(Members of `ClusterVars` are embedded into this type.) + +

+ +
+

+ClusterVars + +

+ +

+ +(Appears on:ClusterVarSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`namespace`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the namespace of the Cluster object. +

+ +
+ +`clusterName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the name of the Cluster object. +

+ +
+ +`clusterUID`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the UID of the Cluster object. +

+ +
+

+ComponentAvailable + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ComponentAvailable defines the strategies for determining whether the component is available. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`withPhases`
+ +string + + +
+ +(Optional) + +

+Specifies the phases that the component will go through to be considered available. +

+ +

+This field is immutable once set. +

+ +
+ +`withProbe`
+ + +ComponentAvailableWithProbe + + + +
+ +(Optional) + +

+Specifies the strategies for determining whether the component is available based on the available probe. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableCondition + +

+ +

+ +(Appears on:ComponentAvailableWithProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentAvailableExpression`
+ + +ComponentAvailableExpression + + + +
+ + +

+ +(Members of `ComponentAvailableExpression` are embedded into this type.) + +

+ +
+ +`and`
+ + +[]ComponentAvailableExpression + + + +
+ +(Optional) + +

+Logical And to combine multiple expressions. +

+ +

+This field is immutable once set. +

+ +
+ +`or`
+ + +[]ComponentAvailableExpression + + + +
+ +(Optional) + +

+Logical Or to combine multiple expressions. +

+ +

+This field is immutable once set. +

+ +
+ +`not`
+ + +ComponentAvailableExpression + + + +
+ +(Optional) + +

+Logical Not to negate the expression. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableExpression + +

+ +

+ +(Appears on:ComponentAvailableCondition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`all`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+All replicas must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+ +`any`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+At least one replica must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+ +`none`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+None of the replicas must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+ +`majority`
+ + +ComponentAvailableProbeAssertion + + + +
+ +(Optional) + +

+Majority replicas must satisfy the assertion. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableProbeAssertion + +

+ +

+ +(Appears on:ComponentAvailableExpression) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ActionAssertion`
+ + +ActionAssertion + + + +
+ + +

+ +(Members of `ActionAssertion` are embedded into this type.) + +

+ +
+ +`and`
+ + +[]ActionAssertion + + + +
+ +(Optional) + +

+Logical And to combine multiple assertions. +

+ +

+This field is immutable once set. +

+ +
+ +`or`
+ + +[]ActionAssertion + + + +
+ +(Optional) + +

+Logical Or to combine multiple assertions. +

+ +

+This field is immutable once set. +

+ +
+ +`not`
+ + +ActionAssertion + + + +
+ +(Optional) + +

+Logical Not to negate the assertions. +

+ +

+This field is immutable once set. +

+ +
+ +`strict`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to apply the assertions strictly to all replicas.

+ +

+This field is immutable once set. +

+ +
+

+ComponentAvailableWithProbe + +

+ +

+ +(Appears on:ComponentAvailable) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`timeWindowSeconds`
+ +int32 + + +
+ +(Optional) + +

+This field is immutable once set. +

+ +
+ +`condition`
+ + +ComponentAvailableCondition + + + +
+ +(Optional) + +

+Specifies the conditions that the component will go through to be considered available. +

+ +

+This field is immutable once set. +

+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+A brief description for the condition when the component is available. +

+ +
+

+ComponentDefinitionSpec + +

+ +

+ +(Appears on:ComponentDefinition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +
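+
+A minimal `runtime` sketch (a standard Kubernetes PodSpec; the image and names are placeholders):
+
+runtime:
+  containers:
+    - name: postgresql
+      image: docker.io/library/postgres:14.8
+      ports:
+        - name: tcp-postgresql
+          containerPort: 5432
+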

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +
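+
+A sketch that exposes the Cluster name to Pods, assuming the VarSource selector for Cluster values is named `clusterVarRef` and that VarOption accepts `Required` (see ClusterVars below):
+
+vars:
+  - name: CLUSTER_NAME
+    valueFrom:
+      clusterVarRef:
+        clusterName: Required
+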

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, user can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services using variables declared in
`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`,
and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`.
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the config file templates and volume mount parameters used by the Component. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ config files. +Each template is represented as a ConfigMap and may contain multiple config files, with each file being a key in the ConfigMap. +
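+
+A sketch referencing a template ConfigMap, using only fields documented in ComponentFileTemplate (all names are placeholders):
+
+configs:
+  - name: server-conf
+    template: server-conf-tpl
+    namespace: kb-system
+    volumeName: conf
+    restartOnFileChange: true
+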

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
  • +User Accounts: Managed by users or administrator. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +TLS + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`available`
+ + +ComponentAvailable + + + +
+ +(Optional) + +

+Specifies the strategies for determining the available status of the Component. +

+ +

+This field is immutable. +

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerate all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero or one role.
The KubeBlocks operator determines the role of each replica by invoking the `lifecycleActions.roleProbe` method.
This action returns the role for each replica, and the returned role must be predefined here.
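+
+A sketch of a leader-follower role set (ReplicaRole defines further attributes; only `name` is shown here):
+
+roles:
+  - name: leader
+  - name: follower
+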

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+InstanceSet controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles
for the Component based on the specified policy rules.
This ensures that the Pods in the Component have appropriate permissions to function.

+ +

+To prevent privilege escalation, only permissions already owned by KubeBlocks can be added here. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of a role to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state.
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with a new configuration file.
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+

+ComponentDefinitionStatus + +

+ +

+ +(Appears on:ComponentDefinition) + +

+
+ +

+ComponentDefinitionStatus defines the observed state of ComponentDefinition. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the ComponentDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the ComponentDefinition. Valid values include an empty value, `Available`, and `Unavailable`.
When the status is `Available`, the ComponentDefinition is ready and can be utilized by related objects.

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+ComponentFileTemplate + +

+ +

+ +(Appears on:ComponentDefinitionSpec, SidecarDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the template. +

+ +
+ +`template`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced template ConfigMap object. +

+ +
+ +`volumeName`
+ +string + + +
+ +(Optional) + +

+Refers to the volume name of the PodTemplate. The file produced through the template will be mounted to
the corresponding volume. Must be a DNS_LABEL name.
The volume name must be defined in `podSpec.containers[*].volumeMounts`.

+ +
+ +`defaultMode`
+ +int32 + + +
+ +(Optional) + +

+The operator attempts to set default file permissions (0444). +

+ +

+Must be specified as an octal value between 0000 and 0777 (inclusive), +or as a decimal value between 0 and 511 (inclusive). +YAML supports both octal and decimal values for file permissions. +

+ +

+Please note that this setting only affects the permissions of the files themselves. +Directories within the specified path are not impacted by this setting. +It’s important to be aware that this setting might conflict with other options +that influence the file mode, such as fsGroup. +In such cases, the resulting file mode may have additional bits set. +Refers to documents of k8s.ConfigMapVolumeSource.defaultMode for more information. +

+ +
+ +`externalManaged`
+ +bool + + +
+ +(Optional) + +

+ExternalManaged indicates whether the configuration is managed by an external system. +When set to true, the controller will ignore the management of this configuration. +

+ +
+ +`restartOnFileChange`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to restart the pod when the file changes. +

+ +
+
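+
+For illustration, a minimal sketch of a file template combining the fields above (the ConfigMap and volume names are hypothetical):
+
+
+name: server-conf
+template: server-conf-template
+namespace: default
+volumeName: conf
+defaultMode: 0444
+restartOnFileChange: true
+
+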

+ComponentLifecycleActions + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ComponentLifecycleActions defines a collection of Actions for customizing the behavior of a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`postProvision`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a component’s creation. +

+ +

+By setting `postProvision.customHandler.preCondition`, you can determine the specific lifecycle stage
+at which the action should trigger: `Immediately`, `RuntimeReady`, `ComponentReady`, or `ClusterReady`,
+with `ComponentReady` being the default.

+ +

+The PostProvision Action is intended to run only once. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`preTerminate`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to terminating a component. +

+ +

+The PreTerminate Action is intended to run only once. +

+ +

+This action is executed immediately when a scale-down operation for the Component is initiated. +The actual termination and cleanup of the Component and its associated resources will not proceed +until the PreTerminate action has completed successfully. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`roleProbe`
+ + +Probe + + + +
+ +(Optional) + +

+Defines the procedure which is invoked regularly to assess the role of replicas. +

+ +

+This action is periodically triggered at the specified interval to determine the role of each replica. +Upon successful execution, the action’s output designates the role of the replica, +which should match one of the predefined role names within `componentDefinition.spec.roles`. +The output is then compared with the previous successful execution result. +If a role change is detected, an event is generated to inform the controller, +which initiates an update of the replica’s role. +

+ +

+Defining a RoleProbe Action for a Component is required if roles are defined for the Component. +It ensures replicas are correctly labeled with their respective roles. +Without this, services that rely on roleSelectors might improperly direct traffic to wrong replicas. +

+ +

+The container executing this action has access to the following variables:

+
    +
  • +KB_POD_FQDN: The FQDN of the Pod whose role is being assessed. +
  • +
+ +

+Expected output of this action: +- On Success: The determined role of the replica, which must align with one of the roles specified + in the component definition. +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +
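+
+For illustration, a minimal sketch of a role probe that prints the detected role to stdout (the `exec` form and `timeoutSeconds` follow the Action schema in this reference; `periodSeconds` is assumed and the script path is hypothetical):
+
+
+roleProbe:
+  periodSeconds: 1
+  timeoutSeconds: 1
+  exec:
+    command:
+    - /bin/sh
+    - -c
+    - /scripts/get-role.sh
+
+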

+Note: This field is immutable once it has been set. +

+ +
+ +`availableProbe`
+ + +Probe + + + +
+ +(Optional) + +

+Defines the procedure which is invoked regularly to assess the availability of the component. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`switchover`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for a controlled transition of a role to a new replica. +This approach aims to minimize downtime and maintain availability +during events such as planned maintenance or when performing stop, shutdown, restart, or upgrade operations. +In a typical consensus system, this action is used to transfer leader role to another replica. +

+ +

+The container executing this action has access to the following variables:

+
    +
  • +KB_SWITCHOVER_CANDIDATE_NAME: The name of the pod of the new role’s candidate, which may not be specified (empty). +
  • +
  • +KB_SWITCHOVER_CANDIDATE_FQDN: The FQDN of the pod of the new role’s candidate, which may not be specified (empty). +
  • +
  • +KB_SWITCHOVER_CURRENT_NAME: The name of the pod of the current role. +
  • +
  • +KB_SWITCHOVER_CURRENT_FQDN: The FQDN of the pod of the current role. +
  • +
  • +
KB_SWITCHOVER_ROLE: The role that will be transferred to another replica.
+This variable can be empty if, for example, the role probe does not succeed.
+How such cases are handled depends on the addon implementation.
  • +
+ +
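+
+For illustration, a minimal sketch of a switchover action (the helper script is hypothetical; note that the candidate variables above may be empty):
+
+
+switchover:
+  exec:
+    command:
+    - /bin/sh
+    - -c
+    - /scripts/switchover.sh "$KB_SWITCHOVER_CANDIDATE_FQDN"
+
+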

+Note: This field is immutable once it has been set. +

+ +
+ +`memberJoin`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to add a new replica to the replication group. +

+ +

+This action is initiated after a replica pod becomes ready. +

+ +

+The role of the replica (e.g., primary, secondary) will be determined and assigned as part of the action command +implementation, or automatically by the database kernel or a sidecar utility like Patroni that implements +a consensus algorithm. +

+ +

+The container executing this action has access to the following variables:

+
    +
  • +KB_JOIN_MEMBER_POD_FQDN: The pod FQDN of the replica being added to the group. +
  • +
  • +KB_JOIN_MEMBER_POD_NAME: The pod name of the replica being added to the group. +
  • +
+ +

+Expected action output: +- On Failure: An error message detailing the reason for any failure encountered +during the addition of the new member. +

+ +

+For example, to add a new OBServer to an OceanBase Cluster in ‘zone1’, the following command may be used: +

+
+
+command:
+- bash
+- -c
+- |
+   CLIENT="mysql -u $SERVICE_USER -p$SERVICE_PASSWORD -P $SERVICE_PORT -h $SERVICE_HOST -e"
+   $CLIENT "ALTER SYSTEM ADD SERVER '$KB_POD_FQDN:$SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`memberLeave`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to remove a replica from the replication group. +

+ +

+This action is initiated before removing a replica from the group.
+The operator will wait for MemberLeave to complete successfully before releasing the replica and cleaning up
+related Kubernetes resources.

+ +

+The process typically includes updating configurations and informing other group members about the removal. +Data migration is generally not part of this action and should be handled separately if needed. +

+ +

+The container executing this action has access to the following variables:

+
    +
  • +
KB_LEAVE_MEMBER_POD_FQDN: The pod FQDN of the replica being removed from the group.
  • +
  • +KB_LEAVE_MEMBER_POD_NAME: The pod name of the replica being removed from the group. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+For example, to remove an OBServer from an OceanBase Cluster in ‘zone1’, the following command can be executed: +

+
+
+command:
+- bash
+- -c
+- |
+   CLIENT="mysql -u $SERVICE_USER -p$SERVICE_PASSWORD -P $SERVICE_PORT -h $SERVICE_HOST -e"
+   $CLIENT "ALTER SYSTEM DELETE SERVER '$KB_POD_FQDN:$SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readonly`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to switch a replica into the read-only state. +

+ +

+Use Case: +This action is invoked when the database’s volume capacity nears its upper limit and space is about to be exhausted. +

+ +

+The container executing this action has access to the following environment variables:

+
    +
  • +KB_POD_FQDN: The FQDN of the replica pod whose role is being checked. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readwrite`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to transition a replica from the read-only state back to the read-write state. +

+ +

+Use Case: +This action is used to bring back a replica that was previously in a read-only state, +which restricted write operations, to its normal operational state where it can handle +both read and write operations. +

+ +

+The container executing this action has access to the following environment variables:

+
    +
  • +KB_POD_FQDN: The FQDN of the replica pod whose role is being checked. +
  • +
+ +

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataDump`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for exporting the data from a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Applicability: +Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+The output should be a valid data dump streamed to stdout. It must exclude any irrelevant information to ensure +that only the necessary data is exported for import into the new replica. +

+ +

+The container executing this action has access to the following environment variables:

+
    +
  • +KB_TARGET_POD_NAME: The name of the replica pod into which the data will be loaded. +
  • +
+ +
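+
+For illustration, a minimal sketch of a dataDump action that streams a logical dump to stdout (the dump command is hypothetical; a real addon would use its engine's native tooling):
+
+
+dataDump:
+  exec:
+    command:
+    - /bin/sh
+    - -c
+    - pg_dumpall -h 127.0.0.1 -U postgres
+
+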

+Note: This field is immutable once it has been set. +

+ +
+ +`dataLoad`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for importing data into a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+Data should be received through stdin. If any error occurs during the process, +the action must be able to guarantee idempotence to allow for retries from the beginning. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`reconfigure`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure that updates a replica with a new configuration.

+ +

+Note: This field is immutable once it has been set. +

+ +

+This Action is reserved for future versions. +

+ +
+ +`accountProvision`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure to generate a new database account. +

+ +

+Use Case: +This action is designed to create system accounts that are utilized for replication, monitoring, backup, +and other administrative tasks. +

+ +

+The container executing this action has access to the following variables:

+
    +
  • +KB_ACCOUNT_NAME: The name of the system account to be manipulated. +
  • +
  • +KB_ACCOUNT_PASSWORD: The password for the system account. +
  • +
  • +KB_ACCOUNT_STATEMENT: The statement used to manipulate the system account. +
  • +
+ +
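+
+For illustration, a minimal sketch of an accountProvision action that executes the prepared statement (the client invocation is hypothetical):
+
+
+accountProvision:
+  exec:
+    command:
+    - /bin/sh
+    - -c
+    - mysql -h 127.0.0.1 -e "$KB_ACCOUNT_STATEMENT"
+
+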

+Note: This field is immutable once it has been set. +

+ +
+

+ComponentPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentStatus, ComponentStatus) + +

+
+ +

+ComponentPhase defines the phase of the Component within the .status.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Creating" +

+
+ +

+CreatingComponentPhase indicates the component is currently being created. +

+ +
+ +

+"Deleting" +

+
+ +

+DeletingComponentPhase indicates the component is currently being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedComponentPhase indicates that there are some pods of the component not in a ‘Running’ state. +

+ +
+ +

+"Running" +

+
+ +

+RunningComponentPhase indicates that all pods of the component are up-to-date and in a ‘Running’ state. +

+ +
+ +

+"Starting" +

+
+ +

+StartingComponentPhase indicates the component is currently being started. +

+ +
+ +

+"Stopped" +

+
+ +

+StoppedComponentPhase indicates the component is stopped. +

+ +
+ +

+"Stopping" +

+
+ +

+StoppingComponentPhase indicates the component is currently being stopped. +

+ +
+ +

+"Updating" +

+
+ +

+UpdatingComponentPhase indicates the component is currently being updated. +

+ +
+

+ComponentService + +

+ +

+ +(Appears on:ComponentDefinitionSpec, ComponentSpec) + +

+
+ +

+ComponentService defines a service that would be exposed as an inter-component service within a Cluster. +A Service defined in the ComponentService is expected to be accessed by other Components within the same Cluster. +

+ +

+When a Component needs to use a ComponentService provided by another Component within the same Cluster, +it can declare a variable in the `componentDefinition.spec.vars` section and bind it to the specific exposed address +of the ComponentService using the `serviceVarRef` field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to create a corresponding Service for each Pod of the selected Component.
+When set to true, a separate Service will be automatically generated for each Pod,
+and the `roleSelector` field will be ignored.

+ +

+The names of the generated Services will follow the same suffix naming pattern: `$(serviceName)-$(podOrdinal)`. +The total number of generated Services will be equal to the number of replicas specified for the Component. +

+ +

+Example usage: +

+
+
+name: my-service
+serviceName: my-service
+podService: true
+disableAutoProvision: true
+spec:
+  type: NodePort
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+
+
+ +

+In this example, if the Component has 3 replicas, three Services will be generated: +- my-service-0: Points to the first Pod (podOrdinal: 0) +- my-service-1: Points to the second Pod (podOrdinal: 1) +- my-service-2: Points to the third Pod (podOrdinal: 2) +

+ +

+Each generated Service will have the specified spec configuration and will target its respective Pod. +

+ +

+This feature is useful when you need to expose each Pod of a Component individually, allowing external access +to specific instances of the Component. +

+ +
+ +`disableAutoProvision`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the automatic provisioning of the service should be disabled. +

+ +

+If set to true, the service will not be automatically created during component provisioning.
+Instead, you can enable the creation of this service by specifying it explicitly in the cluster API.

+ +
+

+ComponentSpec + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentSpec defines the desired state of Component +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ +(Optional) + +

+Specifies the behavior when a Component is deleted. +

+ +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +Require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+If not specified, KubeBlocks automatically creates a default ServiceAccount named +“kb-{componentdefinition.name}”, bound to a role with rules defined in ComponentDefinition’s +`policyRules` field. If needed (currently this means if any lifecycleAction is enabled), +it will also be bound to a default role named +“kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If multiple components use the same ComponentDefinition, they will share one ServiceAccount. +

+ +

+If the field is not empty, the specified ServiceAccount will be used, and KubeBlocks will not +create a ServiceAccount. But KubeBlocks does create RoleBindings for the specified ServiceAccount. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
+The default concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +
`StrictInPlace` indicates that only in-place upgrades are allowed.
+Any attempt to modify other fields will be rejected.
  • +
  • +
`PreferInPlace` indicates that an in-place upgrade of the Pod will be attempted first.
+If that fails, it will fall back to recreating the Pod.
+The default value is `PreferInPlace`.
  • +
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component.
+An Instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps).
+While instances typically share a common configuration as defined in the ClusterComponentSpec,
+they can require unique settings in various scenarios:

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
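+
+For illustration, a minimal sketch that gives one instance more memory than the rest (the template name is hypothetical):
+
+
+replicas: 3
+instances:
+- name: high-mem
+  replicas: 1
+  resources:
+    requests:
+      memory: 8Gi
+
+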
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+ +`sidecars`
+ + +[]Sidecar + + + +
+ +(Optional) + +

+Specifies the sidecars to be injected into the Component. +

+ +
+

+ComponentStatus + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentStatus represents the observed state of a Component within the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Specifies the most recent generation observed for this Component object. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Component object. +Each condition in the list provides real-time information about certain aspect of the Component object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Component. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+ +`phase`
+ + +ComponentPhase + + + +
+ + +

+Indicates the current phase of the Component, with each phase indicating specific conditions: +

+
    +
  • +Creating: The initial phase for new Components, transitioning from ‘empty’(“”). +
  • +
  • +Running: All Pods are up-to-date and in a Running state. +
  • +
  • +Updating: The Component is currently being updated, with no failed Pods present. +
  • +
  • +Failed: A significant number of Pods have failed. +
  • +
  • +Stopping: All Pods are being terminated, with current replica count at zero. +
  • +
  • +Stopped: All associated Pods have been successfully deleted. +
  • +
  • +Starting: Pods are being started. +
  • +
  • +Deleting: The Component is being deleted. +
  • +
+ +
+ +`message`
+ +map[string]string + + +
+ +(Optional) + +

+A map that stores detailed message about the Component. +Each entry in the map provides insights into specific elements of the Component, such as Pods or workloads. +

+ +

+Keys in this map are formatted as `ObjectKind/Name`, where `ObjectKind` could be a type like Pod, +and `Name` is the specific name of the object. +

+ +
+

+ComponentSystemAccount + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the system account. +

+ +
+ +`disabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the system account is disabled. +

+ +
+ +`passwordConfig`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+Refers to the secret from which data will be copied to create the new account. +

+ +

+For user-specified passwords, the maximum length is limited to 64 bytes. +

+ +

+This field is immutable once set. +

+ +
+

+ComponentVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ComponentVarSelector selects a var from a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`ComponentVars`
+ + +ComponentVars + + + +
+ + +

+ +(Members of `ComponentVars` are embedded into this type.) + +

+ +
+

+ComponentVars + +

+ +

+ +(Appears on:ComponentVarSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the name of the Component object. +

+ +
+ +`shortName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the short name of the Component object. +

+ +
+ +`replicas`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the replicas of the component. +

+ +
+ +`podNames`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod name list of the component.
+The value will be presented in the following format: name1,name2,…

+ +
+ +`podFQDNs`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod FQDN list of the component. +The value will be presented in the following format: FQDN1,FQDN2,… +

+ +
+ +`podNamesForRole`
+ + +RoledVar + + + +
+ +(Optional) + +

+Reference to the list of names of the component's pods that have a specific role.
+The value will be presented in the following format: name1,name2,…

+ +
+ +`podFQDNsForRole`
+ + +RoledVar + + + +
+ +(Optional) + +

+Reference to the list of FQDNs of the component's pods that have a specific role.
+The value will be presented in the following format: FQDN1,FQDN2,…

+ +
+

+ComponentVersionCompatibilityRule + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionCompatibilityRule defines the compatibility between a set of component definitions and a set of releases. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this ComponentVersion. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +
“^mysql-8.0.\d{1,2}$”: Matches all names starting with “mysql-8.0.” followed by one or two digits.
  • +
+ +
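+
+For illustration, a minimal sketch of a compatibility rule (definition and release names are hypothetical):
+
+
+compatibilityRules:
+- compDefs:
+  - mysql-8.0
+  releases:
+  - 8.0.30
+  - 8.0.31
+
+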
+ +`releases`
+ +[]string + + +
+ + +

+Releases is a list of identifiers for the releases. +

+ +
+

+ComponentVersionRelease + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionRelease represents a release of component instances within a ComponentVersion. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is a unique identifier for this release. +Cannot be updated. +

+ +
+ +`changes`
+ +string + + +
+ +(Optional) + +

+Changes provides information about the changes made in this release. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+ServiceVersion defines the version of the well-known service that the component provides. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If the release is used, it will serve as the service version for component instances, overriding the one defined in the component definition. +Cannot be updated. +

+ +
+ +`images`
+ +map[string]string + + +
+ + +

+Images define the new images for containers, actions or external applications within the release. +

+ +

+If an image is specified for a lifecycle action, the key should be the field name (case-insensitive) of +the action in the LifecycleActions struct. +

+ +
+
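+
+For illustration, a minimal sketch of a release entry (image references are hypothetical; note the lifecycle-action key matching the `roleProbe` field name, case-insensitively):
+
+
+releases:
+- name: 8.0.31
+  serviceVersion: 8.0.31
+  changes: Bug fixes.
+  images:
+    mysql: docker.io/library/mysql:8.0.31
+    roleprobe: docker.io/example/agent:1.0
+
+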

+ComponentVersionSpec + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionSpec defines the desired state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+

+ComponentVersionStatus + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionStatus defines the observed state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+ObservedGeneration is the most recent generation observed for this ComponentVersion. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Phase valid values are ``, `Available`, and `Unavailable`.
+When the phase is `Available`, the ComponentVersion is available and can be used by related objects.

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Extra message for current phase. +

+ +
+ +`serviceVersions`
+ +string + + +
+ +(Optional) + +

+ServiceVersions represent the supported service versions of this ComponentVersion. +

+ +
+

+ComponentVolume + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the volume. +It must be a DNS_LABEL and unique within the pod. +More info can be found at: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +Note: This field cannot be updated. +

+ +
+ +`needSnapshot`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the creation of a snapshot of this volume is necessary when performing a backup of the Component. +

+ +

+Note: This field cannot be updated. +

+ +
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+Sets the critical threshold for volume space utilization as a percentage (0-100). +

+ +

+Exceeding this percentage triggers the system to switch the volume to read-only mode as specified in +`componentDefinition.spec.lifecycleActions.readOnly`. +This precaution helps prevent space depletion while maintaining read-only access. +If the space utilization later falls below this threshold, the system reverts the volume to read-write mode +as defined in `componentDefinition.spec.lifecycleActions.readWrite`, restoring full functionality. +

+ +

+Note: This field cannot be updated. +

+ +
+
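+
+For illustration, a minimal sketch of a volume declaration combining the fields above (the volume name and threshold are hypothetical):
+
+
+volumes:
+- name: data
+  needSnapshot: true
+  highWatermark: 90
+
+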

+ConnectionCredentialAuth + +

+ +

+ +(Appears on:ServiceDescriptorSpec) + +

+
+ +

+ConnectionCredentialAuth specifies the authentication credentials required for accessing an external service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the username for the external service. +

+ +
+ +`password`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the password for the external service. +

+ +
+

+ContainerVars + +

+ +

+ +(Appears on:HostNetworkVars) + +

+
+ +

+ContainerVars defines the vars that can be referenced from a Container. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the container. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Container port to reference. +

+ +
+

+CredentialVar + +

+ +

+ +(Appears on:ConnectionCredentialAuth, ServiceDescriptorSpec) + +

+
+ +

+CredentialVar represents a variable that retrieves its value either directly from a specified expression +or from a source defined in `valueFrom`. +Only one of these options may be used at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`value`
+ +string + + +
+ +(Optional) + +

+Holds a direct string or an expression that can be evaluated to a string. +

+ +

+It can include variables denoted by $(VAR_NAME). +These variables are expanded to the value of the environment variables defined in the container. +If a variable cannot be resolved, it remains unchanged in the output. +

+ +

+To escape variable expansion and retain the literal value, use double $ characters. +

+ +

+For example: +

+
    +
  • +
“$(VAR_NAME)” will be expanded to the value of the environment variable VAR_NAME.
  • +
  • +
“$$(VAR_NAME)” will result in “$(VAR_NAME)” in the output, without any variable expansion.
  • +
+ +

+Default value is an empty string. +

+ +
+ +`valueFrom`
+ + +Kubernetes core/v1.EnvVarSource + + + +
+ +(Optional) + +

+Specifies the source for the variable’s value. +

+ +
+

+CredentialVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+CredentialVarSelector selects a var from a Credential (SystemAccount). +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Credential (SystemAccount) to select from. +

+ +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+CredentialVars + +

+ +

+ +(Appears on:CredentialVarSelector, ServiceRefVars) + +

+
+ +

+CredentialVars defines the vars that can be referenced from a Credential (SystemAccount).
+Note: CredentialVars will only be used as environment variables for Pods & Actions, and will not be used to render the templates.

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`password`
+ + +VarOption + + + +
+ +(Optional) + +
+

+EnvVar + +

+ +

+ +(Appears on:ComponentDefinitionSpec, SidecarDefinitionSpec) + +

+
+ +

+EnvVar represents a variable present in the env of Pod/Action or the template of config/script. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name of the variable. Must be a C_IDENTIFIER. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Variable references `$(VAR_NAME)` are expanded using the previously defined variables in the current context. +

+ +

+If a variable cannot be resolved, the reference in the input string will be unchanged. +Double `$$` are reduced to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. +

+
    +
  • +`$$(VAR_NAME)` will produce the string literal `$(VAR_NAME)`. +
  • +
+ +

+Escaped references will never be expanded, regardless of whether the variable exists or not. +Defaults to “”. +

+ +
+ +`valueFrom`
+ + +VarSource + + + +
+ +(Optional) + +

+Source for the variable’s value. Cannot be used if value is not empty. +

+ +
+ +`expression`
+ +string + + +
+ +(Optional) + +

+A Go template expression that will be applied to the resolved value of the var. +

+ +

+The expression will only be evaluated if the var is successfully resolved to a non-credential value. +

+ +

+The resolved value can be accessed by its name within the expression; system vars and other user-defined
+non-credential vars can be used within the expression in the same way.
+Note that, when accessing a var by its name, you should replace every “-” in the name with “_”,
+because “-” is not valid in Go identifiers.

+ +

+All expressions are evaluated in the order the vars are defined. If a var depends on any vars that also +have expressions defined, be careful about the evaluation order as it may use intermediate values. +

+ +

+The result of evaluation will be used as the final value of the var. If the expression fails to evaluate, +the resolving of var will also be considered failed. +

+ +
+
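+
+For illustration, a minimal sketch of a var composed from another var through an expression (var names are hypothetical; note that `MYSQL-PORT` is referenced as `.MYSQL_PORT` inside the template):
+
+
+vars:
+- name: MYSQL-PORT
+  value: "3306"
+- name: MYSQL_DSN
+  value: ""
+  expression: 'root@tcp(127.0.0.1:{{ .MYSQL_PORT }})/'
+
+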

+ExecAction + +

+ +

+ +(Appears on:Action) + +

+
+ +

+ExecAction describes an Action that executes a command inside a container. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the container image to be used for running the Action. +

+ +

+When specified, a dedicated container will be created using this image to execute the Action.
+All actions with the same image will share the same container.

+ +

+This field cannot be updated. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Represents a list of environment variables that will be injected into the container. +These variables enable the container to adapt its behavior based on the environment it’s running in. +

+ +

+This field cannot be updated. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed inside the container.
+The working directory for this command is the container’s root directory (‘/’).
+Commands are executed directly without a shell environment, meaning shell-specific syntax (‘|’, etc.) is not supported.
+If a shell is required, it must be explicitly invoked in the command.

+ +

+A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. +

+ +
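+
+For illustration, since pipes are shell syntax, a command that uses them must invoke the shell explicitly (a hypothetical health check):
+
+
+command:
+- /bin/sh
+- -c
+- 'pg_isready -h 127.0.0.1 | grep -q accepting'
+
+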
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Args represents the arguments that are passed to the `command` for execution. +

+ +
+ +`targetPodSelector`
+ + +TargetPodSelector + + + +
+ +(Optional) + +

+Defines the criteria used to select the target Pod(s) for executing the Action. +This is useful when there is no default target replica identified. +It allows for precise control over which Pod(s) the Action should run in. +

+ +

+If not specified, the Action will be executed in the pod where the Action is triggered, such as the pod +to be removed or added; or a random pod if the Action is triggered at the component level, such as +post-provision or pre-terminate of the component. +

+ +

+This field cannot be updated. +

+ +
+ +`matchingKey`
+ +string + + +
+ +(Optional) + +

+Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. +The impact of this field depends on the `targetPodSelector` value: +

+
    +
  • +When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. +
  • +
  • +When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` +will be selected for the Action. +
  • +
+ +

+This field cannot be updated. +

+ +
+ +`container`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the container within the same pod whose resources will be shared with the action. +This allows the action to utilize the specified container’s resources without executing within it. +

+ +

+The name must match one of the containers defined in `componentDefinition.spec.runtime`. +

+ +

+The resources that can be shared include:

+
    +
  • +volume mounts +
  • +
+ +

+This field cannot be updated. +

+ +
+

+Exporter + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the built-in metrics exporter container. +

+ +
+ +`scrapePath`
+ +string + + +
+ +(Optional) + +

+Specifies the http/https url path to scrape for metrics. +If empty, Prometheus uses the default value (e.g. `/metrics`). +

+ +
+ +`scrapePort`
+ +string + + +
+ +(Optional) + +

+Specifies the port name to scrape for metrics. +

+ +
+ +`scrapeScheme`
+ + +PrometheusScheme + + + +
+ +(Optional) + +

+Specifies the schema to use for scraping. +`http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. +If empty, Prometheus uses the default value `http`. +

+ +
+
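+
+For illustration, a minimal sketch of an exporter declaration (container and port names are hypothetical):
+
+
+exporter:
+  containerName: metrics
+  scrapePath: /metrics
+  scrapePort: http-metrics
+  scrapeScheme: http
+
+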

+HostNetwork + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerPorts`
+ + +[]HostNetworkContainerPort + + + +
+ +(Optional) + +

+The list of container ports that are required by the component. +

+ +
+

+HostNetworkContainerPort + +

+ +

+ +(Appears on:HostNetwork) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ +string + + +
+ + +

+Container specifies the target container within the Pod. +

+ +
+ +`ports`
+ +[]string + + +
+ + +

+Ports are named container ports within the specified container. +These container ports must be defined in the container for proper port allocation. +

+ +
+

+HostNetworkVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+HostNetworkVarSelector selects a var from host-network resources. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The component to select from. +

+ +
+ +`HostNetworkVars`
+ + +HostNetworkVars + + + +
+ + +

+ +(Members of `HostNetworkVars` are embedded into this type.) + +

+ +
+

+HostNetworkVars + +

+ +

+ +(Appears on:HostNetworkVarSelector) + +

+
+ +

+HostNetworkVars defines the vars that can be referenced from host-network resources. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ + +ContainerVars + + + +
+ +(Optional) + +
+

+InstanceTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations in a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the Component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate.
+This field sets how many replicated instances of the Component,
+with the specific overrides in this InstanceTemplate, are created.
+The default value is 1. A value of 0 disables instance creation.

+ +
+ +`ordinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of this InstanceTemplate. +The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate. +

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under this InstanceTemplate would be
+$(cluster.name)-$(component.name)-$(template.name)-0, $(cluster.name)-$(component.name)-$(template.name)-1, and
+$(cluster.name)-$(component.name)-$(template.name)-7.

+ +
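+
+Expressed in YAML, the example above corresponds to:
+
+
+ordinals:
+  ranges:
+  - start: 0
+    end: 1
+  discrete:
+  - 7
+
+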
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the instance. +If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or ClusterComponentSpec. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+

+InstanceUpdateStrategy + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+ +

+InstanceUpdateStrategy defines fine-grained control over the spec update process of all instances. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +InstanceUpdateStrategyType + + + +
+ +(Optional) + +

+Indicates the type of the update strategy. +Default is RollingUpdate. +

+ +
+ +`rollingUpdate`
+ + +RollingUpdate + + + +
+ +(Optional) + +

+Specifies how the rolling update should be applied. +

+ +
+

+InstanceUpdateStrategyType +(`string` alias) +

+ +

+ +(Appears on:InstanceUpdateStrategy) + +

+
+ +

+InstanceUpdateStrategyType is a string enumeration type that enumerates +all possible update strategies for the KubeBlocks controllers. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"OnDelete" +

+
+ +

+OnDeleteStrategyType indicates that ordered rolling restarts are disabled. Instances are recreated +when they are manually deleted. +

+ +
+ +

+"RollingUpdate" +

+
+ +

+RollingUpdateStrategyType indicates that update will be +applied to all Instances with respect to the workload +ordering constraints. +

+ +
+

+Issuer + +

+ +

+ +(Appears on:ClusterComponentSpec, TLSConfig) + +

+
+ +

+Issuer defines the TLS certificates issuer for the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ + +IssuerName + + + +
+ + +

+The issuer for TLS certificates. +It only allows two enum values: `KubeBlocks` and `UserProvided`. +

+
    +
  • +`KubeBlocks` indicates that the self-signed TLS certificates generated by the KubeBlocks Operator will be used. +
  • +
  • +`UserProvided` means that the user is responsible for providing their own CA, Cert, and Key. +In this case, the user-provided CA certificate, server certificate, and private key will be used +for TLS communication. +
  • +
+ +
+ +`secretRef`
+ + +TLSSecretRef + + + +
+ +(Optional) + +

+SecretRef is the reference to the secret that contains user-provided certificates. +It is required when the issuer is set to `UserProvided`. +

+ +
+

+IssuerName +(`string` alias) +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+IssuerName defines the name of the TLS certificates issuer. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"KubeBlocks" +

+
+ +

+IssuerKubeBlocks represents certificates that are signed by the KubeBlocks Operator. +

+ +
+ +

+"UserProvided" +

+
+ +

+IssuerUserProvided indicates that the user has provided their own CA-signed certificates. +

+ +
+

+LetterCase +(`string` alias) +

+ +

+ +(Appears on:PasswordConfig) + +

+
+ +

+LetterCase defines the available cases to be used in password generation. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"LowerCases" +

+
+ +

+LowerCases represents the use of lower case letters only. +

+ +
+ +

+"MixedCases" +

+
+ +

+MixedCases represents the use of a mix of both lower and upper case letters. +

+ +
+ +

+"UpperCases" +

+
+ +

+UpperCases represents the use of upper case letters only. +

+ +
+

+LogConfig + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies a descriptive label for the log type, such as ‘slow’ for a MySQL slow log file. +It provides a clear identification of the log’s purpose and content. +

+ +
+ +`filePathPattern`
+ +string + + +
+ + +

+Specifies the paths or patterns identifying where the log files are stored. +This field allows the system to locate and manage log files effectively. +

+ +

+Examples: +

+
    +
  • +/home/postgres/pgdata/pgroot/data/log/postgresql-* +
  • +
  • +/data/mysql/log/mysqld-error.log +
  • +
+ +
+

+MultipleClusterObjectCombinedOption + +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectCombinedOption defines options for handling combined variables. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`newVarSuffix`
+ +string + + +
+ +(Optional) + +

+If set, the existing variable will be kept, and a new variable will be defined with the specified suffix
+in pattern: $(var.name)_$(suffix).
+The new variable will be auto-created and placed after the existing one.
+If not set, the existing variable will be reused with the value format defined below.

+ +
+ +`valueFormat`
+ + +MultipleClusterObjectValueFormat + + + +
+ +(Optional) + +

+The format of the value that the operator will use to compose values from multiple components. +

+ +
+ +`flattenFormat`
+ + +MultipleClusterObjectValueFormatFlatten + + + +
+ +(Optional) + +

+The flatten format, default is: $(comp-name-1):value,$(comp-name-2):value. +

+ +
+
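+
+For illustration, a minimal sketch of a combined option (the suffix is hypothetical); with matched components comp-a and comp-b, the flattened value would read comp-a:v1,comp-b:v2:
+
+
+combinedOption:
+  newVarSuffix: ALL
+  valueFormat: Flatten
+  flattenFormat:
+    delimiter: ","
+    keyValueDelimiter: ":"
+
+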

+MultipleClusterObjectOption + +

+ +

+ +(Appears on:ClusterObjectReference) + +

+
+ +

+MultipleClusterObjectOption defines the options for handling multiple cluster objects matched. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requireAllComponentObjects`
+ +bool + + +
+ +(Optional) + +

+RequireAllComponentObjects controls whether all component objects must exist before resolving. +If set to true, resolving will only proceed if all component objects are present. +

+ +
+ +`strategy`
+ + +MultipleClusterObjectStrategy + + + +
+ + +

+Define the strategy for handling multiple cluster objects. +

+ +
+ +`combinedOption`
+ + +MultipleClusterObjectCombinedOption + + + +
+ +(Optional) + +

+Define the options for handling combined variables. +Valid only when the strategy is set to “combined”. +

+ +
+

+MultipleClusterObjectStrategy +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectStrategy defines the strategy for handling multiple cluster objects. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"combined" +

+
+ +

+MultipleClusterObjectStrategyCombined - the values from all matched components will be combined into a single +variable using the specified option. +

+ +
+ +

+"individual" +

+
+ +

+MultipleClusterObjectStrategyIndividual - each matched component will have its individual variable with its name +as the suffix. +This is required when referencing credential variables that cannot be passed by values. +

+ +
+

+MultipleClusterObjectValueFormat +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormat defines the format details for the value. +

+
+ + + + + + + + + + + + + + +
ValueDescription
+ +

+"Flatten" +

+
+ +
+

+MultipleClusterObjectValueFormatFlatten + +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormatFlatten defines the flatten format for the value. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`delimiter`
+ +string + + +
+ + +

+Pair delimiter. +

+ +
+ +`keyValueDelimiter`
+ +string + + +
+ + +

+Key-value delimiter. +

+ +
+

+NamedVar + +

+ +

+ +(Appears on:ContainerVars, ResourceVars, ServiceVars) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +
+ +`option`
+ + +VarOption + + + +
+ +(Optional) + +
+

+Ordinals + +

+ +

+ +(Appears on:InstanceTemplate, InstanceSetSpec, InstanceTemplate) + +

+
+ +

+Ordinals represents a combination of continuous segments and individual values. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ranges`
+ + +[]Range + + + +
+ + +
+ +`discrete`
+ +[]int32 + + +
+ + +
+

+PasswordConfig + +

+ +

+ +(Appears on:ComponentSystemAccount, SystemAccount) + +

+
+ +

+PasswordConfig customizes the complexity of the password generation pattern.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`length`
+ +int32 + + +
+ +(Optional) + +

+The length of the password. +

+ +
+ +`numDigits`
+ +int32 + + +
+ +(Optional) + +

+The number of digits in the password. +

+ +
+ +`numSymbols`
+ +int32 + + +
+ +(Optional) + +

+The number of symbols in the password. +

+ +
+ +`letterCase`
+ + +LetterCase + + + +
+ +(Optional) + +

+The case of the letters in the password. +

+ +
+ +`seed`
+ +string + + +
+ +(Optional) + +

+Seed to generate the account’s password. +Cannot be updated. +

+ +
+
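+
+As a sketch, a password generation policy (values hypothetical; `MixedCases` assumes one of the LetterCase values of this API) might look like:
+
+```yaml
+passwordGenerationPolicy:
+  length: 16
+  numDigits: 4
+  numSymbols: 2
+  letterCase: MixedCases
+  seed: my-cluster-account   # optional; cannot be updated
+```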

+PersistentVolumeClaimRetentionPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+ +

+PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the VolumeClaimTemplates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`whenDeleted`
+ + +PersistentVolumeClaimRetentionPolicyType + + + +
+ +(Optional) + +

+WhenDeleted specifies what happens to PVCs created from VolumeClaimTemplates when the workload is deleted. +The `Retain` policy causes PVCs to not be affected by workload deletion. +The default policy of `Delete` causes those PVCs to be deleted. +

+ +
+ +`whenScaled`
+ + +PersistentVolumeClaimRetentionPolicyType + + + +
+ +(Optional) + +

+WhenScaled specifies what happens to PVCs created from VolumeClaimTemplates when the workload is scaled down. +The `Retain` policy causes PVCs to not be affected by a scale down. +The default policy of `Delete` causes the associated PVCs for pods scaled down to be deleted. +

+ +
+
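+
+A common sketch, assuming the policy is set via a `persistentVolumeClaimRetentionPolicy` field as the type name suggests: keep PVCs when the workload is deleted, but remove them on scale-down.
+
+```yaml
+persistentVolumeClaimRetentionPolicy:
+  whenDeleted: Retain
+  whenScaled: Delete
+```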

+PersistentVolumeClaimRetentionPolicyType +(`string` alias) +

+ +

+ +(Appears on:PersistentVolumeClaimRetentionPolicy) + +

+
+ +

+PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine +when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is +deleted or scaled down. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +

+DeletePersistentVolumeClaimRetentionPolicyType specifies that PersistentVolumeClaims associated with +VolumeClaimTemplates will be deleted in the scenario specified in PersistentVolumeClaimRetentionPolicy. +

+ +
+ +

+"Retain" +

+
+ +

+RetainPersistentVolumeClaimRetentionPolicyType is the default PersistentVolumeClaimRetentionPolicy +and specifies that PersistentVolumeClaims associated with VolumeClaimTemplates will not be deleted. +

+ +
+

+PersistentVolumeClaimSpec + +

+ +

+ +(Appears on:ClusterComponentVolumeClaimTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+ +`volumeAttributesClassName`
+ +string + + +
+ +(Optional) + +

+volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. +

+ +

+More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass +

+ +
+
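+
+For illustration, a minimal claim spec inside a volume claim template (the template name and storage class are hypothetical):
+
+```yaml
+volumeClaimTemplates:
+  - name: data
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 20Gi
+      storageClassName: standard
+      volumeMode: Filesystem
+```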

+Phase +(`string` alias) +

+ +

+ +(Appears on:ClusterDefinitionStatus, ComponentDefinitionStatus, ComponentVersionStatus, ServiceDescriptorStatus, ShardingDefinitionStatus, SidecarDefinitionStatus) + +

+
+ +

+Phase represents the status of a CR. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+AvailablePhase indicates that a CR is in an available state. +

+ +
+ +

+"Unavailable" +

+
+ +

+UnavailablePhase indicates that a CR is in an unavailable state. +

+ +
+

+PodUpdatePolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+ +

+PodUpdatePolicyType indicates how pods should be updated.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"PreferInPlace" +

+
+ +

+PreferInPlacePodUpdatePolicyType indicates that an in-place upgrade of the Pod will be attempted first.
+If that fails, it will fall back to ReCreate, where the Pod is recreated.

+ +
+ +

+"StrictInPlace" +

+
+ +

+StrictInPlacePodUpdatePolicyType indicates that only in-place upgrades are allowed.
+Any attempt to modify other fields will be rejected.

+ +
+

+PreConditionType +(`string` alias) +

+ +

+ +(Appears on:Action) + +

+
+ +

+PreConditionType defines the preCondition type of the action execution. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"ClusterReady" +

+
+ +
+ +

+"ComponentReady" +

+
+ +
+ +

+"Immediately" +

+
+ +
+ +

+"RuntimeReady" +

+
+ +
+

+Probe + +

+ +

+ +(Appears on:ComponentLifecycleActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Action`
+ + +Action + + + +
+ + +

+ +(Members of `Action` are embedded into this type.) + +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before the RoleProbe +begins to detect the container’s role. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency at which the probe is conducted. This value is expressed in seconds.
+Defaults to 10 seconds. Minimum value is 1.

+ +
+ +`successThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Minimum value is 1. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1. +

+ +
+
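+
+As a sketch of a role probe (the embedded Action is assumed to carry an exec command here; the script path is hypothetical):
+
+```yaml
+roleProbe:
+  exec:
+    command: ["/bin/sh", "-c", "/scripts/get-role.sh"]
+  initialDelaySeconds: 5
+  periodSeconds: 10      # default
+  successThreshold: 1    # default
+  failureThreshold: 3    # default
+```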

+PrometheusScheme +(`string` alias) +

+ +

+ +(Appears on:Exporter) + +

+
+ +

+PrometheusScheme defines the protocol used to scrape Prometheus metrics.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"http" +

+
+ +
+ +

+"https" +

+
+ +
+

+ProvisionSecretRef + +

+ +

+ +(Appears on:ComponentSystemAccount) + +

+
+ +

+ProvisionSecretRef represents the reference to a secret. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The unique identifier of the secret. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+The namespace where the secret is located. +

+ +
+ +`password`
+ +string + + +
+ +(Optional) + +

+The key in the secret data that contains the password. +

+ +
+

+Range + +

+ +

+ +(Appears on:Ordinals) + +

+
+ +

+Range represents a range with a start and an end value. +It is used to define a continuous segment. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`start`
+ +int32 + + +
+ + +
+ +`end`
+ +int32 + + +
+ + +
+

+ReplicaRole + +

+ +

+ +(Appears on:ComponentDefinitionSpec, InstanceSetSpec, MemberStatus) + +

+
+ +

+ReplicaRole represents a role that can be assigned to a component instance, defining its behavior and responsibilities. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name defines the role’s unique identifier. This value is used to set the “apps.kubeblocks.io/role” label +on the corresponding object to identify its role. +

+ +

+For example, common role names include: +- “leader”: The primary/master instance that handles write operations +- “follower”: Secondary/replica instances that replicate data from the leader +- “learner”: Read-only instances that don’t participate in elections +

+ +

+This field is immutable once set. +

+ +
+ +`updatePriority`
+ +int + + +
+ +(Optional) + +

+UpdatePriority determines the order in which pods with different roles are updated. +Pods are sorted by this priority (higher numbers = higher priority) and updated accordingly. +Roles with the highest priority will be updated last. +The default priority is 0. +

+ +

+For example: +- Leader role may have priority 2 (updated last) +- Follower role may have priority 1 (updated before leader) +- Learner role may have priority 0 (updated first) +

+ +

+This field is immutable once set. +

+ +
+ +`participatesInQuorum`
+ +bool + + +
+ +(Optional) + +

+ParticipatesInQuorum indicates if pods with this role are counted when determining quorum.
+This affects update strategies that need to maintain quorum for availability. Roles that participate
+in quorum should have a higher update priority than roles that do not.
+The default value is false.

+ +

+For example, in a 5-pod component where: +- 2 learner pods (participatesInQuorum=false) +- 2 follower pods (participatesInQuorum=true) +- 1 leader pod (participatesInQuorum=true) +The quorum size would be 3 (based on the 3 participating pods), allowing parallel updates +of 2 learners and 1 follower while maintaining quorum. +

+ +

+This field is immutable once set. +

+ +
+
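+
+Putting the examples above together, a sketch of a role set for a leader/follower/learner component:
+
+```yaml
+roles:
+  - name: leader
+    updatePriority: 2            # updated last
+    participatesInQuorum: true
+  - name: follower
+    updatePriority: 1
+    participatesInQuorum: true
+  - name: learner
+    updatePriority: 0            # updated first
+    participatesInQuorum: false
+```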

+ReplicasLimit + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ReplicasLimit defines the valid range of number of replicas supported. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`minReplicas`
+ +int32 + + +
+ + +

+The minimum limit of replicas. +

+ +
+ +`maxReplicas`
+ +int32 + + +
+ + +

+The maximum limit of replicas. +

+ +
+

+ResourceVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ResourceVarSelector selects a var from a kind of resource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`ResourceVars`
+ + +ResourceVars + + + +
+ + +

+ +(Members of `ResourceVars` are embedded into this type.) + +

+ +
+

+ResourceVars + +

+ +

+ +(Appears on:ResourceVarSelector) + +

+
+ +

+ResourceVars defines the vars that can be referenced from resources. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cpu`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`cpuLimit`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`memory`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`memoryLimit`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`storage`
+ + +NamedVar + + + +
+ +(Optional) + +
+

+RetryPolicy + +

+ +

+ +(Appears on:Action) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`maxRetries`
+ +int + + +
+ +(Optional) + +

+Defines the maximum number of retry attempts that should be made for a given Action. +This value is set to 0 by default, indicating that no retries will be made. +

+ +
+ +`retryInterval`
+ +time.Duration + + +
+ +(Optional) + +

+Indicates the duration of time to wait between each retry attempt. +This value is set to 0 by default, indicating that there will be no delay between retry attempts. +

+ +
+

+RoledVar + +

+ +

+ +(Appears on:ComponentVars) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`role`
+ +string + + +
+ +(Optional) + +
+ +`option`
+ + +VarOption + + + +
+ +(Optional) + +
+

+RollingUpdate + +

+ +

+ +(Appears on:InstanceUpdateStrategy) + +

+
+ +

+RollingUpdate specifies how the rolling update should be applied. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Indicates the number of instances that should be updated during a rolling update. +The remaining instances will remain untouched. This is helpful in defining how many instances +should participate in the update process. +Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%). +Absolute number is calculated from percentage by rounding up. +The default value is ComponentSpec.Replicas (i.e., update all instances). +

+ +
+ +`maxUnavailable`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+The maximum number of instances that can be unavailable during the update.
+Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+Absolute number is calculated from percentage by rounding up. This cannot be 0.
+Defaults to 1. The field applies to all instances. That means if there is any unavailable pod,
+it will be counted towards MaxUnavailable.

+ +
+
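+
+For instance, a sketch of a partial rolling update (numbers hypothetical):
+
+```yaml
+rollingUpdate:
+  replicas: 3          # update only 3 instances; the rest stay untouched
+  maxUnavailable: 1    # may also be a percentage, e.g. "20%"
+```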

+SchedulingPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec, InstanceTemplate, InstanceTemplate) + +

+
+ +

+SchedulingPolicy defines the scheduling policy for instances. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+If specified, the Pod will be dispatched by the specified scheduler.
+If not specified, the Pod will be dispatched by the default scheduler.

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+NodeSelector is a selector which must be true for the Pod to fit on a node. +Selector which must match a node’s labels for the Pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, +the scheduler simply schedules this Pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules of the Cluster, including NodeAffinity, PodAffinity, and PodAntiAffinity. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
+- The `key`, `value`, and `effect` identify the taint that the toleration matches.
+- The `operator` determines how the toleration matches the taint.
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+TopologySpreadConstraints describes how a group of Pods ought to spread across topology +domains. Scheduler will schedule Pods in a way which abides by the constraints. +All topologySpreadConstraints are ANDed. +

+ +
+
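+
+A sketch of a scheduling policy pinning Pods to SSD nodes and tolerating a dedicated-node taint (label and taint names are hypothetical):
+
+```yaml
+schedulingPolicy:
+  nodeSelector:
+    disktype: ssd
+  tolerations:
+    - key: dedicated
+      operator: Equal
+      value: database
+      effect: NoSchedule
+```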

+Service + +

+ +

+ +(Appears on:ClusterService, ComponentService) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name defines the name of the service.
+Others can refer to this service by its name (e.g., connection credential).
+Cannot be updated.

+ +
+ +`serviceName`
+ +string + + +
+ +(Optional) + +

+ServiceName defines the name of the underlying service object. +If not specified, the default service name with different patterns will be used: +

+
+- CLUSTER_NAME: for cluster-level services
+- CLUSTER_NAME-COMPONENT_NAME: for component-level services
+ +

+Only one default service name is allowed. +Cannot be updated. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here.
+More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer.

+ +
+ +`spec`
+ + +Kubernetes core/v1.ServiceSpec + + + +
+ +(Optional) + +

+Spec defines the behavior of a service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`ports`
+ + +[]Kubernetes core/v1.ServicePort + + + +
+ + +

+The list of ports that are exposed by this service. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`selector`
+ +map[string]string + + +
+ +(Optional) + +

+Route service traffic to pods with label keys and values matching this +selector. If empty or not present, the service is assumed to have an +external process managing its endpoints, which Kubernetes will not +modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. +Ignored if type is ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/ +

+ +
+ +`clusterIP`
+ +string + + +
+ +(Optional) + +

+clusterIP is the IP address of the service and is usually assigned
+randomly. If an address is specified manually, is in-range (as per
+system configuration), and is not in use, it will be allocated to the
+service; otherwise creation of the service will fail. This field may not
+be changed through updates unless the type field is also being changed
+to ExternalName (which requires this field to be blank) or the type
+field is being changed from ExternalName (in which case this field may
+optionally be specified, as described above). Valid values are “None”,
+empty string (“”), or a valid IP address. Setting this to “None” makes a
+“headless service” (no virtual IP), which is useful when direct endpoint
+connections are preferred and proxying is not required. Only applies to
+types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+when creating a Service of type ExternalName, creation will fail. This
+field will be wiped when updating a Service to type ExternalName.
+More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies

+ +
+ +`clusterIPs`
+ +[]string + + +
+ +(Optional) + +

+ClusterIPs is a list of IP addresses assigned to this service, and are
+usually assigned randomly. If an address is specified manually, is
+in-range (as per system configuration), and is not in use, it will be
+allocated to the service; otherwise creation of the service will fail.
+This field may not be changed through updates unless the type field is
+also being changed to ExternalName (which requires this field to be
+empty) or the type field is being changed from ExternalName (in which
+case this field may optionally be specified, as described above). Valid
+values are “None”, empty string (“”), or a valid IP address. Setting
+this to “None” makes a “headless service” (no virtual IP), which is
+useful when direct endpoint connections are preferred and proxying is
+not required. Only applies to types ClusterIP, NodePort, and
+LoadBalancer. If this field is specified when creating a Service of type
+ExternalName, creation will fail. This field will be wiped when updating
+a Service to type ExternalName. If this field is not specified, it will
+be initialized from the clusterIP field. If this field is specified,
+clients must ensure that clusterIPs[0] and clusterIP have the same
+value.

+ +

+This field may hold a maximum of two entries (dual-stack IPs, in either order). +These IPs must correspond to the values of the ipFamilies field. Both +clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`type`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+type determines how the Service is exposed. Defaults to ClusterIP. Valid +options are ExternalName, ClusterIP, NodePort, and LoadBalancer. +“ClusterIP” allocates a cluster-internal IP address for load-balancing +to endpoints. Endpoints are determined by the selector or if that is not +specified, by manual construction of an Endpoints object or +EndpointSlice objects. If clusterIP is “None”, no virtual IP is +allocated and the endpoints are published as a set of endpoints rather +than a virtual IP. +“NodePort” builds on ClusterIP and allocates a port on every node which +routes to the same endpoints as the clusterIP. +“LoadBalancer” builds on NodePort and creates an external load-balancer +(if supported in the current cloud) which routes to the same endpoints +as the clusterIP. +“ExternalName” aliases this service to the specified externalName. +Several other fields do not apply to ExternalName services. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +

+ +
+ +`externalIPs`
+ +[]string + + +
+ +(Optional) + +

+externalIPs is a list of IP addresses for which nodes in the cluster +will also accept traffic for this service. These IPs are not managed by +Kubernetes. The user is responsible for ensuring that traffic arrives +at a node with this IP. A common example is external load-balancers +that are not part of the Kubernetes system. +

+ +
+ +`sessionAffinity`
+ + +Kubernetes core/v1.ServiceAffinity + + + +
+ +(Optional) + +

+Supports “ClientIP” and “None”. Used to maintain session affinity. +Enable client IP based session affinity. +Must be ClientIP or None. +Defaults to None. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`loadBalancerIP`
+ +string + + +
+ +(Optional) + +

+Only applies to Service Type: LoadBalancer. +This feature depends on whether the underlying cloud-provider supports specifying +the loadBalancerIP when a load balancer is created. +This field will be ignored if the cloud-provider does not support the feature. +Deprecated: This field was under-specified and its meaning varies across implementations. +Using it is non-portable and it may not support dual-stack. +Users are encouraged to use implementation-specific annotations when available. +

+ +
+ +`loadBalancerSourceRanges`
+ +[]string + + +
+ +(Optional) + +

+If specified and supported by the platform, traffic through the cloud-provider
+load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+cloud-provider does not support the feature.
+More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/

+ +
+ +`externalName`
+ +string + + +
+ +(Optional) + +

+externalName is the external reference that discovery mechanisms will +return as an alias for this service (e.g. a DNS CNAME record). No +proxying will be involved. Must be a lowercase RFC-1123 hostname +(https://tools.ietf.org/html/rfc1123) and requires `type` to be “ExternalName”. +

+ +
+ +`externalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceExternalTrafficPolicy + + + +
+ +(Optional) + +

+externalTrafficPolicy describes how nodes distribute service traffic they +receive on one of the Service’s “externally-facing” addresses (NodePorts, +ExternalIPs, and LoadBalancer IPs). If set to “Local”, the proxy will configure +the service in a way that assumes that external load balancers will take care +of balancing the service traffic between nodes, and so each node will deliver +traffic only to the node-local endpoints of the service, without masquerading +the client source IP. (Traffic mistakenly sent to a node with no endpoints will +be dropped.) The default value, “Cluster”, uses the standard behavior of +routing to all endpoints evenly (possibly modified by topology and other +features). Note that traffic sent to an External IP or LoadBalancer IP from +within the cluster will always get “Cluster” semantics, but clients sending to +a NodePort from within the cluster may need to take traffic policy into account +when picking a node. +

+ +
+ +`healthCheckNodePort`
+ +int32 + + +
+ +(Optional) + +

+healthCheckNodePort specifies the healthcheck nodePort for the service. +This only applies when type is set to LoadBalancer and +externalTrafficPolicy is set to Local. If a value is specified, is +in-range, and is not in use, it will be used. If not specified, a value +will be automatically allocated. External systems (e.g. load-balancers) +can use this port to determine if a given node holds endpoints for this +service or not. If this field is specified when creating a Service +which does not need it, creation will fail. This field will be wiped +when updating a Service to no longer need it (e.g. changing type). +This field cannot be updated once set. +

+ +
+ +`publishNotReadyAddresses`
+ +bool + + +
+ +(Optional) + +

+publishNotReadyAddresses indicates that any agent which deals with endpoints for this +Service should disregard any indications of ready/not-ready. +The primary use case for setting this field is for a StatefulSet’s Headless Service to +propagate SRV DNS records for its Pods for the purpose of peer discovery. +The Kubernetes controllers that generate Endpoints and EndpointSlice resources for +Services interpret this to mean that all endpoints are considered “ready” even if the +Pods themselves are not. Agents which consume only Kubernetes generated endpoints +through the Endpoints or EndpointSlice resources can safely assume this behavior. +

+ +
+ +`sessionAffinityConfig`
+ + +Kubernetes core/v1.SessionAffinityConfig + + + +
+ +(Optional) + +

+sessionAffinityConfig contains the configurations of session affinity. +

+ +
+ +`ipFamilies`
+ + +[]Kubernetes core/v1.IPFamily + + + +
+ +(Optional) + +

+IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this +service. This field is usually assigned automatically based on cluster +configuration and the ipFamilyPolicy field. If this field is specified +manually, the requested family is available in the cluster, +and ipFamilyPolicy allows it, it will be used; otherwise creation of +the service will fail. This field is conditionally mutable: it allows +for adding or removing a secondary IP family, but it does not allow +changing the primary IP family of the Service. Valid values are “IPv4” +and “IPv6”. This field only applies to Services of types ClusterIP, +NodePort, and LoadBalancer, and does apply to “headless” services. +This field will be wiped when updating a Service to type ExternalName. +

+ +

+This field may hold a maximum of two entries (dual-stack families, in +either order). These families must correspond to the values of the +clusterIPs field, if specified. Both clusterIPs and ipFamilies are +governed by the ipFamilyPolicy field. +

+ +
+ +`ipFamilyPolicy`
+ + +Kubernetes core/v1.IPFamilyPolicy + + + +
+ +(Optional) + +

+IPFamilyPolicy represents the dual-stack-ness requested or required by +this Service. If there is no value provided, then this field will be set +to SingleStack. Services can be “SingleStack” (a single IP family), +“PreferDualStack” (two IP families on dual-stack configured clusters or +a single IP family on single-stack clusters), or “RequireDualStack” +(two IP families on dual-stack configured clusters, otherwise fail). The +ipFamilies and clusterIPs fields depend on the value of this field. This +field will be wiped when updating a service to type ExternalName. +

+ +
+ +`allocateLoadBalancerNodePorts`
+ +bool + + +
+ +(Optional) + +

+allocateLoadBalancerNodePorts defines if NodePorts will be automatically +allocated for services with type LoadBalancer. Default is “true”. It +may be set to “false” if the cluster load-balancer does not rely on +NodePorts. If the caller requests specific NodePorts (by specifying a +value), those requests will be respected, regardless of this field. +This field may only be set for services with type LoadBalancer and will +be cleared if the type is changed to any other type. +

+ +
+ +`loadBalancerClass`
+ +string + + +
+ +(Optional) + +

+loadBalancerClass is the class of the load balancer implementation this Service belongs to. +If specified, the value of this field must be a label-style identifier, with an optional prefix, +e.g. “internal-vip” or “example.com/internal-vip”. Unprefixed names are reserved for end-users. +This field can only be set when the Service type is ‘LoadBalancer’. If not set, the default load +balancer implementation is used, today this is typically done through the cloud provider integration, +but should apply for any default implementation. If set, it is assumed that a load balancer +implementation is watching for Services with a matching class. Any default load balancer +implementation (e.g. cloud providers) should ignore Services that set this field. +This field can only be set when creating or updating a Service to type ‘LoadBalancer’. +Once set, it can not be changed. This field will be wiped when a service is updated to a non ‘LoadBalancer’ type. +

+ +
+ +`internalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceInternalTrafficPolicy + + + +
+ +(Optional) + +

+InternalTrafficPolicy describes how nodes distribute service traffic they +receive on the ClusterIP. If set to “Local”, the proxy will assume that pods +only want to talk to endpoints of the service on the same node as the pod, +dropping the traffic if there are no local endpoints. The default value, +“Cluster”, uses the standard behavior of routing to all endpoints evenly +(possibly modified by topology and other features). +

+ +
+ +
+ +`roleSelector`
+ +string + + +
+ +(Optional) + +

+Extends the above `serviceSpec.selector` by allowing you to specify a defined role as the selector for the service.
+When `roleSelector` is set, it adds a label selector “kubeblocks.io/role: {roleSelector}”
+to the `serviceSpec.selector`.
+Example usage:

+
+```yaml
+roleSelector: "leader"
+```
+
+ +

+In this example, setting `roleSelector` to “leader” will add a label selector +“kubeblocks.io/role: leader” to the `serviceSpec.selector`. +This means that the service will select and route traffic to Pods with the label +“kubeblocks.io/role” set to “leader”. +

+ +

+Note that if `podService` is set to true, RoleSelector will be ignored.
+The `podService` flag takes precedence over `roleSelector` and generates a service for each Pod.

+ +
+
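+
+For illustration, a sketch of a read-write service entry that routes traffic to leader Pods (all names and ports are hypothetical):
+
+```yaml
+services:
+  - name: rw
+    serviceName: mycluster-mysql-rw
+    roleSelector: leader
+    spec:
+      type: ClusterIP
+      ports:
+        - name: mysql
+          port: 3306
+          targetPort: mysql
+```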

+ServiceDescriptorSpec + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorSpec defines the desired state of ServiceDescriptor +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`podFQDNs`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the pod FQDNs of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+
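+
+A sketch of a ServiceDescriptor for an external PostgreSQL service (the API group/version, all names, and the assumption that CredentialVar supports `value`/`valueFrom` like an EnvVar are illustrative):
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: ServiceDescriptor
+metadata:
+  name: external-pg
+spec:
+  serviceKind: postgresql       # case-insensitive; "pg" would also work
+  serviceVersion: "14.7"
+  host:
+    value: pg.example.com
+  port:
+    value: "5432"
+  auth:
+    username:
+      value: admin
+    password:
+      valueFrom:
+        secretKeyRef:
+          name: external-pg-secret   # hypothetical Secret
+          key: password
+```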

+ServiceDescriptorStatus + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorStatus defines the observed state of ServiceDescriptor +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number that has been processed by the controller. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Indicates the current lifecycle phase of the ServiceDescriptor. This can be either ‘Available’ or ‘Unavailable’. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation detailing the reason for the current phase of the ServiceDescriptor.

+ +
+

+ServiceRef + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the identifier of the service reference declaration. +It corresponds to the serviceRefDeclaration name defined in either: +

+
+- `componentDefinition.spec.serviceRefDeclarations[*].name`
+- `clusterDefinition.spec.componentDefs[*].serviceRefDeclarations[*].name` (deprecated)
+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced Cluster or the namespace of the referenced ServiceDescriptor object. +If not provided, the referenced Cluster and ServiceDescriptor will be searched in the namespace of the current +Cluster by default. +

+ +
+ +`cluster`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the KubeBlocks Cluster being referenced. +This is used when services from another KubeBlocks Cluster are consumed. +

+ +

+By default, the referenced KubeBlocks Cluster’s `clusterDefinition.spec.connectionCredential` +will be utilized to bind to the current Component. This credential should include: +`endpoint`, `port`, `username`, and `password`. +

+ +

+Note: +

+
+- The `ServiceKind` and `ServiceVersion` specified in the service reference within the
+ClusterDefinition are not validated when using this approach.
+- If both `cluster` and `serviceDescriptor` are present, `cluster` will take precedence.
+ +

+Deprecated since v0.9: as `clusterDefinition.spec.connectionCredential` is deprecated,
+use `clusterServiceSelector` instead.
+This field is maintained for backward compatibility and its use is discouraged.
+Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.

+ +
+ +`clusterServiceSelector`
+ + +ServiceRefClusterSelector + + + +
+ +(Optional) + +

+References a service provided by another KubeBlocks Cluster. +It specifies the ClusterService and the account credentials needed for access. +

+ +
+ +`serviceDescriptor`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceDescriptor object that describes a service provided by external sources. +

+ +

+When referencing a service provided by external sources, a ServiceDescriptor object is required to establish +the service binding. +The `serviceDescriptor.spec.serviceKind` and `serviceDescriptor.spec.serviceVersion` should match the serviceKind +and serviceVersion declared in the definition. +

+ +

+If both `cluster` and `serviceDescriptor` are specified, the `cluster` takes precedence. +

+ +
+
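+
+For example, a sketch referencing a service provided by another KubeBlocks Cluster via the preferred `clusterServiceSelector` approach (all names are hypothetical):
+
+```yaml
+serviceRefs:
+  - name: metadata-store        # must match a serviceRefDeclaration name
+    namespace: default
+    clusterServiceSelector:
+      cluster: my-zookeeper
+      service:
+        component: zookeeper
+        service: headless       # reference the default headless Service
+        port: client
+      credential:
+        component: zookeeper
+        name: admin
+```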

+ServiceRefClusterSelector + +

+ +

+ +(Appears on:ServiceRef) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cluster`
+ +string + + +
+ + +

+The name of the Cluster being referenced. +

+ +
+ +`service`
+ + +ServiceRefServiceSelector + + + +
+ +(Optional) + +

+Identifies a ClusterService from the list of Services defined in `cluster.spec.services` of the referenced Cluster. +

+ +
+ +`podFQDNs`
+ + +ServiceRefPodFQDNsSelector + + + +
+ +(Optional) + +
+ +`credential`
+ + +ServiceRefCredentialSelector + + + +
+ +(Optional) + +

+Specifies the SystemAccount to authenticate and establish a connection with the referenced Cluster. +The SystemAccount should be defined in `componentDefinition.spec.systemAccounts` +of the Component providing the service in the referenced Cluster. +

+ +
+

+ServiceRefCredentialSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ + +

+The name of the Component where the credential resides.

+ +
+ +`name`
+ +string + + +
+ + +

+The name of the credential (SystemAccount) to reference. +

+ +
+

+ServiceRefDeclaration + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ServiceRefDeclaration represents a reference to a service that can be either provided by a KubeBlocks Cluster +or an external service. +It acts as a placeholder for the actual service reference, which is determined later when a Cluster is created. +

+ +

+The purpose of ServiceRefDeclaration is to declare a service dependency without specifying the concrete details +of the service. +It allows for flexibility and abstraction in defining service references within a Component. +By using ServiceRefDeclaration, you can define service dependencies in a declarative manner, enabling loose coupling +and easier management of service references across different components and clusters. +

+ +

+Upon Cluster creation, the ServiceRefDeclaration is bound to an actual service through the ServiceRef field, +effectively resolving and connecting to the specified service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the ServiceRefDeclaration. +

+ +
+ +`serviceRefDeclarationSpecs`
+ + +[]ServiceRefDeclarationSpec + + + +
+ + +

+Defines a list of constraints and requirements for services that can be bound to this ServiceRefDeclaration +upon Cluster creation. +Each ServiceRefDeclarationSpec defines a ServiceKind and ServiceVersion, +outlining the acceptable service types and versions that are compatible. +

+ +

+This flexibility allows a ServiceRefDeclaration to be fulfilled by any one of the provided specs.
+For example, if it requires an OLTP database and specs for both MySQL and PostgreSQL are listed,
+either MySQL or PostgreSQL services can be used when binding.

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the service reference can be optional. +

+ +

+For an optional service-ref, the component can still be created even if the service-ref is not provided. +

+ +
+
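+
+As a sketch, the OLTP example above could be declared like this (names and version patterns are hypothetical):
+
+```yaml
+serviceRefDeclarations:
+  - name: oltp-store
+    serviceRefDeclarationSpecs:
+      - serviceKind: postgresql
+        serviceVersion: '^14\.\d{1,2}$'
+      - serviceKind: mysql
+        serviceVersion: '^8\.0\.\d{1,2}$'
+    optional: true
+```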

+ServiceRefDeclarationSpec + +

+ +

+ +(Appears on:ServiceRefDeclaration) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Specifies the type or nature of the service. This should be a well-known application cluster type, such as +{mysql, redis, mongodb}. +The field is case-insensitive and supports abbreviations for some well-known databases. +For instance, both `zk` and `zookeeper` are considered as a ZooKeeper cluster, while `pg`, `postgres`, `postgresql` +are all recognized as a PostgreSQL cluster. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Defines the service version of the service reference. This is a regular expression that matches a version number pattern. +For instance, `^8.0.8$`, `8.0.\d{1,2}$`, `^[v\-]*?(\d{1,2}\.){0,3}\d{1,2}$` are all valid patterns. +

+ +
+

+ServiceRefPodFQDNsSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ + +

+The name of the Component where the pods reside.

+ +
+ +`role`
+ +string + + +
+ +(Optional) + +

+The role of the pods to reference. +

+ +
+

+ServiceRefServiceSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ +(Optional) + +

+The name of the Component where the Service resides.

+ +

+It is required when referencing a Component’s Service. +

+ +
+ +`service`
+ +string + + +
+ + +

+The name of the Service to be referenced. +

+ +

+Leave it empty to reference the default Service. Set it to “headless” to reference the default headless Service. +

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name,service2.name… +

+ +
+ +`port`
+ +string + + +
+ +(Optional) + +

+The port name of the Service to be referenced. +

+ +

+If a non-zero node-port exists for the matched Service port, the node-port will be selected first.

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ServiceRefVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceRefVarSelector selects a var from a ServiceRefDeclaration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The ServiceRefDeclaration to select from. +

+ +
+ +`ServiceRefVars`
+ + +ServiceRefVars + + + +
+ + +

+ +(Members of `ServiceRefVars` are embedded into this type.) + +

+ +
+

+ServiceRefVars + +

+ +

+ +(Appears on:ServiceRefVarSelector) + +

+
+ +

+ServiceRefVars defines the vars that can be referenced from a ServiceRef. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`endpoint`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`port`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`podFQDNs`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+ServiceVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceVarSelector selects a var from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Service to select from. +It can be referenced from the default headless service by setting the name to “headless”. +

+ +
+ +`ServiceVars`
+ + +ServiceVars + + + +
+ + +

+ +(Members of `ServiceVars` are embedded into this type.) + +

+ +
+

+ServiceVars + +

+ +

+ +(Appears on:ServiceVarSelector) + +

+
+ +

+ServiceVars defines the vars that can be referenced from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceType`
+ + +VarOption + + + +
+ +(Optional) + +

+ServiceType references the type of the service. +

+ +
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`loadBalancer`
+ + +VarOption + + + +
+ +(Optional) + +

+LoadBalancer represents the LoadBalancer ingress point of the service. +

+ +

+If multiple ingress points are available, the first one will be used automatically, choosing between IP and Hostname. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Port references a port or node-port defined in the service. +

+ +

+If the referenced service is a pod-service, there will be multiple service objects matched, +and the value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ShardingDefinitionSpec + +

+ +

+ +(Appears on:ShardingDefinition) + +

+
+ +

+ShardingDefinitionSpec defines the desired state of ShardingDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`template`
+ + +ShardingTemplate + + + +
+ + +

+This field is immutable. +

+ +
+ +`shardsLimit`
+ + +ShardsLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of shards supported by the sharding. +

+ +

+This field is immutable. +

+ +
+ +`provisionStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for provisioning shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for updating shards of the sharding. Only `Serial` and `Parallel` are supported. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ShardingLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a sharding throughout its lifecycle. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]ShardingSystemAccount + + + +
+ +(Optional) + +

+Defines the system accounts for the sharding. +

+ +

+This field is immutable. +

+ +
+ +`tls`
+ + +ShardingTLS + + + +
+ +(Optional) + +

+Defines the TLS for the sharding. +

+ +

+This field is immutable. +

+ +
+
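+
+A skeleton of the spec fields above (values hypothetical):
+
+```yaml
+spec:
+  template:
+    compDef: redis-cluster      # full name or a regular expression pattern
+  shardsLimit:
+    minShards: 3
+    maxShards: 128
+  provisionStrategy: Parallel   # only Serial and Parallel are supported
+  updateStrategy: Serial
+```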

+ShardingDefinitionStatus + +

+ +

+ +(Appears on:ShardingDefinition) + +

+
+ +

+ShardingDefinitionStatus defines the observed state of ShardingDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the ShardingDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the ShardingDefinition. Valid values include `` (empty), `Available`, and `Unavailable`.
+When the status is `Available`, the ShardingDefinition is ready and can be utilized by related objects.

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+ShardingLifecycleActions + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+ +

+ShardingLifecycleActions defines a collection of Actions for customizing the behavior of a sharding. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`postProvision`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a sharding’s creation. +

+ +

+By setting `postProvision.preCondition`, you can determine the specific lifecycle stage at which +the action should trigger, available conditions for sharding include: `Immediately`, `ComponentReady`, +and `ClusterReady`. For sharding, the `ComponentReady` condition means all components of the sharding are ready. +

+ +

+With `ComponentReady` being the default. +

+ +

+The PostProvision Action is intended to run only once. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`preTerminate`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to terminating a sharding. +

+ +

+The PreTerminate Action is intended to run only once. +

+ +

+This action is executed immediately when a terminate operation for the sharding is initiated. +The actual termination and cleanup of the sharding and its associated resources will not proceed +until the PreTerminate action has completed successfully. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`shardAdd`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a shard is added.

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`shardRemove`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to removing a shard.

+ +

+Note: This field is immutable once it has been set. +

+ +
+

+ShardingSystemAccount + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the system account defined in the sharding template. +

+ +

+This field is immutable once set. +

+ +
+ +`shared`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the account is shared across all shards in the sharding. +

+ +
+

+ShardingTLS + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`shared`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the TLS configuration is shared across all shards in the sharding. +

+ +
+

+ShardingTemplate + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ + +

+The component definition(s) that the sharding is based on. +

+ +

+The component definition can be specified using one of the following: +

+
+- the full name
+- the regular expression pattern (`^` will be added to the beginning of the pattern automatically)
+ +

+This field is immutable. +

+ +
+

+ShardsLimit + +

+ +

+ +(Appears on:ShardingDefinitionSpec) + +

+
+ +

+ShardsLimit defines the valid range of number of shards supported. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`minShards`
+ +int32 + + +
+ + +

+The minimum limit of shards. +

+ +
+ +`maxShards`
+ +int32 + + +
+ + +

+The maximum limit of shards. +

+ +
+

+Sidecar + +

+ +

+ +(Appears on:ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the sidecar. +

+ +

+The name will be used as the name of the sidecar container in the Pod. +

+ +
+ +`owner`
+ +string + + +
+ + +

+Specifies the exact component definition that the sidecar belongs to. +

+ +

+A sidecar will be updated only when the owner component definition is updated.

+ +
+ +`sidecarDef`
+ +string + + +
+ + +

+Specifies the sidecar definition CR to be used to create the sidecar. +

+ +
+

+SidecarDefinitionSpec + +

+ +

+ +(Appears on:SidecarDefinition) + +

+
+ +

+SidecarDefinitionSpec defines the desired state of SidecarDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the sidecar. +

+ +
+ +`owner`
+ +string + + +
+ + +

+Specifies the component definition that the sidecar belongs to. +

+ +

+For a specific cluster object, if there are any components provided by the component definition of @owner,
+the sidecar will automatically be created and injected into the components that are provided by
+the component definitions of @selectors.

+ +

+This field is immutable. +

+ +
+ +`selectors`
+ +[]string + + +
+ + +

+Specifies the component definitions of the components that the sidecar runs along with.

+ +

+This field is immutable. +

+ +
+ +`containers`
+ + +[]Kubernetes core/v1.Container + + + +
+ + +

+List of containers for the sidecar. +

+ +

+Cannot be updated. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are needed by the sidecar. +

+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the configuration file templates used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentFileTemplate + + + +
+ +(Optional) + +

+Specifies the scripts used by the Sidecar. +

+ +

+This field is immutable. +

+ +
+

+SidecarDefinitionStatus + +

+ +

+ +(Appears on:SidecarDefinition) + +

+
+ +

+SidecarDefinitionStatus defines the observed state of SidecarDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation that has been observed for the SidecarDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current status of the SidecarDefinition. Valid values include `` (empty), `Available`, and `Unavailable`.
+When the status is `Available`, the SidecarDefinition is ready and can be utilized by related objects.

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`owners`
+ +string + + +
+ +(Optional) + +

+Resolved owners of the SidecarDefinition. +

+ +
+ +`selectors`
+ +string + + +
+ +(Optional) + +

+Resolved selectors of the SidecarDefinition. +

+ +
+

+SystemAccount + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the unique identifier for the account. This name is used by other entities to reference the account. +

+ +

+This field is immutable once set. +

+ +
+ +`initAccount`
+ +bool + + +
+ +(Optional) + +

+Indicates if this account is a system initialization account (e.g., MySQL root). +

+ +

+This field is immutable once set. +

+ +
+ +`statement`
+ + +SystemAccountStatement + + + +
+ +(Optional) + +

+Defines the statements used to create, delete, and update the account. +

+ +

+This field is immutable once set. +

+ +
+ +`passwordGenerationPolicy`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+
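+
+For illustration, a sketch of a system account definition (the SQL statements and the `${PASSWORD}` placeholder are hypothetical):
+
+```yaml
+systemAccounts:
+  - name: kbadmin
+    initAccount: false
+    statement:
+      create: CREATE USER 'kbadmin'@'%' IDENTIFIED BY '${PASSWORD}';
+      delete: DROP USER IF EXISTS 'kbadmin'@'%';
+      update: ALTER USER 'kbadmin'@'%' IDENTIFIED BY '${PASSWORD}';
+    passwordGenerationPolicy:
+      length: 16
+      numDigits: 4
+      letterCase: MixedCases
+```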

+SystemAccountStatement + +

+ +

+ +(Appears on:SystemAccount) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`create`
+ +string + + +
+ +(Optional) + +

+The statement to create a new account with the necessary privileges. +

+ +

+This field is immutable once set. +

+ +
+ +`delete`
+ +string + + +
+ +(Optional) + +

+The statement to delete an account.

+ +

+This field is immutable once set. +

+ +
+ +`update`
+ +string + + +
+ +(Optional) + +

+The statement to update an existing account. +

+ +

+This field is immutable once set. +

+ +
+

+TLS + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`volumeName`
+ +string + + +
+ + +

+Specifies the volume name for the TLS secret. +The controller will create a volume object with the specified name and add it to the pod when the TLS is enabled. +

+ +

+This field is immutable once set. +

+ +
+ +`mountPath`
+ +string + + +
+ + +

+Specifies the mount path for the TLS secret to be mounted. +Similar to the volume, the controller will mount the created volume to the specified path within containers when the TLS is enabled. +

+ +

+This field is immutable once set. +

+ +
+ +`defaultMode`
+ +int32 + + +
+ +(Optional) + +

+The permissions for the mounted path. Defaults to 0600. +

+ +

+This field is immutable once set. +

+ +
+ +`caFile`
+ +string + + +
+ +(Optional) + +

+The CA file of the TLS. +

+ +

+This field is immutable once set. +

+ +
+ +`certFile`
+ +string + + +
+ +(Optional) + +

+The certificate file of the TLS. +

+ +

+This field is immutable once set. +

+ +
+ +`keyFile`
+ +string + + +
+ +(Optional) + +

+The key file of the TLS. +

+ +

+This field is immutable once set. +

+ +
+
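+
+A sketch of a component definition's TLS section (paths and file names are hypothetical; note that `defaultMode` is a decimal int32, so octal 0600 is written as 384):
+
+```yaml
+tls:
+  volumeName: tls
+  mountPath: /etc/pki/tls
+  defaultMode: 384     # i.e. 0600
+  caFile: ca.crt
+  certFile: tls.crt
+  keyFile: tls.key
+```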

+TLSConfig + +

+ +

+ +(Appears on:ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enable`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, +such as specifying TLS certificates and keys, to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+

+TLSSecretRef + +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+TLSSecretRef defines the Secret that contains TLS certs. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+The namespace where the secret is located. +If not provided, the secret is assumed to be in the same namespace as the Cluster object. +

+ +
+ +`name`
+ +string + + +
+ + +

+Name of the Secret that contains user-provided certificates. +

+ +
+ +`ca`
+ +string + + +
+ + +

+Key of CA cert in Secret +

+ +
+ +`cert`
+ +string + + +
+ + +

+Key of Cert in Secret +

+ +
+ +`key`
+ +string + + +
+ + +

+Key of TLS private key in Secret +

+ +
+
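+
+Combining TLSConfig, Issuer, and TLSSecretRef, a hedged sketch of enabling TLS on a Component with user-provided certificates; the issuer name `UserProvided` and the Secret name are assumptions:
+
+tlsConfig:
+  enable: true
+  issuer:
+    name: UserProvided             # assumed issuer name
+    secretRef:
+      name: my-tls-secret          # hypothetical Secret
+      ca: ca.crt                   # key of the CA cert in the Secret
+      cert: tls.crt                # key of the cert in the Secret
+      key: tls.key                 # key of the TLS private key in the Secret
+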

+TLSVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+TLSVarSelector selects a var from the TLS. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`TLSVars`
+ + +TLSVars + + + +
+ + +

+ +(Members of `TLSVars` are embedded into this type.) + +

+ +
+

+TLSVars + +

+ +

+ +(Appears on:TLSVarSelector) + +

+
+ +

+TLSVars defines the vars that can be referenced from the TLS. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ + +VarOption + + + +
+ +(Optional) + +
+
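+
+For example, a ComponentDefinition var that resolves whether TLS is enabled on a selected Component (a sketch; the `compDef` selector value is hypothetical):
+
+vars:
+  - name: TLS_ENABLED
+    valueFrom:
+      tlsVarRef:
+        compDef: mysql             # hypothetical, from the embedded ClusterObjectReference
+        enabled: Optional
+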

+TargetPodSelector +(`string` alias) +

+ +

+ +(Appears on:ExecAction) + +

+
+ +

+TargetPodSelector defines how to select pod(s) to execute an Action. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +
+ +

+"Any" +

+
+ +
+ +

+"Ordinal" +

+
+ +
+ +

+"Role" +

+
+ +
+
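+
+These values are used wherever an ExecAction selects the Pod(s) to run on, e.g. (a sketch; the script path is hypothetical):
+
+exec:
+  command: ["/bin/sh", "-c", "/scripts/role-probe.sh"]
+  targetPodSelector: Any
+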

+TerminationPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSpec, ComponentSpec) + +

+
+ +

+TerminationPolicyType defines termination policy types. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +

+Delete will delete all runtime resources belonging to the cluster. +

+ +
+ +

+"DoNotTerminate" +

+
+ +

+DoNotTerminate will block delete operation. +

+ +
+ +

+"WipeOut" +

+
+ +

+WipeOut is based on Delete and additionally wipes out all volume snapshots and snapshot data from the backup storage location. +

+ +
+
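+
+For example, a conservative production Cluster might pin the policy in its spec as follows (sketch):
+
+spec:
+  terminationPolicy: DoNotTerminate
+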

+UpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:ComponentDefinitionSpec, ShardingDefinitionSpec) + +

+
+ +

+UpdateStrategy defines the update strategy for cluster components. This strategy determines how updates are applied +across the cluster. +The available strategies are `Serial`, `BestEffortParallel`, and `Parallel`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +

+BestEffortParallelStrategy indicates that the replicas are updated in parallel, with the operator making +a best-effort attempt to update as many replicas as possible concurrently +while maintaining the component’s availability. +Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number +of replicas remain available during the update process to maintain the component’s quorum and functionality. +

+ +

+For example, consider a component with 5 replicas. To maintain the component’s availability and quorum, +the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least +3 replicas (a quorum) remain available and functional during the update process. +

+ +

+The `BestEffortParallel` strategy strikes a balance between update speed and component availability. +

+ +
+ +

+"Parallel" +

+
+ +

+ParallelStrategy indicates that updates are applied simultaneously to all Pods of a Component. +The replicas are updated in parallel, with the operator updating all replicas concurrently. +This strategy provides the fastest update time but may lead to a period of reduced availability or +capacity during the update process. +

+ +
+ +

+"Serial" +

+
+ +

+SerialStrategy indicates that updates are applied one at a time in a sequential manner. +The operator waits for each replica to be updated and ready before proceeding to the next one. +This ensures that only one replica is unavailable at a time during the update process. +

+ +
+
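+
+In a ComponentDefinition the strategy is set with a single field, e.g. (sketch):
+
+spec:
+  updateStrategy: BestEffortParallel
+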

+VarOption +(`string` alias) +

+ +

+ +(Appears on:ClusterVars, ComponentVars, CredentialVars, NamedVar, ResourceVars, RoledVar, ServiceRefVars, ServiceVars, TLSVars) + +

+
+ +

+VarOption defines whether a variable is required or optional. +

+
+

+VarSource + +

+ +

+ +(Appears on:EnvVar) + +

+
+ +

+VarSource represents a source for the value of an EnvVar. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMapKeyRef`
+ + +Kubernetes core/v1.ConfigMapKeySelector + + + +
+ +(Optional) + +

+Selects a key of a ConfigMap. +

+ +
+ +`secretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ +(Optional) + +

+Selects a key of a Secret. +

+ +
+ +`hostNetworkVarRef`
+ + +HostNetworkVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of host-network resources. +

+ +
+ +`serviceVarRef`
+ + +ServiceVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Service. +

+ +
+ +`credentialVarRef`
+ + +CredentialVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Credential (SystemAccount). +

+ +
+ +`tlsVarRef`
+ + +TLSVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of the TLS. +

+ +
+ +`serviceRefVarRef`
+ + +ServiceRefVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a ServiceRef. +

+ +
+ +`resourceVarRef`
+ + +ResourceVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a kind of resource. +

+ +
+ +`componentVarRef`
+ + +ComponentVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Component. +

+ +
+ +`clusterVarRef`
+ + +ClusterVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Cluster. +

+ +
+
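+
+A hedged sketch of `vars` entries drawing on several of the sources above; the selector values (`compDef`, Service and account names) are hypothetical:
+
+vars:
+  - name: MYSQL_HOST
+    valueFrom:
+      serviceVarRef:
+        compDef: mysql             # hypothetical ComponentDefinition selector
+        name: mysql                # hypothetical ComponentService name
+        host: Required
+  - name: MYSQL_ROOT_PASSWORD
+    valueFrom:
+      credentialVarRef:
+        name: root                 # hypothetical SystemAccount name
+        password: Required
+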
+

apps.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+Cluster + +

+
+ +

+Cluster offers a unified management interface for a wide variety of database and storage systems: +

+
    +
  • +Relational databases: MySQL, PostgreSQL, MariaDB +
  • +
  • +NoSQL databases: Redis, MongoDB +
  • +
  • +KV stores: ZooKeeper, etcd +
  • +
  • +Analytics systems: ElasticSearch, OpenSearch, ClickHouse, Doris, StarRocks, Solr +
  • +
  • +Message queues: Kafka, Pulsar +
  • +
  • +Distributed SQL: TiDB, OceanBase +
  • +
  • +Vector databases: Qdrant, Milvus, Weaviate +
  • +
  • +Object storage: MinIO +
  • +
+ +

+KubeBlocks utilizes an abstraction layer to encapsulate the characteristics of these diverse systems. +A Cluster is composed of multiple Components, each defined by vendors or KubeBlocks Addon developers via ComponentDefinition, +arranged in Directed Acyclic Graph (DAG) topologies. +The topologies, defined in a ClusterDefinition, coordinate reconciliation across the Cluster’s lifecycle phases: +Creating, Running, Updating, Stopping, Stopped, Deleting. +Lifecycle management ensures that each Component operates in harmony, executing appropriate actions at each lifecycle stage. +

+ +

+For shared-nothing architectures, the Cluster supports managing multiple shards, +with each shard managed by a separate Component, supporting dynamic resharding. +

+ +

+The Cluster object is designed to maintain the overall integrity and availability of a database cluster. +It serves as the central control point, abstracting the complexity of multi-component management +and providing a unified interface for cluster-wide operations. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Cluster` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`clusterDefinitionRef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterDefinition to use when creating a Cluster. +

+ +

+This field enables users to create a Cluster based on a specific ClusterDefinition, +which, in conjunction with the `topology` field, determines: +

+
    +
  • +The Components to be included in the Cluster. +
  • +
  • +The sequences in which the Components are created, updated, and terminated. +
  • +
+ +

+This facilitates the management of multiple Components using a predefined ClusterDefinition. +

+ +

+Users with advanced requirements can bypass this general setting and specify more precise control over +the composition of the Cluster by directly referencing specific ComponentDefinitions for each component +within `componentSpecs[*].componentDef`. +

+ +

+If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`. +

+ +

+Note: Once set, this field cannot be modified; it is immutable. +

+ +
+ +`clusterVersionRef`
+ +string + + +
+ +(Optional) + +

+Refers to the ClusterVersion name. +

+ +

+Deprecated since v0.9, use ComponentVersion instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`topology`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ClusterTopology to be used when creating the Cluster. +

+ +

+This field defines which set of Components, as outlined in the ClusterDefinition, will be used to +construct the Cluster based on the named topology. +The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, +each tailored to different use cases or environments. +

+ +

+If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition. +

+ +

+Note: Once set during the Cluster creation, the `topology` field cannot be modified. +It establishes the initial composition and structure of the Cluster and is intended for one-time configuration. +

+ +
+ +`terminationPolicy`
+ + +TerminationPolicyType + + + +
+ + +

+Specifies the behavior when a Cluster is deleted. +It defines how resources, data, and backups associated with a Cluster are managed during termination. +Choose a policy based on the desired level of resource cleanup and data preservation: +

+
    +
  • +`DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. +
  • +
  • +`Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), +allowing for data preservation while stopping other operations. +Warning: The Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate. +
  • +
  • +`Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while +removing all persistent data. +
  • +
  • +`WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and +backups in external storage. +This results in complete data removal and should be used cautiously, primarily in non-production environments +to avoid irreversible data loss. +
  • +
+ +

+Warning: Choosing an inappropriate termination policy can result in data loss. +The `WipeOut` policy is particularly risky in production environments due to its irreversible nature. +

+ +
+ +`shardingSpecs`
+ + +[]ShardingSpec + + + +
+ +(Optional) + +

+Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. +Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. +Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. +

+ +

+This field supports dynamic resharding by facilitating the addition or removal of shards +through the `shards` field in ShardingSpec. +

+ +

+Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`componentSpecs`
+ + +[]ClusterComponentSpec + + + +
+ +(Optional) + +

+Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. +This field allows for detailed configuration of each Component within the Cluster. +

+ +

+Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. +

+ +
+ +`services`
+ + +[]ClusterService + + + +
+ +(Optional) + +

+Defines a list of additional Services that are exposed by a Cluster. +This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs`, to be exposed, +alongside Services defined with ComponentService. +

+ +

+Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. +

+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Defines a set of node affinity scheduling rules for the Cluster’s Pods. +This field helps control the placement of Pods on nodes within the Cluster. +

+ +

+Deprecated since v0.10. Use the `schedulingPolicy` field instead. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+An array that specifies tolerations attached to the Cluster’s Pods, +allowing them to be scheduled onto nodes with matching taints. +

+ +

+Deprecated since v0.10. Use the `schedulingPolicy` field instead. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Cluster. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Specifies runtimeClassName for all Pods managed by this Cluster. +

+ +
+ +`backup`
+ + +ClusterBackup + + + +
+ +(Optional) + +

+Specifies the backup configuration of the Cluster. +

+ +
+ +`tenancy`
+ + +TenancyType + + + +
+ +(Optional) + +

+Describes how Pods are distributed across nodes. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`availabilityPolicy`
+ + +AvailabilityPolicyType + + + +
+ +(Optional) + +

+Describes the availability policy, including zone, node, and none. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the replicas of the first componentSpec. If the replicas of the first componentSpec are specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`resources`
+ + +ClusterResources + + + +
+ +(Optional) + +

+Specifies the resources of the first componentSpec. If the resources of the first componentSpec are specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`storage`
+ + +ClusterStorage + + + +
+ +(Optional) + +

+Specifies the storage of the first componentSpec. If the storage of the first componentSpec is specified, +this value will be ignored. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`network`
+ + +ClusterNetwork + + + +
+ +(Optional) + +

+The network configuration of the Cluster. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +
+ +`status`
+ + +ClusterStatus + + + +
+ + +
+
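+
+Pulling the spec fields above together, a minimal hedged Cluster manifest; the ClusterDefinition, topology, and component names are illustrative:
+
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: Cluster
+metadata:
+  name: my-cluster
+spec:
+  clusterDefinitionRef: mysql      # hypothetical ClusterDefinition
+  topology: standalone             # hypothetical topology name
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: mysql
+      replicas: 3
+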

+ClusterDefinition + +

+
+ +

+ClusterDefinition defines the topology for databases or storage systems, +offering a variety of topological configurations to meet diverse deployment needs and scenarios. +

+ +

+It includes a list of Components, each linked to a ComponentDefinition, which enhances reusability and reduces redundancy. +For example, widely used components such as etcd and ZooKeeper can be defined once and reused across multiple ClusterDefinitions, +simplifying the setup of new systems. +

+ +

+Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for Components, +ensuring a controlled and predictable management of component lifecycles. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ClusterDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ClusterDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`type`
+ +string + + +
+ +(Optional) + +

+Specifies the well-known database type, such as mysql, redis, or mongodb. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`componentDefs`
+ + +[]ClusterComponentDefinition + + + +
+ +(Optional) + +

+Provides the definitions for the cluster components. +

+ +

+Deprecated since v0.9. +Components should now be individually defined using ComponentDefinition and +collectively referenced via `topology.components`. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`connectionCredential`
+ +map[string]string + + +
+ +(Optional) + +

+Connection credential template used for creating a connection credential secret for cluster objects. +

+ +

+Built-in objects are: +

+
    +
  • +`$(RANDOM_PASSWD)` random 8 characters. +
  • +
  • +`$(STRONG_RANDOM_PASSWD)` random 16 characters, with mixed cases, digits and symbols. +
  • +
  • +`$(UUID)` generate a random UUID v4 string. +
  • +
  • +`$(UUID_B64)` generate a random UUID v4 BASE64 encoded string. +
  • +
  • +`$(UUID_STR_B64)` generate a random UUID v4 string then BASE64 encoded. +
  • +
  • +`$(UUID_HEX)` generate a random UUID v4 HEX representation. +
  • +
  • +`$(HEADLESS_SVC_FQDN)` headless service FQDN placeholder, value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, +where 1ST_COMP_NAME is the 1st component that provides the `ClusterDefinition.spec.componentDefs[].service` attribute; +
  • +
  • +`$(SVC_FQDN)` service FQDN placeholder, value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME).$(NAMESPACE).svc`, +where 1ST_COMP_NAME is the 1st component that provides the `ClusterDefinition.spec.componentDefs[].service` attribute; +
  • +
  • +`$(SVC_PORT_{PORT-NAME})` is the ServicePort’s port value for the specified port name; e.g., given the servicePort JSON struct +`{"name": "mysql", "targetPort": "mysqlContainerPort", "port": 3306}`, `$(SVC_PORT_mysql)` in the +connection credential value is 3306. +
  • +
+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`topologies`
+ + +[]ClusterTopology + + + +
+ +(Optional) + +

+Topologies defines all possible topologies within the cluster. +

+ +
+ +
+ +`status`
+ + +ClusterDefinitionStatus + + + +
+ + +
+
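+
+A hedged ClusterDefinition sketch using the non-deprecated `topologies` field; the topology, component, and ComponentDefinition names are hypothetical:
+
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ClusterDefinition
+metadata:
+  name: mysql
+spec:
+  topologies:
+    - name: standalone
+      default: true
+      components:
+        - name: mysql
+          compDef: mysql-8.0       # hypothetical ComponentDefinition
+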

+Component + +

+
+ +

+Component is a fundamental building block of a Cluster object. +For example, a Redis Cluster can include Components like ‘redis’, ‘sentinel’, and potentially a proxy like ‘twemproxy’. +

+ +

+The Component object is responsible for managing the lifecycle of all replicas within a Cluster component. +It supports a wide range of operations, including provisioning, stopping, restarting, termination, upgrading, +configuration changes, vertical and horizontal scaling, failover, switchover, cross-node migration, +scheduling configuration, exposing Services, managing system accounts, enabling/disabling the exporter, +and configuring log collection. +

+ +

+Component is an internal sub-object derived from the user-submitted Cluster object. +It is designed primarily to be used by the KubeBlocks controllers; +users are discouraged from modifying Component objects directly and should use them only for monitoring Component statuses. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Component` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator; +these require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, and Services owned by the Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, and Services owned by the Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in the referenced ComponentDefinition and exposes endpoints that can be accessed +by clients. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in the referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`enabledLogs`
+ +[]string + + +
+ +(Optional) + +

+Specifies which types of logs should be collected for the Cluster. +The log types are defined in the `componentDefinition.spec.logConfigs` field with the LogConfig entries. +

+ +

+The elements in the `enabledLogs` array correspond to the names of the LogConfig entries. +For example, if the `componentDefinition.spec.logConfigs` defines LogConfig entries with +names “slow_query_log” and “error_log”, +you can enable the collection of these logs by including their names in the `enabledLogs` array: +

+
+
+enabledLogs:
+- slow_query_log
+- error_log
+
+
+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+Defaults: +If not specified, KubeBlocks automatically assigns a default ServiceAccount named “kb-{cluster.name}”, +bound to a default role defined during KubeBlocks installation. +

+ +

+Future Changes: +Future versions might change the default ServiceAccount creation strategy to one per Component, +potentially revising the naming to “kb-{cluster.name}-{component.name}”. +

+ +

+Users can override the automatic ServiceAccount assignment by explicitly setting the name of +an existing ServiceAccount in this field. +

+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Indicates the InstanceUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +the Template. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +
  • +
  • +`PreferInPlace` indicates that an in-place upgrade of the Pod will be attempted first. +If that fails, it falls back to ReCreate, where the Pod is recreated. +The default value is “PreferInPlace”. +
  • +
+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules for the Component. +It allows users to control how the Component’s Pods are scheduled onto nodes in the Cluster. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
    +
  • +A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) for secure communication. +
  • +
  • +An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stops the Component. +If set, all computing resources will be released. +

+ +
+ +
+ +`status`
+ + +ComponentStatus + + + +
+ + +
+

+ComponentDefinition + +

+
+ +

+ComponentDefinition serves as a reusable blueprint for creating Components, +encapsulating essential static settings such as Component description, +Pod templates, configuration file templates, scripts, parameter lists, +injected environment variables and their sources, and event handlers. +ComponentDefinition works in conjunction with dynamic settings from the ClusterComponentSpec, +to instantiate Components during Cluster creation. +

+ +

+Key aspects that can be defined in a ComponentDefinition include: +

+
    +
  • +PodSpec template: Specifies the PodSpec template used by the Component. +
  • +
  • +Configuration templates: Specify the configuration file templates required by the Component. +
  • +
  • +Scripts: Provide the necessary scripts for Component management and operations. +
  • +
  • +Storage volumes: Specify the storage volumes and their configurations for the Component. +
  • +
  • +Pod roles: Outlines various roles of Pods within the Component along with their capabilities. +
  • +
  • +Exposed Kubernetes Services: Specify the Services that need to be exposed by the Component. +
  • +
  • +System accounts: Define the system accounts required for the Component. +
  • +
  • +Monitoring and logging: Configure the exporter and logging settings for the Component. +
  • +
+ +

+ComponentDefinitions also enable defining reactive behaviors of the Component in response to events, +such as member join/leave, Component addition/deletion, role changes, switchover, and more. +This allows for automatic event handling, thus encapsulating complex behaviors within the Component. +

+ +

+Referencing a ComponentDefinition when creating individual Components ensures inheritance of predefined configurations, +promoting reusability and consistency across different deployments and cluster topologies. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ComponentDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`provider`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component provider, typically the vendor or developer name. +It identifies the entity responsible for creating and maintaining the Component. +

+ +

+When specifying the provider name, consider the following guidelines: +

+
    +
  • +Keep the name concise and relevant to the Component. +
  • +
  • +Use a consistent naming convention across Components from the same provider. +
  • +
  • +Avoid using trademarked or copyrighted names without proper permission. +
  • +
+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Provides a brief and concise explanation of the Component’s purpose, functionality, and any relevant details. +It serves as a quick reference for users to understand the Component’s role and characteristics. +

+ +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the Component provides. +It specifies the standard or widely recognized protocol used by the Component to offer its Services. +

+ +

+The `serviceKind` field allows users to quickly identify the type of Service provided by the Component +based on common protocols or service types. This information helps in understanding the compatibility, +interoperability, and usage of the Component within a system. +

+ +

+Some examples of well-known service protocols include: +

+
    +
  • +“MySQL”: Indicates that the Component provides a MySQL database service. +
  • +
  • +“PostgreSQL”: Indicates that the Component offers a PostgreSQL database service. +
  • +
  • +“Redis”: Signifies that the Component functions as a Redis key-value store. +
  • +
  • +“ETCD”: Denotes that the Component serves as an ETCD distributed key-value store. +
  • +
+ +

+The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name. +

+ +

+When specifying the `serviceKind`, consider the following guidelines: +

+
    +
  • +Use well-established and widely recognized protocol names or service types. +
  • +
  • +Ensure that the `serviceKind` accurately represents the primary service type offered by the Component. +
  • +
  • +If the Component provides multiple services, choose the most prominent or commonly used protocol. +
  • +
  • +Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability. +
  • +
+ +

+Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known +service category or if the protocol is not widely recognized. It is primarily used to convey information about +the Component’s service type to users and facilitate discovery and integration. +

+ +

+The `serviceKind` field is immutable and cannot be updated. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service provided by the Component. +It follows the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +

+The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where: +

+
    +
  • +X represents the major version and indicates incompatible API changes. +
  • +
  • +Y represents the minor version and indicates added functionality in a backward-compatible manner. +
  • +
  • +Z represents the patch version and indicates backward-compatible bug fixes. +
  • +
+ +

+Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format: +

+
    +
  • +Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use. +
  • +
  • +Use build metadata (e.g., +build.1) for additional version information if needed. +
  • +
+ +

+Examples of valid ServiceVersion values: +

+
    +
  • +“1.0.0” +
  • +
  • +“2.3.1” +
  • +
  • +“3.0.0-alpha.1” +
  • +
  • +“4.5.2+build.1” +
  • +
+ +

+The `serviceVersion` field is immutable and cannot be updated. +

+ +
+ +`runtime`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec template used in the Component. +It includes the following elements: +

+
    +
  • +Init containers +
  • +
  • +Containers +
      +
    • +Image +
    • +
    • +Commands +
    • +
    • +Args +
    • +
    • +Envs +
    • +
    • +Mounts +
    • +
    • +Ports +
    • +
    • +Security context +
    • +
    • +Probes +
    • +
    • +Lifecycle +
    • +
    +
  • +
  • +Volumes +
  • +
+ +

+This field is intended to define static settings that remain consistent across all instantiated Components. +Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, +toleration, priority), may vary among different instantiated Components. +They should be specified in the `cluster.spec.componentSpecs` (ClusterComponentSpec). +

+ +

+Specific instances of a Component may override settings defined here, such as using a different container image +or modifying environment variable values. +These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`. +

+ +

+This field is immutable and cannot be updated once set. +

+ +
+ +`monitor`
+ + +MonitorConfig + + + +
+ +(Optional) + +

+Deprecated since v0.9. +`monitor` is the monitoring configuration provided by the provider. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the built-in metrics exporter container. +

+ +
+ +`vars`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Defines variables which are determined after Cluster instantiation and reflect +dynamic or runtime attributes of instantiated Clusters. +These variables serve as placeholders for setting environment variables in Pods and Actions, +or for rendering configuration and script templates before actual values are finalized. +

+ +

+These variables are placed in front of the environment variables declared in the Pod if used as +environment variables. +

+ +

+Variable values can be sourced from: +

+
    +
  • +ConfigMap: Select and extract a value from a specific key within a ConfigMap. +
  • +
  • +Secret: Select and extract a value from a specific key within a Secret. +
  • +
  • +HostNetwork: Retrieves values (including ports) from host-network resources. +
  • +
  • +Service: Retrieves values (including address, port, NodePort) from a selected Service. +Intended to obtain the address of a ComponentService within the same Cluster. +
  • +
  • +Credential: Retrieves account name and password from a SystemAccount variable. +
  • +
  • +ServiceRef: Retrieves address, port, account name and password from a selected ServiceRefDeclaration. +Designed to obtain the address bound to a ServiceRef, such as a ClusterService or +ComponentService of another cluster or an external service. +
  • +
  • +Component: Retrieves values from a selected Component, including replicas and instance name list. +
  • +
+ +

+This field is immutable. +

+ +
+ +`volumes`
+ + +[]ComponentVolume + + + +
+ +(Optional) + +

+Defines the volumes used by the Component and some static attributes of the volumes. +After defining the volumes here, users can reference them in the +`cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as +volume capacity and storage class. +

+ +

+This field allows you to specify the following: +

+
    +
  • +Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing +a snapshot backup of the Component. +
  • +
  • +Disk high watermark: Sets the high watermark for the volume’s disk usage. +When the disk usage reaches the specified threshold, it triggers an alert or action. +
  • +
+ +

+By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component. +

+ +

+This field is immutable. +

+ +
+ +`hostNetwork`
+ + +HostNetwork + + + +
+ +(Optional) + +

+Specifies the host network configuration for the Component. +

+ +

+When the `hostNetwork` option is enabled, the Pods share the host’s network namespace and can directly access +the host’s network interfaces. +This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously +due to port conflicts. +

+ +

+The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. +When using hostNetwork, the operator will set the DNSPolicy to ‘ClusterFirstWithHostNet’. +With this policy, DNS queries will first go through the K8s cluster’s DNS service. +If the query fails, it will fall back to the host’s DNS settings. +

+ +

+If set, the DNS policy will be automatically set to “ClusterFirstWithHostNet”. +

+ +

+This field is immutable. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Defines additional Services to expose the Component’s endpoints. +

+ +

+A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created +for internal Cluster communication. +

+ +

+This field enables customization of additional Services to expose the Component’s endpoints to +other Components within the same or different Clusters, and to external applications. +Each Service entry in this list can include properties such as ports, type, and selectors. +

+
    +
  • +For intra-Cluster access, Components can reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceVarRef`. +
  • +
  • +For inter-Cluster access, reference Services using variables declared in +`componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, +and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`. +
  • +
+ +

+This field is immutable. +

+ +
+ +`configs`
+ + +[]ComponentConfigSpec + + + +
+ +(Optional) + +

+Specifies the configuration file templates and volume mount parameters used by the Component. +It also includes descriptions of the parameters in the ConfigMaps, such as value range limitations. +

+ +

+This field specifies a list of templates that will be rendered into Component containers’ configuration files. +Each template is represented as a ConfigMap and may contain multiple configuration files, +with each file being a key in the ConfigMap. +

+ +

+The rendered configuration files will be mounted into the Component’s containers +according to the specified volume mount parameters. +

+ +

+This field is immutable. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Defines the types of logs generated by instances of the Component and their corresponding file paths. +These logs can be collected for further analysis and monitoring. +

+ +

+The `logConfigs` field is an optional list of LogConfig objects, where each object represents +a specific log type and its configuration. +It allows you to specify multiple log types and their respective file paths for the Component. +

+ +

+Examples: +

+
+
+ logConfigs:
+ - filePathPattern: /data/mysql/log/mysqld-error.log
+   name: error
+ - filePathPattern: /data/mysql/log/mysqld.log
+   name: general
+ - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+   name: slow
+
+
+ +

+This field is immutable. +

+ +
+ +`scripts`
+ + +[]ComponentTemplateSpec + + + +
+ +(Optional) + +

+Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. +These scripts can be executed during container startup or via specific actions. +

+ +

+Each script group is encapsulated in a ComponentTemplateSpec that includes: +

+
    +
  • +The ConfigMap containing the scripts. +
  • +
  • +The mount point where the scripts will be mounted inside the container. +
  • +
+ +

+This field is immutable. +

+ +
+ +`policyRules`
+ + +[]Kubernetes rbac/v1.PolicyRule + + + +
+ +(Optional) + +

+Defines the namespaced policy rules required by the Component. +

+ +

+The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules +needed by the Component to operate within a namespace. +These policy rules determine the permissions and verbs the Component is allowed to perform on +Kubernetes resources within the namespace. +

+ +

+The purpose of this field is to automatically generate the necessary RBAC roles +for the Component based on the specified policy rules. +This ensures that the Pods in the Component have appropriate permissions to function. +

+ +

+Note: This field is currently non-functional and is reserved for future implementation. +

+ +

+This field is immutable. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static labels that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, +it will be silently ignored to avoid overriding higher-priority labels. +

+ +

+This field is immutable. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies static annotations that will be patched to all Kubernetes resources created for the Component. +

+ +

+Note: If an annotation key in the `annotations` field conflicts with any system annotations +or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations. +

+ +

+This field is immutable. +

+ +
+ +`replicasLimit`
+ + +ReplicasLimit + + + +
+ +(Optional) + +

+Defines the upper limit of the number of replicas supported by the Component. +

+ +

+It defines the maximum number of replicas that can be created for the Component. +This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas. +

+ +

+This field is immutable. +

+ +
+ +`systemAccounts`
+ + +[]SystemAccount + + + +
+ +(Optional) + +

+An array of `SystemAccount` objects that define the system accounts needed +for the management operations of the Component. +

+ +

+Each `SystemAccount` includes: +

+
    +
  • +Account name. +
  • +
  • +The SQL statement template: Used to create the system account. +
  • +
  • +Password Source: Either generated based on certain rules or retrieved from a Secret. +
  • +
+ +

+Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, +health checks, replication, and other system-level operations. +

+ +

+System accounts are distinct from user accounts, although both are database accounts. +

+
    +
  • +System Accounts: Created during Cluster setup by the KubeBlocks operator, +these accounts have higher privileges for system management and are fully managed +through a declarative API by the operator. +
  • +
  • +User Accounts: Managed by users or administrator. +User account permissions should follow the principle of least privilege, +granting only the necessary access rights to complete their required tasks. +
  • +
+ +

+This field is immutable. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the concurrency strategy for updating multiple instances of the Component. +Available strategies: +

+
    +
  • +`Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready +before updating the next. +
  • +
  • +`Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability +during the update. +
  • +
  • +`BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum +number of operational replicas for maintaining quorum. + For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps +at least 3 operational for quorum. +
  • +
+ +

+This field is immutable and defaults to ‘Serial’. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how the InstanceSet creates pods during initial scale up, replaces pods on nodes, and scales down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
  • +
+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Enumerates all possible roles assigned to each replica of the Component, influencing its behavior. +

+ +

+A replica can have zero or more roles. +The KubeBlocks operator determines the roles of each replica by invoking the `lifecycleActions.roleProbe` method. +This action returns a list of roles for each replica, and the returned roles must be predefined in the `roles` field. +

+ +

+The roles assigned to a replica can influence various aspects of the Component’s behavior, such as: +

+
    +
  • +Service selection: The Component’s exposed Services may target replicas based on their roles using `roleSelector`. +
  • +
  • +Update order: The roles can determine the order in which replicas are updated during a Component update. +For instance, replicas with a “follower” role can be updated first, while the replica with the “leader” +role is updated last. This helps minimize the number of leader changes during the update process. +
  • +
+ +

+This field is immutable. +

+ +
+ +`roleArbitrator`
+ + +RoleArbitrator + + + +
+ +(Optional) + +

+This field has been deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +

+This field is immutable. +

+ +
+ +`lifecycleActions`
+ + +ComponentLifecycleActions + + + +
+ +(Optional) + +

+Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. +Actions are triggered at specific lifecycle stages: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as before planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +`reconfigure`: Defines the procedure that updates a replica with a new configuration file. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+This field is immutable. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment. +

+ +

+This field is immutable. +

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+`minReadySeconds` is the minimum duration in seconds that a new Pod should remain in the ready +state without any of its containers crashing to be considered available. +This ensures the Pod’s stability and readiness to serve requests. +

+ +

+A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state. +

+ +
+ +
+ +`status`
+ + +ComponentDefinitionStatus + + + +
+ + +
+
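+
+A skeletal ComponentDefinition tying together a few of the major fields above; this is a sketch, not a working addon, and the image and names are placeholders:
+
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ComponentDefinition
+metadata:
+  name: mysql-8.0
+spec:
+  provider: example.com            # hypothetical provider
+  serviceKind: MySQL
+  serviceVersion: 8.0.30
+  updateStrategy: Serial
+  runtime:
+    containers:
+      - name: mysql
+        image: docker.io/library/mysql:8.0.30
+        ports:
+          - name: mysql
+            containerPort: 3306
+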

+ComponentVersion + +

+
+ +

+ComponentVersion is the Schema for the componentversions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ComponentVersion` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentVersionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +

+ +
+ +
+ +`status`
+ + +ComponentVersionStatus + + + +
+ + +
+

+ConfigConstraint + +

+
+ +

+ConfigConstraint manages the parameters across multiple configuration files contained in a single configuration template. +These configuration files should have the same format (e.g. ini, xml, properties, json). +

+ +

+It provides the following functionalities: +

+
    +
  1. +Parameter Value Validation: Validates and ensures compliance of parameter values with defined constraints. +
  2. +
  3. +Dynamic Reload on Modification: Monitors parameter changes and triggers dynamic reloads to apply updates. +
  4. +
  5. +Parameter Rendering in Templates: Injects parameters into templates to generate up-to-date configuration files. +
  6. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ConfigConstraint` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ConfigConstraintSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`reloadOptions`
+ + +ReloadOptions + + + +
+ +(Optional) + +

+Specifies the dynamic reload action supported by the engine. +When set, the controller executes the method defined here to apply hot parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `reloadStaticParamsBeforeRestart` is set to true, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadOptions` is set. +
  4. +
+ +

+If `reloadOptions` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadOptions:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`dynamicActionCanBeMerged`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadOptions` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “true” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`toolsImageSpec`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container, +which is then responsible for copying the tools from the image to a bin volume. +This ensures that the tools are available to the ‘config-manager’ sidecar. +

+ +
+ +`downwardAPIOptions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions that execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`scriptConfigs`
+ + +[]ScriptConfig + + + +
+ +(Optional) + +

+A list of ScriptConfig Object. +

+ +

+Each ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload +and DownwardAction to perform specific tasks or configurations. +

+ +
+ +`cfgSchemaTopLevelName`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configurationSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configurationSchema.cue’. +

+ +
+ +`configurationSchema`
+ + +CustomParametersValidation + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +
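+Example (parameter names are illustrative, MySQL-style): +
+
+staticParameters:
+  - innodb_buffer_pool_size
+dynamicParameters:
+  - max_connections
+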

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempts to change any of these parameters will be ignored. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `selector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `selector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+ +`formatterConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+formatterConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+ +
+ +`status`
+ + +ConfigConstraintStatus + + + +
+ + +
+

+Configuration + +

+
+ +

+Configuration represents the complete set of configurations for a specific Component of a Cluster. +This includes templates for each configuration file, their corresponding ConfigConstraints, volume mounts, +and other relevant details. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Configuration` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ConfigurationSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + +
+ +`clusterRef`
+ +string + + +
+ + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigurationItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigurationItemDetail objects. +

+ +

+Each ConfigurationItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigurationItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+ +
+ +`status`
+ + +ConfigurationStatus + + + +
+ + +
+

+ServiceDescriptor + +

+
+ +

+ServiceDescriptor describes a service provided by external sources. +It contains the necessary details such as the service’s address and connection credentials. +To enable a Cluster to access this service, the ServiceDescriptor’s name should be specified +in the Cluster configuration under `clusterComponent.serviceRefs[*].serviceDescriptor`. +

+
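+Example (a minimal ServiceDescriptor sketch; all names and values are illustrative): +
+
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ServiceDescriptor
+metadata:
+  name: external-redis-sentinel
+spec:
+  serviceKind: redis
+  serviceVersion: "7.0.6"
+  endpoint:
+    value: "redis-sentinel.default.svc:26379"
+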
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ServiceDescriptor` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ServiceDescriptorSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+ +
+ +`status`
+ + +ServiceDescriptorStatus + + + +
+ + +
+

+AccessMode +(`string` alias) +

+ +

+ +(Appears on:ConsensusMember) + +

+
+ +

+AccessMode defines the modes of access granted to the SVC. +The modes can be `None`, `Readonly`, or `ReadWrite`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"None" +

+
+ +

+None implies no access. +

+ +
+ +

+"ReadWrite" +

+
+ +

+ReadWrite permits both read and write operations. +

+ +
+ +

+"Readonly" +

+
+ +

+Readonly allows only read operations. +

+ +
+

+AccountName +(`string` alias) +

+ +

+ +(Appears on:SystemAccountConfig) + +

+
+ +

+AccountName defines system account names. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"kbadmin" +

+
+ +
+ +

+"kbdataprotection" +

+
+ +
+ +

+"kbmonitoring" +

+
+ +
+ +

+"kbprobe" +

+
+ +
+ +

+"kbreplicator" +

+
+ +
+

+Action + +

+ +

+ +(Appears on:ComponentSwitchover, LifecycleActionHandler, Probe) + +

+
+ +

+Action defines a customizable hook or procedure tailored for different database engines, +designed to be invoked at predetermined points within the lifecycle of a Component instance. +It provides a modular and extensible way to customize a Component’s behavior through the execution of defined actions. +

+ +

+Available Action triggers include: +

+
    +
  • +`postProvision`: Defines the hook to be executed after the creation of a Component, +with `preCondition` specifying when the action should be fired relative to the Component’s lifecycle stages: +`Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +
  • +
  • +`preTerminate`: Defines the hook to be executed before terminating a Component. +
  • +
  • +`roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas. +
  • +
  • +`switchover`: Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +such as during planned maintenance or upgrades on the current leader node. +
  • +
  • +`memberJoin`: Defines the procedure to add a new replica to the replication group. +
  • +
  • +`memberLeave`: Defines the method to remove a replica from the replication group. +
  • +
  • +`readOnly`: Defines the procedure to switch a replica into the read-only state. +
  • +
  • +`readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state. +
  • +
  • +`dataDump`: Defines the procedure to export the data from a replica. +
  • +
  • +`dataLoad`: Defines the procedure to import data into a replica. +
  • +
  • +
`reconfigure`: Defines the procedure that updates a replica with a new configuration. +
  • +
  • +`accountProvision`: Defines the procedure to generate a new database account. +
  • +
+ +

+Actions can be executed in different ways: +

+
    +
  • +
ExecAction: Executes a command inside a container, +which may run as a K8s Job or be executed inside the Lorry sidecar container, depending on the implementation. +Future implementations will standardize execution within Lorry. +A set of predefined environment variables is available and can be leveraged within the `exec.command` +to access context information such as details about pods, components, the overall cluster state, +or database connection credentials. +These variables provide a dynamic and context-aware mechanism for script execution. +
  • +
  • +HTTPAction: Performs an HTTP request. +HTTPAction is to be implemented in future version. +
  • +
  • +GRPCAction: In future version, Actions will support initiating gRPC calls. +This allows developers to implement Actions using plugins written in programming language like Go, +providing greater flexibility and extensibility. +
  • +
+ +

+An action is considered successful if it returns 0 (or HTTP 200 for HTTP(s) Actions). +Any other return value or HTTP status code indicates failure, +and the action may be retried based on the configured retry policy. +

+
    +
  • +If an action exceeds the specified timeout duration, it will be terminated, and the action is considered failed. +
  • +
  • +If an action produces any data as output, it should be written to stdout, +or included in the HTTP response payload for HTTP(s) actions. +
  • +
  • +If an action encounters any errors, error messages should be written to stderr, +or detailed in the HTTP response with the appropriate non-200 status code. +
  • +
+
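+Example (a hypothetical sketch combining the fields below; the script path is illustrative, and the `retryPolicy` sub-fields are assumed): +
+
+exec:
+  command:
+    - /bin/sh
+    - -c
+    - /scripts/post-provision.sh   # hypothetical script
+timeoutSeconds: 30
+retryPolicy:
+  maxRetries: 3                    # assumed sub-field of RetryPolicy
+preCondition: ComponentReady
+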
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the container image to be used for running the Action. +

+ +

+When specified, a dedicated container will be created using this image to execute the Action. +This field is mutually exclusive with the `container` field; only one of them should be provided. +

+ +

+This field cannot be updated. +

+ +
+ +`exec`
+ + +ExecAction + + + +
+ +(Optional) + +

+Defines the command to run. +

+ +

+This field cannot be updated. +

+ +
+ +`http`
+ + +HTTPAction + + + +
+ +(Optional) + +

+Specifies the HTTP request to perform. +

+ +

+This field cannot be updated. +

+ +

+Note: HTTPAction is to be implemented in future version. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Represents a list of environment variables that will be injected into the container. +These variables enable the container to adapt its behavior based on the environment it’s running in. +

+ +

+This field cannot be updated. +

+ +
+ +`targetPodSelector`
+ + +TargetPodSelector + + + +
+ +(Optional) + +

+Defines the criteria used to select the target Pod(s) for executing the Action. +This is useful when there is no default target replica identified. +It allows for precise control over which Pod(s) the Action should run in. +

+ +

+This field cannot be updated. +

+ +

+Note: This field is reserved for future use and is not currently active. +

+ +
+ +`matchingKey`
+ +string + + +
+ +(Optional) + +

+Used in conjunction with the `targetPodSelector` field to refine the selection of target pod(s) for Action execution. +The impact of this field depends on the `targetPodSelector` value: +

+
    +
  • +When `targetPodSelector` is set to `Any` or `All`, this field will be ignored. +
  • +
  • +When `targetPodSelector` is set to `Role`, only those replicas whose role matches the `matchingKey` +will be selected for the Action. +
  • +
+ +

+This field cannot be updated. +

+ +

+Note: This field is reserved for future use and is not currently active. +

+ +
+ +`container`
+ +string + + +
+ +(Optional) + +

+Defines the name of the container within the target Pod where the action will be executed. +

+ +

+This name must correspond to one of the containers defined in `componentDefinition.spec.runtime`. +If this field is not specified, the default behavior is to use the first container listed in +`componentDefinition.spec.runtime`. +

+ +

+This field cannot be updated. +

+ +

+Note: This field is reserved for future use and is not currently active. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration in seconds that the Action is allowed to run. +

+ +

+If the Action does not complete within this time frame, it will be terminated. +

+ +

+This field cannot be updated. +

+ +
+ +`retryPolicy`
+ + +RetryPolicy + + + +
+ +(Optional) + +

+Defines the strategy to be taken when retrying the Action after a failure. +

+ +

+It specifies the conditions under which the Action should be retried and the limits to apply, +such as the maximum number of retries and backoff strategy. +

+ +

+This field cannot be updated. +

+ +
+ +`preCondition`
+ + +PreConditionType + + + +
+ +(Optional) + +

+Specifies the state that the cluster must reach before the Action is executed. +Currently, this is only applicable to the `postProvision` action. +

+ +

+The conditions are as follows: +

+
    +
  • +`Immediately`: Executed right after the Component object is created. +The readiness of the Component and its resources is not guaranteed at this stage. +
  • +
  • +`RuntimeReady`: The Action is triggered after the Component object has been created and all associated +runtime resources (e.g. Pods) are in a ready state. +
  • +
  • +`ComponentReady`: The Action is triggered after the Component itself is in a ready state. +This process does not affect the readiness state of the Component or the Cluster. +
  • +
  • +`ClusterReady`: The Action is executed after the Cluster is in a ready state. +This execution does not alter the Component or the Cluster’s state of readiness. +
  • +
+ +

+This field cannot be updated. +

+ +
+

+Affinity + +

+ +

+ +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podAntiAffinity`
+ + +PodAntiAffinity + + + +
+ +(Optional) + +

+Specifies the anti-affinity level of Pods within a Component. +It determines how pods should be spread across nodes to improve availability and performance. +It can have the following values: `Preferred` and `Required`. +The default value is `Preferred`. +

+ +
+ +`topologyKeys`
+ +[]string + + +
+ +(Optional) + +

+Represents the key of node labels used to define the topology domain for Pod anti-affinity +and Pod spread constraints. +

+ +

+In K8s, a topology domain is a set of nodes that have the same value for a specific label key. +Nodes with labels containing any of the specified TopologyKeys and identical values are considered +to be in the same topology domain. +

+ +

+Note: The concept of topology in the context of K8s TopologyKeys is different from the concept of +topology in the ClusterDefinition. +

+ +

+When a Pod has anti-affinity or spread constraints specified, Kubernetes will attempt to schedule the +Pod on nodes with different values for the specified TopologyKeys. +This ensures that Pods are spread across different topology domains, promoting high availability and +reducing the impact of node failures. +

+ +

+Some well-known label keys, such as `kubernetes.io/hostname` and `topology.kubernetes.io/zone`, +are often used as TopologyKey. +These keys represent the hostname and zone of a node, respectively. +By including these keys in the TopologyKeys list, Pods will be spread across nodes with +different hostnames or zones. +

+ +

+In addition to the well-known keys, users can also specify custom label keys as TopologyKeys. +This allows for more flexible and custom topology definitions based on the specific needs +of the application or environment. +

+ +

+The TopologyKeys field is a slice of strings, where each string represents a label key. +The order of the keys in the slice does not matter. +

+ +
+ +`nodeLabels`
+ +map[string]string + + +
+ +(Optional) + +

+Indicates the node labels that must be present on nodes for pods to be scheduled on them. +It is a map where the keys are the label keys and the values are the corresponding label values. +Pods will only be scheduled on nodes that have all the specified labels with the corresponding values. +

+ +

+For example, if NodeLabels is set to {“nodeType”: “ssd”, “environment”: “production”}, +pods will only be scheduled on nodes that have both the “nodeType” label with value “ssd” +and the “environment” label with value “production”. +

+ +

+This field allows users to control Pod placement based on specific node labels. +It can be used to ensure that Pods are scheduled on nodes with certain characteristics, +such as specific hardware (e.g., SSD), environment (e.g., production, staging), +or any other custom labels assigned to nodes. +

+ +
+ +`tenancy`
+ + +TenancyType + + + +
+ +(Optional) + +

+Determines the level of resource isolation between Pods. +It can have the following values: `SharedNode` and `DedicatedNode`. +

+
    +
  • +
SharedNode: Allows multiple Pods to share the same node, which is the default behavior of K8s. +
  • +
  • +
DedicatedNode: Each Pod runs on a dedicated node, ensuring that no two Pods share the same node. +In other words, if a Pod is already running on a node, no other Pods will be scheduled on that node. +This provides a higher level of isolation and resource guarantees for Pods. +
  • +
+ +

+The default value is `SharedNode`. +

+ +
+
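+Example (a minimal sketch of an `affinity` block using the fields above): +
+
+affinity:
+  podAntiAffinity: Preferred
+  topologyKeys:
+    - kubernetes.io/hostname
+  tenancy: SharedNode
+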

+AvailabilityPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+AvailabilityPolicyType defines the type of availability policy to be applied for cluster affinity, influencing how +resources are distributed across zones or nodes for high availability and resilience. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"node" +

+
+ +

+AvailabilityPolicyNode specifies that resources should be distributed across different nodes within the same zone. +This policy aims to provide resilience against node failures, ensuring that the failure of a single node does not +impact the overall service availability. +

+ +
+ +

+"none" +

+
+ +

+AvailabilityPolicyNone specifies that no specific availability policy is applied. +Resources may not be explicitly distributed for high availability, potentially concentrating them in a single +zone or node based on other scheduling decisions. +

+ +
+ +

+"zone" +

+
+ +

+AvailabilityPolicyZone specifies that resources should be distributed across different availability zones. +This policy aims to ensure high availability and protect against zone failures, spreading the resources to reduce +the risk of simultaneous downtime. +

+ +
+

+BackupStatusUpdateStage +(`string` alias) +

+
+ +

+BackupStatusUpdateStage defines the stage of backup status update. +

+
+

+BaseBackupType +(`string` alias) +

+
+ +

+BaseBackupType is the base backup type, kept synchronized with the BaseBackupType of the data protection API. +

+
+

+BuiltinActionHandlerType +(`string` alias) +

+ +

+ +(Appears on:LifecycleActionHandler) + +

+
+ +

+BuiltinActionHandlerType defines the built-in action handlers provided by Lorry, including: +

+
    +
  • +`mysql` +
  • +
  • +`wesql` +
  • +
  • +`oceanbase` +
  • +
  • +`redis` +
  • +
  • +`mongodb` +
  • +
  • +`etcd` +
  • +
  • +`postgresql` +
  • +
  • +`vanilla-postgresql` +
  • +
  • +`apecloud-postgresql` +
  • +
  • +`polardbx` +
  • +
  • +`custom` +
  • +
  • +`unknown` +
  • +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"apecloud-postgresql" +

+
+ +
+ +

+"custom" +

+
+ +
+ +

+"etcd" +

+
+ +
+ +

+"mongodb" +

+
+ +
+ +

+"mysql" +

+
+ +
+ +

+"oceanbase" +

+
+ +
+ +

+"polardbx" +

+
+ +
+ +

+"postgresql" +

+
+ +
+ +

+"redis" +

+
+ +
+ +

+"unknown" +

+
+ +
+ +

+"vanilla-postgresql" +

+
+ +
+ +

+"wesql" +

+
+ +
+

+ClassDefRef + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+ +

+ClassDefRef is deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ComponentClassDefinition. +

+ +
+ +`class`
+ +string + + +
+ + +

+Defines the name of the class that is defined in the ComponentClassDefinition. +

+ +
+

+ClusterBackup + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether automated backup is enabled for the Cluster. +

+ +
+ +`retentionPeriod`
+ +github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.RetentionPeriod + + +
+ +(Optional) + +

+Determines the duration to retain backups. Backups older than this period are automatically removed. +

+ +

+For example, a RetentionPeriod of `30d` will keep only the backups of the last 30 days. +Sample duration format: +

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +Default value is 7d. +

+ +
+ +`method`
+ +string + + +
+ + +

+Specifies the backup method to use, as defined in backupPolicy. +

+ +
+ +`cronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Specifies the maximum time in minutes that the system will wait to start a missed backup job. +If the scheduled backup time is missed for any reason, the backup job must start within this deadline. +Values must be between 0 (immediate execution) and 1440 (one day). +

+ +
+ +`repoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the backupRepo. If not set, the default backupRepo will be used. +

+ +
+ +`pitrEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable point-in-time recovery. +

+ +
+ +`continuousMethod`
+ +string + + +
+ +(Optional) + +

+Specifies the backup method to use; if not set, the first continuous method is used. +

+ +
+ +`incrementalBackupEnabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to enable incremental backup. +

+ +
+ +`incrementalCronExpression`
+ +string + + +
+ +(Optional) + +

+The cron expression for the incremental backup schedule. The timezone is in UTC. See https://en.wikipedia.org/wiki/Cron. +

+ +
+
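+Example (a sketch of a Cluster `backup` spec; the method and repo names are illustrative): +
+
+backup:
+  enabled: true
+  retentionPeriod: 30d12h30m
+  method: xtrabackup          # illustrative; must match a method defined in the backupPolicy
+  cronExpression: "0 18 * * *"
+  repoName: my-backup-repo    # illustrative; must reference an existing backupRepo
+  pitrEnabled: false
+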

+ClusterComponentConfig + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+ClusterComponentConfig represents a config with its source bound. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the config. +

+ +
+ +`ClusterComponentConfigSource`
+ + +ClusterComponentConfigSource + + + +
+ + +

+ +(Members of `ClusterComponentConfigSource` are embedded into this type.) + +

+ +

+The source of the config. +

+ +
+

+ClusterComponentConfigSource + +

+ +

+ +(Appears on:ClusterComponentConfig) + +

+
+ +

+ClusterComponentConfigSource represents the source of a config. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMap`
+ + +Kubernetes core/v1.ConfigMapVolumeSource + + + +
+ +(Optional) + +

+ConfigMap source for the config. +

+ +
+
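+Example (a minimal `configs` entry backed by a ConfigMap; names are illustrative): +
+
+configs:
+  - name: mysql-config
+    configMap:
+      name: my-custom-mysql-config
+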

+ClusterComponentDefinition + +

+ +

+ +(Appears on:ClusterDefinitionSpec) + +

+
+ +

+ClusterComponentDefinition defines a Component within a ClusterDefinition but is deprecated and +has been replaced by ComponentDefinition. +

+ +

+Deprecated: Use ComponentDefinition instead. This type is deprecated as of version 0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+This name could be used as the default name of `cluster.spec.componentSpecs.name`, and needs to conform to the same +validation rules as `cluster.spec.componentSpecs.name`, currently complying with the IANA service naming rule. +This name will be applied to cluster objects as the value of the label “apps.kubeblocks.io/component-name”. +

+ +
+ +`description`
+ +string + + +
+ +(Optional) + +

+Description of the component definition. +

+ +
+ +`workloadType`
+ + +WorkloadType + + + +
+ + +

+Defines the type of the workload. +

+
    +
  • +`Stateless` describes stateless applications. +
  • +
  • +`Stateful` describes common stateful applications. +
  • +
  • +`Consensus` describes applications based on consensus protocols, such as raft and paxos. +
  • +
  • +`Replication` describes applications based on the primary-secondary data replication protocol. +
  • +
+ +
+ +`characterType`
+ +string + + +
+ +(Optional) + +

+Defines a well-known database component name, such as mongos (mongodb), proxy (redis), or mariadb (mysql). +

+ +
+ +`configSpecs`
+ + +[]ComponentConfigSpec + + + +
+ +(Optional) + +

+Defines the template of configurations. +

+ +
+ +`scriptSpecs`
+ + +[]ComponentTemplateSpec + + + +
+ +(Optional) + +

+Defines the template of scripts. +

+ +
+ +`probes`
+ + +ClusterDefinitionProbes + + + +
+ +(Optional) + +

+Settings for health checks. +

+ +
+ +`logConfigs`
+ + +[]LogConfig + + + +
+ +(Optional) + +

+Specify the logging files which can be observed and configured by cluster users. +

+ +
+ +`podSpec`
+ + +Kubernetes core/v1.PodSpec + + + +
+ +(Optional) + +

+Defines the pod spec template of component. +

+ +
+ +`service`
+ + +ServiceSpec + + + +
+ +(Optional) + +

+Defines the service spec. +

+ +
+ +`statelessSpec`
+ + +StatelessSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Stateless` workloads. +

+ +
+ +`statefulSpec`
+ + +StatefulSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Stateful` workloads. +

+ +
+ +`consensusSpec`
+ + +ConsensusSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Consensus` workloads. It’s required if the workload type is `Consensus`. +

+ +
+ +`replicationSpec`
+ + +ReplicationSetSpec + + + +
+ +(Optional) + +

+Defines spec for `Replication` workloads. +

+ +
+ +`rsmSpec`
+ + +RSMSpec + + + +
+ +(Optional) + +

+Defines the workload spec of this component. +From KB 0.7.0, RSM (InstanceSetSpec) is the underlying CR that powers all kinds of workloads in KB. +RSM is an enhanced stateful workload extension dedicated to heavy-state workloads like databases. +

+ +
+ +`horizontalScalePolicy`
+ + +HorizontalScalePolicy + + + +
+ +(Optional) + +

+Defines the behavior of horizontal scale. +

+ +
+ +`systemAccounts`
+ + +SystemAccountSpec + + + +
+ +(Optional) + +

+Defines system accounts needed to manage the component, and the statement to create them. +

+ +
+ +`volumeTypes`
+ + +[]VolumeTypeSpec + + + +
+ +(Optional) + +

+Used to describe the purpose of the volumes, mapping to the names of the VolumeMounts in the PodSpec.Container field, +such as the data volume or log volume. When backing up the volume, the volume can be correctly backed up according +to its volumeType. +

+ +

+For example: +

+
    +
  • +`name: data, type: data` means that the volume named `data` is used to store `data`. +
  • +
  • +`name: binlog, type: log` means that the volume named `binlog` is used to store `log`. +
  • +
+ +

+NOTE: When volumeTypes is not defined, the backup function will not be supported, even if a persistent volume has +been specified. +

+ +
+ +`customLabelSpecs`
+ + +[]CustomLabelSpec + + + +
+ +(Optional) + +

+Used for custom label tags which you want to add to the component resources. +

+ +
+ +`switchoverSpec`
+ + +SwitchoverSpec + + + +
+ +(Optional) + +

+Defines the command to perform a switchover. +In particular, when workloadType=Replication, the command defined in switchoverSpec will only be executed when +cluster.componentSpecs[x].SwitchPolicy.type=Noop. +

+ +
+ +`postStartSpec`
+ + +PostStartAction + + + +
+ +(Optional) + +

+Defines the command to be executed when the component is ready, and the command will only be executed once after +the component becomes ready. +

+ +
+ +`volumeProtectionSpec`
+ + +VolumeProtectionSpec + + + +
+ +(Optional) + +

+Defines settings for volume protection. +

+ +
+ +`componentDefRef`
+ + +[]ComponentDefRef + + + +
+ +(Optional) + +

+Used to inject values from other components into the current component. Values will be saved and updated in a +configmap and mounted to the current component. +

+ +
+ +`serviceRefDeclarations`
+ + +[]ServiceRefDeclaration + + + +
+ +(Optional) + +

+Used to declare the service reference of the current component. +

+ +
+ +`exporter`
+ + +Exporter + + + +
+ +(Optional) + +

+Defines the metrics exporter. +

+ +
+ +`monitor`
+ + +MonitorConfig + + + +
+ +(Optional) + +

+Deprecated since v0.9. +Specifies the monitoring config provided by the provider. +

+ +
+

+ClusterComponentPhase +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentStatus, ComponentStatus) + +

+
+ +

+ClusterComponentPhase defines the phase of a cluster component as represented in cluster.status.components.phase field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Abnormal" +

+
+ +

+AbnormalClusterCompPhase indicates the component has more than zero replicas, but there are some failed pods. +The component is functioning, but it is in a fragile state. +

+ +
+ +

+"Creating" +

+
+ +

+CreatingClusterCompPhase indicates the component is being created. +

+ +
+ +

+"Deleting" +

+
+ +

+DeletingClusterCompPhase indicates the component is currently being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+FailedClusterCompPhase indicates the component has more than zero replicas, but there are some failed pods. +The component is not functioning. +

+ +
+ +

+"Running" +

+
+ +

+RunningClusterCompPhase indicates the component has more than zero replicas, and all pods are up-to-date and +in a ‘Running’ state. +

+ +
+ +

+"Stopped" +

+
+ +

+StoppedClusterCompPhase indicates the component has zero replicas, and all pods have been deleted. +

+ +
+ +

+"Stopping" +

+
+ +

+StoppingClusterCompPhase indicates the component has zero replicas, and there are pods that are terminating. +

+ +
+ +

+"Updating" +

+
+ +

+UpdatingClusterCompPhase indicates the component has more than zero replicas and no failed pods; +it is currently being updated. +

+ +
+

+ClusterComponentService + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+References the ComponentService name defined in the `componentDefinition.spec.services[*].name`. +

+ +
+ +`serviceType`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+Determines how the Service is exposed. Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. +

+
    +
  • +`ClusterIP` allocates a Cluster-internal IP address for load-balancing to endpoints. +Endpoints are determined by the selector or if that is not specified, +they are determined by manual construction of an Endpoints object or EndpointSlice objects. +
  • +
  • +`NodePort` builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the ClusterIP. +
  • +
  • +`LoadBalancer` builds on NodePort and creates an external load-balancer (if supported in the current cloud) +which routes to the same endpoints as the ClusterIP. +
  • +
+ +

+Note: although the K8s Service API allows the ‘ExternalName’ type, it is not a valid option for ClusterComponentService. +

+ +

+For more info, see: +https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to generate individual Services for each Pod. +If set to true, a separate Service will be created for each Pod in the Cluster. +

+ +
+
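+Example (a `services` override exposing a Service via LoadBalancer; the annotation key is cloud-specific and illustrative): +
+
+services:
+  - name: default
+    serviceType: LoadBalancer
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-type: nlb
+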

+ClusterComponentSpec + +

+ +

+ +(Appears on:ClusterSpec, ShardingSpec) + +

+
+ +

+ClusterComponentSpec defines the specification of a Component within a Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the Component’s name. +It’s part of the Service DNS name and must comply with the IANA service naming rule. +The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), +but required otherwise. +

+ +
+ +`componentDefRef`
+ +string + + +
+ +(Optional) + +

+References a ClusterComponentDefinition defined in the `clusterDefinition.spec.componentDef` field. +Must comply with the IANA service naming rule. +

+ +

+Deprecated since v0.9, +because defining Components in the `clusterDefinition.spec.componentDef` field has been deprecated. +This field is replaced by the `componentDef` field; use `componentDef` instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`componentDef`
+ +string + + +
+ +(Optional) + +

+Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition +custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +

+If both `componentDefRef` and `componentDef` are provided, +the `componentDef` will take precedence over `componentDefRef`. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`classDefRef`
+ + +ClassDefRef + + + +
+ +(Optional) + +

+References the class defined in ComponentClassDefinition. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
    +
  • +
External services: Not managed by KubeBlocks, or managed by a different KubeBlocks operator; +require a ServiceDescriptor for connection details. +
  • +
  • +Services provided by a Cluster: Managed by the same KubeBlocks operator; +identified using Cluster, Component and Service names. +
  • +
+ +

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`enabledLogs`
+ +[]string + + +
+ +(Optional) + +

+Specifies which types of logs should be collected for the Component. +The log types are defined in the `componentDefinition.spec.logConfigs` field with the LogConfig entries. +

+ +

+The elements in the `enabledLogs` array correspond to the names of the LogConfig entries. +For example, if the `componentDefinition.spec.logConfigs` defines LogConfig entries with +names “slow_query_log” and “error_log”, +you can enable the collection of these logs by including their names in the `enabledLogs` array: +

+
+
+enabledLogs:
+- slow_query_log
+- error_log
+
+
+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add to the underlying Pods, PVCs, Account & TLS Secrets, and Services owned by the Component. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add to the underlying Pods, PVCs, Account & TLS Secrets, and Services owned by the Component. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +These environment variables will be placed after the environment variables declared in the Pod. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules for the Component. +It allows users to control how the Component’s Pods are scheduled onto nodes in the K8s cluster. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ClusterComponentService + + + +
+ +(Optional) + +

+Overrides services defined in referenced ComponentDefinition and expose endpoints that can be accessed by clients. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`switchPolicy`
+ + +ClusterSwitchPolicy + + + +
+ +(Optional) + +

+Defines the strategy for switchover and failover. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`tls`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys, +to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+Defaults: +To perform certain operational tasks, agent sidecars running in Pods require specific RBAC permissions. +The service account will be bound to a default role named “kubeblocks-cluster-pod-role”, which is installed together with KubeBlocks. +If not specified, KubeBlocks automatically assigns a default ServiceAccount named “kb-{cluster.name}”. +

+ +

+Future Changes: +Future versions might change the default ServiceAccount creation strategy to one per Component, +potentially revising the naming to “kb-{cluster.name}-{component.name}”. +

+ +

+Users can override the automatic ServiceAccount assignment by explicitly setting the name of +an existing ServiceAccount in this field. +

+ +
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Defines the update strategy for the Component. +

+ +

+Deprecated since v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Indicates the InstanceUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes, +or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`. +The default concurrency is 100%. +

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how Pods should be updated: +

+
    +
  • +
`StrictInPlace` indicates that only in-place upgrades are allowed. +Any attempt to modify other fields will be rejected. +
  • +
  • +
`PreferInPlace` indicates that an in-place upgrade of the Pod will be attempted first. +If that fails, it will fall back to ReCreate, where the Pod will be recreated. +The default value is “PreferInPlace”. +
  • +
+ +
+ +`userResourceRefs`
+ + +UserResourceRefs + + + +
+ +(Optional) + +

+Allows users to specify custom ConfigMaps and Secrets to be mounted as volumes +in the Cluster’s Pods. +This is useful in scenarios where users need to provide additional resources to the Cluster, such as: +

+
    +
  • +Mounting custom scripts or configuration files during Cluster startup. +
  • +
  • +Mounting Secrets as volumes to provide sensitive information, like S3 AK/SK, to the Cluster. +
  • +
+ +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component. +An instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps). +While instances typically share a common configuration as defined in the ClusterComponentSpec, +they can require unique settings in various scenarios: +

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +
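+Example (a hypothetical `instances` override giving one replica its own template; names and resources are illustrative): +
+
+instances:
+  - name: primary
+    replicas: 1
+    resources:
+      limits:
+        cpu: "2"
+        memory: 4Gi
+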

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential +future reuse or data recovery, but it is no longer actively used. +
  2. +
  3. +The ordinal number assigned to this instance is preserved, ensuring it remains unique +and avoiding conflicts with new instances. +
  4. +
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +
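+Example (the instance name is hypothetical): +
+
+offlineInstances:
+  - mycluster-mysql-1
+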

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`monitor`
+ +bool + + +
+ +(Optional) + +

+Deprecated since v0.9. +Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will be patched into the Service: +

+
    +
  • +“monitor.kubeblocks.io/path” +
  • +
  • +“monitor.kubeblocks.io/port” +
  • +
  • +“monitor.kubeblocks.io/scheme” +
  • +
+ +

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stops the Component. +If set, all the computing resources will be released. +

+ +
+

+ClusterComponentStatus + +

+ +

+ +(Appears on:ClusterStatus) + +

+
+ +

+ClusterComponentStatus records Component status. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ClusterComponentPhase + + + +
+ + +

+Specifies the current state of the Component. +

+ +
+ +`message`
+ + +ComponentMessageMap + + + +
+ +(Optional) + +

+Records detailed information about the Component in its current phase. +The keys are either podName, deployName, or statefulSetName, formatted as ‘ObjectKind/Name’. +

+ +
+ +`podsReady`
+ +bool + + +
+ +(Optional) + +

+Checks if all Pods of the Component are ready. +

+ +
+ +`podsReadyTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Indicates the time when all Component Pods became ready. +This is the readiness time of the last Component Pod. +

+ +
+ +`membersStatus`
+ + +[]MemberStatus + + + +
+ +(Optional) + +

+Represents the status of the members. +

+ +
+

+### ClusterComponentVolumeClaimTemplate
+
+(Appears on: ClusterComponentSpec, ComponentSpec, InstanceTemplate)
+
+**Fields:**
+
+- `name` (string): Refers to the name of a volumeMount defined in either:
+  - `componentDefinition.spec.runtime.containers[*].volumeMounts`
+  - `clusterDefinition.spec.componentDefs[*].podSpec.containers[*].volumeMounts` (deprecated)
+
+  The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array.
+- `labels` (map[string]string, optional): Specifies the labels for the PVC of the volume.
+- `annotations` (map[string]string, optional): Specifies the annotations for the PVC of the volume.
+- `spec` (PersistentVolumeClaimSpec, optional): Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume with the mount name specified in the `name` field. When a Pod is created for this ClusterComponent, a new PVC will be created based on the specification defined in the `spec` field. The PVC will be associated with the volume mount specified by the `name` field. The nested fields are:
+  - `accessModes` ([]Kubernetes core/v1.PersistentVolumeAccessMode, optional): Contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1.
+  - `resources` (Kubernetes core/v1.VolumeResourceRequirements, optional): Represents the minimum resources the volume should have. If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources.
+  - `storageClassName` (string, optional): The name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1.
+  - `volumeMode` (Kubernetes core/v1.PersistentVolumeMode, optional): Defines what type of volume is required by the claim, either Block or Filesystem.
+
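+A minimal, hypothetical sketch of a volume claim template (the `data` mount name and `standard` StorageClass are examples, not defaults):
+
+```yaml
+volumeClaimTemplates:
+  - name: data                      # must match a volumeMount name in the runtime spec
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      storageClassName: standard    # hypothetical StorageClass
+      resources:
+        requests:
+          storage: 20Gi
+```
+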
+### ClusterDefinitionProbe
+
+(Appears on: ClusterDefinitionProbes)
+
+ClusterDefinitionProbe is deprecated since v0.8.
+
+**Fields:**
+
+- `periodSeconds` (int32): How often (in seconds) to perform the probe.
+- `timeoutSeconds` (int32): Number of seconds after which the probe times out. Defaults to 1 second.
+- `failureThreshold` (int32): Minimum consecutive failures for the probe to be considered failed after having succeeded.
+- `commands` (ClusterDefinitionProbeCMDs, optional): Commands used to execute the probe.
+
+### ClusterDefinitionProbeCMDs
+
+(Appears on: ClusterDefinitionProbe)
+
+ClusterDefinitionProbeCMDs is deprecated since v0.8.
+
+**Fields:**
+
+- `writes` ([]string, optional): Defines write checks that are executed on the probe sidecar.
+- `queries` ([]string, optional): Defines read checks that are executed on the probe sidecar.
+
+### ClusterDefinitionProbes
+
+(Appears on: ClusterComponentDefinition)
+
+ClusterDefinitionProbes is deprecated since v0.8.
+
+**Fields:**
+
+- `runningProbe` (ClusterDefinitionProbe, optional): Specifies the probe used for checking the running status of the component.
+- `statusProbe` (ClusterDefinitionProbe, optional): Specifies the probe used for checking the status of the component.
+- `roleProbe` (ClusterDefinitionProbe, optional): Specifies the probe used for checking the role of the component.
+- `roleProbeTimeoutAfterPodsReady` (int32, optional): Defines the timeout (in seconds) for the role probe after all pods of the component are ready. The system will check whether the application is available in the pod. If pods exceed the InitializationTimeoutSeconds time without a role label, this component will enter the Failed/Abnormal phase. Note that this configuration only takes effect if the component supports RoleProbe, and it does not affect the life cycle of the pod. The default value is 60 seconds.
+
+### ClusterDefinitionSpec
+
+(Appears on: ClusterDefinition)
+
+ClusterDefinitionSpec defines the desired state of ClusterDefinition.
+
+**Fields:**
+
+- `type` (string, optional): Specifies the well-known database type, such as mysql, redis, or mongodb.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `componentDefs` ([]ClusterComponentDefinition, optional): Provides the definitions for the cluster components.
+
+  Deprecated since v0.9. Components should now be individually defined using ComponentDefinition and collectively referenced via `topology.components`. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `connectionCredential` (map[string]string, optional): Connection credential template used for creating a connection credential secret for cluster objects. (A sketch follows the field list below.)
+
+  Built-in objects are:
+
+  - `$(RANDOM_PASSWD)`: random 8 characters.
+  - `$(STRONG_RANDOM_PASSWD)`: random 16 characters, with mixed cases, digits, and symbols.
+  - `$(UUID)`: generate a random UUID v4 string.
+  - `$(UUID_B64)`: generate a random UUID v4 BASE64-encoded string.
+  - `$(UUID_STR_B64)`: generate a random UUID v4 string, then BASE64 encode it.
+  - `$(UUID_HEX)`: generate a random UUID v4 HEX representation.
+  - `$(HEADLESS_SVC_FQDN)`: headless service FQDN placeholder; the value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME)-headless.$(NAMESPACE).svc`, where 1ST_COMP_NAME is the first component that provides the `ClusterDefinition.spec.componentDefs[].service` attribute.
+  - `$(SVC_FQDN)`: service FQDN placeholder; the value pattern is `$(CLUSTER_NAME)-$(1ST_COMP_NAME).$(NAMESPACE).svc`, where 1ST_COMP_NAME is the first component that provides the `ClusterDefinition.spec.componentDefs[].service` attribute.
+  - `$(SVC_PORT_{PORT-NAME})`: the ServicePort's port value with the specified port name. For example, given a servicePort JSON struct `{"name": "mysql", "targetPort": "mysqlContainerPort", "port": 3306}`, `$(SVC_PORT_mysql)` in the connection credential value is 3306.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `topologies` ([]ClusterTopology, optional): Defines all possible topologies within the cluster.
+
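+For illustration, a hypothetical sketch of the deprecated `connectionCredential` template using the built-in placeholders listed above (the key names are examples, not a fixed schema):
+
+```yaml
+connectionCredential:
+  username: root                              # hypothetical key/value
+  password: "$(RANDOM_PASSWD)"                # random 8 characters
+  endpoint: "$(SVC_FQDN):$(SVC_PORT_mysql)"   # resolved from the first component's service
+```
+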
+### ClusterDefinitionStatus
+
+(Appears on: ClusterDefinition)
+
+ClusterDefinitionStatus defines the observed state of ClusterDefinition.
+
+**Fields:**
+
+- `observedGeneration` (int64, optional): Represents the most recent generation observed for this ClusterDefinition.
+- `phase` (Phase): Specifies the current phase of the ClusterDefinition. Valid values are `""` (empty), `Available`, and `Unavailable`. When `Available`, the ClusterDefinition is ready and can be referenced by related objects.
+- `message` (string, optional): Provides additional information about the current phase.
+- `topologies` (string, optional): The topologies supported by this ClusterDefinition.
+- `serviceRefs` (string, optional): The service references declared by this ClusterDefinition.
+
+### ClusterNetwork
+
+(Appears on: ClusterSpec)
+
+ClusterNetwork is deprecated since v0.9.
+
+**Fields:**
+
+- `hostNetworkAccessible` (bool, optional): Indicates whether the host network can be accessed. By default, this is set to false.
+- `publiclyAccessible` (bool, optional): Indicates whether the network is accessible to the public. By default, this is set to false.
+
+### ClusterObjectReference
+
+(Appears on: ComponentVarSelector, CredentialVarSelector, HostNetworkVarSelector, ServiceRefVarSelector, ServiceVarSelector)
+
+ClusterObjectReference provides the information needed to locate the referenced object within the same Cluster.
+
+**Fields:**
+
+- `compDef` (string, optional): Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition custom resource (CR) used by the component in which the referent object resides. If not specified, the component itself will be used.
+- `name` (string, optional): Name of the referent object.
+- `optional` (bool, optional): Specifies whether the object must be defined.
+- `multipleClusterObjectOption` (MultipleClusterObjectOption, optional): Defines the behavior when multiple component objects match the specified @CompDef. If not provided, an error will be raised when handling multiple matches.
+
+### ClusterPhase (`string` alias)
+
+(Appears on: ClusterStatus)
+
+ClusterPhase defines the phase of the Cluster within the `.status.phase` field.
+
+**Values:**
+
+- `"Abnormal"`: AbnormalClusterPhase represents that some components are in a `Failed` or `Abnormal` phase, indicating that the cluster is in a fragile state and troubleshooting is required.
+- `"Creating"`: CreatingClusterPhase represents that all components are in a `Creating` phase.
+- `"Deleting"`: DeletingClusterPhase indicates that the cluster is being deleted.
+- `"Failed"`: FailedClusterPhase represents that all components are in a `Failed` phase, indicating that the cluster is unavailable.
+- `"Running"`: RunningClusterPhase represents that all components are in a `Running` phase, indicating that the cluster is functioning properly.
+- `"Stopped"`: StoppedClusterPhase represents that all components are in a `Stopped` phase, indicating that the cluster has stopped and is not providing any functionality.
+- `"Stopping"`: StoppingClusterPhase represents that at least one component is in a `Stopping` phase, indicating that the cluster is in the process of stopping.
+- `"Updating"`: UpdatingClusterPhase represents that all components are in a `Creating`, `Running`, or `Updating` phase, and at least one component is in a `Creating` or `Updating` phase, indicating that the cluster is undergoing an update.
+
+### ClusterResources
+
+(Appears on: ClusterSpec)
+
+ClusterResources is deprecated since v0.9.
+
+**Fields:**
+
+- `cpu` (Kubernetes resource.Quantity, optional): Specifies the amount of CPU resource the Cluster needs. For more information, refer to: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+- `memory` (Kubernetes resource.Quantity, optional): Specifies the amount of memory resource the Cluster needs. For more information, refer to: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+### ClusterService
+
+(Appears on: ClusterSpec)
+
+ClusterService defines a service that is exposed externally, allowing entities outside the cluster, such as external applications or other Clusters, to access it. Another Cluster managed by the same KubeBlocks operator can resolve the address exposed by a ClusterService using the `serviceRef` field.
+
+When a Component needs to access another Cluster's ClusterService using the `serviceRef` field, it must also define the service type and version information in the `componentDefinition.spec.serviceRefDeclarations` section.
+
+**Fields:**
+
+- `Service` (Service): Members of `Service` are embedded into this type.
+- `shardingSelector` (string, optional): Extends the ServiceSpec.Selector by allowing the specification of a sharding name, which is defined in `cluster.spec.shardingSpecs[*].name`, to be used as a selector for the service. Note that this and the `componentSelector` are mutually exclusive and cannot be set simultaneously.
+- `componentSelector` (string, optional): Extends the ServiceSpec.Selector by allowing the specification of a component, to be used as a selector for the service. Note that this and the `shardingSelector` are mutually exclusive and cannot be set simultaneously.
+
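+A hypothetical sketch of a ClusterService selecting one Component (this assumes the embedded `Service` type carries a `name` and a core/v1 ServiceSpec under `spec`; all names are examples):
+
+```yaml
+services:
+  - name: mysql-external          # hypothetical Service name
+    componentSelector: mysql      # mutually exclusive with shardingSelector
+    spec:
+      type: LoadBalancer
+      ports:
+        - name: tcp-mysql
+          port: 3306
+          targetPort: 3306
+```
+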
+### ClusterSpec
+
+(Appears on: Cluster)
+
+ClusterSpec defines the desired state of Cluster.
+
+**Fields:**
+
+- `clusterDefinitionRef` (string, optional): Specifies the name of the ClusterDefinition to use when creating a Cluster.
+
+  This field enables users to create a Cluster based on a specific ClusterDefinition, which, in conjunction with the `topology` field, determines:
+
+  - The Components to be included in the Cluster.
+  - The sequences in which the Components are created, updated, and terminated.
+
+  This facilitates multiple-component management with a predefined ClusterDefinition.
+
+  Users with advanced requirements can bypass this general setting and specify more precise control over the composition of the Cluster by directly referencing specific ComponentDefinitions for each component within `componentSpecs[*].componentDef`.
+
+  If this field is not provided, each component must be explicitly defined in `componentSpecs[*].componentDef`.
+
+  Note: Once set, this field cannot be modified; it is immutable.
+- `clusterVersionRef` (string, optional): Refers to the ClusterVersion name.
+
+  Deprecated since v0.9; use ComponentVersion instead. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `topology` (string, optional): Specifies the name of the ClusterTopology to be used when creating the Cluster.
+
+  This field defines which set of Components, as outlined in the ClusterDefinition, will be used to construct the Cluster based on the named topology. The ClusterDefinition may list multiple topologies under `clusterdefinition.spec.topologies[*]`, each tailored to different use cases or environments.
+
+  If `topology` is not specified, the Cluster will use the default topology defined in the ClusterDefinition.
+
+  Note: Once set during Cluster creation, the `topology` field cannot be modified. It establishes the initial composition and structure of the Cluster and is intended for one-time configuration.
+- `terminationPolicy` (TerminationPolicyType): Specifies the behavior when a Cluster is deleted. It defines how resources, data, and backups associated with a Cluster are managed during termination. Choose a policy based on the desired level of resource cleanup and data preservation:
+
+  - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
+  - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. Warning: the Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
+  - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup that removes all persistent data.
+  - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments, to avoid irreversible data loss.
+
+  Warning: Choosing an inappropriate termination policy can result in data loss. The `WipeOut` policy is particularly risky in production environments due to its irreversible nature.
+- `shardingSpecs` ([]ShardingSpec, optional): Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations.
+
+  This field supports dynamic resharding by facilitating the addition or removal of shards through the `shards` field in ShardingSpec.
+
+  Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster.
+- `componentSpecs` ([]ClusterComponentSpec, optional): Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. This field allows for detailed configuration of each Component within the Cluster.
+
+  Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster.
+- `services` ([]ClusterService, optional): Defines a list of additional Services that are exposed by a Cluster. This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs`, to be exposed, alongside Services defined with ComponentService.
+
+  Services defined here can be referenced by other clusters using the ServiceRefClusterSelector.
+- `affinity` (Affinity, optional): Defines a set of node affinity scheduling rules for the Cluster's Pods. This field helps control the placement of Pods on nodes within the Cluster.
+
+  Deprecated since v0.10. Use the `schedulingPolicy` field instead.
+- `tolerations` ([]Kubernetes core/v1.Toleration, optional): An array that specifies tolerations attached to the Cluster's Pods, allowing them to be scheduled onto nodes with matching taints.
+
+  Deprecated since v0.10. Use the `schedulingPolicy` field instead.
+- `schedulingPolicy` (SchedulingPolicy, optional): Specifies the scheduling policy for the Cluster.
+- `runtimeClassName` (string, optional): Specifies the runtimeClassName for all Pods managed by this Cluster.
+- `backup` (ClusterBackup, optional): Specifies the backup configuration of the Cluster.
+- `tenancy` (TenancyType, optional): Describes how Pods are distributed across nodes.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `availabilityPolicy` (AvailabilityPolicyType, optional): Describes the availability policy, including zone, node, and none.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `replicas` (int32, optional): Specifies the replicas of the first componentSpec. If the replicas of the first componentSpec are specified, this value will be ignored.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `resources` (ClusterResources, optional): Specifies the resources of the first componentSpec. If the resources of the first componentSpec are specified, this value will be ignored.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `storage` (ClusterStorage, optional): Specifies the storage of the first componentSpec. If the storage of the first componentSpec is specified, this value will be ignored.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+- `network` (ClusterNetwork, optional): The configuration of the network.
+
+  Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+
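+Putting the non-deprecated fields together, a minimal, hypothetical Cluster might look like this (the definition, topology, and component names are examples; `replicas` on the component spec is assumed from ClusterComponentSpec):
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: Cluster
+metadata:
+  name: mycluster
+spec:
+  clusterDefinitionRef: mysql      # hypothetical ClusterDefinition name
+  topology: standalone             # hypothetical topology from that definition
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: mysql
+      replicas: 1
+```
+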
+### ClusterStatus
+
+(Appears on: Cluster)
+
+ClusterStatus defines the observed state of the Cluster.
+
+**Fields:**
+
+- `observedGeneration` (int64, optional): The most recent generation number of the Cluster object that has been observed by the controller.
+- `phase` (ClusterPhase, optional): The current phase of the Cluster, one of: `Creating`, `Running`, `Updating`, `Stopping`, `Stopped`, `Deleting`, `Failed`, `Abnormal`.
+- `message` (string, optional): Provides additional information about the current phase.
+- `components` (map[string]github.com/apecloud/kubeblocks/apis/apps/v1alpha1.ClusterComponentStatus, optional): Records the current status information of all Components within the Cluster.
+- `clusterDefGeneration` (int64, optional): Represents the generation number of the referenced ClusterDefinition.
+- `conditions` ([]Kubernetes meta/v1.Condition, optional): Represents a list of detailed status records for the Cluster object. Each condition in the list provides real-time information about a certain aspect of the Cluster object.
+
+  This field is crucial for administrators and developers to monitor and respond to changes within the Cluster. It provides a history of state transitions and a snapshot of the current state that can be used for automated logic or direct inspection.
+
+### ClusterStorage
+
+(Appears on: ClusterSpec)
+
+ClusterStorage is deprecated since v0.9.
+
+**Fields:**
+
+- `size` (Kubernetes resource.Quantity, optional): Specifies the amount of storage the Cluster needs. For more information, refer to: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+
+### ClusterSwitchPolicy
+
+(Appears on: ClusterComponentSpec)
+
+ClusterSwitchPolicy defines the switch policy for a Cluster.
+
+Deprecated since v0.9.
+
+**Fields:**
+
+- `type` (SwitchPolicyType, optional): Specifies the type of switch policy to be applied.
+
+### ClusterTopology
+
+(Appears on: ClusterDefinitionSpec)
+
+ClusterTopology represents the definition for a specific cluster topology.
+
+**Fields:**
+
+- `name` (string): The unique identifier for the cluster topology. Cannot be updated.
+- `components` ([]ClusterTopologyComponent): Specifies the components in the topology.
+- `orders` (ClusterTopologyOrders, optional): Specifies the sequence in which components within a cluster topology are started, stopped, and upgraded. This ordering is crucial for maintaining the correct dependencies and operational flow across components.
+- `default` (bool, optional): Indicates whether this topology serves as the default configuration. When set to true, this topology is automatically used unless another is explicitly specified.
+
+### ClusterTopologyComponent
+
+(Appears on: ClusterTopology)
+
+ClusterTopologyComponent defines a Component within a ClusterTopology.
+
+**Fields:**
+
+- `name` (string): Defines the unique identifier of the component within the cluster topology. It follows IANA Service naming rules and is used as part of the Service's DNS name. The name must start with a lowercase letter, can contain lowercase letters, numbers, and hyphens, and must end with a lowercase letter or number. Cannot be updated once set.
+- `compDef` (string): Specifies the exact name, name prefix, or regular expression pattern for matching the name of the ComponentDefinition custom resource (CR) that defines the Component's characteristics and behavior.
+
+  The system selects the ComponentDefinition CR with the latest version that matches the pattern. This approach allows:
+
+  1. Precise selection, by providing the exact name of a ComponentDefinition CR.
+  2. Flexible and automatic selection of the most up-to-date ComponentDefinition CR, by specifying a name prefix or regular expression pattern.
+
+  Once set, this field cannot be updated.
+
+### ClusterTopologyOrders
+
+(Appears on: ClusterTopology)
+
+ClusterTopologyOrders manages the lifecycle of components within a cluster by defining their provisioning, terminating, and updating sequences. It organizes components into stages or groups, where each group indicates a set of components that can be managed concurrently. These groups are processed sequentially, allowing precise control based on component dependencies and requirements. A short sketch follows the field list.
+
+**Fields:**
+
+- `provision` ([]string, optional): Specifies the order for creating and initializing components. This is designed for components that depend on one another. Components that can be provisioned independently or have no dependencies can be listed together in the same stage, separated by commas.
+- `terminate` ([]string, optional): Outlines the order for stopping and deleting components. This sequence is designed for components that require a graceful shutdown or have interdependencies. Components that can be terminated independently or have no dependencies can be listed together in the same stage, separated by commas.
+- `update` ([]string, optional): Determines the order for updating components' specifications, such as image upgrades or resource scaling. This sequence is designed for components that have dependencies or require specific update procedures. Components that can be updated independently or have no dependencies can be listed together in the same stage, separated by commas.
+
+### CmdExecutorConfig
+
+(Appears on: PostStartAction, SwitchoverAction, SystemAccountSpec)
+
+CmdExecutorConfig specifies how to perform creation and deletion statements.
+
+Deprecated since v0.8.
+
+**Fields:**
+
+- `CommandExecutorEnvItem` (CommandExecutorEnvItem): Members of `CommandExecutorEnvItem` are embedded into this type.
+- `CommandExecutorItem` (CommandExecutorItem): Members of `CommandExecutorItem` are embedded into this type.
+
+### CommandExecutorEnvItem
+
+(Appears on: CmdExecutorConfig)
+
+CommandExecutorEnvItem is deprecated since v0.8.
+
+**Fields:**
+
+- `image` (string): Specifies the image used to execute the command.
+- `env` ([]Kubernetes core/v1.EnvVar, optional): A list of environment variables that will be injected into the command execution context.
+
+### CommandExecutorItem
+
+(Appears on: CmdExecutorConfig)
+
+CommandExecutorItem is deprecated since v0.8.
+
+**Fields:**
+
+- `command` ([]string): The command to be executed.
+- `args` ([]string, optional): Additional parameters used in the execution of the command.
+
+### ComponentConfigSpec
+
+(Appears on: ClusterComponentDefinition, ComponentDefinitionSpec, ConfigurationItemDetail)
+
+**Fields:**
+
+- `ComponentTemplateSpec` (ComponentTemplateSpec): Members of `ComponentTemplateSpec` are embedded into this type.
+- `keys` ([]string, optional): Specifies the configuration files within the ConfigMap that support dynamic updates.
+
+  A configuration template (provided in the form of a ConfigMap) may contain templates for multiple configuration files. Each configuration file corresponds to a key in the ConfigMap. Some of these configuration files may support dynamic modification and reloading without requiring a pod restart.
+
+  If empty or omitted, all configuration files in the ConfigMap are assumed to support dynamic updates, and ConfigConstraint applies to all keys.
+- `legacyRenderedConfigSpec` (LegacyRenderedTemplateSpec, optional): Specifies the secondary rendered config spec for pod-specific customization.
+
+  The template is rendered inside the pod (by the "config-manager" sidecar container) and merged with the main template's render result to generate the final configuration file.
+
+  This field is intended to handle scenarios where different pods within the same Component have varying configurations. It allows for pod-specific customization of the configuration.
+
+  Note: This field will be deprecated in future versions, and the functionality will be moved to `cluster.spec.componentSpecs[*].instances[*]`.
+- `constraintRef` (string, optional): Specifies the name of the referenced configuration constraints object.
+- `asEnvFrom` ([]string, optional): Specifies the containers to inject the ConfigMap parameters into as environment variables.
+
+  This is useful when application images accept parameters through environment variables and generate the final configuration file in the startup script based on these variables.
+
+  This field allows users to specify a list of container names, and KubeBlocks will inject the environment variables converted from the ConfigMap into these designated containers. This provides a flexible way to pass the configuration items from the ConfigMap to the container without modifying the image.
+
+  Deprecated: `asEnvFrom` has been deprecated since 0.9.0 and will be removed in 0.10.0. Use `injectEnvTo` instead.
+- `injectEnvTo` ([]string, optional): Specifies the containers to inject the ConfigMap parameters into as environment variables.
+
+  This is useful when application images accept parameters through environment variables and generate the final configuration file in the startup script based on these variables.
+
+  This field allows users to specify a list of container names, and KubeBlocks will inject the environment variables converted from the ConfigMap into these designated containers. This provides a flexible way to pass the configuration items from the ConfigMap to the container without modifying the image.
+- `reRenderResourceTypes` ([]RerenderResourceType, optional): Specifies whether the configuration needs to be re-rendered after v-scale or h-scale operations to reflect changes.
+
+  In some scenarios, the configuration may need to be updated to reflect the changes in resource allocation or cluster topology. Examples:
+
+  - Redis: adjust maxmemory after a v-scale operation.
+  - MySQL: increase max connections after a v-scale operation.
+  - Zookeeper: update zoo.cfg with new node addresses after an h-scale operation.
+- `asSecret` (bool, optional): Whether to store the final rendered parameters as a secret.
+
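+A hypothetical sketch of a `configs` entry (the embedded ComponentTemplateSpec field names such as `templateRef` and `volumeName` are assumptions for illustration):
+
+```yaml
+configs:
+  - name: mysql-config                  # assumed ComponentTemplateSpec field
+    templateRef: mysql-config-template  # assumed: ConfigMap holding the template
+    volumeName: config                  # assumed: volume for the rendered files
+    keys:
+      - my.cnf                          # only this key supports dynamic updates
+    constraintRef: mysql-config-constraints
+    injectEnvTo:
+      - mysql                           # container receiving parameters as env vars
+```
+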
+### ComponentDefRef
+
+(Appears on: ClusterComponentDefinition)
+
+ComponentDefRef is used to select the component and its fields to be referenced.
+
+Deprecated since v0.8.
+
+**Fields:**
+
+- `componentDefName` (string): The name of the componentDef to be selected.
+- `failurePolicy` (FailurePolicyType, optional): Defines the policy to be followed in case of a failure in finding the component.
+- `componentRefEnv` ([]ComponentRefEnv, optional): The values that are to be injected as environment variables into each component.
+
+### ComponentDefinitionSpec
+
+(Appears on: ComponentDefinition)
+
+**Fields:**
+
+- `provider` (string, optional): Specifies the name of the Component provider, typically the vendor or developer name. It identifies the entity responsible for creating and maintaining the Component.
+
+  When specifying the provider name, consider the following guidelines:
+
+  - Keep the name concise and relevant to the Component.
+  - Use a consistent naming convention across Components from the same provider.
+  - Avoid using trademarked or copyrighted names without proper permission.
+- `description` (string, optional): Provides a brief and concise explanation of the Component's purpose, functionality, and any relevant details. It serves as a quick reference for users to understand the Component's role and characteristics.
+- `serviceKind` (string, optional): Defines the type of well-known service protocol that the Component provides. It specifies the standard or widely recognized protocol used by the Component to offer its Services.
+
+  The `serviceKind` field allows users to quickly identify the type of Service provided by the Component based on common protocols or service types. This information helps in understanding the compatibility, interoperability, and usage of the Component within a system.
+
+  Some examples of well-known service protocols include:
+
+  - "MySQL": Indicates that the Component provides a MySQL database service.
+  - "PostgreSQL": Indicates that the Component offers a PostgreSQL database service.
+  - "Redis": Signifies that the Component functions as a Redis key-value store.
+  - "ETCD": Denotes that the Component serves as an ETCD distributed key-value store.
+
+  The `serviceKind` value is case-insensitive, allowing for flexibility in specifying the protocol name.
+
+  When specifying the `serviceKind`, consider the following guidelines:
+
+  - Use well-established and widely recognized protocol names or service types.
+  - Ensure that the `serviceKind` accurately represents the primary service type offered by the Component.
+  - If the Component provides multiple services, choose the most prominent or commonly used protocol.
+  - Limit the `serviceKind` to a maximum of 32 characters for conciseness and readability.
+
+  Note: The `serviceKind` field is optional and can be left empty if the Component does not fit into a well-known service category or if the protocol is not widely recognized. It is primarily used to convey information about the Component's service type to users and facilitate discovery and integration.
+
+  The `serviceKind` field is immutable and cannot be updated.
+- `serviceVersion` (string, optional): Specifies the version of the Service provided by the Component. It follows the syntax and semantics of the "Semantic Versioning" specification (http://semver.org/).
+
+  The Semantic Versioning specification defines a version number format of X.Y.Z (MAJOR.MINOR.PATCH), where:
+
+  - X represents the major version and indicates incompatible API changes.
+  - Y represents the minor version and indicates added functionality in a backward-compatible manner.
+  - Z represents the patch version and indicates backward-compatible bug fixes.
+
+  Additional labels for pre-release and build metadata are available as extensions to the X.Y.Z format:
+
+  - Use pre-release labels (e.g., -alpha, -beta) for versions that are not yet stable or ready for production use.
+  - Use build metadata (e.g., +build.1) for additional version information if needed.
+
+  Examples of valid serviceVersion values: "1.0.0", "2.3.1", "3.0.0-alpha.1", "4.5.2+build.1".
+
+  The `serviceVersion` field is immutable and cannot be updated.
+- `runtime` (Kubernetes core/v1.PodSpec): Specifies the PodSpec template used in the Component. It includes the following elements: init containers; containers (image, commands, args, envs, mounts, ports, security context, probes, lifecycle); and volumes.
+
+  This field is intended to define static settings that remain consistent across all instantiated Components. Dynamic settings such as CPU and memory resource limits, as well as scheduling settings (affinity, toleration, priority), may vary among different instantiated Components. They should be specified in `cluster.spec.componentSpecs` (ClusterComponentSpec).
+
+  Specific instances of a Component may override settings defined here, such as using a different container image or modifying environment variable values. These instance-specific overrides can be specified in `cluster.spec.componentSpecs[*].instances`.
+
+  This field is immutable and cannot be updated once set.
+- `monitor` (MonitorConfig, optional): Deprecated since v0.9. The monitoring configuration provided by the provider.
+- `exporter` (Exporter, optional): Defines the built-in metrics exporter container.
+- `vars` ([]EnvVar, optional): Defines variables that are determined after Cluster instantiation and reflect dynamic or runtime attributes of instantiated Clusters. These variables serve as placeholders for setting environment variables in Pods and Actions, or for rendering configuration and script templates before actual values are finalized (as sketched below).
+
+  If used as environment variables, these variables are placed in front of the environment variables declared in the Pod.
+
+  Variable values can be sourced from:
+
+  - ConfigMap: Select and extract a value from a specific key within a ConfigMap.
+  - Secret: Select and extract a value from a specific key within a Secret.
+  - HostNetwork: Retrieves values (including ports) from host-network resources.
+  - Service: Retrieves values (including address, port, NodePort) from a selected Service. Intended to obtain the address of a ComponentService within the same Cluster.
+  - Credential: Retrieves account name and password from a SystemAccount variable.
+  - ServiceRef: Retrieves address, port, account name, and password from a selected ServiceRefDeclaration. Designed to obtain the address bound to a ServiceRef, such as a ClusterService or ComponentService of another cluster, or an external service.
+  - Component: Retrieves values from a selected Component, including replicas and instance name list.
+
+  This field is immutable.
+
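+A hypothetical sketch of a Service-sourced var (the `serviceVarRef` sub-fields shown here are assumptions for illustration):
+
+```yaml
+vars:
+  - name: MYSQL_HOST           # placeholder usable in Pods, Actions, and templates
+    valueFrom:
+      serviceVarRef:           # assumed selector shape
+        compDef: mysql         # hypothetical ComponentDefinition name
+        name: default          # hypothetical ComponentService name
+        host: Required
+```
+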
+- `volumes` ([]ComponentVolume, optional): Defines the volumes used by the Component and some static attributes of the volumes. After defining the volumes here, users can reference them in the `cluster.spec.componentSpecs[*].volumeClaimTemplates` field to configure dynamic properties such as volume capacity and storage class.
+
+  This field allows you to specify the following:
+
+  - Snapshot behavior: Determines whether a snapshot of the volume should be taken when performing a snapshot backup of the Component.
+  - Disk high watermark: Sets the high watermark for the volume's disk usage. When the disk usage reaches the specified threshold, it triggers an alert or action.
+
+  By configuring these volume behaviors, you can control how the volumes are managed and monitored within the Component.
+
+  This field is immutable.
+- `hostNetwork` (HostNetwork, optional): Specifies the host network configuration for the Component.
+
+  When the `hostNetwork` option is enabled, the Pods share the host's network namespace and can directly access the host's network interfaces. This means that if multiple Pods need to use the same port, they cannot run on the same host simultaneously due to port conflicts.
+
+  The DNSPolicy field in the Pod spec determines how containers within the Pod perform DNS resolution. When using hostNetwork, the operator will set the DNSPolicy to 'ClusterFirstWithHostNet'. With this policy, DNS queries will first go through the K8s cluster's DNS service. If the query fails, it will fall back to the host's DNS settings.
+
+  If set, the DNS policy will be automatically set to "ClusterFirstWithHostNet".
+
+  This field is immutable.
+- `services` ([]ComponentService, optional): Defines additional Services to expose the Component's endpoints.
+
+  A default headless Service, named `{cluster.name}-{component.name}-headless`, is automatically created for internal Cluster communication.
+
+  This field enables customization of additional Services to expose the Component's endpoints to other Components within the same or different Clusters, and to external applications. Each Service entry in this list can include properties such as ports, type, and selectors.
+
+  - For intra-Cluster access, Components can reference Services using variables declared in `componentDefinition.spec.vars[*].valueFrom.serviceVarRef`.
+  - For inter-Cluster access, reference Services using variables declared in `componentDefinition.spec.vars[*].valueFrom.serviceRefVarRef`, and bind Services at Cluster creation time with `clusterComponentSpec.ServiceRef[*].clusterServiceSelector`.
+
+  This field is immutable.
+
+- `configs` ([]ComponentConfigSpec, optional): Specifies the configuration file templates and volume mount parameters used by the Component. It also includes descriptions of the parameters in the ConfigMaps, such as value range limitations.
+
+  This field specifies a list of templates that will be rendered into Component containers' configuration files. Each template is represented as a ConfigMap and may contain multiple configuration files, with each file being a key in the ConfigMap.
+
+  The rendered configuration files will be mounted into the Component's containers according to the specified volume mount parameters.
+
+  This field is immutable.
+- `logConfigs` ([]LogConfig, optional): Defines the types of logs generated by instances of the Component and their corresponding file paths. These logs can be collected for further analysis and monitoring.
+
+  The `logConfigs` field is an optional list of LogConfig objects, where each object represents a specific log type and its configuration. It allows you to specify multiple log types and their respective file paths for the Component.
+
+  Example:
+
+  ```yaml
+  logConfigs:
+    - filePathPattern: /data/mysql/log/mysqld-error.log
+      name: error
+    - filePathPattern: /data/mysql/log/mysqld.log
+      name: general
+    - filePathPattern: /data/mysql/log/mysqld-slowquery.log
+      name: slow
+  ```
+
+  This field is immutable.
+- `scripts` ([]ComponentTemplateSpec, optional): Specifies groups of scripts, each provided via a ConfigMap, to be mounted as volumes in the container. These scripts can be executed during container startup or via specific actions.
+
+  Each script group is encapsulated in a ComponentTemplateSpec that includes:
+
+  - The ConfigMap containing the scripts.
+  - The mount point where the scripts will be mounted inside the container.
+
+  This field is immutable.
+- `policyRules` ([]Kubernetes rbac/v1.PolicyRule, optional): Defines the namespaced policy rules required by the Component.
+
+  The `policyRules` field is an array of `rbacv1.PolicyRule` objects that define the policy rules needed by the Component to operate within a namespace. These policy rules determine the permissions and verbs the Component is allowed to perform on Kubernetes resources within the namespace.
+
+  The purpose of this field is to automatically generate the necessary RBAC roles for the Component based on the specified policy rules. This ensures that the Pods in the Component have the appropriate permissions to function.
+
+  Note: This field is currently non-functional and is reserved for future implementation.
+
+  This field is immutable.
+- `labels` (map[string]string, optional): Specifies static labels that will be patched to all Kubernetes resources created for the Component.
+
+  Note: If a label key in the `labels` field conflicts with any system labels or user-specified labels, it will be silently ignored to avoid overriding higher-priority labels.
+
+  This field is immutable.
+- `annotations` (map[string]string, optional): Specifies static annotations that will be patched to all Kubernetes resources created for the Component.
+
+  Note: If an annotation key in the `annotations` field conflicts with any system annotations or user-specified annotations, it will be silently ignored to avoid overriding higher-priority annotations.
+
+  This field is immutable.
+
+- `replicasLimit` (ReplicasLimit, optional): Defines the upper limit of the number of replicas supported by the Component.
+
+  It defines the maximum number of replicas that can be created for the Component. This field allows you to set a limit on the scalability of the Component, preventing it from exceeding a certain number of replicas.
+
+  This field is immutable.
+- `systemAccounts` ([]SystemAccount, optional): An array of `SystemAccount` objects that define the system accounts needed for the management operations of the Component.
+
+  Each `SystemAccount` includes:
+
+  - The account name.
+  - The SQL statement template used to create the system account.
+  - The password source: either generated based on certain rules or retrieved from a Secret.
+
+  Use cases for system accounts typically involve tasks like system initialization, backups, monitoring, health checks, replication, and other system-level operations.
+
+  System accounts are distinct from user accounts, although both are database accounts.
+
+  - System accounts: Created during Cluster setup by the KubeBlocks operator, these accounts have higher privileges for system management and are fully managed through a declarative API by the operator.
+  - User accounts: Managed by users or administrators. User account permissions should follow the principle of least privilege, granting only the necessary access rights to complete their required tasks.
+
+  This field is immutable.
+- `updateStrategy` (UpdateStrategy, optional): Specifies the concurrency strategy for updating multiple instances of the Component. Available strategies:
+
+  - `Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready before updating the next.
+  - `Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability during the update.
+  - `BestEffortParallel`: Updates replicas concurrently, with a limit on simultaneous updates to ensure a minimum number of operational replicas for maintaining quorum. For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps at least 3 operational for quorum.
+
+  This field is immutable and defaults to `Serial`.
+
+- `podManagementPolicy` (Kubernetes apps/v1.PodManagementPolicyType, optional): Controls how the InstanceSet creates pods during initial scale-up, replaces pods on nodes, and scales down:
+
+  - `OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, and so on). The controller waits until each pod is ready before continuing. Pods are removed in reverse order when scaling down.
+  - `Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once when scaling down.
+- `roles` ([]ReplicaRole, optional): Enumerates all possible roles assigned to each replica of the Component, influencing its behavior (see the sketch below).
+
+  A replica can have zero to multiple roles. The KubeBlocks operator determines the roles of each replica by invoking the `lifecycleActions.roleProbe` method. This action returns a list of roles for each replica, and the returned roles must be predefined in the `roles` field.
+
+  The roles assigned to a replica can influence various aspects of the Component's behavior, such as:
+
+  - Service selection: The Component's exposed Services may target replicas based on their roles using `roleSelector`.
+  - Update order: The roles can determine the order in which replicas are updated during a Component update. For instance, replicas with a "follower" role can be updated first, while the replica with the "leader" role is updated last. This helps minimize the number of leader changes during the update process.
+
+  This field is immutable.
+
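+A hypothetical sketch of roles for a leader-follower Component (this assumes ReplicaRole exposes `name`, `serviceable`, `writable`, and `votable` flags):
+
+```yaml
+roles:
+  - name: leader
+    serviceable: true    # can serve requests
+    writable: true
+    votable: true
+  - name: follower
+    serviceable: true
+    writable: false
+    votable: true
+```
+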
+- `roleArbitrator` (RoleArbitrator, optional): Deprecated since v0.9. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.
+
+  This field is immutable.
+- `lifecycleActions` (ComponentLifecycleActions, optional): Defines a set of hooks and procedures that customize the behavior of a Component throughout its lifecycle. Actions are triggered at specific lifecycle stages:
+
+  - `postProvision`: Defines the hook to be executed after the creation of a Component, with `preCondition` specifying when the action should be fired relative to the Component's lifecycle stages: `Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`.
+  - `preTerminate`: Defines the hook to be executed before terminating a Component.
+  - `roleProbe`: Defines the procedure which is invoked regularly to assess the role of replicas.
+  - `switchover`: Defines the procedure for a controlled transition of leadership from the current leader to a new replica. This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, such as before planned maintenance or upgrades on the current leader node.
+  - `memberJoin`: Defines the procedure to add a new replica to the replication group.
+  - `memberLeave`: Defines the method to remove a replica from the replication group.
+  - `readOnly`: Defines the procedure to switch a replica into the read-only state.
+  - `readWrite`: Defines the procedure to transition a replica from the read-only state back to the read-write state.
+  - `dataDump`: Defines the procedure to export the data from a replica.
+  - `dataLoad`: Defines the procedure to import data into a replica.
+  - `reconfigure`: Defines the procedure that updates a replica with a new configuration file.
+  - `accountProvision`: Defines the procedure to generate a new database account.
+
+  This field is immutable.
+- `serviceRefDeclarations` ([]ServiceRefDeclaration, optional): Lists external service dependencies of the Component, including services from other Clusters or outside the K8s environment.
+
+  This field is immutable.
+- `minReadySeconds` (int32, optional): The minimum duration in seconds that a new Pod should remain in the ready state without any of its containers crashing to be considered available. This ensures the Pod's stability and readiness to serve requests.
+
+  A default value of 0 seconds means the Pod is considered available as soon as it enters the ready state.
+
+### ComponentDefinitionStatus
+
+(Appears on: ComponentDefinition)
+
+ComponentDefinitionStatus defines the observed state of ComponentDefinition.
+
+**Fields:**
+
+- `observedGeneration` (int64, optional): Refers to the most recent generation that has been observed for the ComponentDefinition.
+- `phase` (Phase, optional): Represents the current status of the ComponentDefinition. Valid values include `""` (empty), `Available`, and `Unavailable`. When the status is `Available`, the ComponentDefinition is ready and can be utilized by related objects.
+- `message` (string, optional): Provides additional information about the current phase.
+
+ComponentLifecycleActions + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ComponentLifecycleActions defines a collection of Actions for customizing the behavior of a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`postProvision`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Specifies the hook to be executed after a component’s creation. +

+ +

+By setting `postProvision.customHandler.preCondition`, you can determine the specific lifecycle stage +at which the action should trigger: `Immediately`, `RuntimeReady`, `ComponentReady`, and `ClusterReady`. +with `ComponentReady` being the default. +

+ +

+The PostProvision Action is intended to run only once. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_CLUSTER_POD_IP_LIST: Comma-separated list of the cluster’s pod IP addresses (e.g., “podIp1,podIp2”).
+  • KB_CLUSTER_POD_NAME_LIST: Comma-separated list of the cluster’s pod names (e.g., “pod1,pod2”).
+  • KB_CLUSTER_POD_HOST_NAME_LIST: Comma-separated list of host names, each corresponding to a pod in
    KB_CLUSTER_POD_NAME_LIST (e.g., “hostName1,hostName2”).
+  • KB_CLUSTER_POD_HOST_IP_LIST: Comma-separated list of host IP addresses, each corresponding to a pod in
    KB_CLUSTER_POD_NAME_LIST (e.g., “hostIp1,hostIp2”).
+  • KB_CLUSTER_COMPONENT_POD_NAME_LIST: Comma-separated list of all pod names within the component
    (e.g., “pod1,pod2”).
+  • KB_CLUSTER_COMPONENT_POD_IP_LIST: Comma-separated list of pod IP addresses, matching the order of pods in
    KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “podIp1,podIp2”).
+  • KB_CLUSTER_COMPONENT_POD_HOST_NAME_LIST: Comma-separated list of host names for each pod, matching the
    order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostName1,hostName2”).
+  • KB_CLUSTER_COMPONENT_POD_HOST_IP_LIST: Comma-separated list of host IP addresses for each pod, matching
    the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostIp1,hostIp2”).
+  • KB_CLUSTER_COMPONENT_LIST: Comma-separated list of all cluster components (e.g., “comp1,comp2”).
+  • KB_CLUSTER_COMPONENT_DELETING_LIST: Comma-separated list of components that are currently being deleted
    (e.g., “comp1,comp2”).
+  • KB_CLUSTER_COMPONENT_UNDELETED_LIST: Comma-separated list of components that are not being deleted
    (e.g., “comp1,comp2”).

+Note: This field is immutable once it has been set. +
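+
+For illustration, a minimal sketch of a `postProvision` action; only `customHandler.preCondition` is
documented above, and the `exec` layout is an assumption consistent with the command examples elsewhere
in this reference:
+
+lifecycleActions:
+  postProvision:
+    customHandler:
+      # trigger stage; ComponentReady is the default
+      preCondition: ComponentReady
+      exec:
+        command:
+        - bash
+        - -c
+        # hypothetical one-time step using the documented env vars
+        - echo "components in cluster: $KB_CLUSTER_COMPONENT_LIST"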

+ +
+ +`preTerminate`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Specifies the hook to be executed prior to terminating a component. +

+ +

+The PreTerminate Action is intended to run only once. +

+ +

+This action is executed immediately when a scale-down operation for the Component is initiated. +The actual termination and cleanup of the Component and its associated resources will not proceed +until the PreTerminate action has completed successfully. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_CLUSTER_POD_IP_LIST: Comma-separated list of the cluster’s pod IP addresses (e.g., “podIp1,podIp2”).
+  • KB_CLUSTER_POD_NAME_LIST: Comma-separated list of the cluster’s pod names (e.g., “pod1,pod2”).
+  • KB_CLUSTER_POD_HOST_NAME_LIST: Comma-separated list of host names, each corresponding to a pod in
    KB_CLUSTER_POD_NAME_LIST (e.g., “hostName1,hostName2”).
+  • KB_CLUSTER_POD_HOST_IP_LIST: Comma-separated list of host IP addresses, each corresponding to a pod in
    KB_CLUSTER_POD_NAME_LIST (e.g., “hostIp1,hostIp2”).
+  • KB_CLUSTER_COMPONENT_POD_NAME_LIST: Comma-separated list of all pod names within the component
    (e.g., “pod1,pod2”).
+  • KB_CLUSTER_COMPONENT_POD_IP_LIST: Comma-separated list of pod IP addresses, matching the order of pods in
    KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “podIp1,podIp2”).
+  • KB_CLUSTER_COMPONENT_POD_HOST_NAME_LIST: Comma-separated list of host names for each pod, matching the
    order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostName1,hostName2”).
+  • KB_CLUSTER_COMPONENT_POD_HOST_IP_LIST: Comma-separated list of host IP addresses for each pod, matching
    the order of pods in KB_CLUSTER_COMPONENT_POD_NAME_LIST (e.g., “hostIp1,hostIp2”).
+  • KB_CLUSTER_COMPONENT_LIST: Comma-separated list of all cluster components (e.g., “comp1,comp2”).
+  • KB_CLUSTER_COMPONENT_DELETING_LIST: Comma-separated list of components that are currently being deleted
    (e.g., “comp1,comp2”).
+  • KB_CLUSTER_COMPONENT_UNDELETED_LIST: Comma-separated list of components that are not being deleted
    (e.g., “comp1,comp2”).
+  • KB_CLUSTER_COMPONENT_IS_SCALING_IN: Indicates whether the component is currently scaling in.
    If this variable is present and set to “true”, the component is undergoing a scale-in operation.
    During scale-in, data rebalancing is necessary to maintain cluster integrity; contrast this with a cluster
    deletion scenario, where data rebalancing is not required as the entire cluster is being cleaned up.

+Note: This field is immutable once it has been set. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Defines the procedure which is invoked regularly to assess the role of replicas. +

+ +

+This action is periodically triggered by Lorry at the specified interval to determine the role of each replica. +Upon successful execution, the action’s output designates the role of the replica, +which should match one of the predefined role names within `componentDefinition.spec.roles`. +The output is then compared with the previous successful execution result. +If a role change is detected, an event is generated to inform the controller, +which initiates an update of the replica’s role. +

+ +

+Defining a RoleProbe Action for a Component is required if roles are defined for the Component. +It ensures replicas are correctly labeled with their respective roles. +Without this, services that rely on roleSelectors might improperly direct traffic to wrong replicas. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_POD_FQDN: The FQDN of the Pod whose role is being assessed.
+  • KB_SERVICE_PORT: The port used by the database service.
+  • KB_SERVICE_USER: The username with the necessary permissions to interact with the database service.
+  • KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service.

+Expected output of this action: +- On Success: The determined role of the replica, which must align with one of the roles specified + in the component definition. +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +
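+
+A minimal sketch of a `roleProbe`; the probe timing fields are assumptions, and the command must print,
on success, a role name defined in `componentDefinition.spec.roles`:
+
+lifecycleActions:
+  roleProbe:
+    periodSeconds: 2
+    timeoutSeconds: 1
+    customHandler:
+      exec:
+        command:
+        - bash
+        - -c
+        # hypothetical query; the output designates the replica’s role
+        - mysql -h $KB_POD_FQDN -P $KB_SERVICE_PORT -u $KB_SERVICE_USER -p$KB_SERVICE_PASSWORD -N -s -e "SELECT CURRENT_ROLE()"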

+ +
+ +`switchover`
+ + +ComponentSwitchover + + + +
+ +(Optional) + +

+Defines the procedure for a controlled transition of leadership from the current leader to a new replica. +This approach aims to minimize downtime and maintain availability in systems with a leader-follower topology, +during events such as planned maintenance or when performing stop, shutdown, restart, or upgrade operations +involving the current leader node. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_SWITCHOVER_CANDIDATE_NAME: The name of the pod for the new leader candidate; may be empty if not specified.
+  • KB_SWITCHOVER_CANDIDATE_FQDN: The FQDN of the new leader candidate’s pod; may be empty if not specified.
+  • KB_LEADER_POD_IP: The IP address of the current leader’s pod prior to the switchover.
+  • KB_LEADER_POD_NAME: The name of the current leader’s pod prior to the switchover.
+  • KB_LEADER_POD_FQDN: The FQDN of the current leader’s pod prior to the switchover.

+The environment variables with the following prefixes are deprecated and will be removed in future releases:
+
+  • KB_REPLICATION_PRIMARYPOD
+  • KB_CONSENSUS_LEADERPOD

+Note: This field is immutable once it has been set. +
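+
+A sketch of `switchover` with both variants (only Action.Exec is supported, per the fields above);
the `switchover` CLI named here is a hypothetical engine-specific tool:
+
+lifecycleActions:
+  switchover:
+    withCandidate:
+      exec:
+        command:
+        - bash
+        - -c
+        # promote the explicitly named candidate
+        - switchover --from "$KB_LEADER_POD_FQDN" --to "$KB_SWITCHOVER_CANDIDATE_FQDN"
+    withoutCandidate:
+      exec:
+        command:
+        - bash
+        - -c
+        # let the engine pick the most up-to-date replica
+        - switchover --from "$KB_LEADER_POD_FQDN"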

+ +
+ +`memberJoin`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to add a new replica to the replication group. +

+ +

+This action is initiated after a replica pod becomes ready. +

+ +

+The role of the replica (e.g., primary, secondary) will be determined and assigned as part of the action command +implementation, or automatically by the database kernel or a sidecar utility like Patroni that implements +a consensus algorithm. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_SERVICE_PORT: The port used by the database service.
+  • KB_SERVICE_USER: The username with the necessary permissions to interact with the database service.
+  • KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service.
+  • KB_PRIMARY_POD_FQDN: The FQDN of the primary Pod within the replication group.
+  • KB_MEMBER_ADDRESSES: A comma-separated list of Pod addresses for all replicas in the group.
+  • KB_NEW_MEMBER_POD_NAME: The pod name of the replica being added to the group.
+  • KB_NEW_MEMBER_POD_IP: The IP address of the replica being added to the group.

+Expected action output: +- On Failure: An error message detailing the reason for any failure encountered +during the addition of the new member. +

+ +

+For example, to add a new OBServer to an OceanBase Cluster in ‘zone1’, the following command may be used: +

+
+
+command:
+- bash
+- -c
+- |
+   ADDRESS=${KB_MEMBER_ADDRESSES%%,*}
+   HOST=$(echo $ADDRESS | cut -d ':' -f 1)
+   PORT=$(echo $ADDRESS | cut -d ':' -f 2)
+   CLIENT="mysql -u $KB_SERVICE_USER -p$KB_SERVICE_PASSWORD -P $PORT -h $HOST -e"
+   $CLIENT "ALTER SYSTEM ADD SERVER '$KB_NEW_MEMBER_POD_IP:$KB_SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`memberLeave`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to remove a replica from the replication group. +

+ +

+This action is initiated before removing a replica from the group.
The operator will wait for MemberLeave to complete successfully before releasing the replica and cleaning up
related Kubernetes resources.

+ +

+The process typically includes updating configurations and informing other group members about the removal. +Data migration is generally not part of this action and should be handled separately if needed. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_SERVICE_PORT: The port used by the database service.
+  • KB_SERVICE_USER: The username with the necessary permissions to interact with the database service.
+  • KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service.
+  • KB_PRIMARY_POD_FQDN: The FQDN of the primary Pod within the replication group.
+  • KB_MEMBER_ADDRESSES: A comma-separated list of Pod addresses for all replicas in the group.
+  • KB_LEAVE_MEMBER_POD_NAME: The pod name of the replica being removed from the group.
+  • KB_LEAVE_MEMBER_POD_IP: The IP address of the replica being removed from the group.

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+For example, to remove an OBServer from an OceanBase Cluster in ‘zone1’, the following command can be executed: +

+
+
+command:
+- bash
+- -c
+- |
+   ADDRESS=${KB_MEMBER_ADDRESSES%%,*}
+   HOST=$(echo $ADDRESS | cut -d ':' -f 1)
+   PORT=$(echo $ADDRESS | cut -d ':' -f 2)
+   CLIENT="mysql -u $KB_SERVICE_USER -p$KB_SERVICE_PASSWORD -P $PORT -h $HOST -e"
+   $CLIENT "ALTER SYSTEM DELETE SERVER '$KB_LEAVE_MEMBER_POD_IP:$KB_SERVICE_PORT' ZONE 'zone1'"
+
+
+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readonly`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to switch a replica into the read-only state. +

+ +

+Use Case: +This action is invoked when the database’s volume capacity nears its upper limit and space is about to be exhausted. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_POD_FQDN: The FQDN of the replica pod whose role is being checked.
+  • KB_SERVICE_PORT: The port used by the database service.
+  • KB_SERVICE_USER: The username with the necessary permissions to interact with the database service.
+  • KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service.

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`readwrite`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to transition a replica from the read-only state back to the read-write state. +

+ +

+Use Case: +This action is used to bring back a replica that was previously in a read-only state, +which restricted write operations, to its normal operational state where it can handle +both read and write operations. +

+ +

+The container executing this action has access to the following environment variables:
+
+  • KB_POD_FQDN: The FQDN of the replica pod whose role is being checked.
+  • KB_SERVICE_PORT: The port used by the database service.
+  • KB_SERVICE_USER: The username with the necessary permissions to interact with the database service.
+  • KB_SERVICE_PASSWORD: The corresponding password for KB_SERVICE_USER to authenticate with the database service.

+Expected action output: +- On Failure: An error message, if applicable, indicating why the action failed. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataDump`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure for exporting the data from a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Applicability: +Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+The output should be a valid data dump streamed to stdout. It must exclude any irrelevant information to ensure +that only the necessary data is exported for import into the new replica. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+ +`dataLoad`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure for importing data into a replica. +

+ +

+Use Case: +This action is intended for initializing a newly created replica with data. It involves exporting data +from an existing replica and importing it into the new, empty replica. This is essential for synchronizing +the state of replicas across the system. +

+ +

+Some database engines or associated sidecar applications (e.g., Patroni) may already provide this functionality. +In such cases, this action may not be required. +

+ +

+Data should be received through stdin. If any error occurs during the process, +the action must be able to guarantee idempotence to allow for retries from the beginning. +

+ +

+Note: This field is immutable once it has been set. +
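+
+A sketch pairing `dataDump` and `dataLoad`; the PostgreSQL commands are illustrative, and any
engine-specific tools that stream via stdout/stdin fit the contract described above:
+
+lifecycleActions:
+  dataDump:
+    customHandler:
+      exec:
+        command:
+        - bash
+        - -c
+        # stream a clean dump to stdout; nothing else may be written there
+        - pg_dumpall -h 127.0.0.1 -U $KB_SERVICE_USER
+  dataLoad:
+    customHandler:
+      exec:
+        command:
+        - bash
+        - -c
+        # read the dump from stdin; must be idempotent so it can retry from the beginning
+        - psql -h 127.0.0.1 -U $KB_SERVICE_USER -f /dev/stdin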

+ +
+ +`reconfigure`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure that updates a replica with new configuration.

+ +

+Note: This field is immutable once it has been set. +

+ +

+This Action is reserved for future versions. +

+ +
+ +`accountProvision`
+ + +LifecycleActionHandler + + + +
+ +(Optional) + +

+Defines the procedure to generate a new database account. +

+ +

+Use Case: +This action is designed to create system accounts that are utilized for replication, monitoring, backup, +and other administrative tasks. +

+ +

+Note: This field is immutable once it has been set. +

+ +
+

+ComponentMessageMap +(`map[string]string` alias) +

+ +

+ +(Appears on:ClusterComponentStatus, ComponentStatus) + +

+
+
+

+ComponentRefEnv + +

+ +

+ +(Appears on:ComponentDefRef) + +

+
+ +

+ComponentRefEnv specifies name and value of an env. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the env, it must be a C identifier. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+The value of the env. +

+ +
+ +`valueFrom`
+ + +ComponentValueFrom + + + +
+ +(Optional) + +

+The source from which the value of the env is derived.

+ +
+

+ComponentService + +

+ +

+ +(Appears on:ComponentDefinitionSpec, ComponentSpec) + +

+
+ +

+ComponentService defines a service that would be exposed as an inter-component service within a Cluster. +A Service defined in the ComponentService is expected to be accessed by other Components within the same Cluster. +

+ +

+When a Component needs to use a ComponentService provided by another Component within the same Cluster, +it can declare a variable in the `componentDefinition.spec.vars` section and bind it to the specific exposed address +of the ComponentService using the `serviceVarRef` field. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Service`
+ + +Service + + + +
+ + +

+ +(Members of `Service` are embedded into this type.) + +

+ +
+ +`podService`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to create a corresponding Service for each Pod of the selected Component. +When set to true, a set of Services will be automatically generated for each Pod, +and the `roleSelector` field will be ignored. +

+ +

+The names of the generated Services will follow the same suffix naming pattern: `$(serviceName)-$(podOrdinal)`. +The total number of generated Services will be equal to the number of replicas specified for the Component. +

+ +

+Example usage: +

+
+
+name: my-service
+serviceName: my-service
+podService: true
+disableAutoProvision: true
+spec:
+  type: NodePort
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+
+
+ +

+In this example, if the Component has 3 replicas, three Services will be generated: +- my-service-0: Points to the first Pod (podOrdinal: 0) +- my-service-1: Points to the second Pod (podOrdinal: 1) +- my-service-2: Points to the third Pod (podOrdinal: 2) +

+ +

+Each generated Service will have the specified spec configuration and will target its respective Pod. +

+ +

+This feature is useful when you need to expose each Pod of a Component individually, allowing external access +to specific instances of the Component. +

+ +
+ +`disableAutoProvision`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the automatic provisioning of the service should be disabled. +

+ +

+If set to true, the service will not be automatically created at the component provisioning. +Instead, you can enable the creation of this service by specifying it explicitly in the cluster API. +

+ +
+

+ComponentSpec + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentSpec defines the desired state of Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDef`
+ +string + + +
+ + +

+Specifies the name of the referenced ComponentDefinition. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +

+ +
+ +`serviceRefs`
+ + +[]ServiceRef + + + +
+ +(Optional) + +

+Defines a list of ServiceRef for a Component, enabling access to both external services and +Services provided by other Clusters. +

+ +

+Types of services: +

+
+  • External services: Not managed by KubeBlocks or managed by a different KubeBlocks operator;
    these require a ServiceDescriptor for connection details.
+  • Services provided by a Cluster: Managed by the same KubeBlocks operator;
    identified using Cluster, Component, and Service names.

+ServiceRefs with identical `serviceRef.name` in the same Cluster are considered the same. +

+ +

+Example: +

+
+
+serviceRefs:
+  - name: "redis-sentinel"
+    serviceDescriptor:
+      name: "external-redis-sentinel"
+  - name: "postgres-cluster"
+    clusterServiceSelector:
+      cluster: "my-postgres-cluster"
+      service:
+        component: "postgresql"
+
+
+ +

+The example above includes ServiceRefs to an external Redis Sentinel service and a PostgreSQL Cluster. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, and Services owned by the Component.

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, and Services owned by the Component.

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to add. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resources required by the Component. +It allows defining the CPU, memory requirements and limits for the Component’s containers. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for the Component. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+List of volumes to override. +

+ +
+ +`services`
+ + +[]ComponentService + + + +
+ +(Optional) + +

+Overrides Services defined in referenced ComponentDefinition and exposes endpoints that can be accessed +by clients. +

+ +
+ +`systemAccounts`
+ + +[]ComponentSystemAccount + + + +
+ +(Optional) + +

+Overrides system accounts defined in referenced ComponentDefinition. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the desired number of replicas in the Component for enhancing availability and durability, or load balancing. +

+ +
+ +`configs`
+ + +[]ClusterComponentConfig + + + +
+ +(Optional) + +

+Specifies the configuration content of a config template. +

+ +
+ +`enabledLogs`
+ +[]string + + +
+ +(Optional) + +

+Specifies which types of logs should be collected for the Cluster. +The log types are defined in the `componentDefinition.spec.logConfigs` field with the LogConfig entries. +

+ +

+The elements in the `enabledLogs` array correspond to the names of the LogConfig entries. +For example, if the `componentDefinition.spec.logConfigs` defines LogConfig entries with +names “slow_query_log” and “error_log”, +you can enable the collection of these logs by including their names in the `enabledLogs` array: +

+
+
+enabledLogs:
+- slow_query_log
+- error_log
+
+
+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceAccount required by the running Component. +This ServiceAccount is used to grant necessary permissions for the Component’s Pods to interact +with other Kubernetes resources, such as modifying Pod labels or sending events. +

+ +

+Defaults: +If not specified, KubeBlocks automatically assigns a default ServiceAccount named “kb-{cluster.name}”, +bound to a default role defined during KubeBlocks installation. +

+ +

+Future Changes: +Future versions might change the default ServiceAccount creation strategy to one per Component, +potentially revising the naming to “kb-{cluster.name}-{component.name}”. +

+ +

+Users can override the automatic ServiceAccount assignment by explicitly setting the name of
an existing ServiceAccount in this field.

+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Indicates the InstanceUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
The default concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated +

+
+  • `StrictInPlace` indicates that only in-place upgrades are allowed; any attempt to modify other fields
    will be rejected.
+  • `PreferInPlace` indicates that an in-place upgrade of the Pod is attempted first; if that fails, it falls
    back to ReCreate, where the Pod is recreated. The default value is “PreferInPlace”.
+ +`affinity`
+ + +Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules for the Component. +It allows users to control how the Component’s Pods are scheduled onto nodes in the Cluster. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
+  • The `key`, `value`, and `effect` identify the taint that the toleration matches.
+  • The `operator` determines how the toleration matches the taint.

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +

+Deprecated since v0.10, replaced by the `schedulingPolicy` field. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`tlsConfig`
+ + +TLSConfig + + + +
+ +(Optional) + +

+Specifies the TLS configuration for the Component, including: +

+
+  • A boolean flag that indicates whether the Component should use Transport Layer Security (TLS)
    for secure communication.
+  • An optional field that specifies the configuration for the TLS certificates issuer when TLS is enabled.
    It allows defining the issuer name and the reference to the secret containing the TLS certificates and key.
    The secret should contain the CA certificate, TLS certificate, and private key in the specified keys.
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Allows for the customization of configuration values for each instance within a Component.
An Instance represents a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps).
While instances typically share a common configuration as defined in the ClusterComponentSpec,
they can require unique settings in various scenarios:

+ +

+For example: +- A database Component might require different resource allocations for primary and secondary instances, + with primaries needing more resources. +- During a rolling upgrade, a Component may first update the image for one or a few instances, +and then update the remaining instances after verifying that the updated instances are functioning correctly. +

+ +

+InstanceTemplate allows for specifying these unique configurations per instance. +Each instance’s name is constructed using the pattern: $(component.name)-$(template.name)-$(ordinal), +starting with an ordinal of 0. +It is crucial to maintain unique names for each InstanceTemplate to avoid conflicts. +

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the Component. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +
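+
+A sketch of two instance templates within a Component whose `replicas` is 3 (resource values are
illustrative); the generated names follow the documented $(component.name)-$(template.name)-$(ordinal) pattern:
+
+replicas: 3
+instances:
+- name: primary        # pods: $(component.name)-primary-0
+  replicas: 1
+  resources:
+    limits:
+      cpu: "2"
+      memory: 4Gi
+- name: secondary      # pods: $(component.name)-secondary-0, -1
+  replicas: 2
+  resources:
+    limits:
+      cpu: "1"
+      memory: 2Gi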

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
+  1. The associated Pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential
     future reuse or data recovery, but it is no longer actively used.
+  2. The ordinal number assigned to this instance is preserved, ensuring it remains unique
     and avoiding conflicts with new instances.

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the Cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +
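+
+A sketch of a controlled scale-in that takes one named instance offline (the instance name is
hypothetical and follows the naming pattern described above):
+
+replicas: 2
+offlineInstances:
+- mycomp-secondary-1   # its PVC is retained; clean up manually when no longer needed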

+ +
+ +`runtimeClassName`
+ +string + + +
+ +(Optional) + +

+Defines runtimeClassName for all Pods managed by this Component. +

+ +
+ +`disableExporter`
+ +bool + + +
+ +(Optional) + +

+Determines whether metrics exporter information is annotated on the Component’s headless Service. +

+ +

+If set to true, the following annotations will not be patched into the Service: +

+
+  • “monitor.kubeblocks.io/path”
+  • “monitor.kubeblocks.io/port”
+  • “monitor.kubeblocks.io/scheme”

+These annotations allow the Prometheus installed by KubeBlocks to discover and scrape metrics from the exporter. +

+ +
+ +`stop`
+ +bool + + +
+ +(Optional) + +

+Stop the Component. +If set, all the computing resources will be released. +

+ +
+

+ComponentStatus + +

+ +

+ +(Appears on:Component) + +

+
+ +

+ComponentStatus represents the observed state of a Component within the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Specifies the most recent generation observed for this Component object. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the Component object. +Each condition in the list provides real-time information about certain aspect of the Component object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the Component. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+ +`phase`
+ + +ClusterComponentPhase + + + +
+ + +

+Indicates the current phase of the Component, with each phase indicating specific conditions: +

+
+  • Creating: The initial phase for new Components, transitioning from ‘empty’ (“”).
+  • Running: All Pods are in a Running state.
+  • Updating: The Component is currently being updated, with no failed Pods present.
+  • Abnormal: Some Pods have failed, indicating a potentially unstable state.
    However, the cluster remains available as long as a quorum of members is functioning.
+  • Failed: A significant number of Pods or critical Pods have failed.
    The cluster may be non-functional or may offer only limited services (e.g., read-only).
+  • Stopping: All Pods are being terminated, with the current replica count at zero.
+  • Stopped: All associated Pods have been successfully deleted.
+  • Deleting: The Component is being deleted.
+ +`message`
+ + +ComponentMessageMap + + + +
+ +(Optional) + +

+A map that stores detailed messages about the Component.
Each entry in the map provides insights into specific elements of the Component, such as Pods or workloads.

+ +

+Keys in this map are formatted as `ObjectKind/Name`, where `ObjectKind` could be a type like Pod, +and `Name` is the specific name of the object. +

+ +
+

+ComponentSwitchover + +

+ +

+ +(Appears on:ComponentLifecycleActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`withCandidate`
+ + +Action + + + +
+ +(Optional) + +

+Represents the switchover process for a specified candidate primary or leader instance. +Note that only Action.Exec is currently supported, while Action.HTTP is not. +

+ +
+ +`withoutCandidate`
+ + +Action + + + +
+ +(Optional) + +

+Represents a switchover process that does not involve a specific candidate primary or leader instance. +As with the previous field, only Action.Exec is currently supported, not Action.HTTP. +

+ +
+ +`scriptSpecSelectors`
+ + +[]ScriptSpecSelector + + + +
+ +(Optional) + +

+Used to define the selectors for the scriptSpecs that need to be referenced. +If this field is set, the scripts defined under the ‘scripts’ field can be invoked or referenced within an Action. +

+ +

+This field is deprecated from v0.9. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. +

+ +
+

+ComponentSystemAccount + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the system account. +

+ +
+ +`passwordConfig`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+Refers to the secret from which data will be copied to create the new account. +

+ +

+This field is immutable once set. +
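+
+A sketch of a `systemAccounts` override (account names are illustrative; the `passwordConfig`
sub-fields are assumptions based on the referenced PasswordConfig type):
+
+systemAccounts:
+- name: admin
+  passwordConfig:      # generate the password instead of copying it
+    length: 16
+    numDigits: 4
+- name: replicator
+  secretRef:           # copy credentials from an existing secret
+    name: replicator-credentials
+    namespace: default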

+ +
+

+ComponentTemplateSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentConfigSpec, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the configuration template. +

+ +
+ +`templateRef`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced configuration template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced configuration template ConfigMap object. +An empty namespace is equivalent to the “default” namespace. +

+ +
+ +`volumeName`
+ +string + + +
+ +(Optional) + +

+Refers to the volume name of PodTemplate. The configuration file produced through the configuration +template will be mounted to the corresponding volume. Must be a DNS_LABEL name. +The volume name must be defined in podSpec.containers[*].volumeMounts. +

+ +
+ +`defaultMode`
+ +int32 + + +
+ +(Optional) + +

+The operator attempts to set default file permissions for scripts (0555) and configurations (0444). +However, certain database engines may require different file permissions. +You can specify the desired file permissions here. +

+ +

+Must be specified as an octal value between 0000 and 0777 (inclusive), +or as a decimal value between 0 and 511 (inclusive). +YAML supports both octal and decimal values for file permissions. +

+ +

+Please note that this setting only affects the permissions of the files themselves.
Directories within the specified path are not impacted by this setting.
Be aware that this setting might conflict with other options that influence the file mode, such as fsGroup.
In such cases, the resulting file mode may have additional bits set.
Refer to the documentation of k8s.ConfigMapVolumeSource.defaultMode for more information.
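+
+A sketch of a template entry that overrides the default configuration file mode (0444); the template
and volume names are hypothetical:
+
+configs:
+- name: mysql-config
+  templateRef: mysql-config-template
+  namespace: kb-system
+  volumeName: config
+  defaultMode: 0600   # octal; equivalently 384 in decimal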

+ +
+

+ComponentValueFrom + +

+ +

+ +(Appears on:ComponentRefEnv) + +

+
+ +

+ComponentValueFrom is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +ComponentValueFromType + + + +
+ + +

+Specifies the source to select. It can be one of three types: `FieldRef`, `ServiceRef`, `HeadlessServiceRef`. +

+ +
+ +`fieldPath`
+ +string + + +
+ +(Optional) + +

+The jsonpath of the source to select when the Type is `FieldRef`. +Two objects are registered in the jsonpath: `componentDef` and `components`: +

+
+  • `componentDef` is the component definition object specified in `componentRef.componentDefName`.
+  • `components` are the component list objects referring to the component definition object.
+ +`format`
+ +string + + +
+ +(Optional) + +

+Defines the format of each headless service address.
Three built-in variables can be used as placeholders: `$POD_ORDINAL`, `$POD_FQDN`, and `$POD_NAME`:

+
+  • `$POD_ORDINAL` represents the ordinal of the pod.
+  • `$POD_FQDN` represents the fully qualified domain name of the pod.
+  • `$POD_NAME` represents the name of the pod.
+ +`joinWith`
+ +string + + +
+ +(Optional) + +

+The string used to join the values of headless service addresses. +

+ +
+

+ComponentValueFromType +(`string` alias) +

+ +

+ +(Appears on:ComponentValueFrom) + +

+
+ +

+ComponentValueFromType specifies the type of component value from which the data is derived. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"FieldRef" +

+
+ +

+FromFieldRef refers to the value of a specific field in the object. +

+ +
+ +

+"HeadlessServiceRef" +

+
+ +

+FromHeadlessServiceRef refers to a headless service within the same namespace as the object. +

+ +
+ +

+"ServiceRef" +

+
+ +

+FromServiceRef refers to a service within the same namespace as the object. +

+ +
+

+ComponentVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ComponentVarSelector selects a var from a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Component to select from. +

+ +
+ +`ComponentVars`
+ + +ComponentVars + + + +
+ + +

+ +(Members of `ComponentVars` are embedded into this type.) + +

+ +
+

+ComponentVars + +

+ +

+ +(Appears on:ComponentVarSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the name of the Component object. +

+ +
+ +`replicas`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the replicas of the component. +

+ +
+ +`instanceNames`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod name list of the component.
The value will be presented in the following format: name1,name2,…

+ +
+ +`podFQDNs`
+ + +VarOption + + + +
+ +(Optional) + +

+Reference to the pod FQDN list of the component. +The value will be presented in the following format: FQDN1,FQDN2,… +
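+
+A sketch of a `componentDefinition.spec.vars` entry resolving another Component’s pod FQDN list
through a ComponentVarSelector (the `componentVarRef` nesting under `valueFrom` is an assumption):
+
+vars:
+- name: ETCD_POD_FQDNS
+  valueFrom:
+    componentVarRef:
+      compDef: etcd        # hypothetical ComponentDefinition reference
+      optional: false
+      podFQDNs: Required   # VarOption: value becomes FQDN1,FQDN2,…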

+ +
+

+ComponentVersionCompatibilityRule + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionCompatibilityRule defines the compatibility between a set of component definitions and a set of releases. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this ComponentVersion. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
+  • “mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1”.
+  • “mysql-8.0.30”: Matches all names starting with “mysql-8.0.30”.
+  • “^mysql-8.0.\d{1,2}$”: Matches all names starting with “mysql-8.0.” followed by one or two digits.
+ +`releases`
+ +[]string + + +
+ + +

+Releases is a list of identifiers for the releases. +

+ +
+

+ComponentVersionRelease + +

+ +

+ +(Appears on:ComponentVersionSpec) + +

+
+ +

+ComponentVersionRelease represents a release of component instances within a ComponentVersion. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is a unique identifier for this release. +Cannot be updated. +

+ +
+ +`changes`
+ +string + + +
+ +(Optional) + +

+Changes provides information about the changes made in this release. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+ServiceVersion defines the version of the well-known service that the component provides. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If the release is used, it will serve as the service version for component instances, overriding the one defined in the component definition. +Cannot be updated. +

+ +
+ +`images`
+ +map[string]string + + +
+ + +

+Images define the new images for different containers within the release. +

+ +
+

+ComponentVersionSpec + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionSpec defines the desired state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`compatibilityRules`
+ + +[]ComponentVersionCompatibilityRule + + + +
+ + +

+CompatibilityRules defines compatibility rules between sets of component definitions and releases. +

+ +
+ +`releases`
+ + +[]ComponentVersionRelease + + + +
+ + +

+Releases represents different releases of component instances within this ComponentVersion. +
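+
+A sketch of a ComponentVersion combining the two fields above (names and image references are
illustrative):
+
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ComponentVersion
+metadata:
+  name: mysql
+spec:
+  compatibilityRules:
+  - compDefs:
+    - mysql-8.0            # prefix match against ComponentDefinition names
+    releases:
+    - 8.0.30
+  releases:
+  - name: 8.0.30
+    serviceVersion: 8.0.30
+    images:
+      mysql: docker.io/mysql:8.0.30   # container name -> image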

+ +
+

+ComponentVersionStatus + +

+ +

+ +(Appears on:ComponentVersion) + +

+
+ +

+ComponentVersionStatus defines the observed state of ComponentVersion +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+ObservedGeneration is the most recent generation observed for this ComponentVersion. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Valid values for the phase include ``, `Available`, and `Unavailable`.
When `Available`, the ComponentVersion has become available and can be used by related objects.

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Extra message for current phase. +

+ +
+ +`serviceVersions`
+ +string + + +
+ +(Optional) + +

+ServiceVersions represent the supported service versions of this ComponentVersion. +

+ +
+

+ComponentVolume + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the volume. +It must be a DNS_LABEL and unique within the pod. +More info can be found at: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +Note: This field cannot be updated. +

+ +
+ +`needSnapshot`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the creation of a snapshot of this volume is necessary when performing a backup of the Component. +

+ +

+Note: This field cannot be updated. +

+ +
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+Sets the critical threshold for volume space utilization as a percentage (0-100). +

+ +

+Exceeding this percentage triggers the system to switch the volume to read-only mode as specified in +`componentDefinition.spec.lifecycleActions.readOnly`. +This precaution helps prevent space depletion while maintaining read-only access. +If the space utilization later falls below this threshold, the system reverts the volume to read-write mode +as defined in `componentDefinition.spec.lifecycleActions.readWrite`, restoring full functionality. +

+ +

+Note: This field cannot be updated. +
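+
+A sketch of a ComponentVolume using the threshold described above (the 90% value is illustrative):
+
+volumes:
+- name: data
+  needSnapshot: true
+  highWatermark: 90   # read-only above 90% utilization; reverts to read-write below it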

+ +
+

+ConfigConstraintSpec + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintSpec defines the desired state of ConfigConstraint +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`reloadOptions`
+ + +ReloadOptions + + + +
+ +(Optional) + +

+Specifies the dynamic reload action supported by the engine.
When set, the controller executes the method defined here to apply hot parameter updates.

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
+  1. The modified parameters are listed in the `dynamicParameters` field.
     If `reloadStaticParamsBeforeRestart` is set to true, modifications to `staticParameters`
     can also trigger a reload.
+  2. `reloadOptions` is set.

+If `reloadOptions` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadOptions:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`dynamicActionCanBeMerged`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
+  • If true, updates requiring both actions will result in only a restart, merging the two actions.
+  • If false, updates will trigger both actions, executed sequentially: first the dynamic reload, then the restart.

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadOptions` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
+  • false (default): Only modifications to the dynamic parameters listed in `dynamicParameters`
    will trigger a dynamic reload.
+  • true: Modifications to both the dynamic parameters listed in `dynamicParameters` and the static parameters
    listed in `staticParameters` will trigger a dynamic reload.
    The “true” option is for certain engines that require static parameters to be set
    via SQL statements before they can take effect on restart.
+ +`toolsImageSpec`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container,
which is then responsible for copying the tools from the image to a bin volume.
This ensures that the tools are available to the ‘config-manager’ sidecar.

+ +
+ +`downwardAPIOptions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod.
The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes
registered commands (usually executing some SQL statements) when a change is detected.

+ +

+It is designed for scenarios where: +

+
+  • Replicas with different roles have different configurations, such as Redis primary and secondary replicas.
+  • After a role switch (e.g., from secondary to primary), some changes in configuration are needed
    to reflect the new role.
+ +`scriptConfigs`
+ + +[]ScriptConfig + + + +
+ +(Optional) + +

+A list of ScriptConfig Object. +

+ +

+Each ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload +and DownwardAction to perform specific tasks or configurations. +

+ +
+ +`cfgSchemaTopLevelName`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configurationSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configurationSchema.cue’. +

+ +
+ +`configurationSchema`
+ + +CustomParametersValidation + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+List static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+List dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +
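+
+A sketch of the three parameter lists declared together (the MySQL-style parameter names are
illustrative):
+
+staticParameters:
+- innodb_buffer_pool_instances   # change requires a restart
+dynamicParameters:
+- max_connections                # hot-reloadable via reloadOptions
+immutableParameters:
+- datadir                        # modification attempts are ignored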

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `selector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `selector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+ +`formatterConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+formatterConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+

+ConfigConstraintStatus + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintStatus represents the observed state of a ConfigConstraint. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ConfigConstraintPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to CCAvailablePhase, the ConfigConstraint can be referenced by ClusterDefinition. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides descriptions for abnormal states. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation observed for this ConfigConstraint. This value is updated by the API Server. +

+ +
+

+ConfigMapRef + +

+ +

+ +(Appears on:UserResourceRefs) + +

+
+ +

+ConfigMapRef defines a reference to a ConfigMap. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ResourceMeta`
+ + +ResourceMeta + + + +
+ + +

+ +(Members of `ResourceMeta` are embedded into this type.) + +

+ +
+ +`configMap`
+ + +Kubernetes core/v1.ConfigMapVolumeSource + + + +
+ + +

+ConfigMap specifies the ConfigMap to be mounted as a volume. +

+ +
+

+ConfigParams + +

+ +

+ +(Appears on:ConfigurationItemDetail) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`content`
+ +string + + +
+ +(Optional) + +

+Holds the configuration keys and values. This field is a workaround for issues found in kubebuilder and code-generator. +Refer to https://github.com/kubernetes-sigs/kubebuilder/issues/528 and https://github.com/kubernetes/code-generator/issues/50 for more details. +

+ +

+Represents the content of the configuration file. +

+ +
+ +`parameters`
+ +map[string]*string + + +
+ +(Optional) + +

+Represents the updated parameters for a single configuration file. +

+ +
+

+ConfigTemplateExtension + +

+ +

+ +(Appears on:ConfigurationItemDetail, LegacyRenderedTemplateSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`templateRef`
+ +string + + +
+ + +

+Specifies the name of the referenced configuration template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced configuration template ConfigMap object. +An empty namespace is equivalent to the “default” namespace. +

+ +
+ +`policy`
+ + +MergedPolicy + + + +
+ +(Optional) + +

+Defines the strategy for merging externally imported templates into component templates. +

+ +
+

+ConfigurationItemDetail + +

+ +

+ +(Appears on:ConfigurationSpec) + +

+
+ +

+ConfigurationItemDetail corresponds to settings of a configuration template (a ConfigMap). +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the configuration template. +

+ +

+It must be a string of maximum 63 characters, and can only include lowercase alphanumeric characters, +hyphens, and periods. +The name must start and end with an alphanumeric character. +

+ +
+ +`version`
+ +string + + +
+ +(Optional) + +

+Deprecated: No longer used. Please use ‘Payload’ instead. Previously represented the version of the configuration template. +

+ +
+ +`payload`
+ + +Payload + + + +
+ +(Optional) + +

+External controllers can trigger a configuration rerender by modifying this field. +

+ +

+Note: Currently, the `payload` field is opaque and its content is not interpreted by the system. +Modifying this field will cause a rerender, regardless of the specific content of this field. +

+ +
+ +`configSpec`
+ + +ComponentConfigSpec + + + +
+ +(Optional) + +

+Specifies the name of the configuration template (a ConfigMap), ConfigConstraint, and other miscellaneous options. +

+ +

+The configuration template is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+ConfigConstraint allows defining constraints and validation rules for configuration parameters. +It ensures that the configuration adheres to certain requirements and limitations. +

+ +
+ +`importTemplateRef`
+ + +ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+ +`configFileParams`
+ + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1alpha1.ConfigParams + + + +
+ +(Optional) + +

+Specifies the user-defined configuration parameters. +

+ +

+When provided, the parameter values in `configFileParams` override the default configuration parameters. +This allows users to override the default configuration according to their specific needs. +

+ +
+

+ConfigurationItemDetailStatus + +

+ +

+ +(Appears on:ConfigurationStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the configuration template. It is a required field and must be a string of maximum 63 characters. +The name should only contain lowercase alphanumeric characters, hyphens, or periods. It should start and end with an alphanumeric character. +

+ +
+ +`phase`
+ + +ConfigurationPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`lastDoneRevision`
+ +string + + +
+ +(Optional) + +

+Represents the last completed revision of the configuration item. This field is optional. +

+ +
+ +`updateRevision`
+ +string + + +
+ +(Optional) + +

+Represents the updated revision of the configuration item. This field is optional. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. This field is optional. +

+ +
+ +`reconcileDetail`
+ + +ReconcileDetail + + + +
+ +(Optional) + +

+Provides detailed information about the execution of the configuration change. This field is optional. +

+ +
+

+ConfigurationPhase +(`string` alias) +

+ +

+ +(Appears on:ConfigurationItemDetailStatus) + +

+
+ +

+ConfigurationPhase defines the Configuration FSM phase +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Creating" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"FailedAndPause" +

+
+ +
+ +

+"FailedAndRetry" +

+
+ +
+ +

+"Finished" +

+
+ +
+ +

+"Init" +

+
+ +
+ +

+"MergeFailed" +

+
+ +
+ +

+"Merged" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Running" +

+
+ +
+ +

+"Upgrading" +

+
+ +
+

+ConfigurationSpec + +

+ +

+ +(Appears on:Configuration) + +

+
+ +

+ConfigurationSpec defines the desired state of a Configuration resource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterRef`
+ +string + + +
+ + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigurationItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigurationItemDetail objects. +

+ +

+Each ConfigurationItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigurationItemDetail includes information such as: +

+
+  • The configuration template (a ConfigMap)
+  • The corresponding ConfigConstraint (constraints and validation rules for the configuration)
+  • Volume mounts (for mounting the configuration files)
+

+ConfigurationStatus + +

+ +

+ +(Appears on:Configuration) + +

+
+ +

+ConfigurationStatus represents the observed state of a Configuration resource. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation observed for this +Configuration. It corresponds to the ConfigConstraint’s generation, which is +updated by the API Server.

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides detailed status information for opsRequest. +

+ +
+ +`configurationStatus`
+ + +[]ConfigurationItemDetailStatus + + + +
+ + +

+Provides the status of each component undergoing reconfiguration. +

+ +
+

+ConnectionCredentialAuth + +

+ +

+ +(Appears on:ServiceDescriptorSpec) + +

+
+ +

+ConnectionCredentialAuth specifies the authentication credentials required for accessing an external service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the username for the external service. +

+ +
+ +`password`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the password for the external service. +

+ +
+

+ConsensusMember + +

+ +

+ +(Appears on:ConsensusSetSpec) + +

+
+ +

+ConsensusMember is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the consensus member. +

+ +
+ +`accessMode`
+ + +AccessMode + + + +
+ + +

+Specifies the services that this member is capable of providing. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Indicates the number of Pods that perform this role. +The default is 1 for `Leader`, 0 for `Learner`; the remaining Pods are `Followers`.

+ +
+

+ConsensusSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ConsensusSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`StatefulSetSpec`
+ + +StatefulSetSpec + + + +
+ + +

+ +(Members of `StatefulSetSpec` are embedded into this type.) + +

+ +
+ +`leader`
+ + +ConsensusMember + + + +
+ + +

+Represents a single leader in the consensus set. +

+ +
+ +`followers`
+ + +[]ConsensusMember + + + +
+ +(Optional) + +

+Members of the consensus set that have voting rights but are not the leader. +

+ +
+ +`learner`
+ + +ConsensusMember + + + +
+ +(Optional) + +

+Represents a member of the consensus set that does not have voting rights. +

+ +
+

+ContainerVars + +

+ +

+ +(Appears on:HostNetworkVars) + +

+
+ +

+ContainerVars defines the vars that can be referenced from a Container. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the container. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Container port to reference. +

+ +
+

+CredentialVar + +

+ +

+ +(Appears on:ConnectionCredentialAuth, ServiceDescriptorSpec) + +

+
+ +

+CredentialVar represents a variable that retrieves its value either directly from a specified expression +or from a source defined in `valueFrom`. +Only one of these options may be used at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`value`
+ +string + + +
+ +(Optional) + +

+Holds a direct string or an expression that can be evaluated to a string. +

+ +

+It can include variables denoted by $(VAR_NAME). +These variables are expanded to the value of the environment variables defined in the container. +If a variable cannot be resolved, it remains unchanged in the output. +

+ +

+To escape variable expansion and retain the literal value, use double $ characters. +

+ +

+For example: +

+
    +
  • +”$(VAR_NAME)” will be expanded to the value of the environment variable VAR_NAME. +
  • +
  • +”$$(VAR_NAME)” will result in “$(VAR_NAME)” in the output, without any variable expansion. +
  • +
+ +

+Default value is an empty string. +

+ +
+ +`valueFrom`
+ + +Kubernetes core/v1.EnvVarSource + + + +
+ +(Optional) + +

+Specifies the source for the variable’s value. +

+ +
+
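+A minimal sketch of the two mutually exclusive forms, assuming the `CredentialVar` sits under a ServiceDescriptor field such as `auth` (the Secret name `db-conn-secret` is hypothetical):
+
+```yaml
+username:
+  # Direct value; $(ENDPOINT) expands from the container environment,
+  # while $$(ENDPOINT) would stay literal as "$(ENDPOINT)".
+  value: "admin@$(ENDPOINT)"
+password:
+  # Value sourced from a Secret key instead of a literal string.
+  valueFrom:
+    secretKeyRef:
+      name: db-conn-secret   # hypothetical Secret
+      key: password
+```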

+CredentialVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+CredentialVarSelector selects a var from a Credential (SystemAccount). +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Credential (SystemAccount) to select from. +

+ +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+CredentialVars + +

+ +

+ +(Appears on:CredentialVarSelector, ServiceRefVars) + +

+
+ +

+CredentialVars defines the vars that can be referenced from a Credential (SystemAccount). +Note: CredentialVars will only be used as environment variables for Pods & Actions, and will not be used to render the templates.

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`password`
+ + +VarOption + + + +
+ +(Optional) + +
+

+CustomLabelSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+CustomLabelSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`key`
+ +string + + +
+ + +

+The key of the label. +

+ +
+ +`value`
+ +string + + +
+ + +

+The value of the label. +

+ +
+ +`resources`
+ + +[]GVKResource + + + +
+ + +

+The resources that will be patched with the label. +

+ +
+

+CustomParametersValidation + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+CustomParametersValidation Defines a list of configuration items with their names, default values, descriptions, +types, and constraints. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cue`
+ +string + + +
+ +(Optional) + +

+Holds a string that contains a script written in CUE language that defines a list of configuration items. +Each item is detailed with its name, default value, description, type (e.g. string, integer, float), +and constraints (permissible values or the valid range of values).

+ +

+CUE (Configure, Unify, Execute) is a declarative language designed for defining and validating +complex data configurations. +It is particularly useful in environments like K8s where complex configurations and validation rules are common. +

+ +

+This script functions as a validator for user-provided configurations, ensuring compliance with +the established specifications and constraints. +

+ +
+ +`schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ + +

+Generated from the ‘cue’ field and transformed into a JSON format. +

+ +
+

+EnvVar + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+EnvVar represents a variable present in the env of Pod/Action or the template of config/script. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name of the variable. Must be a C_IDENTIFIER. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Variable references `$(VAR_NAME)` are expanded using the previously defined variables in the current context. +

+ +

+If a variable cannot be resolved, the reference in the input string will be unchanged. +Double `$$` are reduced to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. +

+
    +
  • +`$$(VAR_NAME)` will produce the string literal `$(VAR_NAME)`. +
  • +
+ +

+Escaped references will never be expanded, regardless of whether the variable exists or not. +Defaults to “”. +

+ +
+ +`valueFrom`
+ + +VarSource + + + +
+ +(Optional) + +

+Source for the variable’s value. Cannot be used if value is not empty. +

+ +
+ +`expression`
+ +string + + +
+ +(Optional) + +

+A Go template expression that will be applied to the resolved value of the var. +

+ +

+The expression will only be evaluated if the var is successfully resolved to a non-credential value. +

+ +

+The resolved value can be accessed by its name within the expression; system vars and other user-defined +non-credential vars can be used within the expression in the same way. +Note that when accessing a var by its name, you should replace every “-” in the name with “_”, because +“-” is not a valid identifier in Go.

+ +

+All expressions are evaluated in the order the vars are defined. If a var depends on any vars that also +have expressions defined, be careful about the evaluation order as it may use intermediate values. +

+ +

+The result of evaluation will be used as the final value of the var. If the expression fails to evaluate, +the resolving of var will also be considered failed. +

+ +
+
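+An illustrative sketch of a `vars` list using `value` and `expression`; the var names are hypothetical, and `service-port` is accessed in the template as `.service_port` because “-” must be replaced with “_”:
+
+```yaml
+vars:
+  - name: service-port        # hypothetical var, resolved to "3306"
+    value: "3306"
+  - name: DSN
+    value: "$(HOSTNAME)"      # $(VAR_NAME) references expand first
+    # Go template applied to the resolved value; other non-credential
+    # vars are reachable by name, with "-" rewritten to "_".
+    expression: "{{ .DSN }}:{{ .service_port }}"
+```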

+ExecAction + +

+ +

+ +(Appears on:Action) + +

+
+ +

+ExecAction describes an Action that executes a command inside a container. +Which may run as a K8s job or be executed inside the Lorry sidecar container, depending on the implementation. +Future implementations will standardize execution within Lorry. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed inside the container. +The working directory for this command is the container’s root directory (‘/’). +Commands are executed directly without a shell environment, meaning shell-specific syntax (‘|’, etc.) is not supported. +If a shell is required, it must be explicitly invoked in the command.

+ +

+A successful execution is indicated by an exit status of 0; any non-zero status signifies a failure. +

+ +
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Args represents the arguments that are passed to the `command` for execution. +

+ +
+
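+Because commands run without a shell, pipes and other shell syntax must be wrapped explicitly. A short sketch (the probe command itself is hypothetical):
+
+```yaml
+exec:
+  # Invoke a shell explicitly when "|", "&&", etc. are needed.
+  command: ["/bin/sh", "-c"]
+  args: ["pg_isready -U postgres | grep -q 'accepting connections'"]
+```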

+Exporter + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the built-in metrics exporter container. +

+ +
+ +`scrapePath`
+ +string + + +
+ +(Optional) + +

+Specifies the http/https url path to scrape for metrics. +If empty, Prometheus uses the default value (e.g. `/metrics`). +

+ +
+ +`scrapePort`
+ +string + + +
+ +(Optional) + +

+Specifies the port name to scrape for metrics. +

+ +
+ +`scrapeScheme`
+ + +PrometheusScheme + + + +
+ +(Optional) + +

+Specifies the schema to use for scraping. +`http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. +If empty, Prometheus uses the default value `http`. +

+ +
+
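+A hedged sketch of an `exporter` block in a ComponentDefinition (container and port names are hypothetical):
+
+```yaml
+exporter:
+  containerName: metrics      # built-in metrics exporter container
+  scrapePath: /metrics        # Prometheus default when empty
+  scrapePort: http-metrics    # named container port to scrape
+  scrapeScheme: http          # "http" (default) or "https"
+```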

+ExporterConfig + +

+ +

+ +(Appears on:MonitorConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`scrapePort`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ + +

+scrapePort is the exporter port from which the Time Series Database scrapes metrics.

+ +
+ +`scrapePath`
+ +string + + +
+ +(Optional) + +

+scrapePath is the exporter URL path from which the Time Series Database scrapes metrics.

+ +
+

+FailurePolicyType +(`string` alias) +

+ +

+ +(Appears on:ComponentDefRef) + +

+
+ +

+FailurePolicyType specifies the type of failure policy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Fail" +

+
+ +

+FailurePolicyFail means that an error will be reported. +

+ +
+ +

+"Ignore" +

+
+ +

+FailurePolicyIgnore means that an error will be ignored but logged. +

+ +
+

+GVKResource + +

+ +

+ +(Appears on:CustomLabelSpec) + +

+
+ +

+GVKResource is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`gvk`
+ +string + + +
+ + +

+Represents the GVK of a resource, such as “v1/Pod”, “apps/v1/StatefulSet”, etc. +When a resource matching this is found by the selector, a custom label will be added if it doesn’t already exist, +or updated if it does. +

+ +
+ +`selector`
+ +map[string]string + + +
+ +(Optional) + +

+A label query used to filter a set of resources. +

+ +
+

+HScaleDataClonePolicyType +(`string` alias) +

+ +

+ +(Appears on:HorizontalScalePolicy) + +

+
+ +

+HScaleDataClonePolicyType defines the data clone policy to be used during horizontal scaling. +This policy determines how data is handled when new nodes are added to the cluster. +The policy can be set to `None`, `CloneVolume`, or `Snapshot`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"CloneVolume" +

+
+ +

+HScaleDataClonePolicyCloneVolume indicates that data will be cloned from existing volumes during horizontal scaling. +

+ +
+ +

+"Snapshot" +

+
+ +

+HScaleDataClonePolicyFromSnapshot indicates that data will be cloned from a snapshot during horizontal scaling. +

+ +
+ +

+"None" +

+
+ +

+HScaleDataClonePolicyNone indicates that no data cloning will occur during horizontal scaling. +

+ +
+

+HTTPAction + +

+ +

+ +(Appears on:Action) + +

+
+ +

+HTTPAction describes an Action that triggers HTTP requests. +HTTPAction is to be implemented in future version. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`path`
+ +string + + +
+ +(Optional) + +

+Specifies the endpoint to be requested on the HTTP server. +

+ +
+ +`port`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ + +

+Specifies the target port for the HTTP request. +It can be specified either as a numeric value in the range of 1 to 65535, +or as a named port that meets the IANA_SVC_NAME specification. +

+ +
+ +`host`
+ +string + + +
+ +(Optional) + +

+Indicates the server’s domain name or IP address. Defaults to the Pod’s IP. +Prefer setting the “Host” header in httpHeaders when needed. +

+ +
+ +`scheme`
+ + +Kubernetes core/v1.URIScheme + + + +
+ +(Optional) + +

+Designates the protocol used to make the request, such as HTTP or HTTPS. +If not specified, HTTP is used by default. +

+ +
+ +`method`
+ +string + + +
+ +(Optional) + +

+Represents the type of HTTP request to be made, such as “GET,” “POST,” “PUT,” etc. +If not specified, “GET” is the default method. +

+ +
+ +`httpHeaders`
+ + +[]Kubernetes core/v1.HTTPHeader + + + +
+ +(Optional) + +

+Allows for the inclusion of custom headers in the request. +HTTP permits the use of repeated headers. +

+ +
+
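+Since HTTPAction is slated for a future version, the following is only a sketch of how such an action could look, assuming it hangs off an Action’s `http` field (endpoint and header values are hypothetical):
+
+```yaml
+http:
+  path: /healthz
+  port: 8080                  # 1-65535, or an IANA_SVC_NAME named port
+  method: GET                 # default method
+  scheme: HTTP                # default scheme
+  httpHeaders:
+    - name: Host              # prefer this over the `host` field
+      value: db.example.com
+```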

+HorizontalScalePolicy + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+HorizontalScalePolicy is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +HScaleDataClonePolicyType + + + +
+ +(Optional) + +

+Determines the data synchronization method when a component scales out. +The policy can be one of the following: {None, CloneVolume}. The default policy is `None`. +

+
    +
  • +`None`: This is the default policy. It creates an empty volume without data cloning. +
  • +
  • +`CloneVolume`: This policy clones data to newly scaled pods. It first tries to use a volume snapshot. +If volume snapshot is not enabled, it will attempt to use a backup tool. If neither method works, it will report an error. +
  • +
  • +`Snapshot`: This policy is deprecated and is an alias for CloneVolume. +
  • +
+ +
+ +`backupPolicyTemplateName`
+ +string + + +
+ +(Optional) + +

+Refers to the backup policy template. +

+ +
+ +`volumeMountsName`
+ +string + + +
+ +(Optional) + +

+Specifies the volumeMount of the container to backup. +This only works if Type is not None. If not specified, the first volumeMount will be selected. +

+ +
+
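+A sketch of the (deprecated since v0.8) policy as it might appear in a ClusterComponentDefinition; the template name is hypothetical:
+
+```yaml
+horizontalScalePolicy:
+  type: CloneVolume           # volume snapshot first, then backup tool
+  backupPolicyTemplateName: my-backup-policy-template
+  volumeMountsName: data      # volumeMount to back up; first one if unset
+```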

+HostNetwork + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerPorts`
+ + +[]HostNetworkContainerPort + + + +
+ +(Optional) + +

+The list of container ports that are required by the component. +

+ +
+

+HostNetworkContainerPort + +

+ +

+ +(Appears on:HostNetwork) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ +string + + +
+ + +

+Container specifies the target container within the Pod. +

+ +
+ +`ports`
+ +[]string + + +
+ + +

+Ports are named container ports within the specified container. +These container ports must be defined in the container for proper port allocation. +

+ +
+

+HostNetworkVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+HostNetworkVarSelector selects a var from host-network resources. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The component to select from. +

+ +
+ +`HostNetworkVars`
+ + +HostNetworkVars + + + +
+ + +

+ +(Members of `HostNetworkVars` are embedded into this type.) + +

+ +
+

+HostNetworkVars + +

+ +

+ +(Appears on:HostNetworkVarSelector) + +

+
+ +

+HostNetworkVars defines the vars that can be referenced from host-network resources. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ + +ContainerVars + + + +
+ +(Optional) + +
+

+InstanceTemplate + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations in a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the Component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the Component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies an override for the first container’s image in the Pod. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+Defines Volumes to override. +Add new or override existing volumes. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Defines VolumeMounts to override. +Add new or override existing volume mounts of the first container in the Pod. +

+ +
+ +`volumeClaimTemplates`
+ + +[]ClusterComponentVolumeClaimTemplate + + + +
+ +(Optional) + +

+Defines VolumeClaimTemplates to override. +Add new or override existing volume claim templates. +

+ +
+
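+An illustrative instance template; with a Cluster named `mycluster` and a Component named `mysql`, the two Pods below would be named `mycluster-mysql-high-mem-0` and `mycluster-mysql-high-mem-1`:
+
+```yaml
+instances:
+  - name: high-mem
+    replicas: 2
+    labels:
+      tier: hot               # merged into the Pods' labels
+    resources:
+      limits:
+        memory: 16Gi          # overrides the first container's resources
+```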

+InstanceUpdateStrategy + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+ +

+InstanceUpdateStrategy indicates the strategy that the InstanceSet +controller will use to perform updates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`partition`
+ +int32 + + +
+ +(Optional) + +

+Partition indicates the number of pods that should be updated during a rolling update. +The remaining pods will remain untouched. This is helpful in defining how many pods +should participate in the update process. The update process will follow the order +of pod names in descending lexicographical (dictionary) order. The default value is +ComponentSpec.Replicas (i.e., update all pods). +

+ +
+ +`maxUnavailable`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+The maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding up. This cannot be 0. +Defaults to 1. The field applies to all pods. That means if there is any unavailable pod, +it will be counted towards MaxUnavailable.

+ +
+
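+A small sketch of an update strategy (placement under the Component spec assumed):
+
+```yaml
+updateStrategy:
+  partition: 3                # update 3 Pods, highest names first
+  maxUnavailable: 1           # default; an absolute number or e.g. "10%"
+```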

+Issuer + +

+ +

+ +(Appears on:ClusterComponentSpec, TLSConfig) + +

+
+ +

+Issuer defines the TLS certificates issuer for the Cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ + +IssuerName + + + +
+ + +

+The issuer for TLS certificates. +It only allows two enum values: `KubeBlocks` and `UserProvided`. +

+
    +
  • +`KubeBlocks` indicates that the self-signed TLS certificates generated by the KubeBlocks Operator will be used. +
  • +
  • +`UserProvided` means that the user is responsible for providing their own CA, Cert, and Key. +In this case, the user-provided CA certificate, server certificate, and private key will be used +for TLS communication. +
  • +
+ +
+ +`secretRef`
+ + +TLSSecretRef + + + +
+ +(Optional) + +

+SecretRef is the reference to the secret that contains user-provided certificates. +It is required when the issuer is set to `UserProvided`. +

+ +
+
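+A sketch of a user-provided issuer, assuming the referenced Secret exposes the CA, certificate, and key under the lookup keys shown (all names hypothetical):
+
+```yaml
+issuer:
+  name: UserProvided          # or KubeBlocks for operator-signed certs
+  secretRef:
+    name: my-tls-secret       # Secret holding the certificates
+    ca: ca.crt                # assumed key names within the Secret
+    cert: tls.crt
+    key: tls.key
+```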

+IssuerName +(`string` alias) +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+IssuerName defines the name of the TLS certificates issuer. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"KubeBlocks" +

+
+ +

+IssuerKubeBlocks represents certificates that are signed by the KubeBlocks Operator. +

+ +
+ +

+"UserProvided" +

+
+ +

+IssuerUserProvided indicates that the user has provided their own CA-signed certificates. +

+ +
+

+LegacyRenderedTemplateSpec + +

+ +

+ +(Appears on:ComponentConfigSpec) + +

+
+ +

+LegacyRenderedTemplateSpec describes the configuration extension for the lazy rendered template. +Deprecated: LegacyRenderedTemplateSpec has been deprecated since 0.9.0 and will be removed in 0.10.0 +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ConfigTemplateExtension`
+ + +ConfigTemplateExtension + + + +
+ + +

+ +(Members of `ConfigTemplateExtension` are embedded into this type.) + +

+ +

+Extends the configuration template. +

+ +
+

+LetterCase +(`string` alias) +

+ +

+ +(Appears on:PasswordConfig) + +

+
+ +

+LetterCase defines the available cases to be used in password generation. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"LowerCases" +

+
+ +

+LowerCases represents the use of lower case letters only. +

+ +
+ +

+"MixedCases" +

+
+ +

+MixedCases represents the use of a mix of both lower and upper case letters. +

+ +
+ +

+"UpperCases" +

+
+ +

+UpperCases represents the use of upper case letters only. +

+ +
+

+LifecycleActionHandler + +

+ +

+ +(Appears on:ComponentLifecycleActions, RoleProbe) + +

+
+ +

+LifecycleActionHandler describes the implementation of a specific lifecycle action. +

+ +

+Each action is deemed successful if it returns an exit code of 0 for command executions, +or an HTTP 200 status for HTTP(s) actions. +Any other exit code or HTTP status is considered an indication of failure. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`builtinHandler`
+ + +BuiltinActionHandlerType + + + +
+ +(Optional) + +

+Specifies the name of the predefined action handler to be invoked for lifecycle actions. +

+ +

+Lorry, as a sidecar agent co-located with the database container in the same Pod, +includes a suite of built-in action implementations that are tailored to different database engines. +These are known as “builtin” handlers and include: `mysql`, `redis`, `mongodb`, `etcd`, +`postgresql`, `vanilla-postgresql`, `apecloud-postgresql`, `wesql`, `oceanbase`, `polardbx`.

+ +

+If the `builtinHandler` field is specified, it instructs Lorry to utilize its internal built-in action handler +to execute the specified lifecycle actions. +

+ +

+The `builtinHandler` field is of type `BuiltinActionHandlerType`, +which represents the name of the built-in handler. +The `builtinHandler` specified within the same `ComponentLifecycleActions` should be consistent across all +actions. +This means that if you specify a built-in handler for one action, you should use the same handler +for all other actions throughout the entire `ComponentLifecycleActions` collection. +

+ +

+If you need to define lifecycle actions for database engines not covered by the existing built-in support, +or when the pre-existing built-in handlers do not meet your specific needs, +you can use the `customHandler` field to define your own action implementation. +

+ +

+Deprecation Notice: +

+
    +
  • +In the future, the `builtinHandler` field will be deprecated in favor of using the `customHandler` field +for configuring all lifecycle actions. +
  • +
  • +Instead of using a name to indicate the built-in action implementations in Lorry, +the recommended approach will be to explicitly invoke the desired action implementation through +a gRPC interface exposed by the sidecar agent. +
  • +
  • +Developers will have the flexibility to either use the built-in action implementations provided by Lorry +or develop their own sidecar agent to implement custom actions and expose them via gRPC interfaces. +
  • +
  • +This change will allow for greater customization and extensibility of lifecycle actions, +as developers can create their own “builtin” implementations tailored to their specific requirements. +
  • +
+ +
+ +`customHandler`
+ + +Action + + + +
+ +(Optional) + +

+Specifies a user-defined hook or procedure that is called to perform the specific lifecycle action. +It offers a flexible and expandable approach for customizing the behavior of a Component by leveraging +tailored actions. +

+ +

+An Action can be implemented as either an ExecAction or an HTTPAction, with future versions planning +to support GRPCAction, +thereby accommodating unique logic for different database systems within the Action’s framework. +

+ +

+In future iterations, all built-in handlers are expected to transition to GRPCAction. +This change means that Lorry or other sidecar agents will expose the implementation of actions +through a GRPC interface for external invocation. +Then the controller will interact with these actions via GRPCAction calls. +

+ +
+
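+Two hedged sketches of the mutually exclusive handler styles (the script path is hypothetical):
+
+```yaml
+# Built-in handler: Lorry runs its bundled implementation.
+roleProbe:
+  builtinHandler: mysql
+
+# Custom handler: a user-defined Action for unsupported engines.
+postProvision:
+  customHandler:
+    exec:
+      command: ["/bin/sh", "-c", "/scripts/post-provision.sh"]
+```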

+LogConfig + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies a descriptive label for the log type, such as ‘slow’ for a MySQL slow log file. +It provides a clear identification of the log’s purpose and content. +

+ +
+ +`filePathPattern`
+ +string + + +
+ + +

+Specifies the paths or patterns identifying where the log files are stored. +This field allows the system to locate and manage log files effectively. +

+ +

+Examples: +

+
    +
  • +/home/postgres/pgdata/pgroot/data/log/postgresql-* +
  • +
  • +/data/mysql/log/mysqld-error.log +
  • +
+ +
+

+MergedPolicy +(`string` alias) +

+ +

+ +(Appears on:ConfigTemplateExtension) + +

+
+ +

+MergedPolicy defines how to merge external imported templates into component templates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"none" +

+
+ +
+ +

+"add" +

+
+ +
+ +

+"patch" +

+
+ +
+ +

+"replace" +

+
+ +
+

+MonitorConfig + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`builtIn`
+ +bool + + +
+ +(Optional) + +

+builtIn is a switch to enable KubeBlocks builtIn monitoring. +If builtIn is set to true, monitor metrics will be scraped automatically. +If builtIn is set to false, the provider must supply its own ExporterConfig and sidecar container.

+ +
+ +`exporterConfig`
+ + +ExporterConfig + + + +
+ +(Optional) + +

+exporterConfig is provided by the provider and specifies the information the Time Series Database needs to scrape metrics. +exporterConfig is only valid when builtIn is false.

+ +
+

+MultipleClusterObjectCombinedOption + +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectCombinedOption defines options for handling combined variables. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`newVarSuffix`
+ +string + + +
+ +(Optional) + +

+If set, the existing variable will be kept, and a new variable will be defined with the specified suffix +in pattern: $(var.name)_$(suffix). +The new variable will be auto-created and placed behind the existing one. +If not set, the existing variable will be reused with the value format defined below. +

+ +
+ +`valueFormat`
+ + +MultipleClusterObjectValueFormat + + + +
+ +(Optional) + +

+The format of the value that the operator will use to compose values from multiple components. +

+ +
+ +`flattenFormat`
+ + +MultipleClusterObjectValueFormatFlatten + + + +
+ +(Optional) + +

+The flatten format, default is: $(comp-name-1):value,$(comp-name-2):value. +

+ +
+
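+A sketch of a combined option; with components `comp-1` and `comp-2` it would yield a value like `comp-1:9000,comp-2:9000`:
+
+```yaml
+combinedOption:
+  newVarSuffix: COMBINED      # new var named $(var.name)_COMBINED
+  valueFormat: Flatten
+  flattenFormat:
+    delimiter: ","            # between pairs
+    keyValueDelimiter: ":"    # between component name and value
+```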

+MultipleClusterObjectOption + +

+ +

+ +(Appears on:ClusterObjectReference) + +

+
+ +

+MultipleClusterObjectOption defines the options for handling multiple cluster objects matched. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requireAllComponentObjects`
+ +bool + + +
+ +(Optional) + +

+RequireAllComponentObjects controls whether all component objects must exist before resolving. +If set to true, resolving will only proceed if all component objects are present. +

+ +
+ +`strategy`
+ + +MultipleClusterObjectStrategy + + + +
+ + +

+Define the strategy for handling multiple cluster objects. +

+ +
+ +`combinedOption`
+ + +MultipleClusterObjectCombinedOption + + + +
+ +(Optional) + +

+Define the options for handling combined variables. +Valid only when the strategy is set to “combined”. +

+ +
+

+MultipleClusterObjectStrategy +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectOption) + +

+
+ +

+MultipleClusterObjectStrategy defines the strategy for handling multiple cluster objects. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"combined" +

+
+ +

+MultipleClusterObjectStrategyCombined - the values from all matched components will be combined into a single +variable using the specified option. +

+ +
+ +

+"individual" +

+
+ +

+MultipleClusterObjectStrategyIndividual - each matched component will have its individual variable with its name +as the suffix. +This is required when referencing credential variables that cannot be passed by values. +

+ +
+

+MultipleClusterObjectValueFormat +(`string` alias) +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormat defines the format details for the value. +

+
+ + + + + + + + + + + + + + +
ValueDescription
+ +

+"Flatten" +

+
+ +
+

+MultipleClusterObjectValueFormatFlatten + +

+ +

+ +(Appears on:MultipleClusterObjectCombinedOption) + +

+
+ +

+MultipleClusterObjectValueFormatFlatten defines the flatten format for the value. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`delimiter`
+ +string + + +
+ + +

+Pair delimiter. +

+ +
+ +`keyValueDelimiter`
+ +string + + +
+ + +

+Key-value delimiter. +

+ +
+

+NamedVar + +

+ +

+ +(Appears on:ContainerVars, ServiceVars) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +
+ +`option`
+ + +VarOption + + + +
+ +(Optional) + +
+

+PasswordConfig + +

+ +

+ +(Appears on:ComponentSystemAccount, SystemAccount, SystemAccountSpec) + +

+
+ +

+PasswordConfig customizes the complexity of the password generation pattern.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`length`
+ +int32 + + +
+ +(Optional) + +

+The length of the password. +

+ +
+ +`numDigits`
+ +int32 + + +
+ +(Optional) + +

+The number of digits in the password. +

+ +
+ +`numSymbols`
+ +int32 + + +
+ +(Optional) + +

+The number of symbols in the password. +

+ +
+ +`letterCase`
+ + +LetterCase + + + +
+ +(Optional) + +

+The case of the letters in the password. +

+ +
+ +`seed`
+ +string + + +
+ +(Optional) + +

+Seed to generate the account’s password. +Cannot be updated. +

+ +
+
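+An illustrative password policy (the seed is hypothetical):
+
+```yaml
+passwordConfig:
+  length: 16
+  numDigits: 4
+  numSymbols: 2
+  letterCase: MixedCases      # LowerCases | UpperCases | MixedCases
+  seed: mycluster-root        # cannot be updated
+```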

+Payload + +

+ +

+ +(Appears on:ConfigurationItemDetail) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`-`
+ +map[string]any + + +
+ +(Optional) + +

+Holds the payload data. This field is optional and can contain any type of data. +Not included in the JSON representation of the object. +

+ +
+

+PersistentVolumeClaimSpec + +

+ +

+ +(Appears on:ClusterComponentVolumeClaimTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`accessModes`
+ + +[]Kubernetes core/v1.PersistentVolumeAccessMode + + + +
+ +(Optional) + +

+Contains the desired access modes the volume should have. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.VolumeResourceRequirements + + + +
+ +(Optional) + +

+Represents the minimum resources the volume should have. +If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements that +are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources. +

+ +
+ +`storageClassName`
+ +string + + +
+ +(Optional) + +

+The name of the StorageClass required by the claim. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. +

+ +
+ +`volumeMode`
+ + +Kubernetes core/v1.PersistentVolumeMode + + + +
+ +(Optional) + +

+Defines what type of volume is required by the claim, either Block or Filesystem. +

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:ClusterDefinitionStatus, ComponentDefinitionStatus, ComponentVersionStatus, ServiceDescriptorStatus) + +

+
+ +

+Phase represents the current status of the ClusterDefinition CR. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+AvailablePhase indicates that the object is in an available state. +

+ +
+ +

+"Unavailable" +

+
+ +

+UnavailablePhase indicates that the object is in an unavailable state. +

+ +
+

+PodAntiAffinity +(`string` alias) +

+ +

+ +(Appears on:Affinity) + +

+
+ +

+PodAntiAffinity defines the pod anti-affinity strategy. +

+ +

+This strategy determines how pods are scheduled in relation to other pods, with the aim of either spreading pods +across nodes (Preferred) or ensuring that certain pods do not share a node (Required). +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Preferred" +

+
+ +

+Preferred indicates that the scheduler will try to enforce the anti-affinity rules, but it will not guarantee it. +

+ +
+ +

+"Required" +

+
+ +

+Required indicates that the scheduler must enforce the anti-affinity rules and will not schedule the pods unless +the rules are met. +

+ +
+

+PodAvailabilityPolicy +(`string` alias) +

+
+ +

+PodAvailabilityPolicy defines the pod availability strategy.

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"None" +

+
+ +
+ +

+"UnAvailable" +

+
+ +
+

+PostStartAction + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+PostStartAction is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cmdExecutorConfig`
+ + +CmdExecutorConfig + + + +
+ + +

+Specifies the post-start command to be executed. +

+ +
+ +`scriptSpecSelectors`
+ + +[]ScriptSpecSelector + + + +
+ +(Optional) + +

+Used to select the script that need to be referenced. +When defined, the scripts defined in scriptSpecs can be referenced within the CmdExecutorConfig. +

+ +
+

+PreConditionType +(`string` alias) +

+ +

+ +(Appears on:Action) + +

+
+ +

+PreConditionType defines the preCondition type of the action execution. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"ClusterReady" +

+
+ +
+ +

+"ComponentReady" +

+
+ +
+ +

+"Immediately" +

+
+ +
+ +

+"RuntimeReady" +

+
+ +
+

+Probe + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`Action`
+ + +Action + + + +
+ + +

+ +(Members of `Action` are embedded into this type.) + +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before the RoleProbe +begins to detect the container’s role. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency at which the probe is conducted. This value is expressed in seconds. +Default to 10 seconds. Minimum value is 1. +

+ +
+ +`successThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Minimum value is 1. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ +(Optional) + +

+Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1. +

+ +
+

+PrometheusScheme +(`string` alias) +

+ +

+ +(Appears on:Exporter) + +

+
+ +

+PrometheusScheme defines the protocol Prometheus uses to scrape metrics.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"http" +

+
+ +
+ +

+"https" +

+
+ +
+

+ProtectedVolume + +

+ +

+ +(Appears on:VolumeProtectionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The Name of the volume to protect. +

+ +
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+Defines the high watermark threshold for the volume; it overrides the component-level threshold. +If the value is invalid, it will be ignored and the component-level threshold will be used.

+ +
+

+ProvisionPolicy + +

+ +

+ +(Appears on:SystemAccountConfig) + +

+
+ +

+ProvisionPolicy defines the policy details for creating accounts. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +ProvisionPolicyType + + + +
+ + +

+Specifies the method to provision an account. +

+ +
+ +`scope`
+ + +ProvisionScope + + + +
+ + +

+Defines the scope within which the account is provisioned. +

+ +
+ +`statements`
+ + +ProvisionStatements + + + +
+ +(Optional) + +

+The statement to provision an account. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+The external secret to refer. +

+ +
+

+ProvisionPolicyType +(`string` alias) +

+ +

+ +(Appears on:ProvisionPolicy) + +

+
+ +

+ProvisionPolicyType defines the policy for creating accounts. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"CreateByStmt" +

+
+ +

+CreateByStmt creates an account using the creation and deletion statements given by the provider.

+ +
+ +

+"ReferToExisting" +

+
+ +

+ReferToExisting does not create an account; instead, it creates a secret by copying data from the referenced secret.

+ +
+

+ProvisionScope +(`string` alias) +

+ +

+ +(Appears on:ProvisionPolicy) + +

+
+ +

+ProvisionScope defines the scope of provision within a component. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"AllPods" +

+
+ +

+AllPods indicates that accounts will be created for all pods within the component. +

+ +
+ +

+"AnyPods" +

+
+ +

+AnyPods indicates that accounts will be created only on a single pod within the component. +

+ +
+

+ProvisionSecretRef + +

+ +

+ +(Appears on:ComponentSystemAccount, ProvisionPolicy, SystemAccount) + +

+
+ +

+ProvisionSecretRef represents the reference to a secret. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The unique identifier of the secret. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+The namespace where the secret is located. +

+ +
+

+ProvisionStatements + +

+ +

+ +(Appears on:ProvisionPolicy) + +

+
+ +

+ProvisionStatements defines the statements used to create accounts. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`creation`
+ +string + + +
+ + +

+Specifies the statement required to create a new account with the necessary privileges. +

+ +
+ +`update`
+ +string + + +
+ +(Optional) + +

+Defines the statement required to update the password of an existing account. +

+ +
+ +`deletion`
+ +string + + +
+ +(Optional) + +

+Defines the statement required to delete an existing account. +Typically used in conjunction with the creation statement to delete an account before recreating it. +For example, one might use a `drop user if exists` statement followed by a `create user` statement to ensure a fresh account. +

+ +

+Deprecated: This field is deprecated and the update statement should be used instead. +

+ +
+
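+A sketch of the (deprecated since v0.9) statements, assuming the operator substitutes `$(USERNAME)` and `$(PASSWD)` placeholders:
+
+```yaml
+statements:
+  creation: CREATE USER IF NOT EXISTS $(USERNAME) IDENTIFIED BY '$(PASSWD)';
+  update: ALTER USER $(USERNAME) IDENTIFIED BY '$(PASSWD)';
+  # deletion is deprecated; prefer the update statement
+```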

+RSMSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+RSMSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+Specifies a list of roles defined within the system. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Defines the method used to probe a role. +

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Indicates the actions required for dynamic membership reconfiguration. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Describes the strategy for updating Members (Pods). +

+
    +
  • +`Serial`: Updates Members sequentially to ensure minimum component downtime. +
  • +
  • +`BestEffortParallel`: Updates Members in parallel to ensure minimum component write downtime. +
  • +
  • +`Parallel`: Forces parallel updates. +
  • +
+ +
+

+ReconcileDetail + +

+ +

+ +(Appears on:ConfigurationItemDetailStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`policy`
+ +string + + +
+ +(Optional) + +

+Represents the policy applied during the most recent execution. +

+ +
+ +`execResult`
+ +string + + +
+ +(Optional) + +

+Represents the outcome of the most recent execution. +

+ +
+ +`currentRevision`
+ +string + + +
+ +(Optional) + +

+Represents the current revision of the configuration item. +

+ +
+ +`succeedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of pods where configuration changes were successfully applied. +

+ +
+ +`expectedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the total number of pods that require execution of configuration changes. +

+ +
+ +`errMessage`
+ +string + + +
+ +(Optional) + +

+Represents the error message generated when the execution of configuration changes fails. +

+ +
+

+ReloadOptions + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+ReloadOptions defines the mechanisms available for dynamically reloading a process within K8s without requiring a restart. +

+ +

+Only one of the mechanisms can be specified at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`unixSignalTrigger`
+ + +UnixSignalTrigger + + + +
+ +(Optional) + +

+Used to trigger a reload by sending a specific Unix signal to the process. +

+ +
+ +`shellTrigger`
+ + +ShellTrigger + + + +
+ +(Optional) + +

+Allows to execute a custom shell script to reload the process. +

+ +
+ +`tplScriptTrigger`
+ + +TPLScriptTrigger + + + +
+ +(Optional) + +

+Enables reloading process using a Go template script. +

+ +
+ +`autoTrigger`
+ + +AutoTrigger + + + +
+ +(Optional) + +

+Automatically perform the reload when specified conditions are met. +

+ +
+

+ReplicaRole + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ReplicaRole represents a role that can be assumed by a component instance. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the role’s identifier. It is used to set the “apps.kubeblocks.io/role” label value +on the corresponding object. +

+ +

+This field is immutable once set. +

+ +
+ +`serviceable`
+ +bool + + +
+ +(Optional) + +

+Indicates whether a replica assigned this role is capable of providing services. +

+ +

+This field is immutable once set. +

+ +
+ +`writable`
+ +bool + + +
+ +(Optional) + +

+Determines if a replica in this role has the authority to perform write operations. +A writable replica can modify data, handle update operations. +

+ +

+This field is immutable once set. +

+ +
+ +`votable`
+ +bool + + +
+ +(Optional) + +

+Specifies whether a replica with this role has voting rights. +In distributed systems, this typically means the replica can participate in consensus decisions, +configuration changes, or other processes that require a quorum. +

+ +

+This field is immutable once set. +

+ +
+
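+An illustrative pair of roles for a leader/follower topology:
+
+```yaml
+roles:
+  - name: leader              # sets apps.kubeblocks.io/role=leader
+    serviceable: true
+    writable: true
+    votable: true
+  - name: follower            # read-only but voting member
+    serviceable: true
+    writable: false
+    votable: true
+```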

+ReplicasLimit + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+ReplicasLimit defines the valid range for the number of replicas.

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`minReplicas`
+ +int32 + + +
+ + +

+The minimum limit of replicas. +

+ +
+ +`maxReplicas`
+ +int32 + + +
+ + +

+The maximum limit of replicas. +

+ +
+

+ReplicationSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ReplicationSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`StatefulSetSpec`
+ + +StatefulSetSpec + + + +
+ + +

+ +(Members of `StatefulSetSpec` are embedded into this type.) + +

+ +
+

+RerenderResourceType +(`string` alias) +

+ +

+ +(Appears on:ComponentConfigSpec) + +

+
+ +

+RerenderResourceType defines the resource requirements for a component. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"hscale" +

+
+ +
+ +

+"vscale" +

+
+ +
+ +

+"shardingHScale" +

+
+ +
+

+ResourceMeta + +

+ +

+ +(Appears on:ConfigMapRef, SecretRef) + +

+
+ +

+ResourceMeta encapsulates metadata and configuration for referencing ConfigMaps and Secrets as volumes. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name is the name of the referenced ConfigMap or Secret object. It must conform to DNS label standards. +

+ +
+ +`mountPoint`
+ +string + + +
+ + +

+MountPoint is the filesystem path where the volume will be mounted. +

+ +
+ +`subPath`
+ +string + + +
+ +(Optional) + +

+SubPath specifies a path within the volume from which to mount. +

+ +
+ +`asVolumeFrom`
+ +[]string + + +
+ +(Optional) + +

+AsVolumeFrom lists the names of containers in which the volume should be mounted. +

+ +
+

+RetryPolicy + +

+ +

+ +(Appears on:Action) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`maxRetries`
+ +int + + +
+ +(Optional) + +

+Defines the maximum number of retry attempts that should be made for a given Action. +This value is set to 0 by default, indicating that no retries will be made. +

+ +
+ +`retryInterval`
+ +time.Duration + + +
+ +(Optional) + +

+Indicates the duration of time to wait between each retry attempt. +This value is set to 0 by default, indicating that there will be no delay between retry attempts. +

+ +
+

+RoleArbitrator +(`string` alias) +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+ +

+RoleArbitrator defines how to arbitrate the role of replicas. +

+ +

+Deprecated since v0.9 +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"External" +

+
+ +
+ +

+"Lorry" +

+
+ +
+

+RoleProbe + +

+ +

+ +(Appears on:ComponentLifecycleActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`LifecycleActionHandler`
+ + +LifecycleActionHandler + + + +
+ + +

+ +(Members of `LifecycleActionHandler` are embedded into this type.) + +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before the RoleProbe +begins to detect the container’s role. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency at which the probe is conducted. This value is expressed in seconds. +Default to 10 seconds. Minimum value is 1. +

+ +
+

+SchedulingPolicy + +

+ +

+ +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec, InstanceTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+If specified, the Pod will be dispatched by the specified scheduler. +If not specified, the Pod will be dispatched by the default scheduler.

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+NodeSelector is a selector which must be true for the Pod to fit on a node: +it must match a node’s labels for the Pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, +the scheduler simply schedules this Pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules of the Cluster, including NodeAffinity, PodAffinity, and PodAntiAffinity. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
    +
  • +The `key`, `value`, and `effect` identify the taint that the toleration matches. +
  • +
  • +The `operator` determines how the toleration matches the taint. +
  • +
+ +

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+TopologySpreadConstraints describes how a group of Pods ought to spread across topology +domains. Scheduler will schedule Pods in a way which abides by the constraints. +All topologySpreadConstraints are ANDed. +

+ +
+
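+A hedged sketch combining the most common scheduling knobs (labels and values are hypothetical):
+
+```yaml
+schedulingPolicy:
+  nodeSelector:
+    disktype: ssd
+  tolerations:
+    - key: dedicated
+      operator: Equal
+      value: database
+      effect: NoSchedule
+  topologySpreadConstraints:
+    - maxSkew: 1
+      topologyKey: topology.kubernetes.io/zone
+      whenUnsatisfiable: DoNotSchedule
+      labelSelector:
+        matchLabels:
+          app.kubernetes.io/instance: mycluster
+```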

+ScriptSpecSelector + +

+ +

+ +(Appears on:ComponentSwitchover, PostStartAction, SwitchoverAction) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the name of the ScriptSpec referent. +

+ +
+

+SecretRef + +

+ +

+ +(Appears on:UserResourceRefs) + +

+
+ +

+SecretRef defines a reference to a Secret. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ResourceMeta`
+ + +ResourceMeta + + + +
+ + +

+ +(Members of `ResourceMeta` are embedded into this type.) + +

+ +
+ +`secret`
+ + +Kubernetes core/v1.SecretVolumeSource + + + +
+ + +

+Secret specifies the Secret to be mounted as a volume. +

+ +
+

+Service + +

+ +

+ +(Appears on:ClusterService, ComponentService) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name defines the name of the service. +Others can refer to this service by its name (e.g., connection credential). +Cannot be updated.

+ +
+ +`serviceName`
+ +string + + +
+ +(Optional) + +

+ServiceName defines the name of the underlying service object. +If not specified, the default service name with different patterns will be used: +

+
    +
  • +CLUSTER_NAME: for cluster-level services +
  • +
  • +CLUSTER_NAME-COMPONENT_NAME: for component-level services +
  • +
+ +

+Only one default service name is allowed. +Cannot be updated. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+If ServiceType is LoadBalancer, cloud provider related parameters can be put here +More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`spec`
+ + +Kubernetes core/v1.ServiceSpec + + + +
+ +(Optional) + +

+Spec defines the behavior of a service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`ports`
+ + +[]Kubernetes core/v1.ServicePort + + + +
+ + +

+The list of ports that are exposed by this service. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`selector`
+ +map[string]string + + +
+ +(Optional) + +

+Route service traffic to pods with label keys and values matching this +selector. If empty or not present, the service is assumed to have an +external process managing its endpoints, which Kubernetes will not +modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. +Ignored if type is ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/ +

+ +
+ +`clusterIP`
+ +string + + +
+ +(Optional) + +

+clusterIP is the IP address of the service and is usually assigned +randomly. If an address is specified manually, is in-range (as per +system configuration), and is not in use, it will be allocated to the +service; otherwise creation of the service will fail. This field may not +be changed through updates unless the type field is also being changed +to ExternalName (which requires this field to be blank) or the type +field is being changed from ExternalName (in which case this field may +optionally be specified, as describe above). Valid values are “None”, +empty string (“”), or a valid IP address. Setting this to “None” makes a +“headless service” (no virtual IP), which is useful when direct endpoint +connections are preferred and proxying is not required. Only applies to +types ClusterIP, NodePort, and LoadBalancer. If this field is specified +when creating a Service of type ExternalName, creation will fail. This +field will be wiped when updating a Service to type ExternalName. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`clusterIPs`
+ +[]string + + +
+ +(Optional) + +

+ClusterIPs is a list of IP addresses assigned to this service, and are +usually assigned randomly. If an address is specified manually, is +in-range (as per system configuration), and is not in use, it will be +allocated to the service; otherwise creation of the service will fail. +This field may not be changed through updates unless the type field is +also being changed to ExternalName (which requires this field to be +empty) or the type field is being changed from ExternalName (in which +case this field may optionally be specified, as describe above). Valid +values are “None”, empty string (“”), or a valid IP address. Setting +this to “None” makes a “headless service” (no virtual IP), which is +useful when direct endpoint connections are preferred and proxying is +not required. Only applies to types ClusterIP, NodePort, and +LoadBalancer. If this field is specified when creating a Service of type +ExternalName, creation will fail. This field will be wiped when updating +a Service to type ExternalName. If this field is not specified, it will +be initialized from the clusterIP field. If this field is specified, +clients must ensure that clusterIPs[0] and clusterIP have the same +value. +

+ +

+This field may hold a maximum of two entries (dual-stack IPs, in either order). +These IPs must correspond to the values of the ipFamilies field. Both +clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`type`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+type determines how the Service is exposed. Defaults to ClusterIP. Valid +options are ExternalName, ClusterIP, NodePort, and LoadBalancer. +“ClusterIP” allocates a cluster-internal IP address for load-balancing +to endpoints. Endpoints are determined by the selector or if that is not +specified, by manual construction of an Endpoints object or +EndpointSlice objects. If clusterIP is “None”, no virtual IP is +allocated and the endpoints are published as a set of endpoints rather +than a virtual IP. +“NodePort” builds on ClusterIP and allocates a port on every node which +routes to the same endpoints as the clusterIP. +“LoadBalancer” builds on NodePort and creates an external load-balancer +(if supported in the current cloud) which routes to the same endpoints +as the clusterIP. +“ExternalName” aliases this service to the specified externalName. +Several other fields do not apply to ExternalName services. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +

+ +
+ +`externalIPs`
+ +[]string + + +
+ +(Optional) + +

+externalIPs is a list of IP addresses for which nodes in the cluster +will also accept traffic for this service. These IPs are not managed by +Kubernetes. The user is responsible for ensuring that traffic arrives +at a node with this IP. A common example is external load-balancers +that are not part of the Kubernetes system. +

+ +
+ +`sessionAffinity`
+ + +Kubernetes core/v1.ServiceAffinity + + + +
+ +(Optional) + +

+Supports “ClientIP” and “None”. Used to maintain session affinity. +Enable client IP based session affinity. +Must be ClientIP or None. +Defaults to None. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +`loadBalancerIP`
+ +string + + +
+ +(Optional) + +

+Only applies to Service Type: LoadBalancer. +This feature depends on whether the underlying cloud-provider supports specifying +the loadBalancerIP when a load balancer is created. +This field will be ignored if the cloud-provider does not support the feature. +Deprecated: This field was under-specified and its meaning varies across implementations. +Using it is non-portable and it may not support dual-stack. +Users are encouraged to use implementation-specific annotations when available. +

+ +
+ +`loadBalancerSourceRanges`
+ +[]string + + +
+ +(Optional) + +

+If specified and supported by the platform, traffic through the cloud-provider +load-balancer will be restricted to the specified client IPs. This field will be ignored if the +cloud-provider does not support the feature. +More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/

+ +
+ +`externalName`
+ +string + + +
+ +(Optional) + +

+externalName is the external reference that discovery mechanisms will +return as an alias for this service (e.g. a DNS CNAME record). No +proxying will be involved. Must be a lowercase RFC-1123 hostname +(https://tools.ietf.org/html/rfc1123) and requires `type` to be “ExternalName”. +

+ +
+ +`externalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceExternalTrafficPolicy + + + +
+ +(Optional) + +

+externalTrafficPolicy describes how nodes distribute service traffic they +receive on one of the Service’s “externally-facing” addresses (NodePorts, +ExternalIPs, and LoadBalancer IPs). If set to “Local”, the proxy will configure +the service in a way that assumes that external load balancers will take care +of balancing the service traffic between nodes, and so each node will deliver +traffic only to the node-local endpoints of the service, without masquerading +the client source IP. (Traffic mistakenly sent to a node with no endpoints will +be dropped.) The default value, “Cluster”, uses the standard behavior of +routing to all endpoints evenly (possibly modified by topology and other +features). Note that traffic sent to an External IP or LoadBalancer IP from +within the cluster will always get “Cluster” semantics, but clients sending to +a NodePort from within the cluster may need to take traffic policy into account +when picking a node. +

+ +
+ +`healthCheckNodePort`
+ +int32 + + +
+ +(Optional) + +

+healthCheckNodePort specifies the healthcheck nodePort for the service. +This only applies when type is set to LoadBalancer and +externalTrafficPolicy is set to Local. If a value is specified, is +in-range, and is not in use, it will be used. If not specified, a value +will be automatically allocated. External systems (e.g. load-balancers) +can use this port to determine if a given node holds endpoints for this +service or not. If this field is specified when creating a Service +which does not need it, creation will fail. This field will be wiped +when updating a Service to no longer need it (e.g. changing type). +This field cannot be updated once set. +

+ +
+ +`publishNotReadyAddresses`
+ +bool + + +
+ +(Optional) + +

+publishNotReadyAddresses indicates that any agent which deals with endpoints for this +Service should disregard any indications of ready/not-ready. +The primary use case for setting this field is for a StatefulSet’s Headless Service to +propagate SRV DNS records for its Pods for the purpose of peer discovery. +The Kubernetes controllers that generate Endpoints and EndpointSlice resources for +Services interpret this to mean that all endpoints are considered “ready” even if the +Pods themselves are not. Agents which consume only Kubernetes generated endpoints +through the Endpoints or EndpointSlice resources can safely assume this behavior. +

+ +
+ +`sessionAffinityConfig`
+ + +Kubernetes core/v1.SessionAffinityConfig + + + +
+ +(Optional) + +

+sessionAffinityConfig contains the configurations of session affinity. +

+ +
+ +`ipFamilies`
+ + +[]Kubernetes core/v1.IPFamily + + + +
+ +(Optional) + +

+IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this +service. This field is usually assigned automatically based on cluster +configuration and the ipFamilyPolicy field. If this field is specified +manually, and the requested family is available in the cluster and +allowed by the ipFamilyPolicy, it will be used; otherwise creation of +the service will fail. This field is conditionally mutable: it allows +for adding or removing a secondary IP family, but it does not allow +changing the primary IP family of the Service. Valid values are “IPv4” +and “IPv6”. This field only applies to Services of types ClusterIP, +NodePort, and LoadBalancer, and does apply to “headless” services. +This field will be wiped when updating a Service to type ExternalName.

+ +

+This field may hold a maximum of two entries (dual-stack families, in +either order). These families must correspond to the values of the +clusterIPs field, if specified. Both clusterIPs and ipFamilies are +governed by the ipFamilyPolicy field. +

+ +
+ +`ipFamilyPolicy`
+ + +Kubernetes core/v1.IPFamilyPolicy + + + +
+ +(Optional) + +

+IPFamilyPolicy represents the dual-stack-ness requested or required by +this Service. If there is no value provided, then this field will be set +to SingleStack. Services can be “SingleStack” (a single IP family), +“PreferDualStack” (two IP families on dual-stack configured clusters or +a single IP family on single-stack clusters), or “RequireDualStack” +(two IP families on dual-stack configured clusters, otherwise fail). The +ipFamilies and clusterIPs fields depend on the value of this field. This +field will be wiped when updating a service to type ExternalName. +

+ +
+ +`allocateLoadBalancerNodePorts`
+ +bool + + +
+ +(Optional) + +

+allocateLoadBalancerNodePorts defines if NodePorts will be automatically +allocated for services with type LoadBalancer. Default is “true”. It +may be set to “false” if the cluster load-balancer does not rely on +NodePorts. If the caller requests specific NodePorts (by specifying a +value), those requests will be respected, regardless of this field. +This field may only be set for services with type LoadBalancer and will +be cleared if the type is changed to any other type. +

+ +
+ +`loadBalancerClass`
+ +string + + +
+ +(Optional) + +

+loadBalancerClass is the class of the load balancer implementation this Service belongs to. +If specified, the value of this field must be a label-style identifier, with an optional prefix, +e.g. “internal-vip” or “example.com/internal-vip”. Unprefixed names are reserved for end-users. +This field can only be set when the Service type is ‘LoadBalancer’. If not set, the default load +balancer implementation is used, today this is typically done through the cloud provider integration, +but should apply for any default implementation. If set, it is assumed that a load balancer +implementation is watching for Services with a matching class. Any default load balancer +implementation (e.g. cloud providers) should ignore Services that set this field. +This field can only be set when creating or updating a Service to type ‘LoadBalancer’. +Once set, it can not be changed. This field will be wiped when a service is updated to a non ‘LoadBalancer’ type. +

+ +
+ +`internalTrafficPolicy`
+ + +Kubernetes core/v1.ServiceInternalTrafficPolicy + + + +
+ +(Optional) + +

+InternalTrafficPolicy describes how nodes distribute service traffic they +receive on the ClusterIP. If set to “Local”, the proxy will assume that pods +only want to talk to endpoints of the service on the same node as the pod, +dropping the traffic if there are no local endpoints. The default value, +“Cluster”, uses the standard behavior of routing to all endpoints evenly +(possibly modified by topology and other features). +

+ +
+ +
+ +`roleSelector`
+ +string + + +
+ +(Optional) + +

+Extends the above `serviceSpec.selector` by allowing you to specify a defined role as a selector for the service. +When `roleSelector` is set, it adds a label selector “kubeblocks.io/role: {roleSelector}” +to the `serviceSpec.selector`. +Example usage:

+
+
+  roleSelector: "leader"
+
+
+ +

+In this example, setting `roleSelector` to “leader” will add a label selector +“kubeblocks.io/role: leader” to the `serviceSpec.selector`. +This means that the service will select and route traffic to Pods with the label +“kubeblocks.io/role” set to “leader”. +

+ +

+Note that if `podService` is set to true, `roleSelector` will be ignored. +The `podService` flag takes precedence over `roleSelector` and generates a service for each Pod.

+ +
+

+ServiceDescriptorSpec + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorSpec defines the desired state of ServiceDescriptor. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Describes the type of database service provided by the external service. +For example, “mysql”, “redis”, “mongodb”. +This field categorizes databases by their functionality, protocol and compatibility, facilitating appropriate +service integration based on their unique capabilities. +

+ +

+This field is case-insensitive. +

+ +

+It also supports abbreviations for some well-known databases: +- “pg”, “pgsql”, “postgres”, “postgresql”: PostgreSQL service +- “zk”, “zookeeper”: ZooKeeper service +- “es”, “elasticsearch”: Elasticsearch service +- “mongo”, “mongodb”: MongoDB service +- “ch”, “clickhouse”: ClickHouse service +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Describes the version of the service provided by the external service. +This is crucial for ensuring compatibility between different components of the system, +as different versions of a service may have varying features. +

+ +
+ +`endpoint`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the endpoint of the external service. +

+ +

+If the service is exposed via a cluster, the endpoint will be provided in the format of `host:port`. +

+ +
+ +`host`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the service or IP address of the external service. +

+ +
+ +`port`
+ + +CredentialVar + + + +
+ +(Optional) + +

+Specifies the port of the external service. +

+ +
+ +`auth`
+ + +ConnectionCredentialAuth + + + +
+ +(Optional) + +

+Specifies the authentication credentials required for accessing an external service. +

+ +
+
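+
+For illustration, a sketch of a ServiceDescriptor manifest assembled from the fields above; the apiVersion, the names +and address, and the credential layout (a `value` under each CredentialVar, username/password under `auth`) are +assumptions rather than definitions taken from this reference:
+
+apiVersion: apps.kubeblocks.io/v1alpha1   # assumed group/version
+kind: ServiceDescriptor
+metadata:
+  name: external-mysql                    # hypothetical name
+spec:
+  serviceKind: mysql                      # case-insensitive; abbreviations are supported
+  serviceVersion: "8.0.30"                # hypothetical version
+  endpoint:
+    value: mysql.example.com:3306         # `host:port` format
+  host:
+    value: mysql.example.com
+  port:
+    value: "3306"
+  auth:                                   # username/password sub-fields are assumed
+    username:
+      value: admin
+    password:
+      value: example-password
+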

+ServiceDescriptorStatus + +

+ +

+ +(Appears on:ServiceDescriptor) + +

+
+ +

+ServiceDescriptorStatus defines the observed state of ServiceDescriptor +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Indicates the current lifecycle phase of the ServiceDescriptor. This can be either ‘Available’ or ‘Unavailable’. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation detailing the reason for the current phase of the ServiceDescriptor.

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number that has been processed by the controller. +

+ +
+

+ServicePort + +

+ +

+ +(Appears on:ServiceSpec) + +

+
+ +

+ServicePort is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of this port within the service. This must be a DNS_LABEL. +All ports within a ServiceSpec must have unique names. When considering +the endpoints for a Service, this must match the ‘name’ field in the +EndpointPort. +

+ +
+ +`protocol`
+ + +Kubernetes core/v1.Protocol + + + +
+ +(Optional) + +

+The IP protocol for this port. Supports “TCP”, “UDP”, and “SCTP”. +Default is TCP. +

+ +
+ +`appProtocol`
+ +string + + +
+ +(Optional) + +

+The application protocol for this port. +This field follows standard Kubernetes label syntax. +Un-prefixed names are reserved for IANA standard service names (as per +RFC-6335 and https://www.iana.org/assignments/service-names). +Non-standard protocols should use prefixed names such as +mycompany.com/my-custom-protocol. +

+ +
+ +`port`
+ +int32 + + +
+ + +

+The port that will be exposed by this service. +

+ +
+ +`targetPort`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Number or name of the port to access on the pods targeted by the service. +

+ +

+Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. +

+
    +
  • +If this is a string, it will be looked up as a named port in the target Pod’s container ports. +
  • +If this is not specified, the value of the `port` field is used (an identity map). +
+ +

+This field is ignored for services with clusterIP=None, and should be +omitted or set equal to the `port` field. +

+ +

+More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service +

+ +
+

+ServiceRef + +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the identifier of the service reference declaration. +It corresponds to the serviceRefDeclaration name defined in either: +

+
    +
  • +`componentDefinition.spec.serviceRefDeclarations[*].name` +
  • +`clusterDefinition.spec.componentDefs[*].serviceRefDeclarations[*].name` (deprecated) +
+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced Cluster or the namespace of the referenced ServiceDescriptor object. +If not provided, the referenced Cluster and ServiceDescriptor will be searched in the namespace of the current +Cluster by default. +

+ +
+ +`cluster`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the KubeBlocks Cluster being referenced. +This is used when services from another KubeBlocks Cluster are consumed. +

+ +

+By default, the referenced KubeBlocks Cluster’s `clusterDefinition.spec.connectionCredential` +will be utilized to bind to the current Component. This credential should include: +`endpoint`, `port`, `username`, and `password`. +

+ +

+Note: +

+
    +
  • +The `ServiceKind` and `ServiceVersion` specified in the service reference within the +ClusterDefinition are not validated when using this approach. +
  • +If both `cluster` and `serviceDescriptor` are present, `cluster` will take precedence. +
+ +

+Deprecated since v0.9, as `clusterDefinition.spec.connectionCredential` is deprecated; +use `clusterServiceSelector` instead. +This field is maintained for backward compatibility and its use is discouraged. +Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases.

+ +
+ +`clusterServiceSelector`
+ + +ServiceRefClusterSelector + + + +
+ +(Optional) + +

+References a service provided by another KubeBlocks Cluster. +It specifies the ClusterService and the account credentials needed for access. +

+ +
+ +`serviceDescriptor`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ServiceDescriptor object that describes a service provided by external sources. +

+ +

+When referencing a service provided by external sources, a ServiceDescriptor object is required to establish +the service binding. +The `serviceDescriptor.spec.serviceKind` and `serviceDescriptor.spec.serviceVersion` should match the serviceKind +and serviceVersion declared in the definition. +

+ +

+If both `cluster` and `serviceDescriptor` are specified, the `cluster` takes precedence. +

+ +
+
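+
+For illustration, a sketch of a `serviceRefs` entry that consumes a service from another KubeBlocks Cluster via +`clusterServiceSelector`; all names are hypothetical, and the selector fields are described in the sections that follow:
+
+serviceRefs:
+  - name: mysql                      # matches a serviceRefDeclaration name
+    namespace: demo                  # namespace of the referenced Cluster
+    clusterServiceSelector:
+      cluster: my-mysql-cluster      # name of the referenced Cluster
+      service:
+        component: mysql             # Component where the Service resides
+        service: headless            # empty for the default Service, “headless” for the headless Service
+        port: mysql                  # port name of the Service
+      credential:
+        component: mysql             # Component where the SystemAccount is defined
+        name: root                   # name of the SystemAccount
+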

+ServiceRefClusterSelector + +

+ +

+ +(Appears on:ServiceRef) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cluster`
+ +string + + +
+ + +

+The name of the Cluster being referenced. +

+ +
+ +`service`
+ + +ServiceRefServiceSelector + + + +
+ +(Optional) + +

+Identifies a ClusterService from the list of Services defined in `cluster.spec.services` of the referenced Cluster. +

+ +
+ +`credential`
+ + +ServiceRefCredentialSelector + + + +
+ +(Optional) + +

+Specifies the SystemAccount to authenticate and establish a connection with the referenced Cluster. +The SystemAccount should be defined in `componentDefinition.spec.systemAccounts` +of the Component providing the service in the referenced Cluster. +

+ +
+

+ServiceRefCredentialSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ + +

+The name of the Component where the credential resides.

+ +
+ +`name`
+ +string + + +
+ + +

+The name of the credential (SystemAccount) to reference. +

+ +
+

+ServiceRefDeclaration + +

+ +

+ +(Appears on:ClusterComponentDefinition, ComponentDefinitionSpec) + +

+
+ +

+ServiceRefDeclaration represents a reference to a service that can be either provided by a KubeBlocks Cluster +or an external service. +It acts as a placeholder for the actual service reference, which is determined later when a Cluster is created. +

+ +

+The purpose of ServiceRefDeclaration is to declare a service dependency without specifying the concrete details +of the service. +It allows for flexibility and abstraction in defining service references within a Component. +By using ServiceRefDeclaration, you can define service dependencies in a declarative manner, enabling loose coupling +and easier management of service references across different components and clusters. +

+ +

+Upon Cluster creation, the ServiceRefDeclaration is bound to an actual service through the ServiceRef field, +effectively resolving and connecting to the specified service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the ServiceRefDeclaration. +

+ +
+ +`serviceRefDeclarationSpecs`
+ + +[]ServiceRefDeclarationSpec + + + +
+ + +

+Defines a list of constraints and requirements for services that can be bound to this ServiceRefDeclaration +upon Cluster creation. +Each ServiceRefDeclarationSpec defines a ServiceKind and ServiceVersion, +outlining the acceptable service types and versions that are compatible. +

+ +

+This flexibility allows a ServiceRefDeclaration to be fulfilled by any one of the provided specs. +For example, if it requires an OLTP database and lists specs for both MySQL and PostgreSQL, +either a MySQL or a PostgreSQL service can be used when binding.

+ +
+ +`optional`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the service reference can be optional. +

+ +

+For an optional service-ref, the component can still be created even if the service-ref is not provided. +

+ +
+

+ServiceRefDeclarationSpec + +

+ +

+ +(Appears on:ServiceRefDeclaration) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ + +

+Specifies the type or nature of the service. This should be a well-known application cluster type, such as +{mysql, redis, mongodb}. +The field is case-insensitive and supports abbreviations for some well-known databases. +For instance, both `zk` and `zookeeper` are considered as a ZooKeeper cluster, while `pg`, `postgres`, `postgresql` +are all recognized as a PostgreSQL cluster. +

+ +
+ +`serviceVersion`
+ +string + + +
+ + +

+Defines the service version of the service reference. This is a regular expression that matches a version number pattern. +For instance, `^8.0.8$`, `8.0.\d{1,2}$`, `^[v\-]*?(\d{1,2}\.){0,3}\d{1,2}$` are all valid patterns. +

+ +
+
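+
+For illustration, a sketch of a `serviceRefDeclarations` entry in a ComponentDefinition that can be fulfilled by +either a PostgreSQL or a MySQL service; the declaration name and version patterns are hypothetical:
+
+serviceRefDeclarations:
+  - name: oltp-store                 # hypothetical declaration name
+    optional: true                   # the Component can be created without this reference
+    serviceRefDeclarationSpecs:
+      - serviceKind: postgresql
+        serviceVersion: '^12.*'      # regex matching acceptable versions
+      - serviceKind: mysql
+        serviceVersion: '^8.0.\d{1,2}$'
+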

+ServiceRefServiceSelector + +

+ +

+ +(Appears on:ServiceRefClusterSelector) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`component`
+ +string + + +
+ +(Optional) + +

+The name of the Component where the Service resides.

+ +

+It is required when referencing a Component’s Service. +

+ +
+ +`service`
+ +string + + +
+ + +

+The name of the Service to be referenced. +

+ +

+Leave it empty to reference the default Service. Set it to “headless” to reference the default headless Service. +

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name,service2.name… +

+ +
+ +`port`
+ +string + + +
+ +(Optional) + +

+The port name of the Service to be referenced. +

+ +

+If a non-zero node-port exists for the matched Service port, the node-port will be selected first.

+ +

+If the referenced Service is of pod-service type (a Service per Pod), there will be multiple Service objects matched, +and the resolved value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ServiceRefVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceRefVarSelector selects a var from a ServiceRefDeclaration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The ServiceRefDeclaration to select from. +

+ +
+ +`ServiceRefVars`
+ + +ServiceRefVars + + + +
+ + +

+ +(Members of `ServiceRefVars` are embedded into this type.) + +

+ +
+

+ServiceRefVars + +

+ +

+ +(Appears on:ServiceRefVarSelector) + +

+
+ +

+ServiceRefVars defines the vars that can be referenced from a ServiceRef. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`endpoint`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`port`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`CredentialVars`
+ + +CredentialVars + + + +
+ + +

+ +(Members of `CredentialVars` are embedded into this type.) + +

+ +
+

+ServiceSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+ServiceSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ports`
+ + +[]ServicePort + + + +
+ +(Optional) + +

+The list of ports that are exposed by this service. +More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+

+ServiceVarSelector + +

+ +

+ +(Appears on:VarSource) + +

+
+ +

+ServiceVarSelector selects a var from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ClusterObjectReference`
+ + +ClusterObjectReference + + + +
+ + +

+ +(Members of `ClusterObjectReference` are embedded into this type.) + +

+ +

+The Service to select from. +The default headless Service can be referenced by setting the name to “headless”.

+ +
+ +`ServiceVars`
+ + +ServiceVars + + + +
+ + +

+ +(Members of `ServiceVars` are embedded into this type.) + +

+ +
+

+ServiceVars + +

+ +

+ +(Appears on:ServiceVarSelector) + +

+
+ +

+ServiceVars defines the vars that can be referenced from a Service. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`host`
+ + +VarOption + + + +
+ +(Optional) + +
+ +`loadBalancer`
+ + +VarOption + + + +
+ +(Optional) + +

+LoadBalancer represents the LoadBalancer ingress point of the service. +

+ +

+If multiple ingress points are available, the first one will be used automatically, choosing between IP and Hostname. +

+ +
+ +`port`
+ + +NamedVar + + + +
+ +(Optional) + +

+Port references a port or node-port defined in the service. +

+ +

+If the referenced service is a pod-service, there will be multiple service objects matched, +and the value will be presented in the following format: service1.name:port1,service2.name:port2… +

+ +
+

+ShardingSpec + +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+ShardingSpec defines how KubeBlocks manages dynamically provisioned shards. +A typical design pattern for distributed databases is to distribute data across multiple shards, +with each shard consisting of multiple replicas. +Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components +using a template when shards are added. +When shards are removed, the corresponding Components are also deleted.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the common parent part of all shard names. +This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. +It is used to generate the names of underlying Components following the pattern `$(shardingSpec.name)-$(ShardID)`. +ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. +For example, if the sharding specification name is “my-shard” and the ShardID is “abc”, the resulting Component name +would be “my-shard-abc”. +

+ +

+Note that the name defined in the Component template (`shardingSpec.template.name`) will be disregarded +when generating the Component names of the shards. The `shardingSpec.name` field takes precedence.

+ +
+ +`template`
+ + +ClusterComponentSpec + + + +
+ + +

+The template for generating Components for shards, where each shard consists of one Component. +This field is of type ClusterComponentSpec, which encapsulates all the required details and +definitions for creating and managing the Components. +KubeBlocks uses this template to generate a set of identical Components or shards. +All the generated Components will have the same specifications and definitions as specified in the `template` field. +

+ +

+This allows for the creation of multiple Components with consistent configurations, +enabling sharding and distribution of workloads across Components. +

+ +
+ +`shards`
+ +int32 + + +
+ + +

+Specifies the desired number of shards. +Users can declare the desired number of shards through this field. +KubeBlocks dynamically creates and deletes Components based on the difference +between the desired and actual number of shards. +KubeBlocks provides lifecycle management for sharding, including: +

+
    +
  • +Executing the postProvision Action defined in the ComponentDefinition when the number of shards increases. +This allows for custom actions to be performed after a new shard is provisioned. +
  • +Executing the preTerminate Action defined in the ComponentDefinition when the number of shards decreases. +This enables custom cleanup or data migration tasks to be executed before a shard is terminated. +Resources and data associated with the corresponding Component will also be deleted. +
+ +
+
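+
+For illustration, a sketch of a `shardingSpecs` entry in a Cluster; the component and definition names are +hypothetical, and `componentDef`/`replicas` are assumed ClusterComponentSpec fields:
+
+shardingSpecs:
+  - name: shard                      # common parent part of all shard names
+    shards: 3                        # desired number of shards
+    template:                        # ClusterComponentSpec applied to every shard
+      name: redis                    # disregarded; shard names derive from `name` above
+      componentDef: redis-7          # hypothetical ComponentDefinition name
+      replicas: 2
+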

+StatefulSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition, ConsensusSetSpec, ReplicationSetSpec) + +

+
+ +

+StatefulSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`updateStrategy`
+ + +UpdateStrategy + + + +
+ +(Optional) + +

+Specifies the strategy for updating Pods. +For workloadType=`Consensus`, the update strategy can be one of the following: +

+
    +
  • +`Serial`: Updates Members sequentially to minimize component downtime. +
  • +`BestEffortParallel`: Updates Members in parallel to minimize component write downtime. Majority remains online +at all times. +
  • +`Parallel`: Forces parallel updates. +
+ +
+ +`llPodManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down. +

+
    +
  • +`OrderedReady`: Creates pods in increasing order (pod-0, then pod-1, etc). The controller waits until each pod +is ready before continuing. Pods are removed in reverse order when scaling down. +
  • +`Parallel`: Creates pods in parallel to match the desired scale without waiting. All pods are deleted at once +when scaling down. +
+ +
+ +`llUpdateStrategy`
+ + +Kubernetes apps/v1.StatefulSetUpdateStrategy + + + +
+ +(Optional) + +

+Specifies the low-level StatefulSetUpdateStrategy to be used when updating Pods in the StatefulSet upon a +revision to the Template. +`UpdateStrategy` will be ignored if this is provided. +

+ +
+

+StatefulSetWorkload + +

+
+ +

+StatefulSetWorkload interface +

+
+

+StatelessSetSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+StatelessSetSpec is deprecated since v0.7. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`updateStrategy`
+ + +Kubernetes apps/v1.DeploymentStrategy + + + +
+ +(Optional) + +

+Specifies the deployment strategy that will be used to replace existing pods with new ones. +

+ +
+

+SwitchPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSwitchPolicy) + +

+
+ +

+SwitchPolicyType defines the types of switch policies that can be applied to a cluster. +

+ +

+Currently, only the Noop policy is supported. Support for MaximumAvailability and MaximumDataProtection policies is +planned for future releases. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"MaximumAvailability" +

+
+ +

+MaximumAvailability represents a switch policy that aims for maximum availability. This policy will switch if the +primary is active and the synchronization delay is 0 according to the user-defined lagProbe data delay detection +logic. If the primary is down, it will switch immediately. +This policy is intended for future support. +

+ +
+ +

+"MaximumDataProtection" +

+
+ +

+MaximumDataProtection represents a switch policy focused on maximum data protection. This policy will only switch +if the primary is active and the synchronization delay is 0, based on the user-defined lagProbe data lag detection +logic. If the primary is down, it will switch only if it can be confirmed that the primary and secondary data are +consistent. Otherwise, it will not switch. +This policy is planned for future implementation. +

+ +
+ +

+"Noop" +

+
+ +

+Noop indicates that KubeBlocks will not perform any high-availability switching for the components. Users are +required to implement their own HA solution or integrate an existing open-source HA solution. +

+ +
+

+SwitchoverAction + +

+ +

+ +(Appears on:SwitchoverSpec) + +

+
+ +

+SwitchoverAction is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cmdExecutorConfig`
+ + +CmdExecutorConfig + + + +
+ + +

+Specifies the switchover command. +

+ +
+ +`scriptSpecSelectors`
+ + +[]ScriptSpecSelector + + + +
+ +(Optional) + +

+Used to select the scripts that need to be referenced. +When defined, the scripts defined in scriptSpecs can be referenced within the SwitchoverAction.CmdExecutorConfig.

+ +
+

+SwitchoverSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+SwitchoverSpec is deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`withCandidate`
+ + +SwitchoverAction + + + +
+ +(Optional) + +

+Represents the action of switching over to a specified candidate primary or leader instance. +

+ +
+ +`withoutCandidate`
+ + +SwitchoverAction + + + +
+ +(Optional) + +

+Represents the action of switching over without specifying a candidate primary or leader instance. +

+ +
+

+SystemAccount + +

+ +

+ +(Appears on:ComponentDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the unique identifier for the account. This name is used by other entities to reference the account. +

+ +

+This field is immutable once set. +

+ +
+ +`initAccount`
+ +bool + + +
+ +(Optional) + +

+Indicates if this account is a system initialization account (e.g., MySQL root). +

+ +

+This field is immutable once set. +

+ +
+ +`statement`
+ +string + + +
+ +(Optional) + +

+Defines the statement used to create the account with the necessary privileges. +

+ +

+This field is immutable once set. +

+ +
+ +`passwordGenerationPolicy`
+ + +PasswordConfig + + + +
+ +(Optional) + +

+Specifies the policy for generating the account’s password. +

+ +

+This field is immutable once set. +

+ +
+ +`secretRef`
+ + +ProvisionSecretRef + + + +
+ +(Optional) + +

+Refers to the secret from which data will be copied to create the new account. +

+ +

+This field is immutable once set. +

+ +
+
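+
+For illustration, a sketch of a `systemAccounts` entry in a ComponentDefinition; the statement placeholders and the +`passwordGenerationPolicy` sub-fields (`length`, `numDigits`) are assumptions:
+
+systemAccounts:
+  - name: root
+    initAccount: true                                              # system initialization account
+  - name: kbadmin
+    statement: CREATE USER $(USERNAME) IDENTIFIED BY '$(PASSWD)';  # hypothetical placeholders
+    passwordGenerationPolicy:
+      length: 16
+      numDigits: 8
+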

+SystemAccountConfig + +

+ +

+ +(Appears on:SystemAccountSpec) + +

+
+ +

+SystemAccountConfig specifies how to create and delete system accounts. +

+ +

+Deprecated since v0.9. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ + +AccountName + + + +
+ + +

+The unique identifier of a system account. +

+ +
+ +`provisionPolicy`
+ + +ProvisionPolicy + + + +
+ + +

+Outlines the strategy for creating the account. +

+ +
+

+SystemAccountSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+SystemAccountSpec specifies information to create system accounts. +

+ +

+Deprecated since v0.8, replaced by `componentDefinition.spec.systemAccounts` and +`componentDefinition.spec.lifecycleActions.accountProvision`.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`cmdExecutorConfig`
+ + +CmdExecutorConfig + + + +
+ + +

+Configures how to obtain the client SDK and execute statements. +

+ +
+ +`passwordConfig`
+ + +PasswordConfig + + + +
+ + +

+Defines the pattern used to generate passwords for system accounts. +

+ +
+ +`accounts`
+ + +[]SystemAccountConfig + + + +
+ + +

+Defines the configuration settings for system accounts. +

+ +
+

+TLSConfig + +

+ +

+ +(Appears on:ComponentSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enable`
+ +bool + + +
+ +(Optional) + +

+A boolean flag that indicates whether the Component should use Transport Layer Security (TLS) +for secure communication. +When set to true, the Component will be configured to use TLS encryption for its network connections. +This ensures that the data transmitted between the Component and its clients or other Components is encrypted +and protected from unauthorized access. +If TLS is enabled, the Component may require additional configuration, +such as specifying TLS certificates and keys, to properly set up the secure communication channel. +

+ +
+ +`issuer`
+ + +Issuer + + + +
+ +(Optional) + +

+Specifies the configuration for the TLS certificates issuer. +It allows defining the issuer name and the reference to the secret containing the TLS certificates and key. +The secret should contain the CA certificate, TLS certificate, and private key in the specified keys. +Required when TLS is enabled. +

+ +
+

+TLSSecretRef + +

+ +

+ +(Appears on:Issuer) + +

+
+ +

+TLSSecretRef defines the Secret that contains TLS certs.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name of the Secret that contains user-provided certificates. +

+ +
+ +`ca`
+ +string + + +
+ + +

+Key of CA cert in Secret +

+ +
+ +`cert`
+ +string + + +
+ + +

+Key of Cert in Secret +

+ +
+ +`key`
+ +string + + +
+ + +

+Key of TLS private key in Secret +

+ +
+
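+
+For illustration, a sketch combining `TLSConfig`, `Issuer`, and `TLSSecretRef` on a Component; the `tlsConfig` field +name, the `UserProvided` issuer name, and the Secret name and keys are assumptions:
+
+tlsConfig:
+  enable: true
+  issuer:
+    name: UserProvided               # assumed issuer for user-supplied certificates
+    secretRef:
+      name: my-tls-secret            # hypothetical Secret
+      ca: ca.crt                     # key of the CA cert in the Secret
+      cert: tls.crt                  # key of the TLS cert in the Secret
+      key: tls.key                   # key of the TLS private key in the Secret
+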

+TargetPodSelector +(`string` alias) +

+ +

+ +(Appears on:Action) + +

+
+ +

+TargetPodSelector defines how to select pod(s) to execute an Action. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +
+ +

+"Any" +

+
+ +
+ +

+"Ordinal" +

+
+ +
+ +

+"Role" +

+
+ +
+

+TenancyType +(`string` alias) +

+ +

+ +(Appears on:Affinity, ClusterSpec) + +

+
+ +

+TenancyType defines the type of tenancy for cluster tenant resources. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"DedicatedNode" +

+
+ +

+DedicatedNode means each pod runs on its own dedicated node.

+ +
+ +

+"SharedNode" +

+
+ +

+SharedNode means multiple pods may share the same node. +

+ +
+

+TerminationPolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterSpec) + +

+
+ +

+TerminationPolicyType defines termination policy types. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +

+Delete is based on Halt and deletes PVCs. +

+ +
+ +

+"DoNotTerminate" +

+
+ +

+DoNotTerminate will block delete operation. +

+ +
+ +

+"Halt" +

+
+ +

+Halt will delete workload resources such as StatefulSet and Deployment workloads, but keep PVCs.

+ +
+ +

+"WipeOut" +

+
+ +

+WipeOut is based on Delete and wipes out all volume snapshots and snapshot data from the backup storage location.

+ +
+

+UpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentDefinitionSpec, StatefulSetSpec) + +

+
+ +

+UpdateStrategy defines the update strategy for cluster components. This strategy determines how updates are applied +across the cluster. +The available strategies are `Serial`, `BestEffortParallel`, and `Parallel`. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +

+BestEffortParallelStrategy indicates that the replicas are updated in parallel, with the operator making +a best-effort attempt to update as many replicas as possible concurrently +while maintaining the component’s availability. +Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number +of replicas remain available during the update process to maintain the component’s quorum and functionality. +

+ +

+For example, consider a component with 5 replicas. To maintain the component’s availability and quorum, +the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least +3 replicas (a quorum) remain available and functional during the update process. +

+ +

+The `BestEffortParallel` strategy strikes a balance between update speed and component availability. +

+ +
+ +

+"Parallel" +

+
+ +

+ParallelStrategy indicates that updates are applied simultaneously to all Pods of a Component. +The replicas are updated in parallel, with the operator updating all replicas concurrently. +This strategy provides the fastest update time but may lead to a period of reduced availability or +capacity during the update process. +

+ +
+ +

+"Serial" +

+
+ +

+SerialStrategy indicates that updates are applied one at a time in a sequential manner. +The operator waits for each replica to be updated and ready before proceeding to the next one. +This ensures that only one replica is unavailable at a time during the update process. +

+ +
+

+UpgradePolicy +(`string` alias) +

+
+ +

+UpgradePolicy defines the reconfiguration policy.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"autoReload" +

+
+ +
+ +

+"dynamicReloadBeginRestart" +

+
+ +
+ +

+"none" +

+
+ +
+ +

+"simple" +

+
+ +
+ +

+"parallel" +

+
+ +
+ +

+"rolling" +

+
+ +
+ +

+"operatorSyncUpdate" +

+
+ +
+

+UserResourceRefs + +

+ +

+ +(Appears on:ClusterComponentSpec) + +

+
+ +

+UserResourceRefs defines references to user-defined Secrets and ConfigMaps. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`secretRefs`
+ + +[]SecretRef + + + +
+ +(Optional) + +

+SecretRefs defines the user-defined Secrets. +

+ +
+ +`configMapRefs`
+ + +[]ConfigMapRef + + + +
+ +(Optional) + +

+ConfigMapRefs defines the user-defined ConfigMaps. +

+ +
+

+VarOption +(`string` alias) +

+ +

+ +(Appears on:ComponentVars, CredentialVars, NamedVar, ServiceRefVars, ServiceVars) + +

+
+ +

+VarOption defines whether a variable is required or optional. +

+
+

+VarSource + +

+ +

+ +(Appears on:EnvVar) + +

+
+ +

+VarSource represents a source for the value of an EnvVar. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMapKeyRef`
+ + +Kubernetes core/v1.ConfigMapKeySelector + + + +
+ +(Optional) + +

+Selects a key of a ConfigMap. +

+ +
+ +`secretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ +(Optional) + +

+Selects a key of a Secret. +

+ +
+ +`hostNetworkVarRef`
+ + +HostNetworkVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of host-network resources. +

+ +
+ +`serviceVarRef`
+ + +ServiceVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Service. +

+ +
+ +`credentialVarRef`
+ + +CredentialVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Credential (SystemAccount). +

+ +
+ +`serviceRefVarRef`
+ + +ServiceRefVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a ServiceRef. +

+ +
+ +`componentVarRef`
+ + +ComponentVarSelector + + + +
+ +(Optional) + +

+Selects a defined var of a Component. +

+ +
+
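+
+For illustration, a sketch of a `vars` entry (EnvVar) resolved from a Service via `serviceVarRef`; the `vars` +placement, the `valueFrom` wrapper, the `optional` flag, and the `Required` option value are assumptions:
+
+vars:
+  - name: MYSQL_PORT                 # hypothetical env var name
+    valueFrom:
+      serviceVarRef:
+        name: headless               # select from the default headless Service
+        optional: false
+        port:
+          name: mysql                # a port (or node-port) defined in the Service
+          option: Required
+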

+VolumeProtectionSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+VolumeProtectionSpec is deprecated since v0.9, replaced with ComponentVolume.HighWatermark. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`highWatermark`
+ +int + + +
+ +(Optional) + +

+The high watermark threshold for volume space usage. +If the space usage of any specified volume exceeds the threshold, the pre-defined “LOCK” action +will be triggered to degrade the service and protect the volume from space exhaustion, for example by setting +the instance to read-only. If the space usage of all volumes later drops below the threshold, the pre-defined +“UNLOCK” action will be performed to restore the service to normal operation.

+ +
+ +`volumes`
+ + +[]ProtectedVolume + + + +
+ +(Optional) + +

+The Volumes to be protected. +

+ +
+
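+
+For illustration, a sketch of the (deprecated) `volumeProtectionSpec`; the ProtectedVolume shape (`name`, a +per-volume `highWatermark` override) and the percentage semantics are assumptions:
+
+volumeProtectionSpec:
+  highWatermark: 90                  # threshold applied to all listed volumes
+  volumes:
+    - name: data                     # hypothetical volume name
+      highWatermark: 85              # assumed per-volume override
+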

+VolumeType +(`string` alias) +

+ +

+ +(Appears on:VolumeTypeSpec) + +

+
+ +

+VolumeType defines the type of volume, specifically distinguishing between volumes used for backup data and those used for logs. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"data" +

+
+ +

+VolumeTypeData indicates a volume designated for storing backup data. This type of volume is optimized for the +storage and retrieval of data backups, ensuring data persistence and reliability. +

+ +
+ +

+"log" +

+
+ +

+VolumeTypeLog indicates a volume designated for storing logs. This type of volume is optimized for log data, +facilitating efficient log storage, retrieval, and management. +

+ +
+

+VolumeTypeSpec + +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+VolumeTypeSpec is deprecated since v0.9, replaced with ComponentVolume. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Corresponds to the name of the VolumeMounts field in PodSpec.Container. +

+ +
+ +`type`
+ + +VolumeType + + + +
+ +(Optional) + +

+Type of data the volume will persist.

+ +
+

+WorkloadType +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentDefinition) + +

+
+ +

+WorkloadType defines the type of workload for the components of the ClusterDefinition. +It can be one of the following: `Stateless`, `Stateful`, `Consensus`, or `Replication`. +

+ +

+Deprecated since v0.8. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Consensus" +

+
+ +

+Consensus represents a workload type involving distributed consensus algorithms for coordinated decision-making. +

+ +
+ +

+"Replication" +

+
+ +

+Replication represents a workload type that involves replication, typically used for achieving high availability +and fault tolerance. +

+ +
+ +

+"Stateful" +

+
+ +

+Stateful represents a workload type where components maintain state, and each instance has a unique identity. +

+ +
+ +

+"Stateless" +

+
+ +

+Stateless represents a workload type where components do not maintain state, and instances are interchangeable. +

+ +
+
+

apps.kubeblocks.io/v1beta1

+
+
+Resource Types: + +

+ConfigConstraint + +

+
+ +

+ConfigConstraint manages the parameters across multiple configuration files contained in a single configuration template. +These configuration files should have the same format (e.g. ini, xml, properties, json).

+ +

+It provides the following functionalities: +

+
    +
  1. +Parameter Value Validation: Validates and ensures compliance of parameter values with defined constraints. +
  2. +Dynamic Reload on Modification: Monitors parameter changes and triggers dynamic reloads to apply updates. +
  3. +Parameter Rendering in Templates: Injects parameters into templates to generate up-to-date configuration files. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`apps.kubeblocks.io/v1beta1` + +
+ +`kind`
+string + +
+`ConfigConstraint` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ConfigConstraintSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +`reloadAction` is set. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing SQL statements) when a change is detected.

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the static parameters. +Modifications to any of these parameters require a restart of the process to take effect.

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart.

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempts to change any of these parameters will be ignored.

+ +
+ +`fileFormatConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+fileFormatConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+ +
+ +`status`
+ + +ConfigConstraintStatus + + + +
+ + +
+

+AutoTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+AutoTrigger automatically performs the reload when the specified conditions are met.

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`processName`
+ +string + + +
+ +(Optional) + +

+The name of the process. +

+ +
+

+CfgFileFormat +(`string` alias) +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+CfgFileFormat defines the format of configuration files.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"dotenv" +

+
+ +
+ +

+"hcl" +

+
+ +
+ +

+"ini" +

+
+ +
+ +

+"json" +

+
+ +
+ +

+"properties" +

+
+ +
+ +

+"props-plus" +

+
+ +
+ +

+"redis" +

+
+ +
+ +

+"toml" +

+
+ +
+ +

+"xml" +

+
+ +
+ +

+"yaml" +

+
+ +
+

+ConfigConstraintPhase +(`string` alias) +

+ +

+ +(Appears on:ConfigConstraintStatus, ConfigConstraintStatus) + +

+
+ +

+ConfigConstraintPhase defines the ConfigConstraint CR .status.phase +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"Unavailable" +

+
+ +
+

+ConfigConstraintSpec + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintSpec defines the desired state of ConfigConstraint +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +`reloadAction` is set. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+reloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing SQL statements) when a change is detected.

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the static parameters. +Modifications to any of these parameters require a restart of the process to take effect.

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart.

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempts to change any of these parameters will be ignored.

+ +
+ +`fileFormatConfig`
+ + +FileFormatConfig + + + +
+ + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+fileFormatConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+

+ConfigConstraintStatus + +

+ +

+ +(Appears on:ConfigConstraint) + +

+
+ +

+ConfigConstraintStatus represents the observed state of a ConfigConstraint. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +ConfigConstraintPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to CCAvailablePhase, the ConfigConstraint can be referenced by ClusterDefinition. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides descriptions for abnormal states. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Refers to the most recent generation observed for this ConfigConstraint. This value is updated by the API Server. +

+ +
+

+DownwardAPIChangeTriggeredAction + +

+ +

+ +(Appears on:ConfigConstraintSpec, ConfigConstraintSpec) + +

+
+ +

+DownwardAPIChangeTriggeredAction defines an action that triggers specific commands in response to changes in Pod labels. +For example, a command might be executed when the ‘role’ label of the Pod is updated. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the field. It must be a string of maximum length 63. +The name should match the regex pattern `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$`. +

+ +
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the mount point of the Downward API volume. +

+ +
+ +`items`
+ + +[]Kubernetes core/v1.DownwardAPIVolumeFile + + + +
+ + +

+Represents a list of files under the Downward API volume. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be triggered when changes are detected in Downward API volume files. +It relies on the inotify mechanism in the config-manager sidecar to monitor file changes. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the DownwardAction to perform specific tasks or configurations. +

+ +
+

+DynamicParameterSelectedPolicy +(`string` alias) +

+
+ +

+DynamicParameterSelectedPolicy determines how to select the parameters for dynamic reload actions.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"all" +

+
+ +
+ +

+"dynamic" +

+
+ +
+

+DynamicReloadType +(`string` alias) +

+
+ +

+DynamicReloadType defines the reload method.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"auto" +

+
+ +
+ +

+"http" +

+
+ +
+ +

+"sql" +

+
+ +
+ +

+"exec" +

+
+ +
+ +

+"tpl" +

+
+ +
+ +

+"signal" +

+
+ +
+

+FileFormatConfig + +

+ +

+ +(Appears on:ConfigConstraintSpec, ConfigConstraintSpec) + +

+
+ +

+FileFormatConfig specifies the format of the configuration file and any associated parameters +that are specific to the chosen format. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`FormatterAction`
+ + +FormatterAction + + + +
+ + +

+ +(Members of `FormatterAction` are embedded into this type.) + +

+(Optional) + +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +
+ +`format`
+ + +CfgFileFormat + + + +
+ + +

+The config file format. Valid values are `ini`, `xml`, `yaml`, `json`, +`hcl`, `dotenv`, `properties` and `toml`. Each format has its own characteristics and use cases. +

+ + +
+

+FormatterAction + +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+FormatterAction configures format-specific options for the different configuration file formats.
+Note: Only one of its members should be specified at any given time.

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`iniConfig`
+ + +IniConfig + + + +
+ +(Optional) + +

+Holds options specific to the ‘ini’ file format. +

+ +
+

+IniConfig + +

+ +

+ +(Appears on:FormatterAction) + +

+
+ +

+IniConfig holds options specific to the ‘ini’ file format. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`sectionName`
+ +string + + +
+ +(Optional) + +

+A string that describes the name of the ini section. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+ParametersSchema defines a list of configuration items with their names, default values, descriptions,
+types, and constraints.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`topLevelKey`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configSchema.cue’. +

+ +
+ +`cue`
+ +string + + +
+ +(Optional) + +

+Holds a string that contains a script written in the CUE language, defining a list of configuration items.
+Each item is detailed with its name, default value, description, type (e.g. string, integer, float),
+and constraints (permissible values or the valid range of values).

+ +

+CUE (Configure, Unify, Execute) is a declarative language designed for defining and validating +complex data configurations. +It is particularly useful in environments like K8s where complex configurations and validation rules are common. +

+ +

+This script functions as a validator for user-provided configurations, ensuring compliance with +the established specifications and constraints. +

+ +
+ +`schemaInJSON`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ + +

+Generated from the ‘cue’ field and transformed into a JSON format. +

+ +
+

+ReloadAction + +

+ +

+ +(Appears on:ConfigConstraintSpec) + +

+
+ +

+ReloadAction defines the mechanisms available for dynamically reloading a process within K8s without requiring a restart. +

+ +

+Only one of the mechanisms can be specified at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`unixSignalTrigger`
+ + +UnixSignalTrigger + + + +
+ +(Optional) + +

+Used to trigger a reload by sending a specific Unix signal to the process. +

+ +
+ +`shellTrigger`
+ + +ShellTrigger + + + +
+ +(Optional) + +

+Allows executing a custom shell script to reload the process.

+ +
+ +`tplScriptTrigger`
+ + +TPLScriptTrigger + + + +
+ +(Optional) + +

+Enables reloading the process using a Go template script.

+ +
+ +`autoTrigger`
+ + +AutoTrigger + + + +
+ +(Optional) + +

+Automatically performs the reload when specified conditions are met.

+ +
+ +`targetPodSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload.
+The `targetPodSelector` allows you to specify label selectors to target the desired pods for the reload process.

+ +

+If the `targetPodSelector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic
+reload.

+ +
+

+ScriptConfig + +

+ +

+ +(Appears on:ConfigConstraintSpec, DownwardAPIChangeTriggeredAction, ShellTrigger, TPLScriptTrigger) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`scriptConfigMapRef`
+ +string + + +
+ + +

+Specifies the reference to the ConfigMap containing the scripts. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace for the ConfigMap. +If not specified, it defaults to the “default” namespace. +
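+
+For example, reusing the names from the `dynamicReloadAction` example above:
+
+scriptConfig:
+  scriptConfigMapRef: mysql-reload-script
+  namespace: kb-system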

+ +
+

+ShellTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+ShellTrigger allows executing a custom shell script to reload the process.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ + +

+Specifies the command to execute in order to reload the process. It should be a valid shell command. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines the synchronization mode of parameter updates with “config-manager”. +

+
    +
  • +‘True’: Executes reload actions synchronously, pausing until completion (see the sketch below).
  • +‘False’: Executes reload actions asynchronously, without waiting for completion.
+ +
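+
+A hedged sketch of a complete ShellTrigger in synchronous mode (the script path is a hypothetical placeholder):
+
+shellTrigger:
+  command:
+    - sh
+    - /scripts/reload.sh
+  sync: true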
+ +`batchReload`
+ +bool + + +
+ +(Optional) + +

+Controls whether parameter updates are processed individually or collectively in a batch: +

+
    +
  • +‘True’: Processes all changes in one batch reload.
  • +‘False’: Processes each change individually.
+ +

+Defaults to ‘False’ if unspecified. +

+ +
+ +`batchParamsFormatterTemplate`
+ +string + + +
+ +(Optional) + +

+Specifies a Go template string for formatting batch input data. +It’s used when `batchReload` is ‘True’ to format data passed into STDIN of the script. +The template accesses key-value pairs of updated parameters via the ‘$’ variable. +This allows for custom formatting of the input data. +

+ +

+Example template: +

+
+
+batchParamsFormatterTemplate: |-
+{{- range $pKey, $pValue := $ }}
+{{ printf "%s:%s" $pKey $pValue }}
+{{- end }}
+
+
+ +

+This example generates batch input data in a key:value format, sorted by keys. +

+
+
+key1:value1
+key2:value2
+key3:value3
+
+
+ +

+If not specified, the default format is key=value, sorted by keys, for each updated parameter. +

+
+
+key1=value1
+key2=value2
+key3=value3
+
+
+ +
+ +`toolsSetup`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container,
+which is then responsible for copying the tools from the image to a bin volume.
+This ensures that the tools are available to the ‘config-manager’ sidecar.

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the dynamic reload. +

+ +
+

+SignalType +(`string` alias) +

+ +

+ +(Appears on:UnixSignalTrigger) + +

+
+ +

+SignalType defines which signals are valid. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"SIGABRT" +

+
+ +
+ +

+"SIGALRM" +

+
+ +
+ +

+"SIGBUS" +

+
+ +
+ +

+"SIGCHLD" +

+
+ +
+ +

+"SIGCONT" +

+
+ +
+ +

+"SIGFPE" +

+
+ +
+ +

+"SIGHUP" +

+
+ +
+ +

+"SIGILL" +

+
+ +
+ +

+"SIGINT" +

+
+ +
+ +

+"SIGIO" +

+
+ +
+ +

+"SIGKILL" +

+
+ +
+ +

+"SIGPIPE" +

+
+ +
+ +

+"SIGPROF" +

+
+ +
+ +

+"SIGPWR" +

+
+ +
+ +

+"SIGQUIT" +

+
+ +
+ +

+"SIGSEGV" +

+
+ +
+ +

+"SIGSTKFLT" +

+
+ +
+ +

+"SIGSTOP" +

+
+ +
+ +

+"SIGSYS" +

+
+ +
+ +

+"SIGTERM" +

+
+ +
+ +

+"SIGTRAP" +

+
+ +
+ +

+"SIGTSTP" +

+
+ +
+ +

+"SIGTTIN" +

+
+ +
+ +

+"SIGTTOU" +

+
+ +
+ +

+"SIGURG" +

+
+ +
+ +

+"SIGUSR1" +

+
+ +
+ +

+"SIGUSR2" +

+
+ +
+ +

+"SIGVTALRM" +

+
+ +
+ +

+"SIGWINCH" +

+
+ +
+ +

+"SIGXCPU" +

+
+ +
+ +

+"SIGXFSZ" +

+
+ +
+

+TPLScriptTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+TPLScriptTrigger enables reloading the process using a Go template script.

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ScriptConfig`
+ + +ScriptConfig + + + +
+ + +

+ +(Members of `ScriptConfig` are embedded into this type.) + +

+ +

+Specifies the ConfigMap that contains the script to be executed for reload. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines whether parameter updates should be synchronized with the “config-manager”. +Specifies the controller’s reload strategy: +

+
    +
  • +If set to ‘True’, the controller executes the reload action in synchronous mode, pausing execution until the reload completes.
  • +If set to ‘False’, the controller executes the reload action in asynchronous mode, updating the ConfigMap without waiting for the reload process to finish.
+ +
+

+ToolConfig + +

+ +

+ +(Appears on:ToolsSetup) + +

+
+ +

+ToolConfig specifies the settings of an init container that prepares tools for dynamic reload.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the init container. +

+ +
+ +`asContainerImage`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the tool image should be used as the container image for a sidecar. +This is useful for large tool images, such as those for C++ tools, which may depend on +numerous libraries (e.g., *.so files). +

+ +

+If enabled, the tool image is deployed as a sidecar container image. +

+ +

+Examples: +

+
+
+ toolsSetup:
+   mountPoint: /kb_tools
+   toolConfigs:
+     - name: kb-tools
+       asContainerImage: true
+       image: apecloud/oceanbase:4.2.0.0-100010032023083021
+
+
+ +

+Generated containers:

+
+
+initContainers:
+ - name: install-config-manager-tool
+   image: apecloud/kubeblocks-tools:${version}
+   command:
+   - cp
+   - /bin/config_render
+   - /opt/tools
+   volumeMounts:
+   - name: kb-tools
+     mountPath: /opt/tools
+containers:
+ - name: config-manager
+   image: apecloud/oceanbase:4.2.0.0-100010032023083021
+   imagePullPolicy: IfNotPresent
+   command:
+   - /opt/tools/reloader
+   - --log-level
+   - info
+   - --operator-update-enable
+   - --tcp
+   - "9901"
+   - --config
+   - /opt/config-manager/config-manager.yaml
+   volumeMounts:
+   - name: kb-tools
+     mountPath: /opt/tools
+
+
+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the tool container image. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed by the init container. +

+ +
+

+ToolsSetup + +

+ +

+ +(Appears on:ConfigConstraintSpec, ShellTrigger) + +

+
+ +

+ToolsSetup prepares the tools for dynamic reloads used in ShellTrigger from a specified container image. +

+ +

+Example: +

+
+
+
+toolsSetup:
+  mountPoint: /kb_tools
+  toolConfigs:
+    - name: kb-tools
+      command:
+        - cp
+        - /bin/ob-tools
+        - /kb_tools/obtools
+      image: docker.io/apecloud/obtools
+
+
+ +

+This example copies the “/bin/ob-tools” binary from the image to “/kb_tools/obtools”. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the directory path in the container where the tools-related files are to be copied. +This field is typically used with an emptyDir volume to ensure a temporary, empty directory is provided at pod creation. +

+ +
+ +`toolConfigs`
+ + +[]ToolConfig + + + +
+ +(Optional) + +

+Specifies a list of settings of init containers that prepare tools for dynamic reload. +

+ +
+

+UnixSignalTrigger + +

+ +

+ +(Appears on:ReloadOptions, ReloadAction) + +

+
+ +

+UnixSignalTrigger is used to trigger a reload by sending a specific Unix signal to the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`signal`
+ + +SignalType + + + +
+ + +

+Specifies a valid Unix signal to be sent. +For a comprehensive list of all Unix signals, see: ../../pkg/configuration/configmap/handler.go:allUnixSignals +

+ +
+ +`processName`
+ +string + + +
+ + +

+Identifies the name of the process to which the Unix signal will be sent. +
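+
+For example (the process name is an assumption used for illustration):
+
+unixSignalTrigger:
+  signal: SIGHUP
+  processName: mysqld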

+ +
+
+

workloads.kubeblocks.io/v1

+
+
+Resource Types: + +

+InstanceSet + +

+
+ +

+InstanceSet is the Schema for the instancesets API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`workloads.kubeblocks.io/v1` + +
+ +`kind`
+string + +
+`InstanceSet` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+Contains the metadata for the particular object, such as name, namespace, labels, and annotations. +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +InstanceSetSpec + + + +
+ + +

+Defines the desired state of the state machine. It includes the configuration details for the state machine. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template.
+The Ordinals are used to specify the ordinals of the instance (pod) names generated under the default template.

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under the default template would be
+$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1, and $(cluster.name)-$(component.name)-7.
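+
+Rendered as YAML, that example reads:
+
+defaultTemplateOrdinals:
+  ranges:
+    - start: 0
+      end: 1
+  discrete:
+    - 7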

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) is based on the InstanceSet Name, InstanceTemplate Name, and ordinal.
+The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal).
+By default, the ordinal starts from 0 for each InstanceTemplate.
+It is important to ensure that the Name of each InstanceTemplate is unique.

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +
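+
+A sketch of a single InstanceTemplate override (the template name and resource values are assumptions):
+
+instances:
+  - name: high-mem
+    replicas: 1
+    resources:
+      limits:
+        memory: 8Gi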

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential future reuse or data recovery, but it is no longer actively used.
  2. +The ordinal number assigned to this instance is preserved, ensuring it remains unique and avoiding conflicts with new instances.
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +
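+
+For example, to take a single instance offline while retaining its PVC (the instance name is hypothetical):
+
+replicas: 3
+offlineInstances:
+  - mycluster-mysql-1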

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
+The default Concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated:

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed; any attempt to modify other fields will be rejected.
  • +`PreferInPlace` indicates that an in-place upgrade of the Pod is attempted first. If that fails, it falls back to ReCreate, where the pod is recreated. The default value is “PreferInPlace” (see the example below).
+ +
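+
+For example, to reject any update that cannot be performed in place:
+
+podUpdatePolicy: StrictInPlace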
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members (Pods) update strategy.

+
    +
  • +serial: update Members one by one, which guarantees the minimum component-unavailable time.
  • +parallel: force parallel updates.
  • +bestEffortParallel: update Members in parallel, which guarantees the minimum component-unwritable time.
+ +
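+
+For example, to prioritize availability during updates (value casing follows the Value table below):
+
+memberUpdateStrategy: Serial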
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. The InstanceSet obtains the role of each pod through its `kubeblocks.io/role` label.

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions for dynamic membership reconfiguration.

+ +
+ +`templateVars`
+ +map[string]string + + +
+ +(Optional) + +

+Provides variables which are used to call Actions. +

+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`configs`
+ + +[]ConfigTemplate + + + +
+ +(Optional) + +

+Describe the configs to be reconfigured. +

+ +
+ +
+ +`status`
+ + +InstanceSetStatus + + + +
+ + +

+Represents the current information about the state machine. This data may be out of date. +

+ +
+

+ConditionType +(`string` alias) +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"InstanceAvailable" +

+
+ +

+InstanceAvailable ConditionStatus will be True if all instances (pods) are in the ready condition
+and remain so for “MinReadySeconds” seconds. Otherwise, it will be set to False.

+ +
+ +

+"InstanceFailure" +

+
+ +

+InstanceFailure is added in an instance set when at least one of its instances (pods) is in a `Failed` phase.

+ +
+ +

+"InstanceReady" +

+
+ +

+InstanceReady is added in an instance set when at least one of its instances (pods) is in a Ready condition.
+ConditionStatus will be True if all its instances (pods) are in a Ready condition.
+Otherwise, a NotReady reason, with the not-ready instances encoded in the Message field, will be set.

+ +
+ +

+"InstanceUpdateRestricted" +

+
+ +

+InstanceUpdateRestricted represents a ConditionType that indicates updates to an InstanceSet are blocked (when the
+PodUpdatePolicy is set to StrictInPlace but the pods cannot be updated in-place).

+ +
+

+ConfigTemplate + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the config. +

+ +
+ +`generation`
+ +int64 + + +
+ + +

+The generation of the config. +

+ +
+ +`reconfigure`
+ + +Action + + + +
+ +(Optional) + +

+The custom reconfigure action. +

+ +
+ +`reconfigureActionName`
+ +string + + +
+ +(Optional) + +

+The name of the custom reconfigure action. +

+ +

+An empty name indicates that the reconfigure action is the default one defined by lifecycle actions. +

+ +
+ +`parameters`
+ +map[string]string + + +
+ +(Optional) + +

+The parameters to call the reconfigure action. +
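+
+An illustrative entry (the config name and parameter values are assumptions):
+
+configs:
+  - name: mysql-config
+    generation: 2
+    parameters:
+      max_connections: "1000"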

+ +
+

+InstanceConfigStatus + +

+ +

+ +(Appears on:InstanceStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the config. +

+ +
+ +`generation`
+ +int64 + + +
+ + +

+The generation of the config. +

+ +
+

+InstanceSetSpec + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetSpec defines the desired state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template.
+The Ordinals are used to specify the ordinals of the instance (pod) names generated under the default template.

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under the default template would be
+$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1, and $(cluster.name)-$(component.name)-7.

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) is based on the InstanceSet Name, InstanceTemplate Name, and ordinal.
+The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal).
+By default, the ordinal starts from 0 for each InstanceTemplate.
+It is important to ensure that the Name of each InstanceTemplate is unique.

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential future reuse or data recovery, but it is no longer actively used.
  2. +The ordinal number assigned to this instance is preserved, ensuring it remains unique and avoiding conflicts with new instances.
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`persistentVolumeClaimRetentionPolicy`
+ + +PersistentVolumeClaimRetentionPolicy + + + +
+ +(Optional) + +

+persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent +volume claims created from volumeClaimTemplates. By default, all persistent +volume claims are created as needed and retained until manually deleted. This +policy allows the lifecycle to be altered, for example by deleting persistent +volume claims when their workload is deleted, or when their pod is scaled +down. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
+The default Concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated:

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed; any attempt to modify other fields will be rejected.
  • +`PreferInPlace` indicates that an in-place upgrade of the Pod is attempted first. If that fails, it falls back to ReCreate, where the pod is recreated. The default value is “PreferInPlace”.
+ +
+ +`instanceUpdateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ +(Optional) + +

+Provides fine-grained control over the spec update process of all instances. +

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members (Pods) update strategy.

+
    +
  • +serial: update Members one by one, which guarantees the minimum component-unavailable time.
  • +parallel: force parallel updates.
  • +bestEffortParallel: update Members in parallel, which guarantees the minimum component-unwritable time.
+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. The InstanceSet obtains the role of each pod through its `kubeblocks.io/role` label.

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions for dynamic membership reconfiguration.

+ +
+ +`templateVars`
+ +map[string]string + + +
+ +(Optional) + +

+Provides variables which are used to call Actions. +

+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`configs`
+ + +[]ConfigTemplate + + + +
+ +(Optional) + +

+Describe the configs to be reconfigured. +

+ +
+

+InstanceSetStatus + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetStatus defines the observed state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+observedGeneration is the most recent generation observed for this InstanceSet. It corresponds to the +InstanceSet’s generation, which is updated on mutation by the API Server. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+replicas is the number of instances created by the InstanceSet controller. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ + +

+readyReplicas is the number of instances created for this InstanceSet with a Ready Condition. +

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ + +

+updatedReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+ +`currentRevision`
+ +string + + +
+ + +

+currentRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the +sequence [0,currentReplicas). +

+ +
+ +`updateRevision`
+ +string + + +
+ + +

+updateRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the sequence +[replicas-updatedReplicas,replicas) +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents the latest available observations of an instanceset’s current state. +Known .status.conditions.type are: “InstanceFailure”, “InstanceReady” +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+Total number of available instances (ready for at least minReadySeconds) targeted by this InstanceSet. +

+ +
+ +`initReplicas`
+ +int32 + + +
+ +(Optional) + +

+Defines the initial number of instances when the cluster is first initialized.
+This value is set to spec.Replicas at the time of object creation and remains constant thereafter.
+Used only when spec.roles is set.

+ +
+ +`readyInitReplicas`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of instances that have already reached the MembersStatus during the cluster initialization stage.
+This value remains constant once it equals InitReplicas.
+Used only when spec.roles is set.

+ +
+ +`membersStatus`
+ + +[]MemberStatus + + + +
+ +(Optional) + +

+Provides the status of each member in the cluster. +

+ +
+ +`instanceStatus`
+ + +[]InstanceStatus + + + +
+ +(Optional) + +

+Provides the status of each instance in the ITS. +

+ +
+ +`currentRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+currentRevisions, if not empty, indicates the old version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`updateRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+updateRevisions, if not empty, indicates the new version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`templatesStatus`
+ + +[]InstanceTemplateStatus + + + +
+ +(Optional) + +

+TemplatesStatus represents the status of the instances generated by each InstanceTemplate.

+ +
+

+InstanceStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podName`
+ +string + + +
+ + +

+Represents the name of the pod. +

+ +
+ +`configs`
+ + +[]InstanceConfigStatus + + + +
+ +(Optional) + +

+The status of configs. +

+ +
+

+InstanceTemplate + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations in a Component. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the Component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the Component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`ordinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of this InstanceTemplate.
+The Ordinals are used to specify the ordinals of the instance (pod) names generated under this InstanceTemplate.

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under this InstanceTemplate would be
+$(cluster.name)-$(component.name)-$(template.name)-0, $(cluster.name)-$(component.name)-$(template.name)-1, and
+$(cluster.name)-$(component.name)-$(template.name)-7.

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+

+InstanceTemplateStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+ +

+InstanceTemplateStatus aggregates the status of replicas for each InstanceTemplate.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name, the name of the InstanceTemplate. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Replicas is the number of replicas of the InstanceTemplate. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ +(Optional) + +

+ReadyReplicas is the number of Pods that have a Ready Condition. +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+AvailableReplicas is the number of Pods that have been ready for at least minReadySeconds.

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ +(Optional) + +

+UpdatedReplicas is the number of Pods created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+

+MemberStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podName`
+ +string + + +
+ + +

+Represents the name of the pod. +

+ +
+ +`role`
+ + +ReplicaRole + + + +
+ +(Optional) + +

+Defines the role of the replica in the cluster. +

+ +
+

+MemberUpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+MemberUpdateStrategy defines Cluster Component update strategy. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +
+ +

+"Parallel" +

+
+ +
+ +

+"Serial" +

+
+ +
+

+MembershipReconfiguration + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`switchover`
+ + +Action + + + +
+ +(Optional) + +

+Defines the procedure for a controlled transition of a role to a new replica. +

+ +
+
+

workloads.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+InstanceSet + +

+
+ +

+InstanceSet is the Schema for the instancesets API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`workloads.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`InstanceSet` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+Contains the metadata for the particular object, such as name, namespace, labels, and annotations. +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +InstanceSetSpec + + + +
+ + +

+Defines the desired state of the state machine. It includes the configuration details for the state machine. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template.
+The Ordinals are used to specify the ordinals of the instance (pod) names generated under the default template.

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under the default template would be
+$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1, and $(cluster.name)-$(component.name)-7.

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`service`
+ + +Kubernetes core/v1.Service + + + +
+ +(Optional) + +

+Defines the behavior of a service spec. +Provides read-write service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in default Template. +

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) is based on the InstanceSet Name, InstanceTemplate Name, and ordinal.
+The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal).
+By default, the ordinal starts from 0 for each InstanceTemplate.
+It is important to ensure that the Name of each InstanceTemplate is unique.

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
    +
  1. +The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential future reuse or data recovery, but it is no longer actively used.
  2. +The ordinal number assigned to this instance is preserved, ensuring it remains unique and avoiding conflicts with new instances.
+ +

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
+The default Concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated:

+
    +
  • +`StrictInPlace` indicates that only in-place upgrades are allowed; any attempt to modify other fields will be rejected.
  • +`PreferInPlace` indicates that an in-place upgrade of the Pod is attempted first. If that fails, it falls back to ReCreate, where the pod is recreated. The default value is “PreferInPlace”.
+ +
+ +`updateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ + +

+Indicates the StatefulSetUpdateStrategy that will be
+employed to update Pods in the InstanceSet when a revision is made to the
+Template.

+ +

+Note: This field will be removed in a future version.

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Provides a method to probe the role.

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions for dynamic membership reconfiguration.

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members (Pods) update strategy.

+
    +
  • +serial: update Members one by one, which guarantees the minimum component-unavailable time.
  • +bestEffortParallel: update Members in parallel, which guarantees the minimum component-unwritable time.
  • +parallel: force parallel updates.
+ +
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`credential`
+ + +Credential + + + +
+ +(Optional) + +

+Credential used to connect to the DB engine.

+ +
+ +
+ +`status`
+ + +InstanceSetStatus + + + +
+ + +

+Represents the current information about the state machine. This data may be out of date. +

+ +
+

+AccessMode +(`string` alias) +

+ +

+ +(Appears on:ReplicaRole) + +

+
+ +

+AccessMode defines SVC access mode enums. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"None" +

+
+ +
+ +

+"ReadWrite" +

+
+ +
+ +

+"Readonly" +

+
+ +
+

+Action + +

+ +

+ +(Appears on:MembershipReconfiguration, RoleProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ +(Optional) + +

+Refers to the utility image that contains the command which can be utilized to retrieve or process role information. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+A set of instructions that will be executed within the Container to retrieve or process role information. This field is required. +

+ +
+ +`args`
+ +[]string + + +
+ +(Optional) + +

+Additional parameters used to perform specific statements. This field is optional. +

+ +
+

+ConditionType +(`string` alias) +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"InstanceAvailable" +

+
+ +

+InstanceAvailable ConditionStatus will be True if all instances (pods) are in the ready condition
+and remain so for “MinReadySeconds” seconds. Otherwise, it will be set to False.

+ +
+ +

+"InstanceFailure" +

+
+ +

+InstanceFailure is added in an instance set when at least one of its instances (pods) is in a `Failed` phase.

+ +
+ +

+"InstanceReady" +

+
+ +

+InstanceReady is added in an instance set when at least one of its instances (pods) is in a Ready condition.
+ConditionStatus will be True if all its instances (pods) are in a Ready condition.
+Otherwise, a NotReady reason, with the not-ready instances encoded in the Message field, will be set.

+ +
+ +

+"InstanceUpdateRestricted" +

+
+ +

+InstanceUpdateRestricted represents a ConditionType that indicates updates to an InstanceSet are blocked (when the
+PodUpdatePolicy is set to StrictInPlace but the pods cannot be updated in-place).

+ +
+

+Credential + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`username`
+ + +CredentialVar + + + +
+ + +

+Defines the user’s name for the credential. +The corresponding environment variable will be KB_ITS_USERNAME. +

+ +
+ +`password`
+ + +CredentialVar + + + +
+ + +

+Represents the user’s password for the credential. +The corresponding environment variable will be KB_ITS_PASSWORD. +
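+
+A hedged sketch combining both variants (the secret name and key are assumptions):
+
+credential:
+  username:
+    value: root
+  password:
+    valueFrom:
+      secretKeyRef:
+        name: mycluster-conn-credential
+        key: password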

+ +
+

+CredentialVar + +

+ +

+ +(Appears on:Credential) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`value`
+ +string + + +
+ +(Optional) + +

+Specifies the value of the environment variable. This field is optional and defaults to an empty string. +The value can include variable references in the format $(VAR_NAME) which will be expanded using previously defined environment variables in the container and any service environment variables. +

+ +

+If a variable cannot be resolved, the reference in the input string will remain unchanged. +Double $$ can be used to escape the $(VAR_NAME) syntax, resulting in a single $ and producing the string literal “$(VAR_NAME)”. +Escaped references will not be expanded, regardless of whether the variable exists or not. +

+ +
+ +`valueFrom`
+ + +Kubernetes core/v1.EnvVarSource + + + +
+ +(Optional) + +

+Defines the source for the environment variable’s value. This field is optional and cannot be used if the ‘Value’ field is not empty. +

+ +
+

+InstanceSetSpec + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetSpec defines the desired state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified. +

+ +
+ +`defaultTemplateOrdinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of the default template.
+The Ordinals are used to specify the ordinals of the instance (pod) names generated under the default template.

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under the default template would be
+$(cluster.name)-$(component.name)-0, $(cluster.name)-$(component.name)-1, and $(cluster.name)-$(component.name)-7.

+ +
+ +`minReadySeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready. +

+ +
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Represents a label query over pods that should match the desired replica count indicated by the `replica` field. +It must match the labels defined in the pod template. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +

+ +
+ +`service`
+ + +Kubernetes core/v1.Service + + + +
+ +(Optional) + +

+Defines the behavior of a service spec. +Provides read-write service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`template`
+ + +Kubernetes core/v1.PodTemplateSpec + + + +
+ + +
+ +`instances`
+ + +[]InstanceTemplate + + + +
+ +(Optional) + +

+Overrides values in the default Template.

+ +

+Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates. +

+ +

+The naming convention for instances (pods) is based on the InstanceSet Name, InstanceTemplate Name, and ordinal.
+The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal).
+By default, the ordinal starts from 0 for each InstanceTemplate.
+It is important to ensure that the Name of each InstanceTemplate is unique.

+ +

+The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Specifies the names of instances to be transitioned to offline status. +

+ +

+Marking an instance as offline results in the following: +

+
+1. The associated pod is stopped, and its PersistentVolumeClaim (PVC) is retained for potential
+future reuse or data recovery, but it is no longer actively used.
+2. The ordinal number assigned to this instance is preserved, ensuring it remains unique
+and avoiding conflicts with new instances.
+

+Setting instances to offline allows for a controlled scale-in process, preserving their data and maintaining +ordinal consistency within the cluster. +Note that offline instances and their associated resources, such as PVCs, are not automatically deleted. +The cluster administrator must manually manage the cleanup and removal of these resources when they are no longer needed. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Specifies a list of PersistentVolumeClaim templates that define the storage requirements for each replica. +Each template specifies the desired characteristics of a persistent volume, such as storage class, +size, and access modes. +These templates are used to dynamically provision persistent volumes for replicas upon their creation. +The final name of each PVC is generated by appending the pod’s identifier to the name specified in volumeClaimTemplates[*].name. +

+ +
+ +`podManagementPolicy`
+ + +Kubernetes apps/v1.PodManagementPolicyType + + + +
+ +(Optional) + +

+Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down. +

+ +

+The default policy is `OrderedReady`, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is `Parallel` which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once. +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`parallelPodManagementConcurrency`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when `PodManagementPolicy` is set to `Parallel`.
+The default Concurrency is 100%.

+ +
+ +`podUpdatePolicy`
+ + +PodUpdatePolicyType + + + +
+ +(Optional) + +

+PodUpdatePolicy indicates how pods should be updated.
+
+- `StrictInPlace` allows only in-place upgrades.
+Any attempt to modify other fields will be rejected.
+- `PreferInPlace` first attempts an in-place upgrade of the Pod.
+If that fails, the Pod is recreated instead.
+The default value is `PreferInPlace`.
+
+ +`updateStrategy`
+ + +InstanceUpdateStrategy + + + +
+ + +

+Indicates the StatefulSetUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +

+ +

+Note: This field will be removed in a future version.

+ +
+ +`roles`
+ + +[]ReplicaRole + + + +
+ +(Optional) + +

+A list of roles defined in the system. +

+ +
+ +`roleProbe`
+ + +RoleProbe + + + +
+ +(Optional) + +

+Provides a method to probe the role.

+ +
+ +`membershipReconfiguration`
+ + +MembershipReconfiguration + + + +
+ +(Optional) + +

+Provides actions for dynamic membership reconfiguration.

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members (Pods) update strategy.
+
+- serial: update Members one by one, guaranteeing the minimum component-unavailable time.
+- bestEffortParallel: update Members in parallel, guaranteeing the minimum component-unwritable time.
+- parallel: force parallel updates.
+
+ +`paused`
+ +bool + + +
+ +(Optional) + +

+Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. +

+ +
+ +`credential`
+ + +Credential + + + +
+ +(Optional) + +

+Credential used to connect to the DB engine.

+ +
+
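+
+Putting several of the fields above together, a minimal InstanceSet sketch might look like the
+following (the names, labels, and image are hypothetical, and the apiVersion is an assumption
+based on this API group):
+
+```yaml
+apiVersion: workloads.kubeblocks.io/v1alpha1
+kind: InstanceSet
+metadata:
+  name: demo-its
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: demo                   # must match the labels in the pod template
+  template:
+    metadata:
+      labels:
+        app: demo
+    spec:
+      containers:
+        - name: main
+          image: demo/image:1.0   # hypothetical image
+  podUpdatePolicy: PreferInPlace
+  offlineInstances:
+    - demo-its-1                  # taken offline; its PVC is retained and its ordinal reserved
+```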

+InstanceSetStatus + +

+ +

+ +(Appears on:InstanceSet) + +

+
+ +

+InstanceSetStatus defines the observed state of InstanceSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+observedGeneration is the most recent generation observed for this InstanceSet. It corresponds to the +InstanceSet’s generation, which is updated on mutation by the API Server. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+replicas is the number of instances created by the InstanceSet controller. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ + +

+readyReplicas is the number of instances created for this InstanceSet with a Ready Condition. +

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ + +

+updatedReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+ +`currentRevision`
+ +string + + +
+ + +

+currentRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the +sequence [0,currentReplicas). +

+ +
+ +`updateRevision`
+ +string + + +
+ + +

+updateRevision, if not empty, indicates the version of the InstanceSet used to generate instances in the sequence +[replicas-updatedReplicas,replicas) +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents the latest available observations of an instanceset’s current state. +Known .status.conditions.type are: “InstanceFailure”, “InstanceReady” +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+Total number of available instances (ready for at least minReadySeconds) targeted by this InstanceSet. +

+ +
+ +`initReplicas`
+ +int32 + + +
+ +(Optional) + +

+Defines the initial number of instances when the cluster is first initialized.
+This value is set to spec.Replicas at the time of object creation and remains constant thereafter.
+Used only when spec.roles is set.

+ +
+ +`readyInitReplicas`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of instances that have already reached the MembersStatus during the cluster initialization stage.
+This value remains constant once it equals InitReplicas.
+Used only when spec.roles is set.

+ +
+ +`membersStatus`
+ + +[]MemberStatus + + + +
+ +(Optional) + +

+Provides the status of each member in the cluster. +

+ +
+ +`readyWithoutPrimary`
+ +bool + + +
+ +(Optional) + +

+Indicates whether it is required for the InstanceSet to have at least one primary instance ready. +

+ +
+ +`currentRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+currentRevisions, if not empty, indicates the old version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`updateRevisions`
+ +map[string]string + + +
+ +(Optional) + +

+updateRevisions, if not empty, indicates the new version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision. +

+ +
+ +`templatesStatus`
+ + +[]InstanceTemplateStatus + + + +
+ +(Optional) + +

+TemplatesStatus represents the status of each instance generated by InstanceTemplates.

+ +
+

+InstanceTemplate + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+InstanceTemplate allows customization of individual replica configurations within a Component, +without altering the base component template defined in ClusterComponentSpec. +It enables the application of distinct settings to specific instances (replicas), +providing flexibility while maintaining a common configuration baseline. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name specifies the unique name of the instance Pod created using this InstanceTemplate. +This name is constructed by concatenating the component’s name, the template’s name, and the instance’s ordinal +using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. +The specified name overrides any default naming conventions or patterns. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of instances (Pods) to create from this InstanceTemplate. +This field allows setting how many replicated instances of the component, +with the specific overrides in the InstanceTemplate, are created. +The default value is 1. A value of 0 disables instance creation. +

+ +
+ +`ordinals`
+ + +Ordinals + + + +
+ + +

+Specifies the desired Ordinals of this InstanceTemplate.
+The Ordinals are used to specify the ordinals of the instance (pod) names to be generated under this InstanceTemplate.

+ +

+For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
+then the instance names generated under this InstanceTemplate would be
+$(cluster.name)-$(component.name)-$(template.name)-0, $(cluster.name)-$(component.name)-$(template.name)-1 and
+$(cluster.name)-$(component.name)-$(template.name)-7

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs to be merged into the Pod’s existing annotations. +Existing keys will have their values overwritten, while new keys will be added to the annotations. +

+ +
+ +`labels`
+ +map[string]string + + +
+ +(Optional) + +

+Specifies a map of key-value pairs that will be merged into the Pod’s existing labels. +Values for existing keys will be overwritten, and new keys will be added. +

+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies an override for the first container’s image in the pod. +

+ +
+ +`schedulingPolicy`
+ + +SchedulingPolicy + + + +
+ +(Optional) + +

+Specifies the scheduling policy for the Component. +

+ +
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies an override for the resource requirements of the first container in the Pod. +This field allows for customizing resource allocation (CPU, memory, etc.) for the container. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines Env to override. +Add new or override existing envs. +

+ +
+ +`volumes`
+ + +[]Kubernetes core/v1.Volume + + + +
+ +(Optional) + +

+Defines Volumes to override. +Add new or override existing volumes. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Defines VolumeMounts to override. +Add new or override existing volume mounts of the first container in the pod. +

+ +
+ +`volumeClaimTemplates`
+ + +[]Kubernetes core/v1.PersistentVolumeClaim + + + +
+ +(Optional) + +

+Defines VolumeClaimTemplates to override. +Add new or override existing volume claim templates. +

+ +
+
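+
+A sketch of how `instances` entries can override the default template, continuing the InstanceSet
+sketch above (values are illustrative; instance names follow the
+$(instance_set.name)-$(template.name)-$(ordinal) pattern described earlier):
+
+```yaml
+spec:
+  replicas: 5                     # total across the default template and all InstanceTemplates
+  instances:
+    - name: high-mem              # yields pods demo-its-high-mem-0 and demo-its-high-mem-1
+      replicas: 2
+      resources:
+        limits:
+          memory: 8Gi             # overrides the first container of the pod template
+    - name: edge
+      replicas: 1
+      ordinals:
+        discrete: [7]             # yields pod demo-its-edge-7
+  # the remaining 2 replicas are generated from the default template
+```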

+InstanceTemplateStatus + +

+ +

+ +(Appears on:InstanceSetStatus) + +

+
+ +

+InstanceTemplateStatus aggregates the status of replicas for each InstanceTemplate +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Name, the name of the InstanceTemplate. +

+ +
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Replicas is the number of replicas of the InstanceTemplate. +

+ +
+ +`readyReplicas`
+ +int32 + + +
+ +(Optional) + +

+ReadyReplicas is the number of Pods that have a Ready Condition. +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+AvailableReplicas is the number of Pods that have been ready for at least minReadySeconds.

+ +
+ +`currentReplicas`
+ +int32 + + +
+ + +

+currentReplicas is the number of instances created by the InstanceSet controller from the InstanceSet version +indicated by CurrentRevisions. +

+ +
+ +`updatedReplicas`
+ +int32 + + +
+ +(Optional) + +

+UpdatedReplicas is the number of Pods created by the InstanceSet controller from the InstanceSet version +indicated by UpdateRevisions. +

+ +
+

+InstanceUpdateStrategy + +

+ +

+ +(Appears on:InstanceSetSpec) + +

+
+ +

+InstanceUpdateStrategy indicates the strategy that the InstanceSet +controller will use to perform updates. It includes any additional parameters +necessary to perform the update for the indicated strategy. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`partition`
+ +int32 + + +
+ +(Optional) + +

+Partition indicates the number of pods that should be updated during a rolling update. +The remaining pods will remain untouched. This is helpful in defining how many pods +should participate in the update process. The update process will follow the order +of pod names in descending lexicographical (dictionary) order. The default value is +Replicas (i.e., update all pods). +

+ +
+ +`maxUnavailable`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+The maximum number of pods that can be unavailable during the update.
+Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+Absolute number is calculated from percentage by rounding up. This cannot be 0.
+Defaults to 1. The field applies to all pods: if there is any unavailable pod,
+it will be counted towards MaxUnavailable.

+ +
+ +`memberUpdateStrategy`
+ + +MemberUpdateStrategy + + + +
+ +(Optional) + +

+Members (Pods) update strategy.
+
+- serial: update Members one by one, guaranteeing the minimum component-unavailable time.
+- bestEffortParallel: update Members in parallel, guaranteeing the minimum component-unwritable time.
+- parallel: force parallel updates.
+
+
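+
+For example, a partitioned rolling update sketch (values are illustrative; per the description
+above, `partition` here is the number of pods to update, and the update follows descending
+lexicographical order of pod names):
+
+```yaml
+spec:
+  updateStrategy:
+    partition: 2                  # update only 2 pods; the rest stay untouched
+    maxUnavailable: 1             # at most one pod may be unavailable at a time
+    memberUpdateStrategy: Serial  # update members one by one
+```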

+MemberStatus + +

+ +

+ +(Appears on:ClusterComponentStatus, InstanceSetStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podName`
+ +string + + +
+ + +

+Represents the name of the pod. +

+ +
+ +`role`
+ + +ReplicaRole + + + +
+ +(Optional) + +

+Defines the role of the replica in the cluster. +

+ +
+

+MemberUpdateStrategy +(`string` alias) +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec, InstanceUpdateStrategy) + +

+
+ +

+MemberUpdateStrategy defines the Cluster Component update strategy.

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"BestEffortParallel" +

+
+ +
+ +

+"Parallel" +

+
+ +
+ +

+"Serial" +

+
+ +
+

+MembershipReconfiguration + +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`switchoverAction`
+ + +Action + + + +
+ +(Optional) + +

+Specifies the environment variables that can be used in all following Actions: +- KB_ITS_USERNAME: Represents the username part of the credential +- KB_ITS_PASSWORD: Represents the password part of the credential +- KB_ITS_LEADER_HOST: Represents the leader host +- KB_ITS_TARGET_HOST: Represents the target host +- KB_ITS_SERVICE_PORT: Represents the service port +

+ +

+Defines the action to perform a switchover. +If the Image is not configured, the latest BusyBox image will be used. +

+ +
+ +`memberJoinAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to add a member. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+ +`memberLeaveAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to remove a member. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+ +`logSyncAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to trigger the new member to start log syncing. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+ +`promoteAction`
+ + +Action + + + +
+ +(Optional) + +

+Defines the action to inform the cluster that the new member can join voting now. +If the Image is not configured, the Image from the previous non-nil action will be used. +

+ +
+

+Ordinals + +

+ +

+ +(Appears on:InstanceSetSpec, InstanceTemplate) + +

+
+ +

+Ordinals represents a combination of continuous segments and individual values. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ranges`
+ + +[]Range + + + +
+ + +
+ +`discrete`
+ +[]int32 + + +
+ + +
+
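+
+For example, the following Ordinals sketch yields the ordinals 0, 1, and 7:
+
+```yaml
+ordinals:
+  ranges:
+    - start: 0
+      end: 1                      # the continuous segment 0..1
+  discrete: [7]                   # plus the individual ordinal 7
+```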

+PodUpdatePolicyType +(`string` alias) +

+ +

+ +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"PreferInPlace" +

+
+ +

+PreferInPlacePodUpdatePolicyType indicates that an in-place upgrade of the Pod will be attempted first.
+If that fails, the Pod will be recreated instead.

+ +
+ +

+"StrictInPlace" +

+
+ +

+StrictInPlacePodUpdatePolicyType indicates that only in-place upgrades are allowed.
+Any attempt to modify other fields will be rejected.

+ +
+

+Range + +

+ +

+ +(Appears on:Ordinals) + +

+
+ +

+Range represents a range with a start and an end value. +It is used to define a continuous segment. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`start`
+ +int32 + + +
+ + +
+ +`end`
+ +int32 + + +
+ + +
+

+ReplicaRole + +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec, MemberStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the role name of the replica. +

+ +
+ +`accessMode`
+ + +AccessMode + + + +
+ + +

+Specifies the service capabilities of this member. +

+ +
+ +`canVote`
+ +bool + + +
+ +(Optional) + +

+Indicates if this member has voting rights. +

+ +
+ +`isLeader`
+ +bool + + +
+ +(Optional) + +

+Determines if this member is the leader. +

+ +
+

+RoleProbe + +

+ +

+ +(Appears on:RSMSpec, InstanceSetSpec) + +

+
+ +

+RoleProbe defines how to observe the role.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`builtinHandlerName`
+ +string + + +
+ +(Optional) + +

+Specifies the builtin handler name to use to probe the role of the main container. +Available handlers include: mysql, postgres, mongodb, redis, etcd, kafka. +Use CustomHandler to define a custom role probe function if none of the built-in handlers meet the requirement. +

+ +
+ +`customHandler`
+ + +[]Action + + + +
+ +(Optional) + +

+Defines a custom method for role probing. +Actions defined here are executed in series. +Upon completion of all actions, the final output should be a single string representing the role name defined in spec.Roles. +The latest BusyBox image will be used if Image is not configured. +Environment variables can be used in Command: +- v_KB_ITS_LASTSTDOUT: stdout from the last action, watch for ‘v’ prefix +- KB_ITS_USERNAME: username part of the credential +- KB_ITS_PASSWORD: password part of the credential +

+ +
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the container has started before initiating role probing. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency (in seconds) of probe execution. +

+ +
+ +`successThreshold`
+ +int32 + + +
+ +(Optional) + +

+Specifies the minimum number of consecutive successes for the probe to be considered successful after having failed. +

+ +
+ +`failureThreshold`
+ +int32 + + +
+ +(Optional) + +

+Specifies the minimum number of consecutive failures for the probe to be considered failed after having succeeded. +

+ +
+ +`roleUpdateMechanism`
+ + +RoleUpdateMechanism + + + +
+ +(Optional) + +

+Specifies the method for updating the pod role label. +

+ +
+
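+
+A minimal role-probe sketch using a builtin handler (timing values are illustrative):
+
+```yaml
+roleProbe:
+  builtinHandlerName: mysql       # one of: mysql, postgres, mongodb, redis, etcd, kafka
+  initialDelaySeconds: 5
+  periodSeconds: 2
+  timeoutSeconds: 1
+  successThreshold: 1
+  failureThreshold: 3
+  roleUpdateMechanism: DirectAPIServerEventUpdate
+```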

+RoleUpdateMechanism +(`string` alias) +

+ +

+ +(Appears on:RoleProbe) + +

+
+ +

+RoleUpdateMechanism defines how the pod role label is updated.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"DirectAPIServerEventUpdate" +

+
+ +
+ +

+"ReadinessProbeEventUpdate" +

+
+ +
+

+SchedulingPolicy + +

+ +

+ +(Appears on:InstanceTemplate) + +

+
+ +

+SchedulingPolicy defines the scheduling policy.
+Deprecated: Unify with apps/v1alpha1.SchedulingPolicy

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+If specified, the Pod will be dispatched by the specified scheduler.
+If not specified, the Pod will be dispatched by the default scheduler.

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+NodeSelector is a selector that must match a node’s labels for the Pod to be scheduled on that node.
+More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, +the scheduler simply schedules this Pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Specifies a group of affinity scheduling rules of the Cluster, including NodeAffinity, PodAffinity, and PodAntiAffinity. +

+ +
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Allows Pods to be scheduled onto nodes with matching taints. +Each toleration in the array allows the Pod to tolerate node taints based on +specified `key`, `value`, `effect`, and `operator`. +

+
+- The `key`, `value`, and `effect` identify the taint that the toleration matches.
+- The `operator` determines how the toleration matches the taint.
+

+Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+TopologySpreadConstraints describes how a group of Pods ought to spread across topology +domains. Scheduler will schedule Pods in a way which abides by the constraints. +All topologySpreadConstraints are ANDed. +

+ +
+
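+
+A schedulingPolicy sketch combining the fields above (the labels, taint, and values are hypothetical):
+
+```yaml
+schedulingPolicy:
+  nodeSelector:
+    disktype: ssd                 # schedule only onto nodes carrying this label
+  tolerations:
+    - key: dedicated
+      operator: Equal
+      value: database
+      effect: NoSchedule          # tolerate nodes tainted for database workloads
+  topologySpreadConstraints:
+    - maxSkew: 1
+      topologyKey: topology.kubernetes.io/zone
+      whenUnsatisfiable: DoNotSchedule
+      labelSelector:
+        matchLabels:
+          app: demo               # hypothetical pod label
+```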
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/api-reference/dataprotection.mdx b/docs/zh/preview/user_docs/references/api-reference/dataprotection.mdx new file mode 100644 index 00000000..cac54810 --- /dev/null +++ b/docs/zh/preview/user_docs/references/api-reference/dataprotection.mdx @@ -0,0 +1,11576 @@ +--- +title: Dataprotection API Reference +description: Dataprotection API Reference +keywords: [dataprotection, api] +sidebar_position: 4 +sidebar_label: Dataprotection +--- +
+ +

+Packages: +

+ +

dataprotection.kubeblocks.io/v1alpha1

+
+
+Resource Types: + +

+ActionSet + +

+
+ +

+ActionSet is the Schema for the actionsets API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ActionSet` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ActionSetSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backupType`
+ + +BackupType + + + +
+ + +

+Specifies the backup type. Supported values include: +

+
+- `Full` for a full backup.
+- `Incremental` backs up data that has changed since the last backup (either full or incremental).
+- `Differential` backs up data that has changed since the last full backup.
+- `Continuous` backs up transaction logs continuously, such as MySQL binlog, PostgreSQL WAL, etc.
+- `Selective` backs up data more precisely, using custom parameters, such as specific databases or tables.
+

+Continuous backup is essential for implementing Point-in-Time Recovery (PITR). +

+ +
+ +`parametersSchema`
+ + +ActionSetParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema of parameters in backups and restores before their usage. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be set in the container. +

+ +
+ +`envFrom`
+ + +[]Kubernetes core/v1.EnvFromSource + + + +
+ +(Optional) + +

+Specifies a list of sources to populate environment variables in the container. +The keys within a source must be a C_IDENTIFIER. Any invalid keys will be +reported as an event when the container starts. If a key exists in multiple +sources, the value from the last source will take precedence. Any values +defined by an Env with a duplicate key will take precedence. +

+ +

+This field cannot be updated. +

+ +
+ +`backup`
+ + +BackupActionSpec + + + +
+ +(Optional) + +

+Specifies the backup action. +

+ +
+ +`restore`
+ + +RestoreActionSpec + + + +
+ +(Optional) + +

+Specifies the restore action. +

+ +
+ +
+ +`status`
+ + +ActionSetStatus + + + +
+ + +
+
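+
+An ActionSet sketch for a full backup (the image, command, and paths are hypothetical; the
+fields under `backup` and `restore` follow the BackupActionSpec and RestoreActionSpec types
+referenced above, and `prepareData` is an assumption about RestoreActionSpec's field name):
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: ActionSet
+metadata:
+  name: demo-full-backup
+spec:
+  backupType: Full
+  env:
+    - name: DATA_DIR
+      value: /var/lib/demo        # hypothetical data directory
+  backup:
+    backupData:
+      image: demo/backup-tool:1.0 # hypothetical image
+      command:
+        - /bin/sh
+        - -c
+        - "backup-tool dump ${DATA_DIR}"   # hypothetical command
+  restore:
+    prepareData:
+      image: demo/backup-tool:1.0
+      command:
+        - /bin/sh
+        - -c
+        - "backup-tool load ${DATA_DIR}"   # hypothetical command
+```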

+Backup + +

+
+ +

+Backup is the Schema for the backups API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Backup` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backup policy to be applied for this backup. +

+ +
+ +`backupMethod`
+ +string + + +
+ + +

+Specifies the backup method name that is defined in the backup policy. +

+ +
+ +`deletionPolicy`
+ + +BackupDeletionPolicy + + + +
+ + +

+Determines whether the backup contents stored in the backup repository
+should be deleted when the backup custom resource (CR) is deleted.
+Supported values are `Retain` and `Delete`.
+
+- `Retain` means that the backup content and its physical snapshot in the backup repository are kept.
+- `Delete` means that the backup content and its physical snapshot in the backup repository are deleted.
+
+Deleting only the backup CR while retaining the backup contents in the backup repository is not yet
+supported; the current implementation only prevents accidental deletion of backup data.

+ +
+ +`retentionPeriod`
+ + +RetentionPeriod + + + +
+ +(Optional) + +

+Determines a duration up to which the backup should be kept.
+The controller will remove all backups that are older than the RetentionPeriod.
+If not set, the backup will be kept forever.
+For example, a RetentionPeriod of `30d` will keep only the backups of the last 30 days.
+Sample duration format:
+
+- years: 2y
+- months: 6mo
+- days: 30d
+- hours: 12h
+- minutes: 30m
+

+You can also combine the above durations. For example: 30d12h30m. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+Determines the parent backup name for incremental or differential backup. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+ +
+ +`status`
+ + +BackupStatus + + + +
+ + +
+
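+
+A minimal Backup sketch (names are hypothetical; `backupPolicyName` must reference an existing
+BackupPolicy and `backupMethod` one of its methods):
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: Backup
+metadata:
+  name: demo-backup
+spec:
+  backupPolicyName: demo-backup-policy   # hypothetical BackupPolicy
+  backupMethod: volume-snapshot          # a method defined in that policy
+  deletionPolicy: Delete                 # also delete repository contents when the CR is deleted
+  retentionPeriod: 30d12h                # combined duration format, as described above
+```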

+BackupPolicy + +

+
+ +

+BackupPolicy is the Schema for the backuppolicies API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`BackupPolicy` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupPolicySpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backupRepoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of BackupRepo where the backup data will be stored. +If not set, data will be stored in the default backup repository. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the directory inside the backup repository to store the backup. +This path is relative to the path of the backup repository. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the backup as failed. +

+ +
+ +`target`
+ + +BackupTarget + + + +
+ + +

+Specifies the target information to back up, such as the target pod, the +cluster connection credential. +

+ +
+ +`targets`
+ + +[]BackupTarget + + + +
+ + +

+Specifies multiple target information for backup operations. This includes details
+such as the target pod and cluster connection credentials. All specified targets
+will be backed up collectively.

+ +
+ +`backupMethods`
+ + +[]BackupMethod + + + +
+ + +

+Defines the backup methods. +

+ +
+ +`useKopia`
+ +bool + + +
+ +(Optional) + +

+Specifies whether backup data should be stored in a Kopia repository. +

+ +

+Data within the Kopia repository is both compressed and encrypted. Furthermore, +data deduplication is implemented across various backups of the same cluster. +This approach significantly reduces the actual storage usage, particularly +for clusters with a low update frequency. +

+ +

+NOTE: This feature should NOT be enabled when using KubeBlocks Community Edition; otherwise, the backup will not be processed.

+ +
+ +`encryptionConfig`
+ + +EncryptionConfig + + + +
+ +(Optional) + +

+Specifies the parameters for encrypting backup data. +Encryption will be disabled if the field is not set. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Specifies the backup retention policy. This takes precedence over `backup.spec.retentionPeriod`.

+ +
+ +
+ +`status`
+ + +BackupPolicyStatus + + + +
+ + +
+

+BackupRepo + +

+
+ +

+BackupRepo is a repository for storing backup data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`BackupRepo` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupRepoSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`storageProviderRef`
+ +string + + +
+ + +

+Specifies the name of the `StorageProvider` used by this backup repository. +

+ +
+ +`accessMethod`
+ + +AccessMethod + + + +
+ +(Optional) + +

+Specifies the access method of the backup repository. +

+ +
+ +`volumeCapacity`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the capacity of the PVC created by this backup repository. +

+ +
+ +`pvReclaimPolicy`
+ + +Kubernetes core/v1.PersistentVolumeReclaimPolicy + + + +
+ + +

+Specifies reclaim policy of the PV created by this backup repository. +

+ +
+ +`config`
+ +map[string]string + + +
+ +(Optional) + +

+Stores the non-secret configuration parameters for the `StorageProvider`. +

+ +
+ +`credential`
+ + +Kubernetes core/v1.SecretReference + + + +
+ +(Optional) + +

+References to the secret that holds the credentials for the `StorageProvider`. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the prefix of the path for storing backup data. +

+ +
+ +
+ +`status`
+ + +BackupRepoStatus + + + +
+ + +
+
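+
+A BackupRepo sketch for S3-style storage (the provider name, bucket, region, Secret, and
+namespace are hypothetical; the `config` keys are defined by the referenced StorageProvider):
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: BackupRepo
+metadata:
+  name: demo-repo
+spec:
+  storageProviderRef: s3          # assumes an `s3` StorageProvider exists
+  accessMethod: Tool              # access via the datasafed tool rather than a local mount
+  pvReclaimPolicy: Retain
+  volumeCapacity: 100Gi
+  config:
+    bucket: demo-bucket           # non-secret, provider-specific parameters
+    region: us-west-1
+  credential:
+    name: s3-credentials          # Secret holding accessKey/secretKey
+    namespace: kb-system          # hypothetical namespace
+```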

+BackupSchedule + +

+
+ +

+BackupSchedule is the Schema for the backupschedules API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`BackupSchedule` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupScheduleSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + +
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backupPolicy to be applied for the `schedules`. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Defines the deadline in minutes for starting the backup workload if it +misses its scheduled time for any reason. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ + +

+Defines the list of backup schedules. +

+ +
+ +
+ +`status`
+ + +BackupScheduleStatus + + + +
+ + +
+
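+
+A BackupSchedule sketch (names are hypothetical; the fields inside `schedules` are an assumption
+about the SchedulePolicy type, which is not expanded here):
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: BackupSchedule
+metadata:
+  name: demo-backup-schedule
+spec:
+  backupPolicyName: demo-backup-policy
+  startingDeadlineMinutes: 10
+  schedules:
+    - backupMethod: volume-snapshot   # assumed SchedulePolicy fields
+      enabled: true
+      cronExpression: "0 2 * * *"     # daily at 02:00
+      retentionPeriod: 7d
+```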

+Restore + +

+
+ +

+Restore is the Schema for the restores API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Restore` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +RestoreSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`backup`
+ + +BackupRef + + + +
+ + +

+Specifies the backup to be restored. The restore behavior is based on the backup type: +

+
+1. Full: the full backup is restored directly.
+2. Incremental: restored sequentially, starting from the most recent full backup on which this
+incremental backup is based.
+3. Differential: restored sequentially, starting from the parent backup of the differential backup.
+4. Continuous: the most recent full backup before this point in time is found, and the continuous
+backups after it are used for the restore.
+
+ +`restoreTime`
+ +string + + +
+ +(Optional) + +

+Specifies the point in time for restoring. +

+ +
+ +`resources`
+ + +RestoreKubeResources + + + +
+ +(Optional) + +

+Restores the specified resources of Kubernetes. +

+ +
+ +`prepareDataConfig`
+ + +PrepareDataConfig + + + +
+ +(Optional) + +

+Configuration for the action of the “prepareData” phase, including the persistent volume claims
+that need to be restored and the scheduling strategy of the temporary recovery pod.

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the service account name needed for recovery pod. +

+ +
+ +`readyConfig`
+ + +ReadyConfig + + + +
+ +(Optional) + +

+Configuration for the action of “postReady” phase. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to set in the container for restore. These will be +merged with the env of Backup and ActionSet. +

+ +

+The priority of merging is as follows: `Restore env > Backup env > ActionSet env`. +

+ +
+ +`containerResources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the required resources of restore job’s container. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the restore failed. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+ +
+ +`status`
+ + +RestoreStatus + + + +
+ + +
+
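+
+A Restore sketch for point-in-time recovery (names are hypothetical; `name`/`namespace` under
+`backup` are an assumption about the BackupRef type):
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: Restore
+metadata:
+  name: demo-restore
+spec:
+  backup:
+    name: demo-continuous-backup   # hypothetical Backup to restore from
+    namespace: default
+  restoreTime: "2025-07-16T02:00:00Z"   # point in time, for Continuous backups
+  backoffLimit: 3
+```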

+StorageProvider + +

+
+ +

+StorageProvider comprises specifications that provide guidance on accessing remote storage.
+Currently, the supported access methods are via a dedicated CSI driver or the `datasafed` tool.
+In the case of a CSI driver, the specification expounds on provisioning PVCs for that driver.
+As for the `datasafed` tool, the specification provides insights on generating the necessary
+configuration file.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`dataprotection.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`StorageProvider` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +StorageProviderSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`csiDriverName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the CSI driver used to access remote storage.
+This field can be empty; if so, the storage is not accessible via CSI.

+ +
+ +`csiDriverSecretTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that is used to render and generate `k8s.io/api/core/v1.Secret`
+resources for a specific CSI driver.
+For example, `accessKey` and `secretKey` needed by CSI-S3 are stored in this
+`Secret` resource.

+ +
+ +`storageClassTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template utilized to render and generate `kubernetes.storage.k8s.io.v1.StorageClass`
+resources. The `StorageClass` created by this template is aimed at using the CSI driver.

+ +
+ +`persistentVolumeClaimTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that renders and generates `k8s.io/api/core/v1.PersistentVolumeClaim` +resources. This PVC can reference the `StorageClass` created from `storageClassTemplate`, +allowing Pods to access remote storage by mounting the PVC. +

+ +
+ +`datasafedConfigTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template used to render and generate `k8s.io/api/core/v1.Secret`.
+This `Secret` involves the configuration details required by the `datasafed` tool
+to access remote storage. For example, the `Secret` should contain `endpoint`,
+`bucket`, `region`, `accessKey`, `secretKey`, or something else for S3 storage.
+This field can be empty; if so, this kind of storage is not accessible via
+the `datasafed` tool.

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Describes the parameters required for storage. +The parameters defined here can be referenced in the above templates, +and `kbcli` uses this definition for dynamic command-line parameter parsing. +

+ +
+ +
+ +`status`
+ + +StorageProviderStatus + + + +
+ + +
+

+AccessMethod +(`string` alias) +

+ +

+ +(Appears on:BackupRepoSpec) + +

+
+ +

+AccessMethod represents an enumeration type that outlines +how the `BackupRepo` can be accessed. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Mount" +

+
+ +

+AccessMethodMount suggests that the storage is mounted locally +which allows for remote files to be accessed akin to local ones. +

+ +
+ +

+"Tool" +

+
+ +

+AccessMethodTool indicates the utilization of a command-line +tool for accessing the storage. +

+ +
+

+ActionErrorMode +(`string` alias) +

+ +

+ +(Appears on:ExecActionSpec, JobActionSpec) + +

+
+ +

+ActionErrorMode defines how to handle an error from an action. +Currently, only the Fail mode is supported, but the Continue mode will be supported in the future. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Continue" +

+
+ +

+ActionErrorModeContinue signifies that an error from an action is acceptable and can be ignored. +

+ +
+ +

+"Fail" +

+
+ +

+ActionErrorModeFail signifies that an error from an action is problematic and should be treated as a failure. +

+ +
+

+ActionPhase +(`string` alias) +

+ +

+ +(Appears on:ActionStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Completed" +

+
+ +

+ActionPhaseCompleted means the action has run successfully without errors. +

+ +
+ +

+"Failed" +

+
+ +

+ActionPhaseFailed means the action ran but encountered an error that
+prevented it from completing successfully.

+ +
+ +

+"New" +

+
+ +

+ActionPhaseNew means the action has been created but not yet processed by +the BackupController. +

+ +
+ +

+"Running" +

+
+ +

+ActionPhaseRunning means the action is currently executing. +

+ +
+

+ActionSetParametersSchema + +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`openAPIV3Schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ +(Optional) + +

+Defines the schema for parameters using the OpenAPI v3. +The supported property types include: +- string +- number +- integer +- array: Note that only items of string type are supported. +

+ +
+

+ActionSetSpec + +

+ +

+ +(Appears on:ActionSet) + +

+
+ +

+ActionSetSpec defines the desired state of ActionSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupType`
+ + +BackupType + + + +
+ + +

+Specifies the backup type. Supported values include: +

+
+- `Full` for a full backup.
+- `Incremental` backs up data that has changed since the last backup (either full or incremental).
+- `Differential` backs up data that has changed since the last full backup.
+- `Continuous` backs up transaction logs continuously, such as MySQL binlog, PostgreSQL WAL, etc.
+- `Selective` backs up data more precisely, using custom parameters, such as specific databases or tables.
+

+Continuous backup is essential for implementing Point-in-Time Recovery (PITR). +

+ +
+ +`parametersSchema`
+ + +ActionSetParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema of parameters in backups and restores before their usage. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be set in the container. +

+ +
+ +`envFrom`
+ + +[]Kubernetes core/v1.EnvFromSource + + + +
+ +(Optional) + +

+Specifies a list of sources to populate environment variables in the container. +The keys within a source must be a C_IDENTIFIER. Any invalid keys will be +reported as an event when the container starts. If a key exists in multiple +sources, the value from the last source will take precedence. Any values +defined by an Env with a duplicate key will take precedence. +

+ +

+This field cannot be updated. +

+ +
+ +`backup`
+ + +BackupActionSpec + + + +
+ +(Optional) + +

+Specifies the backup action. +

+ +
+ +`restore`
+ + +RestoreActionSpec + + + +
+ +(Optional) + +

+Specifies the restore action. +

+ +
+

+ActionSetStatus + +

+ +

+ +(Appears on:ActionSet) + +

+
+ +

+ActionSetStatus defines the observed state of ActionSet +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Indicates the phase of the ActionSet. This can be either ‘Available’ or ‘Unavailable’. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation detailing the reason for the current phase of the ActionSet. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the generation number that has been observed by the controller. +

+ +
+

+ActionSpec + +

+ +

+ +(Appears on:BackupActionSpec, RestoreActionSpec) + +

+
+ +

+ActionSpec defines an action that should be executed. Only one of the fields may be set. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`exec`
+ + +ExecActionSpec + + + +
+ +(Optional) + +

+Specifies that the action should be executed using the pod’s exec API within a container. +

+ +
+ +`job`
+ + +JobActionSpec + + + +
+ +(Optional) + +

+Specifies that the action should be executed by a Kubernetes Job. +

+ +
+

+ActionStatus + +

+ +

+ +(Appears on:BackupStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the action. +

+ +
+ +`targetPodName`
+ +string + + +
+ + +

+Records the target pod name which has been backed up. +

+ +
+ +`phase`
+ + +ActionPhase + + + +
+ +(Optional) + +

+The current phase of the action. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time an action was started. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time an action was completed. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+An error that caused the action to fail. +

+ +
+ +`actionType`
+ + +ActionType + + + +
+ +(Optional) + +

+The type of the action. +

+ +
+ +`availableReplicas`
+ +int32 + + +
+ +(Optional) + +

+Available replicas for statefulSet action. +

+ +
+ +`objectRef`
+ + +Kubernetes core/v1.ObjectReference + + + +
+ +(Optional) + +

+The object reference for the action. +

+ +
+ +`totalSize`
+ +string + + +
+ +(Optional) + +

+The total size of the backed-up data.
+A string with capacity units in the format of “1Gi”, “1Mi”, “1Ki”.
+If no capacity unit is specified, it is assumed to be in bytes.

+ +
+ +`timeRange`
+ + +BackupTimeRange + + + +
+ +(Optional) + +

+Records the time range of backed up data, for PITR, this is the time +range of recoverable data. +

+ +
+ +`volumeSnapshots`
+ + +[]VolumeSnapshotStatus + + + +
+ +(Optional) + +

+Records the volume snapshot status for the action. +

+ +
+

+ActionType +(`string` alias) +

+ +

+ +(Appears on:ActionStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Job" +

+
+ +
+ +

+"" +

+
+ +
+ +

+"StatefulSet" +

+
+ +
+

+BackupActionSpec + +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupData`
+ + +BackupDataActionSpec + + + +
+ + +

+Represents the action to be performed for backing up data. +

+ +
+ +`preBackup`
+ + +[]ActionSpec + + + +
+ +(Optional) + +

+Represents a set of actions that should be executed before the backup process begins. +

+ +
+ +`postBackup`
+ + +[]ActionSpec + + + +
+ +(Optional) + +

+Represents a set of actions that should be executed after the backup process has completed. +

+ +
+ +`preDelete`
+ + +BaseJobActionSpec + + + +
+ +(Optional) + +

+Represents a custom deletion action that can be executed before the built-in deletion action. +Note: The preDelete action job will ignore the env/envFrom. +

+ +
+ +`withParameters`
+ +[]string + + +
+ +(Optional) + +

+Specifies the parameters used by the backup action +

+ +
+

+BackupDataActionSpec + +

+ +

+ +(Appears on:BackupActionSpec) + +

+
+ +

+BackupDataActionSpec defines how to back up data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`JobActionSpec`
+ + +JobActionSpec + + + +
+ + +

+ +(Members of `JobActionSpec` are embedded into this type.) + +

+ +
+ +`syncProgress`
+ + +SyncProgress + + + +
+ +(Optional) + +

+Determines if the backup progress should be synchronized and the interval +for synchronization in seconds. +

+ +
+

+BackupDeletionPolicy +(`string` alias) +

+ +

+ +(Appears on:BackupSpec) + +

+
+ +

+BackupDeletionPolicy describes the policy for end-of-life maintenance of backup content. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Delete" +

+
+ +
+ +

+"Retain" +

+
+ +
+

+BackupMethod + +

+ +

+ +(Appears on:BackupPolicySpec, BackupStatus) + +

+
+ +

+BackupMethod defines the backup method. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of backup method. +

+ +
+ +`compatibleMethod`
+ +string + + +
+ +(Optional) + +

+The name of the compatible full backup method, used by incremental backups. +

+ +
+ +`snapshotVolumes`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to take snapshots of persistent volumes. If true,
+the ActionSetName is not required; the controller will use the CSI volume
+snapshotter to create the snapshot.

+ +
+ +`actionSetName`
+ +string + + +
+ +(Optional) + +

+Refers to the ActionSet object that defines the backup actions.
+For volume snapshot backup, the actionSet is not required; the controller
+will use the CSI volume snapshotter to create the snapshot.

+ +
+ +`targetVolumes`
+ + +TargetVolumeInfo + + + +
+ +(Optional) + +

+Specifies which volumes from the target should be mounted in the backup workload. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies the environment variables for the backup workload. +

+ +
+ +`runtimeSettings`
+ + +RuntimeSettings + + + +
+ +(Optional) + +

+Specifies runtime settings for the backup workload container. +

+ +
+ +`target`
+ + +BackupTarget + + + +
+ +(Optional) + +

+Specifies the target information to back up, it will override the target in backup policy. +

+ +
+ +`targets`
+ + +[]BackupTarget + + + +
+ + +

+Specifies multiple target information for backup operations. This includes details +such as the target pod and cluster connection credentials. All specified targets +will be backed up collectively. +

+ +
+
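+
+A `backupMethods` sketch showing both a snapshot-based and an ActionSet-based method (names are
+hypothetical; the `volumes` key is an assumption about the TargetVolumeInfo type):
+
+```yaml
+backupMethods:
+  - name: volume-snapshot
+    snapshotVolumes: true         # use the CSI volume snapshotter; no actionSet needed
+    targetVolumes:
+      volumes: ["data"]           # assumed TargetVolumeInfo field
+  - name: datafile
+    snapshotVolumes: false
+    actionSetName: demo-full-backup   # references an ActionSet defining the backup actions
+    env:
+      - name: COMPRESS            # hypothetical variable consumed by the actions
+        value: "true"
+```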

+BackupMethodTPL + +

+ +

+ +(Appears on:BackupPolicyTemplateSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of backup method. +

+ +
+ +`compatibleMethod`
+ +string + + +
+ +(Optional) + +

+The name of the compatible full backup method, used by incremental backups. +

+ +
+ +`snapshotVolumes`
+ +bool + + +
+ +(Optional) + +

+Specifies whether to take snapshots of persistent volumes. If true,
+the ActionSetName is not required; the controller will use the CSI volume
+snapshotter to create the snapshot.

+ +
+ +`actionSetName`
+ +string + + +
+ +(Optional) + +

+Refers to the ActionSet object that defines the backup actions.
+For volume snapshot backup, the actionSet is not required; the controller
+will use the CSI volume snapshotter to create the snapshot.

+ +
+ +`targetVolumes`
+ + +TargetVolumeInfo + + + +
+ +(Optional) + +

+Specifies which volumes from the target should be mounted in the backup workload. +

+ +
+ +`env`
+ + +[]EnvVar + + + +
+ +(Optional) + +

+Specifies the environment variables for the backup workload. +

+ +
+ +`runtimeSettings`
+ + +RuntimeSettings + + + +
+ +(Optional) + +

+Specifies runtime settings for the backup workload container. +

+ +
+ +`target`
+ + +TargetInstance + + + +
+ +(Optional) + +

+If set, specifies the method for selecting the replica to be backed up using the criteria defined here. +If this field is not set, the selection method specified in `backupPolicy.target` is used. +

+ +

+This field provides a way to override the global `backupPolicy.target` setting for specific BackupMethod. +

+ +
+

+BackupPhase +(`string` alias) +

+ +

+ +(Appears on:BackupStatus) + +

+
+ +

+BackupPhase describes the lifecycle phase of a Backup. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Completed" +

+
+ +

+BackupPhaseCompleted means the backup has run successfully without errors. +

+ +
+ +

+"Deleting" +

+
+ +

+BackupPhaseDeleting means the backup and all its associated data are being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+BackupPhaseFailed means the backup ran but encountered an error that +prevented it from completing successfully. +

+ +
+ +

+"New" +

+
+ +

+BackupPhaseNew means the backup has been created but not yet processed by +the BackupController. +

+ +
+ +

+"Running" +

+
+ +

+BackupPhaseRunning means the backup is currently executing. +

+ +
+

+BackupPolicyPhase +(`string` alias) +

+
+ +

+BackupPolicyPhase defines phases for BackupPolicy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Failed" +

+
+ +
+

+BackupPolicyRetentionPolicy +(`string` alias) +

+ +

+ +(Appears on:BackupPolicySpec, BackupPolicyTemplateSpec) + +

+
+ +

+BackupPolicyRetentionPolicy defines the backup retention policy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"" +

+
+ +

+BackupPolicyRetentionPolicyNone indicates that no backup retention policy is set. +

+ +
+ +

+"retainLatestBackup" +

+
+ +

+BackupPolicyRetentionPolicyRetainLatestBackup indicates that the latest backup is retained. +

+ +
+

+BackupPolicySpec + +

+ +

+ +(Appears on:BackupPolicy) + +

+
+ +

+BackupPolicySpec defines the desired state of BackupPolicy +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupRepoName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of BackupRepo where the backup data will be stored. +If not set, data will be stored in the default backup repository. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the directory inside the backup repository to store the backup. +This path is relative to the path of the backup repository. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the backup as failed. +

+ +
+ +`target`
+ + +BackupTarget + + + +
+ + +

+Specifies the target information to back up, such as the target pod, the +cluster connection credential. +

+ +
+ +`targets`
+ + +[]BackupTarget + + + +
+ + +

+Specifies multiple target information for backup operations. This includes details
+such as the target pod and cluster connection credentials. All specified targets
+will be backed up collectively.

+ +
+ +`backupMethods`
+ + +[]BackupMethod + + + +
+ + +

+Defines the backup methods. +

+ +
+ +`useKopia`
+ +bool + + +
+ +(Optional) + +

+Specifies whether backup data should be stored in a Kopia repository. +

+ +

+Data within the Kopia repository is both compressed and encrypted. Furthermore, +data deduplication is implemented across various backups of the same cluster. +This approach significantly reduces the actual storage usage, particularly +for clusters with a low update frequency. +

+ +

+NOTE: This feature should NOT be enabled when using KubeBlocks Community Edition; otherwise, the backup will not be processed.

+ +
+ +`encryptionConfig`
+ + +EncryptionConfig + + + +
+ +(Optional) + +

+Specifies the parameters for encrypting backup data. +Encryption will be disabled if the field is not set. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Specifies the backup retention policy. This takes precedence over `backup.spec.retentionPeriod`.

+ +
+

+BackupPolicyStatus + +

+ +

+ +(Appears on:BackupPolicy) + +

+
+ +

+BackupPolicyStatus defines the observed state of BackupPolicy +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Phase - one of [Available, Unavailable].

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+A human-readable message indicating details about why the BackupPolicy +is in this phase. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+ObservedGeneration is the most recent generation observed for this BackupPolicy. +It refers to the BackupPolicy’s generation, which is updated on mutation by the API Server. +

+ +
+

+BackupPolicyTemplate + +

+
+ +

+BackupPolicyTemplate should be provided by addon developers. +It is responsible for generating BackupPolicies for addons that require backup operations, +and for determining the suitable backup methods and strategies.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+The metadata for the BackupPolicyTemplate object, including name, namespace, labels, and annotations. +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +BackupPolicyTemplateSpec + + + +
+ + +

+Defines the desired state of the BackupPolicyTemplate. +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the BackupPolicyTemplate provides, and it is optional. +Some examples of well-known service protocols include: +

+
+  • “MySQL”: Indicates that the Component provides a MySQL database service.
+  • “PostgreSQL”: Indicates that the Component offers a PostgreSQL database service.
+  • “Redis”: Signifies that the Component functions as a Redis key-value store.
+  • “ETCD”: Denotes that the Component serves as an ETCD distributed key-value store.
+ +
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this BackupPolicyTemplate. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
+  • “mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1”
+  • “mysql-8.0.30”: Matches all names starting with “mysql-8.0.30”
+  • “^mysql-8.0.\d{1,2}$”: Matches all names starting with “mysql-8.0.” followed by one or two digits.
+ +
+ +`target`
+ + +TargetInstance + + + +
+ +(Optional) + +

+Defines the selection criteria of instance to be backed up, and the connection credential to be used +during the backup process. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ +(Optional) + +

+Defines the execution plans for backup tasks, specifying when and how backups should occur, +and the retention period of backup files. +

+ +
+ +`backupMethods`
+ + +[]BackupMethodTPL + + + +
+ + +

+Defines an array of BackupMethods to be used. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum number of retry attempts for a backup before it is considered a failure. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Defines the backup retention policy to be used. +

+ +
+ +
+ +`status`
+ + +BackupPolicyTemplateStatus + + + +
+ + +

+Populated by the system, it represents the current information about the BackupPolicyTemplate. +

+ +
+

+BackupPolicyTemplateSpec + +

+ +

+ +(Appears on:BackupPolicyTemplate) + +

+
+ +

+BackupPolicyTemplateSpec contains the settings in a BackupPolicyTemplate. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceKind`
+ +string + + +
+ +(Optional) + +

+Defines the type of well-known service protocol that the BackupPolicyTemplate provides, and it is optional. +Some examples of well-known service protocols include: +

+
+  • “MySQL”: Indicates that the Component provides a MySQL database service.
+  • “PostgreSQL”: Indicates that the Component offers a PostgreSQL database service.
+  • “Redis”: Signifies that the Component functions as a Redis key-value store.
+  • “ETCD”: Denotes that the Component serves as an ETCD distributed key-value store.
+ +
+ +`compDefs`
+ +[]string + + +
+ + +

+CompDefs specifies names for the component definitions associated with this BackupPolicyTemplate. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
+  • “mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1”
+  • “mysql-8.0.30”: Matches all names starting with “mysql-8.0.30”
+  • “^mysql-8.0.\d{1,2}$”: Matches all names starting with “mysql-8.0.” followed by one or two digits.
+ +
+ +`target`
+ + +TargetInstance + + + +
+ +(Optional) + +

+Defines the selection criteria of instance to be backed up, and the connection credential to be used +during the backup process. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ +(Optional) + +

+Defines the execution plans for backup tasks, specifying when and how backups should occur, +and the retention period of backup files. +

+ +
+ +`backupMethods`
+ + +[]BackupMethodTPL + + + +
+ + +

+Defines an array of BackupMethods to be used. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum number of retry attempts for a backup before it is considered a failure. +

+ +
+ +`retentionPolicy`
+ + +BackupPolicyRetentionPolicy + + + +
+ +(Optional) + +

+Defines the backup retention policy to be used. +

+ +
+
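+
+As a structural sketch only, the spec above might be filled in as follows; the `apiVersion` is assumed to
+match this page's API group, and all names, the `compDefs` prefix, and the `xtrabackup` method are illustrative.
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: BackupPolicyTemplate
+metadata:
+  name: mysql-bpt                  # illustrative
+spec:
+  serviceKind: MySQL
+  compDefs:
+    - mysql-8.0                    # prefix match: any component definition starting with "mysql-8.0"
+  target:
+    role: secondary                # prefer a secondary replica for backup
+    strategy: Any
+  schedules:
+    - backupMethod: xtrabackup     # assumed method name
+      cronExpression: "0 18 * * *" # 18:00 UTC daily
+      retentionPeriod: 7d
+  backupMethods:
+    - name: xtrabackup             # see BackupMethodTPL for the full field set
+```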

+BackupPolicyTemplateStatus + +

+ +

+ +(Appears on:BackupPolicyTemplate) + +

+
+ +

+BackupPolicyTemplateStatus defines the observed state of BackupPolicyTemplate. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this BackupPolicyTemplate. +

+ +
+ +`phase`
+ + +Phase + + + +
+ + +

+Specifies the current phase of the BackupPolicyTemplate. Valid values are `empty`, `Available`, `Unavailable`. +When `Available`, the BackupPolicyTemplate is ready and can be referenced by related objects. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+BackupRef + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+ +

+BackupRef describes the backup info. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the backup name. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+Specifies the backup namespace. +

+ +
+ +`sourceTargetName`
+ +string + + +
+ + +

+Specifies the source target for restoration, identified by its name. +

+ +
+

+BackupRepoPhase +(`string` alias) +

+ +

+ +(Appears on:BackupRepoStatus) + +

+
+ +

+BackupRepoPhase denotes different stages for the `BackupRepo`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Deleting" +

+
+ +

+BackupRepoDeleting indicates the backup repository is being deleted. +

+ +
+ +

+"Failed" +

+
+ +

+BackupRepoFailed indicates that the pre-check has failed.

+ +
+ +

+"PreChecking" +

+
+ +

+BackupRepoPreChecking indicates the backup repository is being pre-checked. +

+ +
+ +

+"Ready" +

+
+ +

+BackupRepoReady indicates the backup repository is ready for use. +

+ +
+

+BackupRepoSpec + +

+ +

+ +(Appears on:BackupRepo) + +

+
+ +

+BackupRepoSpec defines the desired state of `BackupRepo`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`storageProviderRef`
+ +string + + +
+ + +

+Specifies the name of the `StorageProvider` used by this backup repository. +

+ +
+ +`accessMethod`
+ + +AccessMethod + + + +
+ +(Optional) + +

+Specifies the access method of the backup repository. +

+ +
+ +`volumeCapacity`
+ + +Kubernetes resource.Quantity + + + +
+ +(Optional) + +

+Specifies the capacity of the PVC created by this backup repository. +

+ +
+ +`pvReclaimPolicy`
+ + +Kubernetes core/v1.PersistentVolumeReclaimPolicy + + + +
+ + +

+Specifies reclaim policy of the PV created by this backup repository. +

+ +
+ +`config`
+ +map[string]string + + +
+ +(Optional) + +

+Stores the non-secret configuration parameters for the `StorageProvider`. +

+ +
+ +`credential`
+ + +Kubernetes core/v1.SecretReference + + + +
+ +(Optional) + +

+References to the secret that holds the credentials for the `StorageProvider`. +

+ +
+ +`pathPrefix`
+ +string + + +
+ +(Optional) + +

+Specifies the prefix of the path for storing backup data. +

+ +
+

+BackupRepoStatus + +

+ +

+ +(Appears on:BackupRepo) + +

+
+ +

+BackupRepoStatus defines the observed state of `BackupRepo`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +BackupRepoPhase + + + +
+ +(Optional) + +

+Represents the current phase of reconciliation for the backup repository. +Permissible values are PreChecking, Failed, Ready, Deleting. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides a detailed description of the current state of the backup repository. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation of the resource that the controller has observed. +

+ +
+ +`generatedCSIDriverSecret`
+ + +Kubernetes core/v1.SecretReference + + + +
+ +(Optional) + +

+Refers to the generated secret for the `StorageProvider`. +

+ +
+ +`generatedStorageClassName`
+ +string + + +
+ +(Optional) + +

+Represents the name of the generated storage class. +

+ +
+ +`backupPVCName`
+ +string + + +
+ +(Optional) + +

+Represents the name of the PVC that stores backup data. +

+ +
+ +`toolConfigSecretName`
+ +string + + +
+ +(Optional) + +

+Represents the name of the secret that contains the configuration for the tool. +

+ +
+ +`isDefault`
+ +bool + + +
+ +(Optional) + +

+Indicates if this backup repository is the default one. +

+ +
+

+BackupSchedulePhase +(`string` alias) +

+ +

+ +(Appears on:BackupScheduleStatus) + +

+
+ +

+BackupSchedulePhase defines the phase of BackupSchedule +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+BackupSchedulePhaseAvailable indicates the backup schedule is available. +

+ +
+ +

+"Failed" +

+
+ +

+BackupSchedulePhaseFailed indicates the backup schedule has failed. +

+ +
+

+BackupScheduleSpec + +

+ +

+ +(Appears on:BackupSchedule) + +

+
+ +

+BackupScheduleSpec defines the desired state of BackupSchedule. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backupPolicy to be applied for the `schedules`. +

+ +
+ +`startingDeadlineMinutes`
+ +int64 + + +
+ +(Optional) + +

+Defines the deadline in minutes for starting the backup workload if it +misses its scheduled time for any reason. +

+ +
+ +`schedules`
+ + +[]SchedulePolicy + + + +
+ + +

+Defines the list of backup schedules. +

+ +
+
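+
+A minimal BackupSchedule sketch, assuming the BackupPolicy and method names from the earlier examples;
+all names are illustrative.
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: BackupSchedule
+metadata:
+  name: mysql-cluster-backup-schedule
+spec:
+  backupPolicyName: mysql-cluster-backup-policy
+  startingDeadlineMinutes: 10        # skip a run if it cannot start within 10 minutes
+  schedules:
+    - backupMethod: xtrabackup       # must be defined in the referenced BackupPolicy
+      enabled: true
+      cronExpression: "0 18 * * *"   # interpreted in UTC
+      retentionPeriod: 7d
+```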

+BackupScheduleStatus + +

+ +

+ +(Appears on:BackupSchedule) + +

+
+ +

+BackupScheduleStatus defines the observed state of BackupSchedule. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +BackupSchedulePhase + + + +
+ +(Optional) + +

+Describes the phase of the BackupSchedule. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed for this BackupSchedule. +It refers to the BackupSchedule’s generation, which is updated on mutation +by the API Server. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+Represents an error that caused the backup to fail. +

+ +
+ +`schedules`
+ + +map[string]github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.ScheduleStatus + + + +
+ +(Optional) + +

+Describes the status of each schedule. +

+ +
+

+BackupSpec + +

+ +

+ +(Appears on:Backup) + +

+
+ +

+BackupSpec defines the desired state of Backup. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupPolicyName`
+ +string + + +
+ + +

+Specifies the backup policy to be applied for this backup. +

+ +
+ +`backupMethod`
+ +string + + +
+ + +

+Specifies the backup method name that is defined in the backup policy. +

+ +
+ +`deletionPolicy`
+ + +BackupDeletionPolicy + + + +
+ + +

+Determines whether the backup contents stored in the backup repository +should be deleted when the backup custom resource(CR) is deleted. +Supported values are `Retain` and `Delete`. +

+
+  • `Retain` means that the backup content and its physical snapshot on backup repository are kept.
+  • `Delete` means that the backup content and its physical snapshot on backup repository are deleted.
+ +

+Note: deleting only the backup CR while retaining the backup contents in the backup repository is not yet supported; +the current implementation only prevents accidental deletion of backup data.

+ +
+ +`retentionPeriod`
+ + +RetentionPeriod + + + +
+ +(Optional) + +

+Determines a duration up to which the backup should be kept. +Controller will remove all backups that are older than the RetentionPeriod. +If not set, the backup will be kept forever. +For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
+  • years: 2y
+  • months: 6mo
+  • days: 30d
+  • hours: 12h
+  • minutes: 30m
+ +

+You can also combine the above durations. For example: 30d12h30m. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+Determines the parent backup name for incremental or differential backup. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+
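+
+For example, an on-demand Backup referencing the policy above could look like the following sketch
+(names are illustrative):
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: Backup
+metadata:
+  name: mysql-backup-example
+spec:
+  backupPolicyName: mysql-cluster-backup-policy
+  backupMethod: xtrabackup        # a method name defined in the backup policy
+  deletionPolicy: Delete          # also remove repository data when this CR is deleted
+  retentionPeriod: 30d12h30m      # combined duration, as described above
+```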

+BackupStatus + +

+ +

+ +(Appears on:Backup) + +

+
+ +

+BackupStatus defines the observed state of Backup. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`formatVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the backup format version, which includes major, minor, and patch versions. +

+ +
+ +`phase`
+ + +BackupPhase + + + +
+ +(Optional) + +

+Indicates the current state of the backup operation. +

+ +
+ +`expiration`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Indicates when this backup becomes eligible for garbage collection. +A ‘null’ value implies that the backup will not be cleaned up unless manually deleted. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the backup operation was started. +The server’s time is used for this timestamp. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the backup operation was completed. +This timestamp is recorded even if the backup operation fails. +The server’s time is used for this timestamp. +

+ +
+ +`duration`
+ + +Kubernetes meta/v1.Duration + + + +
+ +(Optional) + +

+Records the duration of the backup operation. +When converted to a string, the format is “1h2m0.5s”. +

+ +
+ +`totalSize`
+ +string + + +
+ +(Optional) + +

+Records the total size of the data backed up. +The size is represented as a string with capacity units in the format of “1Gi”, “1Mi”, “1Ki”. +If no capacity unit is specified, it is assumed to be in bytes. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+Any error that caused the backup operation to fail. +

+ +
+ +`backupRepoName`
+ +string + + +
+ +(Optional) + +

+The name of the backup repository. +

+ +
+ +`path`
+ +string + + +
+ +(Optional) + +

+The directory within the backup repository where the backup data is stored. +This is an absolute path within the backup repository. +

+ +
+ +`kopiaRepoPath`
+ +string + + +
+ +(Optional) + +

+Records the path of the Kopia repository. +

+ +
+ +`persistentVolumeClaimName`
+ +string + + +
+ +(Optional) + +

+Records the name of the persistent volume claim used to store the backup data. +

+ +
+ +`timeRange`
+ + +BackupTimeRange + + + +
+ +(Optional) + +

+Records the time range of the data backed up. For Point-in-Time Recovery (PITR), +this is the time range of recoverable data. +

+ +
+ +`target`
+ + +BackupStatusTarget + + + +
+ +(Optional) + +

+Records the target information for this backup. +

+ +
+ +`targets`
+ + +[]BackupStatusTarget + + + +
+ +(Optional) + +

+Records the targets information for this backup. +

+ +
+ +`backupMethod`
+ + +BackupMethod + + + +
+ +(Optional) + +

+Records the backup method information for this backup. +Refer to BackupMethod for more details. +

+ +
+ +`encryptionConfig`
+ + +EncryptionConfig + + + +
+ +(Optional) + +

+Records the encryption config for this backup. +

+ +
+ +`actions`
+ + +[]ActionStatus + + + +
+ +(Optional) + +

+Records the actions status for this backup. +

+ +
+ +`volumeSnapshots`
+ + +[]VolumeSnapshotStatus + + + +
+ +(Optional) + +

+Records the volume snapshot status for the action. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+Records the parent backup name for incremental or differential backup. +When the parent backup is deleted, the backup will also be deleted. +

+ +
+ +`baseBackupName`
+ +string + + +
+ +(Optional) + +

+Records the base full backup name for incremental backup or differential backup. +When the base backup is deleted, the backup will also be deleted. +

+ +
+ +`extras`
+ +[]string + + +
+ +(Optional) + +

+Records any additional information for the backup. +

+ +
+

+BackupStatusTarget + +

+ +

+ +(Appears on:BackupStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`BackupTarget`
+ + +BackupTarget + + + +
+ + +

+ +(Members of `BackupTarget` are embedded into this type.) + +

+ +
+ +`selectedTargetPods`
+ +[]string + + +
+ + +

+Records the selected pods by the target info during backup. +

+ +
+

+BackupTarget + +

+ +

+ +(Appears on:BackupMethod, BackupPolicySpec, BackupStatusTarget) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies a mandatory and unique identifier for each target when using the “targets” field. +The backup data for the current target is stored in a uniquely named subdirectory. +

+ +
+ +`podSelector`
+ + +PodSelector + + + +
+ + +

+Used to find the target pod. The volumes of the target pod will be backed up. +

+ +
+ +`connectionCredential`
+ + +ConnectionCredential + + + +
+ +(Optional) + +

+Specifies the connection credential to connect to the target database cluster. +

+ +
+ +`resources`
+ + +KubeResources + + + +
+ +(Optional) + +

+Specifies the kubernetes resources to back up. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ + +

+Specifies the service account to run the backup workload. +

+ +
+ +`containerPort`
+ + +ContainerPort + + + +
+ +(Optional) + +

+Specifies the container port in the target pod. +If not specified, the first container and its first port will be used. +

+ +
+

+BackupTimeRange + +

+ +

+ +(Appears on:ActionStatus, BackupStatus) + +

+
+ +

+BackupTimeRange records the time range of backed up data; for PITR, this is the +time range of recoverable data.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`timeZone`
+ +string + + +
+ +(Optional) + +

+Specifies the time zone; only zone offsets are supported, with a value range of “-12:59 ~ +13:00”.

+ +
+ +`start`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the start time of the backup, in Coordinated Universal Time (UTC). +

+ +
+ +`end`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the end time of the backup, in Coordinated Universal Time (UTC). +

+ +
+

+BackupType +(`string` alias) +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+ +

+BackupType defines the backup type.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Continuous" +

+
+ +
+ +

+"Differential" +

+
+ +
+ +

+"Full" +

+
+ +
+ +

+"Incremental" +

+
+ +
+ +

+"Selective" +

+
+ +
+

+BaseJobActionSpec + +

+ +

+ +(Appears on:BackupActionSpec, JobActionSpec) + +

+
+ +

+BaseJobActionSpec is an action that creates a Kubernetes Job to execute a command. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ + +

+Specifies the image of the backup container. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+Defines the commands to back up the volume data. +

+ +
+

+ConnectionCredential + +

+ +

+ +(Appears on:BackupTarget, ReadyConfig) + +

+
+ +

+ConnectionCredential specifies the connection credential to connect to the +target database cluster. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`secretName`
+ +string + + +
+ + +

+Refers to the Secret object that contains the connection credential. +

+ +
+ +`usernameKey`
+ +string + + +
+ + +

+Specifies the map key of the user in the connection credential secret. +

+ +
+ +`passwordKey`
+ +string + + +
+ + +

+Specifies the map key of the password in the connection credential secret. +This password will be saved in the backup annotation for full backup. +You can use the environment variable DP_ENCRYPTION_KEY to specify the encryption key.

+ +
+ +`hostKey`
+ +string + + +
+ +(Optional) + +

+Specifies the map key of the host in the connection credential secret. +

+ +
+ +`portKey`
+ +string + + +
+ +(Optional) + +

+Specifies the map key of the port in the connection credential secret. +

+ +
+
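+
+As an illustration of how these keys map onto a Secret, a target might reference a connection Secret
+like this (the Secret name and key names are assumptions):
+
+```yaml
+connectionCredential:
+  secretName: mysql-cluster-conn-credential   # Secret holding the connection info
+  usernameKey: username                       # secret data key for the user name
+  passwordKey: password                       # secret data key for the password
+  hostKey: host
+  portKey: port
+```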

+ContainerPort + +

+ +

+ +(Appears on:BackupTarget, TargetInstance) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`containerName`
+ +string + + +
+ + +

+Specifies the name of container with the port. +

+ +
+ +`portName`
+ +string + + +
+ + +

+Specifies the port name. +

+ +
+

+DataRestorePolicy +(`string` alias) +

+ +

+ +(Appears on:RequiredPolicyForAllPodSelection) + +

+
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"OneToMany" +

+
+ +
+ +

+"OneToOne" +

+
+ +
+

+EncryptionConfig + +

+ +

+ +(Appears on:BackupPolicySpec, BackupStatus) + +

+
+ +

+EncryptionConfig defines the parameters for encrypting backup data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`algorithm`
+ +string + + +
+ + +

+Specifies the encryption algorithm. Currently supported algorithms are: +

+
+  • AES-128-CFB
+  • AES-192-CFB
+  • AES-256-CFB
+ +
+ +`passPhraseSecretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ + +

+Selects the key of a secret in the current namespace, the value of the secret +is used as the encryption key. +

+ +
+
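+
+A sketch of an encryption configuration, assuming a Secret named `backup-encryption` with a
+`passPhrase` key already exists in the same namespace:
+
+```yaml
+encryptionConfig:
+  algorithm: AES-256-CFB         # one of the algorithms listed above
+  passPhraseSecretKeyRef:
+    name: backup-encryption      # illustrative Secret name
+    key: passPhrase              # illustrative key holding the encryption key
+```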

+EnvVar + +

+ +

+ +(Appears on:BackupMethodTPL) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the environment variable key. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Specifies the environment variable value. +

+ +
+ +`valueFrom`
+ + +ValueFrom + + + +
+ +(Optional) + +

+Specifies the source used to determine the value of the environment variable. +Cannot be used if value is not empty. +

+ +
+

+ExecAction + +

+ +

+ +(Appears on:ReadyConfig) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`target`
+ + +ExecActionTarget + + + +
+ +(Optional) + +

+Defines the pods on which the exec action is to be executed. +Execution will occur on all pods that meet the conditions.

+ +
+

+ExecActionSpec + +

+ +

+ +(Appears on:ActionSpec) + +

+
+ +

+ExecActionSpec is an action that uses the pod exec API to execute a command in a container +in a pod. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`container`
+ +string + + +
+ +(Optional) + +

+Specifies the container within the pod where the command should be executed. +If not specified, the first container in the pod is used by default. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+Defines the command and arguments to be executed. +

+ +
+ +`onError`
+ + +ActionErrorMode + + + +
+ +(Optional) + +

+Indicates how to behave if an error is encountered during the execution of this action. +

+ +
+ +`timeout`
+ + +Kubernetes meta/v1.Duration + + + +
+ +(Optional) + +

+Specifies the maximum duration to wait for the hook to complete before +considering the execution a failure. +

+ +
+

+ExecActionTarget + +

+ +

+ +(Appears on:ExecAction) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+Executes kubectl in all selected pods. +

+ +
+

+IncludeResource + +

+ +

+ +(Appears on:RestoreKubeResources) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`groupResource`
+ +string + + +
+ + +
+ +`labelSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Selects the specified resource for recovery by label. +

+ +
+

+JobAction + +

+ +

+ +(Appears on:ReadyConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requiredPolicyForAllPodSelection`
+ + +RequiredPolicyForAllPodSelection + + + +
+ + +

+Specifies the restore policy, which is required when the pod selection strategy for the source target is ‘All’. +This field is ignored if the pod selection strategy is ‘Any’.

+ +
+ +`target`
+ + +JobActionTarget + + + +
+ + +

+Defines the pods on which the job action is to be executed.

+ +
+

+JobActionSpec + +

+ +

+ +(Appears on:ActionSpec, BackupDataActionSpec, RestoreActionSpec) + +

+
+ +

+JobActionSpec is an action that creates a Kubernetes Job to execute a command. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`BaseJobActionSpec`
+ + +BaseJobActionSpec + + + +
+ + +

+ +(Members of `BaseJobActionSpec` are embedded into this type.) + +

+ +
+ +`runOnTargetPodNode`
+ +bool + + +
+ +(Optional) + +

+Determines whether to run the job workload on the target pod node. +If the backup container needs to mount the target pod’s volumes, this field +should be set to true. Otherwise, the target pod’s volumes will be ignored. +

+ +
+ +`onError`
+ + +ActionErrorMode + + + +
+ +(Optional) + +

+Indicates how to behave if an error is encountered during the execution of this action. +

+ +
+

+JobActionTarget + +

+ +

+ +(Appears on:JobAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podSelector`
+ + +PodSelector + + + +
+ + +

+Selects one of the pods, identified by labels, to build the job spec. +This includes mounting required volumes and injecting built-in environment variables of the selected pod. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Defines which volumes of the selected pod need to be mounted on the restoring pod. +

+ +
+

+KubeResources + +

+ +

+ +(Appears on:BackupTarget) + +

+
+ +

+KubeResources defines the kubernetes resources to back up. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`selector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+A metav1.LabelSelector to filter the target kubernetes resources that need +to be backed up. If not set, no Kubernetes resources will be backed up.

+ +
+ +`included`
+ +[]string + + +
+ +(Optional) + +

+included is a slice of namespace-scoped resource type names to include in +the Kubernetes resources. +The default value is empty.

+ +
+ +`excluded`
+ +[]string + + +
+ +(Optional) + +

+excluded is a slice of namespace-scoped resource type names to exclude from +the Kubernetes resources. +The default value is empty.

+ +
+
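+
+A sketch of a resources section that backs up only the ConfigMaps and Secrets of one cluster
+(labels are illustrative):
+
+```yaml
+resources:
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: mysql-cluster
+  included:
+    - configmaps
+    - secrets
+```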

+ParameterPair + +

+ +

+ +(Appears on:BackupSpec, RestoreSpec, SchedulePolicy) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Represents the name of the parameter. +

+ +
+ +`value`
+ +string + + +
+ + +

+Represents the parameter values. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:StorageProviderSpec) + +

+
+ +

+ParametersSchema describes the parameters needed for a certain storage. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`openAPIV3Schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ +(Optional) + +

+Defines the parameters in OpenAPI V3. +

+ +
+ +`credentialFields`
+ +[]string + + +
+ +(Optional) + +

+Defines which parameters are credential fields, which need to be handled specifically. +For instance, these should be stored in a `Secret` instead of a `ConfigMap`. +

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:ActionSetStatus, BackupPolicyStatus, BackupPolicyTemplateStatus) + +

+
+ +

+Phase defines the `.status.phase` of the BackupPolicy and ActionSet CRs.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Unavailable" +

+
+ +
+

+PodSelectionStrategy +(`string` alias) +

+ +

+ +(Appears on:PodSelector, TargetInstance) + +

+
+ +

+PodSelectionStrategy specifies the strategy used to select pods when multiple pods +match the backup target.

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +

+PodSelectionStrategyAll selects all pods that match the labelsSelector. +

+ +
+ +

+"Any" +

+
+ +

+PodSelectionStrategyAny selects any one pod that matches the labelsSelector.

+ +
+

+PodSelector + +

+ +

+ +(Appears on:BackupTarget, JobActionTarget) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`LabelSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ + +

+ +(Members of `LabelSelector` are embedded into this type.) + +

+ +

+labelsSelector is the label selector to filter the target pods. +

+ +
+ +`fallbackLabelSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+fallbackLabelSelector is used to filter available pods when the labelSelector fails. +This only takes effect when the `strategy` field below is set to `Any`. +

+ +
+ +`strategy`
+ + +PodSelectionStrategy + + + +
+ + +

+Specifies the strategy to select the target pod when multiple pods are selected. +Valid values are: +

+
+  • `Any`: select any one pod that matches the labelsSelector.
+  • `All`: select all pods that match the labelsSelector. The backup data for each selected pod will be stored in a subdirectory named after the pod.
+ +
+ +`useParentSelectedPods`
+ +bool + + +
+ +(Optional) + +

+UseParentSelectedPods indicates whether to use the pods selected by the parent for backup. +If set to true, the backup will use the same pods selected by the parent. +This only takes effect when the `strategy` is set to `Any`.

+ +
+
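+
+A sketch of a pod selector that prefers pods with an illustrative role label and falls back to the
+primary when the first selector matches nothing (label keys are assumptions):
+
+```yaml
+podSelector:
+  matchLabels:                     # LabelSelector fields are embedded directly
+    kubeblocks.io/role: secondary
+  fallbackLabelSelector:
+    matchLabels:
+      kubeblocks.io/role: primary
+  strategy: Any                    # the fallback only applies with the Any strategy
+```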

+PrepareDataConfig + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`requiredPolicyForAllPodSelection`
+ + +RequiredPolicyForAllPodSelection + + + +
+ + +

+Specifies the restore policy, which is required when the pod selection strategy for the source target is ‘All’. +This field is ignored if the pod selection strategy is ‘Any’.

+ +
+ +`dataSourceRef`
+ + +VolumeConfig + + + +
+ +(Optional) + +

+Specifies the configuration when using `persistentVolumeClaim.spec.dataSourceRef` method for restoring. +Describes the source volume of the backup targetVolumes and the mount path in the restoring container. +

+ +
+ +`volumeClaims`
+ + +[]RestoreVolumeClaim + + + +
+ +(Optional) + +

+Defines the persistent Volume claims that need to be restored and mounted together into the restore job. +These persistent Volume claims will be created if they do not exist. +

+ +
+ +`volumeClaimsTemplate`
+ + +RestoreVolumeClaimsTemplate + + + +
+ +(Optional) + +

+Defines a template to build persistent Volume claims that need to be restored. +These claims will be created in an orderly manner based on the number of replicas or reused if they already exist. +

+ +
+ +`volumeClaimRestorePolicy`
+ + +VolumeClaimRestorePolicy + + + +
+ + +

+Defines restore policy for persistent volume claim. +Supported policies are as follows: +

+
+  • `Parallel`: parallel recovery of persistent volume claims.
+  • `Serial`: restore the persistent volume claims in sequence, and wait until the previous persistent volume claim is restored before restoring a new one.
+ +
+ +`schedulingSpec`
+ + +SchedulingSpec + + + +
+ +(Optional) + +

+Specifies the scheduling spec for the restoring pod. +

+ +
+

+ReadinessProbe + +

+ +

+ +(Appears on:ReadyConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`initialDelaySeconds`
+ +int + + +
+ +(Optional) + +

+Specifies the number of seconds after the container has started before the probe is initiated. +

+ +
+ +`timeoutSeconds`
+ +int + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +The default value is 30 seconds, and the minimum value is 1. +

+ +
+ +`periodSeconds`
+ +int + + +
+ +(Optional) + +

+Specifies how often (in seconds) to perform the probe. +The default value is 5 seconds, and the minimum value is 1. +

+ +
+ +`exec`
+ + +ReadinessProbeExecAction + + + +
+ + +

+Specifies the action to take. +

+ +
+

+ReadinessProbeExecAction + +

+ +

+ +(Appears on:ReadinessProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`image`
+ +string + + +
+ + +

+Refers to the container image. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+Refers to the container command. +

+ +
+

+ReadyConfig + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`jobAction`
+ + +JobAction + + + +
+ +(Optional) + +

+Specifies the configuration for a job action. +

+ +
+ +`execAction`
+ + +ExecAction + + + +
+ +(Optional) + +

+Specifies the configuration for an exec action. +

+ +
+ +`connectionCredential`
+ + +ConnectionCredential + + + +
+ +(Optional) + +

+Defines the credential template used to create a connection credential. +

+ +
+ +`readinessProbe`
+ + +ReadinessProbe + + + +
+ +(Optional) + +

+Defines a periodic probe of the service readiness. +The controller will perform postReadyHooks of BackupScript.spec.restore +after the service readiness when readinessProbe is configured. +

+ +
+

+RequiredPolicyForAllPodSelection + +

+ +

+ +(Appears on:JobAction, PrepareDataConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`dataRestorePolicy`
+ + +DataRestorePolicy + + + +
+ + +

+Specifies the data restore policy. Options include: +- OneToMany: Enables restoration of all volumes from a single data copy of the original target instance. +The ‘sourceOfOneToMany’ field must be set when using this policy. +- OneToOne: Restricts data restoration such that each data piece can only be restored to a single target instance. +This is the default policy. Note that it cannot cover the case where the number of target instances specified for restoration surpasses the count of original backup target instances; use ‘OneToMany’ for that scenario.

+ +
+ +`sourceOfOneToMany`
+ + +SourceOfOneToMany + + + +
+ + +

+Specifies the name of the source target pod. This field is mandatory when the DataRestorePolicy is configured to ‘OneToMany’. +

+ +
+

+RestoreActionSpec + +

+ +

+ +(Appears on:ActionSetSpec) + +

+
+ +

+RestoreActionSpec defines how to restore data. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`prepareData`
+ + +JobActionSpec + + + +
+ +(Optional) + +

+Specifies the action required to prepare data for restoration. +

+ +
+ +`postReady`
+ + +[]ActionSpec + + + +
+ +(Optional) + +

+Specifies the actions that should be executed after the data has been prepared and is ready for restoration. +

+ +
+ +`baseBackupRequired`
+ +bool + + +
+ +(Optional) + +

+Determines if a base backup is required during restoration. +

+ +
+ +`withParameters`
+ +[]string + + +
+ +(Optional) + +

+Specifies the parameters used by the restore action +

+ +
+

+RestoreActionStatus +(`string` alias) +

+ +

+ +(Appears on:RestoreStatusAction) + +

+
+ +

+RestoreActionStatus defines the status of the restore action.

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Completed" +

+
+ +
+ +

+"Failed" +

+
+ +
+ +

+"Processing" +

+
+ +
+

+RestoreKubeResources + +

+ +

+ +(Appears on:RestoreSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`included`
+ + +[]IncludeResource + + + +
+ +(Optional) + +

+Restores the specified resources. +

+ +
+

+RestorePhase +(`string` alias) +

+ +

+ +(Appears on:RestoreStatus) + +

+
+ +

+RestorePhase defines the current phase of the restore. Valid values are Running, Completed, Failed, AsDataSource.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"AsDataSource" +

+
+ +
+ +

+"Completed" +

+
+ +
+ +

+"Failed" +

+
+ +
+ +

+"Running" +

+
+ +
+

+RestoreSpec + +

+ +

+ +(Appears on:Restore) + +

+
+ +

+RestoreSpec defines the desired state of Restore +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backup`
+ + +BackupRef + + + +
+ + +

+Specifies the backup to be restored. The restore behavior is based on the backup type: +

+
+  1. Full: the full backup is restored directly.
+  2. Incremental: restored sequentially, starting from the most recent full backup on which this incremental backup is based.
+  3. Differential: restored sequentially, starting from the parent backup of the differential backup.
+  4. Continuous: finds the most recent full backup at the given time point and restores it together with the continuous backups after it.
+ +
+ +`restoreTime`
+ +string + + +
+ +(Optional) + +

+Specifies the point in time for restoring. +

+ +
+ +`resources`
+ + +RestoreKubeResources + + + +
+ +(Optional) + +

+Restores the specified resources of Kubernetes. +

+ +
+ +`prepareDataConfig`
+ + +PrepareDataConfig + + + +
+ +(Optional) + +

+Configuration for the action of the “prepareData” phase, including the persistent volume claims +that need to be restored and the scheduling strategy of the temporary recovery pod.

+ +
+ +`serviceAccountName`
+ +string + + +
+ +(Optional) + +

+Specifies the service account name needed for the recovery pod.

+ +
+ +`readyConfig`
+ + +ReadyConfig + + + +
+ +(Optional) + +

+Configuration for the action of “postReady” phase. +

+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+List of environment variables to set in the container for restore. These will be +merged with the env of Backup and ActionSet. +

+ +

+The priority of merging is as follows: `Restore env > Backup env > ActionSet env`. +

+ +
+ +`containerResources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the required resources of restore job’s container. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries before marking the restore failed. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+
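+
+A minimal Restore sketch; the backup name is illustrative, and `restoreTime` is typically only
+meaningful when restoring from a Continuous (PITR) backup:
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: Restore
+metadata:
+  name: mysql-restore-example
+spec:
+  backup:
+    name: mysql-backup-example
+    namespace: default
+  restoreTime: "2025-07-16T02:00:00Z"   # point in time within the backup's time range
+  backoffLimit: 3
+```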

+RestoreStage +(`string` alias) +

+
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"postReady" +

+
+ +
+ +

+"prepareData" +

+
+ +
+

+RestoreStatus + +

+ +

+ +(Appears on:Restore) + +

+
+ +

+RestoreStatus defines the observed state of Restore +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +RestorePhase + + + +
+ +(Optional) + +

+Represents the current phase of the restore. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the date/time when the restore started being processed. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the date/time when the restore finished being processed. +

+ +
+ +`duration`
+ + +Kubernetes meta/v1.Duration + + + +
+ +(Optional) + +

+Records the duration of the restore execution. +When converted to a string, the form is “1h2m0.5s”. +

+ +
+ +`actions`
+ + +RestoreStatusActions + + + +
+ +(Optional) + +

+Records all restore actions performed. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Describes the current state of the restore API resource, such as warning conditions.

+ +
+

+RestoreStatusAction + +

+ +

+ +(Appears on:RestoreStatusActions) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Describes the name of the restore action based on the current backup. +

+ +
+ +`backupName`
+ +string + + +
+ + +

+Describes which backup this restore action belongs to.

+ +
+ +`objectKey`
+ +string + + +
+ + +

+Describes the execution object of the restore action. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable message indicating details about the object condition. +

+ +
+ +`status`
+ + +RestoreActionStatus + + + +
+ + +

+The status of this action. +

+ +
+ +`startTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+The start time of the restore job. +

+ +
+ +`endTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+The completion time of the restore job. +

+ +
+

+RestoreStatusActions + +

+ +

+ +(Appears on:RestoreStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`prepareData`
+ + +[]RestoreStatusAction + + + +
+ +(Optional) + +

+Records the actions for the prepareData phase. +

+ +
+ +`postReady`
+ + +[]RestoreStatusAction + + + +
+ +(Optional) + +

+Records the actions for the postReady phase. +

+ +
+

+RestoreVolumeClaim + +

+ +

+ +(Appears on:PrepareDataConfig, RestoreVolumeClaimsTemplate) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ + +

+Specifies the standard metadata for the object. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +

+Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`volumeClaimSpec`
+ + +Kubernetes core/v1.PersistentVolumeClaimSpec + + + +
+ + +

+Defines the desired characteristics of a persistent volume claim. +

+ +
+ +`VolumeConfig`
+ + +VolumeConfig + + + +
+ + +

+ +(Members of `VolumeConfig` are embedded into this type.) + +

+ +

+Describes the source volume of the backup target volumes and the mount path in the restoring container. +At least one of volumeSource and mountPath must be set.

+ +
+

+RestoreVolumeClaimsTemplate + +

+ +

+ +(Appears on:PrepareDataConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`templates`
+ + +[]RestoreVolumeClaim + + + +
+ + +

+Contains a list of volume claims. +

+ +
+ +`replicas`
+ +int32 + + +
+ + +

+Specifies the replicas of persistent volume claim that need to be created and restored. +The format of the created claim name is `$(template-name)-$(index)`. +

+ +
+ +`startingIndex`
+ +int32 + + +
+ + +

+Specifies the starting index for the created persistent volume claim according to the template. +The minimum value is 0. +

+ +
+
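+
+As a worked example of the `$(template-name)-$(index)` naming rule, the following sketch creates
+(or reuses) claims `data-0` and `data-1`; the `data` volume name is an assumption:
+
+```yaml
+volumeClaimsTemplate:
+  replicas: 2
+  startingIndex: 0
+  templates:
+    - metadata:
+        name: data                  # claims become data-0 and data-1
+      volumeClaimSpec:
+        accessModes: ["ReadWriteOnce"]
+        resources:
+          requests:
+            storage: 20Gi
+      volumeSource: data            # backup target volume to restore from
+```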

+RetentionPeriod +(`string` alias) +

+ +

+ +(Appears on:BackupSpec, SchedulePolicy) + +

+
+ +

+RetentionPeriod represents a duration in the format “1y2mo3w4d5h6m”, where +y=year, mo=month, w=week, d=day, h=hour, m=minute. +

+
+

+RuntimeSettings + +

+ +

+ +(Appears on:BackupMethod, BackupMethodTPL) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`resources`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ +(Optional) + +

+Specifies the resource required by container. +More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +

+ +
+

+SchedulePhase +(`string` alias) +

+ +

+ +(Appears on:ScheduleStatus) + +

+
+ +

+SchedulePhase represents the phase of a schedule. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Failed" +

+
+ +
+ +

+"Running" +

+
+ +
+

+SchedulePolicy + +

+ +

+ +(Appears on:BackupPolicyTemplateSpec, BackupScheduleSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Specifies whether the backup schedule is enabled or not. +

+ +
+ +`name`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the schedule. Names cannot be duplicated. +If the name is empty, it will be considered the same as the value of the backupMethod below. +

+ +
+ +`backupMethod`
+ +string + + +
+ + +

+Specifies the backup method name that is defined in backupPolicy. +

+ +
+ +`cronExpression`
+ +string + + +
+ + +

+Specifies the cron expression for the schedule. The timezone is in UTC. +see https://en.wikipedia.org/wiki/Cron. +

+ +
+ +`retentionPeriod`
+ + +RetentionPeriod + + + +
+ +(Optional) + +

+Determines the duration for which the backup should be kept. +KubeBlocks will remove all backups that are older than the RetentionPeriod. +For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. +Sample duration format: +

+
+  • years: 2y
+  • months: 6mo
+  • days: 30d
+  • hours: 12h
+  • minutes: 30m
+ +

+You can also combine the above durations. For example: 30d12h30m +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+

+ScheduleStatus + +

+ +

+ +(Appears on:BackupScheduleStatus) + +

+
+ +

+ScheduleStatus represents the status of each schedule. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +SchedulePhase + + + +
+ +(Optional) + +

+Describes the phase of the schedule. +

+ +
+ +`failureReason`
+ +string + + +
+ +(Optional) + +

+Represents an error that caused the backup to fail. +

+ +
+ +`lastScheduleTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the last time the backup was scheduled. +

+ +
+ +`lastSuccessfulTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the last time the backup was successfully completed. +

+ +
+

+SchedulingSpec + +

+ +

+ +(Appears on:PrepareDataConfig) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`tolerations`
+ + +[]Kubernetes core/v1.Toleration + + + +
+ +(Optional) + +

+Specifies the tolerations for the restoring pod. +

+ +
+ +`nodeSelector`
+ +map[string]string + + +
+ +(Optional) + +

+Defines a selector which must be true for the pod to fit on a node. +The selector must match a node’s labels for the pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`nodeName`
+ +string + + +
+ +(Optional) + +

+Specifies a request to schedule this pod onto a specific node. If it is non-empty, +the scheduler simply schedules this pod onto that node, assuming that it fits resource +requirements. +

+ +
+ +`affinity`
+ + +Kubernetes core/v1.Affinity + + + +
+ +(Optional) + +

+Contains a group of affinity scheduling rules. +Refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +`topologySpreadConstraints`
+ + +[]Kubernetes core/v1.TopologySpreadConstraint + + + +
+ +(Optional) + +

+Describes how a group of pods ought to spread across topology +domains. The scheduler will schedule pods in a way which abides by the constraints. +Refer to https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ +

+ +
+ +`schedulerName`
+ +string + + +
+ +(Optional) + +

+Specifies the scheduler to dispatch the pod. +If not specified, the pod will be dispatched by the default scheduler. +

+ +
+

+SourceOfOneToMany + +

+ +

+ +(Appears on:RequiredPolicyForAllPodSelection) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`targetPodName`
+ +string + + +
+ + +

+Specifies the name of the source target pod. +

+ +
+

+StorageProviderPhase +(`string` alias) +

+ +

+ +(Appears on:StorageProviderStatus) + +

+
+ +

+StorageProviderPhase defines phases of a `StorageProvider`. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"NotReady" +

+
+ +

+StorageProviderNotReady indicates that the `StorageProvider` is not ready, +usually because the specified CSI driver is not yet installed. +

+ +
+ +

+"Ready" +

+
+ +

+StorageProviderReady indicates that the `StorageProvider` is ready for use. +

+ +
+

+StorageProviderSpec + +

+ +

+ +(Appears on:StorageProvider) + +

+
+ +

+StorageProviderSpec defines the desired state of `StorageProvider`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`csiDriverName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the CSI driver used to access remote storage. +If this field is empty, the storage is not accessible via CSI.

+ +
+ +`csiDriverSecretTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that used to render and generate `k8s.io/api/core/v1.Secret` +resources for a specific CSI driver. +For example, `accessKey` and `secretKey` needed by CSI-S3 are stored in this +`Secret` resource. +

+ +
+ +`storageClassTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template utilized to render and generate `kubernetes.storage.k8s.io.v1.StorageClass` +resources. The `StorageClass` created by this template is aimed at using the CSI driver.

+ +
+ +`persistentVolumeClaimTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template that renders and generates `k8s.io/api/core/v1.PersistentVolumeClaim` +resources. This PVC can reference the `StorageClass` created from `storageClassTemplate`, +allowing Pods to access remote storage by mounting the PVC. +

+ +
+ +`datasafedConfigTemplate`
+ +string + + +
+ +(Optional) + +

+A Go template used to render and generate `k8s.io/api/core/v1.Secret`. +This `Secret` involves the configuration details required by the `datasafed` tool +to access remote storage. For example, the `Secret` should contain `endpoint`, +`bucket`, `region`, `accessKey`, `secretKey`, or something else for S3 storage. +If this field is empty, this kind of storage is not accessible via +the `datasafed` tool.

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Describes the parameters required for storage. +The parameters defined here can be referenced in the above templates, +and `kbcli` uses this definition for dynamic command-line parameter parsing. +

+ +
+
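+
+A condensed sketch of a StorageProvider for S3-compatible storage; the API group is assumed from
+this reference page, and the driver name and parameter names are illustrative:
+
+```yaml
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: StorageProvider
+metadata:
+  name: s3-example
+spec:
+  csiDriverName: ru.yandex.s3.csi      # illustrative CSI-S3 driver
+  parametersSchema:
+    openAPIV3Schema:
+      type: object
+      properties:
+        bucket:
+          type: string
+        accessKeyId:
+          type: string
+        secretAccessKey:
+          type: string
+    credentialFields:                  # these land in a Secret, not a ConfigMap
+      - accessKeyId
+      - secretAccessKey
+```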

+StorageProviderStatus + +

+ +

+ +(Appears on:StorageProvider) + +

+
+ +

+StorageProviderStatus defines the observed state of `StorageProvider`. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ + +StorageProviderPhase + + + +
+ + +

+The phase of the `StorageProvider`. Valid phases are `NotReady` and `Ready`. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Describes the current state of the `StorageProvider`. +

+ +
+

+SyncProgress + +

+ +

+ +(Appears on:BackupDataActionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`enabled`
+ +bool + + +
+ +(Optional) + +

+Determines if the backup progress should be synchronized. If set to true, +a sidecar container will be instantiated to synchronize the backup progress with the +Backup Custom Resource (CR) status. +

+ +
+ +`intervalSeconds`
+ +int32 + + +
+ +(Optional) + +

+Defines the interval in seconds for synchronizing the backup progress. +

+ +
+

+TargetInstance + +

+ +

+ +(Appears on:BackupMethodTPL, BackupPolicyTemplateSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`role`
+ +string + + +
+ + +

+Specifies the role to select one or more replicas for backup. +

+
+  • If no replica with the specified role exists, the backup task will fail. Special case: If there is only one replica in the cluster, it will be used for backup, even if its role differs from the specified one. For example, if you specify backing up on a secondary replica, but the cluster is single-node with only one primary replica, the primary will be used for backup. Future versions will address this special case using role priorities.
+  • If multiple replicas satisfy the specified role, the choice (`Any` or `All`) will be made according to the `strategy` field below.
+ +
+ +`fallbackRole`
+ +string + + +
+ +(Optional) + +

+Specifies the fallback role to select one replica for backup, this only takes effect when the +`strategy` field below is set to `Any`. +

+ +
+ +`account`
+ +string + + +
+ +(Optional) + +

+If `backupPolicy.componentDefs` is set, this field is required to specify the system account name. +This account must match one listed in `componentDefinition.spec.systemAccounts[*].name`. +The corresponding secret created by this account is used to connect to the database. +

+ +
+ +`strategy`
+ + +PodSelectionStrategy + + + +
+ +(Optional) + +

+Specifies the PodSelectionStrategy to use when multiple pods are +selected for the backup target. +Valid values are: +

+
+  • Any: Selects any one pod that matches the labelsSelector.
+  • All: Selects all pods that match the labelsSelector.
+ +
+ +`useParentSelectedPods`
+ +bool + + +
+ +(Optional) + +

+UseParentSelectedPods indicates whether to use the pods selected by the parent for backup. +If set to true, the backup will use the same pods selected by the parent. +This only takes effect when the `strategy` is set to `Any`.

+ +
+ +`containerPort`
+ + +ContainerPort + + + +
+ +(Optional) + +

+Specifies the container port in the target pod. +If not specified, the first container and its first port will be used. +

+ +
+
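+
+A sketch of a target selection that prefers a secondary replica and falls back to the primary;
+the `root` account name is an assumption that must match a defined system account:
+
+```yaml
+target:
+  role: secondary        # back up on a secondary replica
+  fallbackRole: primary  # used only when strategy is Any and no secondary matches
+  account: root          # must match componentDefinition.spec.systemAccounts[*].name
+  strategy: Any
+```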

+TargetVolumeInfo + +

+ +

+ +(Appears on:BackupMethod, BackupMethodTPL) + +

+
+ +

+TargetVolumeInfo specifies the volumes and their mounts of the targeted application +that should be mounted in backup workload. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`volumes`
+ +[]string + + +
+ +(Optional) + +

+Specifies the list of volumes of targeted application that should be mounted +on the backup workload. +

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Specifies the mount for the volumes specified in `volumes` section. +

+ +
+

+ValueFrom + +

+ +

+ +(Appears on:EnvVar) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`versionMapping`
+ + +[]VersionMapping + + + +
+ +(Optional) + +

+Determines the appropriate version of the backup tool image from the service version.

+ +
+

+VersionMapping + +

+ +

+ +(Appears on:ValueFrom) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`serviceVersions`
+ +[]string + + +
+ + +

+Represents an array of service versions that can be mapped to an appropriate value. +Each name in the list can represent an exact name, a name prefix, or a regular expression pattern.

+ +

+For example: +

+
+  • “8.0.33”: Matches the exact name “8.0.33”
+  • “8.0”: Matches all names starting with “8.0”
+  • “^8.0.\d{1,2}$”: Matches all names starting with “8.0.” followed by one or two digits.
+ +
+ +`mappedValue`
+ +string + + +
+ + +

+Specifies a mapping value based on service version. +Typically used to set up the tools image required for backup operations. +

+ +
+
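+For example, a `valueFrom.versionMapping` entry might map every 8.0.x service version to one tools
+image, as in the sketch below (the image reference is hypothetical):
+
+ valueFrom:
+   versionMapping:
+   - serviceVersions:
+     - "8.0"                                    # name prefix, matches 8.0.x
+     mappedValue: "docker.io/example/tools:8.0" # backup tool image to use
+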

+VolumeClaimRestorePolicy +(`string` alias) +

+ +

+ +(Appears on:PrepareDataConfig) + +

+
+ +

+VolumeClaimRestorePolicy defines restore policy for persistent volume claim. +Supported policies are as follows: +

+
    +
  1. +Parallel: parallel recovery of persistent volume claim. +
  2. +
  3. +Serial: restore the persistent volume claim in sequence, and wait until the previous persistent volume claim is restored before restoring a new one. +
  4. +
+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Parallel" +

+
+ +
+ +

+"Serial" +

+
+ +
+

+VolumeConfig + +

+ +

+ +(Appears on:PrepareDataConfig, RestoreVolumeClaim) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`volumeSource`
+ +string + + +
+ +(Optional) + +

+Describes the volume that will be restored from the specified volume of the backup targetVolumes. +This is required if the backup uses a volume snapshot. +

+ +
+ +`mountPath`
+ +string + + +
+ +(Optional) + +

+Specifies the path within the restoring container at which the volume should be mounted. +

+ +
+

+VolumeSnapshotStatus + +

+ +

+ +(Appears on:ActionStatus, BackupStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+The name of the volume snapshot. +

+ +
+ +`contentName`
+ +string + + +
+ +(Optional) + +

+The name of the volume snapshot content. +

+ +
+ +`volumeName`
+ +string + + +
+ +(Optional) + +

+The name of the volume. +

+ +
+ +`size`
+ +string + + +
+ +(Optional) + +

+The size of the volume snapshot. +

+ +
+ +`targetName`
+ +string + + +
+ + +

+Associates this volumeSnapshot with its corresponding target. +

+ +
+
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/api-reference/operations.mdx b/docs/zh/preview/user_docs/references/api-reference/operations.mdx new file mode 100644 index 00000000..bee60182 --- /dev/null +++ b/docs/zh/preview/user_docs/references/api-reference/operations.mdx @@ -0,0 +1,7998 @@ +--- +title: Operations API Reference +description: Operations API Reference +keywords: [operations, api] +sidebar_position: 2 +sidebar_label: Operations +--- +
+ +

+Packages: +

+ +

operations.kubeblocks.io/v1alpha1

+Resource Types: + +

+OpsDefinition + +

+
+ +

+OpsDefinition is the Schema for the OpsDefinitions API. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`operations.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`OpsDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +OpsDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`preConditions`
+ + +[]PreCondition + + + +
+ +(Optional) + +

+Specifies the preconditions that must be met to run the actions for the operation.
+If set, the condition is checked before the Component runs this operation.
+Example:
+

+
+
+ preConditions:
+ - rule:
+     expression: '{{ eq .component.status.phase "Running" }}'
+     message: Component is not in Running status.
+
+
+ +
+ +`podInfoExtractors`
+ + +[]PodInfoExtractor + + + +
+ +(Optional) + +

+Specifies a list of PodInfoExtractor, each designed to select a specific Pod and extract selected runtime info +from its PodSpec. +The extracted information, such as environment variables, volumes and tolerations, are then injected into +Jobs or Pods that execute the OpsActions defined in `actions`. +

+ +
+ +`componentInfos`
+ + +[]ComponentInfo + + + +
+ +(Optional) + +

+Specifies a list of ComponentDefinition for Components associated with this OpsDefinition. +It also includes connection credentials (address and account) for each Component. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema for validating the data types and value ranges of parameters in OpsActions before their usage. +

+ +
+ +`actions`
+ + +[]OpsAction + + + +
+ + +

+Specifies a list of OpsAction where each customized action is executed sequentially. +

+ +
+ +
+ +`status`
+ + +OpsDefinitionStatus + + + +
+ + +
+

+OpsRequest + +

+
+ +

+OpsRequest is the Schema for the opsrequests API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`operations.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`OpsRequest` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +OpsRequestSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`cancel`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the current operation should be canceled and terminated gracefully if it’s in the +“Pending”, “Creating”, or “Running” state. +

+ +

+This field applies only to “VerticalScaling” and “HorizontalScaling” opsRequests. +

+ +

+Note: Setting `cancel` to true is irreversible; further modifications to this field are ineffective. +

+ +
+ +`force`
+ +bool + + +
+ +(Optional) + +

+Instructs the system to bypass pre-checks (including cluster state checks and customized pre-conditions hooks) +and immediately execute the opsRequest, except for the opsRequest of ‘Start’ type, which will still undergo +pre-checks even if `force` is true. +

+ +

+This is useful for concurrent execution of ‘VerticalScaling’ and ‘HorizontalScaling’ opsRequests. +By setting `force` to true, you can bypass the default checks and demand these opsRequests to run +simultaneously. +

+ +

+Note: Once set, the `force` field is immutable and cannot be updated. +

+ +
+ +`enqueueOnForce`
+ +bool + + +
+ +(Optional) + +

+Indicates whether opsRequest should continue to queue when ‘force’ is set to true. +

+ +
+ +`type`
+ + +OpsType + + + +
+ + +

+Specifies the type of this operation. Supported types include “Start”, “Stop”, “Restart”, “Switchover”, +“VerticalScaling”, “HorizontalScaling”, “VolumeExpansion”, “Reconfiguring”, “Upgrade”, “Backup”, “Restore”, +“Expose”, “RebuildInstance”, “Custom”. +

+ +

+Note: This field is immutable once set. +

+ +
+ +`ttlSecondsAfterSucceed`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after successfully completing +(when `opsRequest.status.phase` is “Succeed”) before automatic deletion. +

+ +
+ +`ttlSecondsAfterUnsuccessfulCompletion`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after completion +for any phase other than “Succeed” (e.g., “Failed”, “Cancelled”, “Aborted”) before automatic deletion. +

+ +
+ +`preConditionDeadlineSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum time in seconds that the OpsRequest will wait for its pre-conditions to be met +before it aborts the operation. +If set to 0 (default), pre-conditions must be satisfied immediately for the OpsRequest to proceed. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration (in seconds) that an opsRequest is allowed to run. +If the opsRequest runs longer than this duration, its phase will be marked as Aborted. +If this value is not set or set to 0, the timeout will be ignored and the opsRequest will run indefinitely. +

+ +
+ +`SpecificOpsRequest`
+ + +SpecificOpsRequest + + + +
+ + +

+ +(Members of `SpecificOpsRequest` are embedded into this type.) + +

+ +

+Exactly one of its members must be set. +

+ +
+ +
+ +`status`
+ + +OpsRequestStatus + + + +
+ + +
+
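+For reference, a minimal OpsRequest sketch that combines the common spec fields above (the cluster and
+component names are hypothetical, and `restart` is assumed to be one of the embedded `SpecificOpsRequest` members):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-restart
+ spec:
+   clusterName: mycluster        # the target Cluster
+   type: Restart                 # one of the supported OpsType values
+   ttlSecondsAfterSucceed: 3600  # auto-delete one hour after success
+   restart:
+   - componentName: mysql
+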

+ActionTask + +

+ +

+ +(Appears on:ProgressStatusDetail) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`objectKey`
+ +string + + +
+ + +

+Represents the name of the task. +

+ +
+ +`namespace`
+ +string + + +
+ + +

+Represents the namespace where the task is deployed. +

+ +
+ +`status`
+ + +ActionTaskStatus + + + +
+ + +

+Indicates the current status of the task, including “Processing”, “Failed”, “Succeed”. +

+ +
+ +`targetPodName`
+ +string + + +
+ +(Optional) + +

+The name of the Pod that the task is associated with or operates on. +

+ +
+ +`retries`
+ +int32 + + +
+ +(Optional) + +

+The count of retry attempts made for this task. +

+ +
+

+ActionTaskStatus +(`string` alias) +

+ +

+ +(Appears on:ActionTask) + +

+
+ +

+ActionTaskStatus defines the status of the task. +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Failed" +

+
+ +
+ +

+"Processing" +

+
+ +
+ +

+"Succeed" +

+
+ +
+

+Backup + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Backup custom resource. +

+ +
+ +`backupPolicyName`
+ +string + + +
+ +(Optional) + +

+Indicates the name of the BackupPolicy applied to perform this Backup. +

+ +
+ +`backupMethod`
+ +string + + +
+ +(Optional) + +

+Specifies the name of BackupMethod. +The specified BackupMethod must be defined in the BackupPolicy. +

+ +
+ +`deletionPolicy`
+ +string + + +
+ +(Optional) + +

+Determines whether the backup contents stored in backup repository +should be deleted when the Backup custom resource is deleted. +Supported values are `Retain` and `Delete`. +- `Retain` means that the backup content and its physical snapshot on backup repository are kept. +- `Delete` means that the backup content and its physical snapshot on backup repository are deleted. +

+ +
+ +`retentionPeriod`
+ +string + + +
+ +(Optional) + +

+Determines the duration for which the Backup custom resources should be retained. +

+ +

+The controller will automatically remove all Backup objects that are older than the specified RetentionPeriod.
+For example, a RetentionPeriod of `30d` will keep only the Backup objects of the last 30 days.
+Sample duration format:
+

+
    +
  • +years: 2y +
  • +
  • +months: 6mo +
  • +
  • +days: 30d +
  • +
  • +hours: 12h +
  • +
  • +minutes: 30m +
  • +
+ +

+You can also combine the above durations. For example: 30d12h30m. +If not set, the Backup objects will be kept forever. +

+ +

+If the `deletionPolicy` is set to ‘Delete’, then the associated backup data will also be deleted +along with the Backup object. +Otherwise, only the Backup custom resource will be deleted. +

+ +
+ +`parentBackupName`
+ +string + + +
+ +(Optional) + +

+If the specified BackupMethod is incremental, `parentBackupName` is required. +

+ +
+ +`parameters`
+ +[]github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.ParameterPair + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+
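+A minimal sketch of a backup OpsRequest built from these fields (all names are hypothetical, and the
+embedded `backup` field name is assumed from `SpecificOpsRequest`):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-backup
+ spec:
+   clusterName: mycluster
+   type: Backup
+   backup:
+     backupPolicyName: mycluster-mysql-backup-policy # must exist for the cluster
+     backupMethod: xtrabackup                        # must be defined in the BackupPolicy
+     deletionPolicy: Delete                          # delete backup data with the Backup CR
+     retentionPeriod: 30d                            # keep Backups for 30 days
+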

+BackupRefSpec + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ref`
+ + +RefNamespaceName + + + +
+ +(Optional) + +

+Refers to a reference backup that needs to be restored. +

+ +
+

+CompletionProbe + +

+ +

+ +(Appears on:OpsResourceModifierAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`initialDelaySeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds to wait after the resource has been patched before initiating completion probes. +The default value is 5 seconds, with a minimum value of 1. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of seconds after which the probe times out. +The default value is 60 seconds, with a minimum value of 1. +

+ +
+ +`periodSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the frequency (in seconds) at which the probe should be performed. +The default value is 5 seconds, with a minimum value of 1. +

+ +
+ +`matchExpressions`
+ + +MatchExpressions + + + +
+ + +

+Executes expressions regularly, based on the value of PeriodSeconds, to determine if the action has been completed. +

+ +
+

+ComponentInfo + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentDefinitionName`
+ +string + + +
+ + +

+Specifies the name of the ComponentDefinition. +The name can represent an exact name, a name prefix, or a regular expression pattern. +

+ +

+For example: +

+
    +
  • +“mysql-8.0.30-v1alpha1”: Matches the exact name “mysql-8.0.30-v1alpha1” +
  • +
  • +“mysql-8.0.30”: Matches all names starting with “mysql-8.0.30” +
  • +
  • +“^mysql-8.0.\d{1,2}$”: Matches all names starting with “mysql-8.0.” followed by one or two digits. +
  • +
+ +
+ +`accountName`
+ +string + + +
+ +(Optional) + +

+Specifies the account name associated with the Component. +If set, the corresponding account username and password are injected into containers’ environment variables +`KB_ACCOUNT_USERNAME` and `KB_ACCOUNT_PASSWORD`. +

+ +
+ +`serviceName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Service. +If set, the service name is injected as the `KB_COMP_SVC_NAME` environment variable in the containers, +and each service port is mapped to a corresponding environment variable named `KB_COMP_SVC_PORT_$(portName)`. +The `portName` is transformed by replacing ‘-’ with ‘_’ and converting to uppercase. +

+ +
+

+ComponentOps + +

+ +

+ +(Appears on:CustomOpsComponent, HorizontalScaling, RebuildInstance, Reconfigure, SpecificOpsRequest, UpgradeComponent, VerticalScaling, VolumeExpansion) + +

+
+ +

+ComponentOps specifies the Component to be operated on. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component as defined in the cluster.spec.
+

+ +
+

+CustomOps + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`opsDefinitionName`
+ +string + + +
+ + +

+Specifies the name of the OpsDefinition. +

+ +
+ +`serviceAccountName`
+ +string + + +
+ + +

+Specifies the name of the ServiceAccount to be used for executing the custom operation. +

+ +
+ +`maxConcurrentComponents`
+ + +Kubernetes api utils intstr.IntOrString + + + +
+ +(Optional) + +

+Specifies the maximum number of components to be operated on concurrently to mitigate performance impact +on clusters with multiple components. +

+ +

+It accepts an absolute number (e.g., 5) or a percentage of components to execute in parallel (e.g., “10%”). +Percentages are rounded up to the nearest whole number of components. +For example, if “10%” results in less than one, it rounds up to 1. +

+ +

+When unspecified, all components are processed simultaneously by default. +

+ +

+Note: This feature is not implemented yet. +

+ +
+ +`components`
+ + +[]CustomOpsComponent + + + +
+ + +

+Specifies the components and their parameters for executing custom actions as defined in OpsDefinition. +Requires at least one component. +

+ +
+
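+A minimal sketch of a custom OpsRequest built from these fields (the OpsDefinition, ServiceAccount,
+component, and parameter names are hypothetical, and the embedded `custom` field name is assumed):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-custom
+ spec:
+   clusterName: mycluster
+   type: Custom
+   custom:
+     opsDefinitionName: my-ops-def
+     serviceAccountName: my-service-account
+     components:
+     - componentName: mysql
+       parameters:              # validated against opsDefinition.spec.parametersSchema
+       - name: maxConnections
+         value: "1000"
+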

+CustomOpsComponent + +

+ +

+ +(Appears on:CustomOps) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`parameters`
+ + +[]Parameter + + + +
+ +(Optional) + +

+Specifies the parameters that match the schema specified in the `opsDefinition.spec.parametersSchema`. +

+ +
+

+EnvVarRef + +

+ +

+ +(Appears on:OpsVarSource) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`targetContainerName`
+ +string + + +
+ +(Optional) + +

+Specifies the container name in the target Pod. +If not specified, the first container will be used by default. +

+ +
+ +`envName`
+ +string + + +
+ + +

+Defines the name of the environment variable. +This name can originate from an ‘env’ entry or be a data key from an ‘envFrom’ source. +

+ +
+

+Expose + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component. +

+ +
+ +`switch`
+ + +ExposeSwitch + + + +
+ + +

+Indicates whether the services will be exposed.
+‘Enable’ exposes the services, while ‘Disable’ removes the exposed Service.
+

+ +
+ +`services`
+ + +[]OpsService + + + +
+ + +

+Specifies a list of OpsService. +When an OpsService is exposed, a corresponding ClusterService will be added to `cluster.spec.services`. +On the other hand, when an OpsService is unexposed, the corresponding ClusterService will be removed +from `cluster.spec.services`. +

+ +

+Note: If `componentName` is not specified, the `ports` and `selector` fields must be provided +in each OpsService definition. +

+ +
+
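+A minimal sketch of an expose OpsRequest built from these fields (the service and role names are
+hypothetical, and the embedded `expose` field name is assumed from `SpecificOpsRequest`):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-expose
+ spec:
+   clusterName: mycluster
+   type: Expose
+   expose:
+   - componentName: mysql
+     switch: Enable            # Enable adds the ClusterService, Disable removes it
+     services:
+     - name: internet
+       serviceType: LoadBalancer
+       roleSelector: primary   # only route to pods with the primary role
+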

+ExposeSwitch +(`string` alias) +

+ +

+ +(Appears on:Expose) + +

+
+ +

+ExposeSwitch specifies the switch for the expose operation. This switch can be used to enable or disable the expose operation.
+

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Disable" +

+
+ +
+ +

+"Enable" +

+
+ +
+

+FailurePolicyType +(`string` alias) +

+ +

+ +(Appears on:OpsAction) + +

+
+ +

+FailurePolicyType specifies the type of failure policy. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Fail" +

+
+ +

+FailurePolicyFail means that an error will be reported. +

+ +
+ +

+"Ignore" +

+
+ +

+FailurePolicyIgnore means that an error will be ignored but logged. +

+ +
+

+HorizontalScaling + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+HorizontalScaling defines the parameters of a horizontal scaling operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`shards`
+ +int32 + + +
+ + +

+Specifies the desired number of shards for the component. +This parameter is mutually exclusive with other parameters. +

+ +
+ +`scaleOut`
+ + +ScaleOut + + + +
+ +(Optional) + +

+Specifies the replica changes for scaling out components and instance templates, +and brings offline instances back online. Can be used in conjunction with the “scaleIn” operation. +Note: Any configuration that deletes instances is considered invalid. +

+ +
+ +`scaleIn`
+ + +ScaleIn + + + +
+ +(Optional) + +

+Specifies the replica changes for scaling in components and instance templates, +and takes specified instances offline. Can be used in conjunction with the “scaleOut” operation. +Note: Any configuration that creates instances is considered invalid. +

+ +
+
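+A minimal sketch of a horizontal scaling OpsRequest that scales out one replica (names are hypothetical,
+and the embedded `horizontalScaling` field name is assumed from `SpecificOpsRequest`):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-scale-out
+ spec:
+   clusterName: mycluster
+   type: HorizontalScaling
+   horizontalScaling:
+   - componentName: mysql
+     scaleOut:
+       replicaChanges: 1       # add one replica; use scaleIn to remove replicas
+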

+Instance + +

+ +

+ +(Appears on:RebuildInstance) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Pod name of the instance. +

+ +
+ +`targetNodeName`
+ +string + + +
+ +(Optional) + +

+The instance will be rebuilt on the specified node.
+If not set, it will be rebuilt on a random node.
+

+ +
+

+InstanceReplicasTemplate + +

+ +

+ +(Appears on:ReplicaChanger) + +

+
+ +

+InstanceReplicasTemplate defines the template for instance replicas. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the instance template. +

+ +
+ +`replicaChanges`
+ +int32 + + +
+ + +

+Specifies the replica changes for the instance template. +

+ +
+

+InstanceResourceTemplate + +

+ +

+ +(Appears on:VerticalScaling) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refers to the instance template name of the component or sharding.
+

+ +
+ +`ResourceRequirements`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ + +

+ +(Members of `ResourceRequirements` are embedded into this type.) + +

+ +

+Defines the computational resource size for vertical scaling. +

+ +
+

+InstanceVolumeClaimTemplate + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Refers to the instance template name of the component or sharding.
+

+ +
+ +`volumeClaimTemplates`
+ + +[]OpsRequestVolumeClaimTemplate + + + +
+ + +

+volumeClaimTemplates specifies the storage size and volumeClaimTemplate name. +

+ +
+

+JSONPatchOperation + +

+ +

+ +(Appears on:OpsResourceModifierAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`op`
+ +string + + +
+ + +

+Specifies the type of JSON patch operation. It supports the following values: ‘add’, ‘remove’, ‘replace’. +

+ +
+ +`path`
+ +string + + +
+ + +

+Specifies the json patch path. +

+ +
+ +`value`
+ +string + + +
+ + +

+Specifies the value to be used in the JSON patch operation. +

+ +
+

+LastComponentConfiguration + +

+ +

+ +(Appears on:LastConfiguration) + +

+
+ +

+LastComponentConfiguration can be used to track and compare the desired state of the Component over time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicas`
+ +int32 + + +
+ +(Optional) + +

+Records the `replicas` of the Component prior to any changes. +

+ +
+ +`shards`
+ +int32 + + +
+ +(Optional) + +

+Records the `shards` of the Component prior to any changes. +

+ +
+ +`ResourceRequirements`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ + +

+ +(Members of `ResourceRequirements` are embedded into this type.) + +

+(Optional) + +

+Records the resources of the Component prior to any changes. +

+ +
+ +`volumeClaimTemplates`
+ + +[]OpsRequestVolumeClaimTemplate + + + +
+ +(Optional) + +

+Records volumes’ storage size of the Component prior to any changes. +

+ +
+ +`services`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentService + + +
+ +(Optional) + +

+Records the ClusterComponentService list of the Component prior to any changes. +

+ +
+ +`instances`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.InstanceTemplate + + +
+ +(Optional) + +

+Records the InstanceTemplate list of the Component prior to any changes. +

+ +
+ +`offlineInstances`
+ +[]string + + +
+ +(Optional) + +

+Records the offline instances of the Component prior to any changes. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Records the version of the Service expected to be provisioned by this Component prior to any changes. +

+ +
+ +`componentDefinitionName`
+ +string + + +
+ +(Optional) + +

+Records the name of the ComponentDefinition prior to any changes. +

+ +
+

+LastConfiguration + +

+ +

+ +(Appears on:OpsRequestStatus) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/operations/v1alpha1.LastComponentConfiguration + + + +
+ +(Optional) + +

+Records the configuration of each Component prior to any changes. +

+ +
+

+MatchExpressions + +

+ +

+ +(Appears on:CompletionProbe) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`failure`
+ +string + + +
+ +(Optional) + +

+Specifies a failure condition for an action using a Go template expression.
+Should evaluate to either `true` or `false`.
+The current resource object is parsed into the Go template.
+For example, you can use ‘{{ eq .spec.replicas 1 }}’.
+

+ +
+ +`success`
+ +string + + +
+ + +

+Specifies a success condition for an action using a Go template expression.
+Should evaluate to either `true` or `false`.
+The current resource object is parsed into the Go template.
+For example, you can use ‘{{ eq .spec.replicas 1 }}’.
+

+ +
+

+OpsAction + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+ +

+OpsAction specifies a custom action defined in OpsDefinition for execution in a “Custom” OpsRequest. +

+ +

+OpsAction can be of three types: +

+
    +
  • +workload: Creates a Job or Pod to run custom scripts, ideal for isolated or long-running tasks. +
  • +
  • +exec: Executes commands directly within an existing container using the kubectl exec interface, +suitable for immediate, short-lived operations. +
  • +
  • +resourceModifier: Modifies a K8s object using JSON patches, useful for updating the spec of some resource. +
  • +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the OpsAction. +

+ +
+ +`failurePolicy`
+ + +FailurePolicyType + + + +
+ +(Optional) + +

+Specifies the failure policy of the OpsAction. +Valid values are: +

+
    +
  • +“Fail”: Marks the entire OpsRequest as failed if the action fails. +
  • +
  • +“Ignore”: The OpsRequest continues processing despite the failure of the action. +
  • +
+ +
+ +`parameters`
+ +[]string + + +
+ +(Optional) + +

+Specifies the parameters for the OpsAction. Their usage varies based on the action type: +

+
    +
  • +For ‘workload’ or ‘exec’ actions, parameters are injected as environment variables. +
  • +
  • +For ‘resourceModifier’ actions, parameter can be referenced using $() in fields +`resourceModifier.completionProbe.matchExpressions` and `resourceModifier.jsonPatches[*].value`. +
  • +
+ +
+ +`workload`
+ + +OpsWorkloadAction + + + +
+ +(Optional) + +

+Specifies the configuration for a ‘workload’ action. +This action leads to the creation of a K8s workload, such as a Pod or Job, to execute specified tasks. +

+ +
+ +`exec`
+ + +OpsExecAction + + + +
+ +(Optional) + +

+Specifies the configuration for an ‘exec’ action.
+It creates a Pod and invokes ‘kubectl exec’ to run a command inside a specified container of the target Pod.
+

+ +
+ +`resourceModifier`
+ + +OpsResourceModifierAction + + + +
+ +(Optional) + +

+Specifies the configuration for a ‘resourceModifier’ action. +This action allows for modifications to existing K8s objects. +

+ +

+Note: This feature has not been implemented yet. +

+ +
+
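+For instance, an ‘exec’ action inside an OpsDefinition might be sketched as follows (the action name,
+extractor name, and script path are hypothetical):
+
+ actions:
+ - name: switchover
+   failurePolicy: Fail          # fail the whole OpsRequest if this action fails
+   exec:
+     podInfoExtractorName: target-pod
+     command:
+     - /bin/sh
+     - -c
+     - /scripts/switchover.sh   # runs via kubectl exec in the selected container
+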

+OpsDefinitionSpec + +

+ +

+ +(Appears on:OpsDefinition) + +

+
+ +

+OpsDefinitionSpec defines the desired state of OpsDefinition. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`preConditions`
+ + +[]PreCondition + + + +
+ +(Optional) + +

+Specifies the preconditions that must be met to run the actions for the operation.
+If set, the condition is checked before the Component runs this operation.
+Example:
+

+
+
+ preConditions:
+ - rule:
+     expression: '{{ eq .component.status.phase "Running" }}'
+     message: Component is not in Running status.
+
+
+ +
+ +`podInfoExtractors`
+ + +[]PodInfoExtractor + + + +
+ +(Optional) + +

+Specifies a list of PodInfoExtractor, each designed to select a specific Pod and extract selected runtime info +from its PodSpec. +The extracted information, such as environment variables, volumes and tolerations, are then injected into +Jobs or Pods that execute the OpsActions defined in `actions`. +

+ +
+ +`componentInfos`
+ + +[]ComponentInfo + + + +
+ +(Optional) + +

+Specifies a list of ComponentDefinition for Components associated with this OpsDefinition. +It also includes connection credentials (address and account) for each Component. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Specifies the schema for validating the data types and value ranges of parameters in OpsActions before their usage. +

+ +
+ +`actions`
+ + +[]OpsAction + + + +
+ + +

+Specifies a list of OpsAction where each customized action is executed sequentially. +

+ +
+

+OpsDefinitionStatus + +

+ +

+ +(Appears on:OpsDefinition) + +

+
+ +

+OpsDefinitionStatus defines the observed state of OpsDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the most recent generation observed of this OpsDefinition. +

+ +
+ +`phase`
+ + +Phase + + + +
+ +(Optional) + +

+Represents the current state of the OpsDefinition. +Valid values are “”, “Available”, “Unavailable”. +When it equals to “Available”, the OpsDefinition is ready and can be used in a “Custom” OpsRequest. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+

+OpsEnvVar + +

+ +

+ +(Appears on:PodInfoExtractor) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the environment variable to be injected into Pods executing OpsActions. +It must conform to the C_IDENTIFIER format, which includes only alphanumeric characters and underscores, and cannot begin with a digit. +

+ +
+ +`valueFrom`
+ + +OpsVarSource + + + +
+ + +

+Specifies the source of the environment variable’s value. +

+ +
+

+OpsExecAction + +

+ +

+ +(Appears on:OpsAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`podInfoExtractorName`
+ +string + + +
+ + +

+Specifies a PodInfoExtractor defined in the `opsDefinition.spec.podInfoExtractors`. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries allowed before marking the action as failed. +

+ +
+ +`command`
+ +[]string + + +
+ + +

+The command to be executed via ‘kubectl exec --’.
+

+ +
+ +`containerName`
+ +string + + +
+ +(Optional) + +

+The name of the container in the target pod where the command should be executed. +This corresponds to the `-c {containerName}` option in `kubectl exec`. +

+ +

+If not set, the first container is used. +

+ +
+

+OpsPhase +(`string` alias) +

+ +

+ +(Appears on:OpsRequestStatus) + +

+
+ +

+OpsPhase defines opsRequest phase. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Aborted" +

+
+ +
+ +

+"Cancelled" +

+
+ +
+ +

+"Cancelling" +

+
+ +
+ +

+"Creating" +

+
+ +
+ +

+"Failed" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Running" +

+
+ +
+ +

+"Succeed" +

+
+ +
+

+OpsRecorder + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+The name of the OpsRequest.
+

+ +
+ +`type`
+ + +OpsType + + + +
+ + +

+The type of the OpsRequest.
+

+ +
+ +`inQueue`
+ +bool + + +
+ + +

+Indicates whether the current opsRequest is in the queue.
+

+ +
+ +`queueBySelf`
+ +bool + + +
+ + +

+Indicates that the operation is queued for execution within its own-type scope.
+

+ +
+

+OpsRequestBehaviour + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`FromClusterPhases`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterPhase + + +
+ + +
+ +`ToClusterPhase`
+ +github.com/apecloud/kubeblocks/apis/apps/v1.ClusterPhase + + +
+ + +
+

+OpsRequestComponentStatus + +

+ +

+ +(Appears on:OpsRequestStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`phase`
+ +github.com/apecloud/kubeblocks/apis/apps/v1.ComponentPhase + + +
+ +(Optional) + +

+Records the current phase of the Component, mirroring `cluster.status.components[componentName].phase`. +

+ +
+ +`lastFailedTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the timestamp when the Component last transitioned to a “Failed” phase. +

+ +
+ +`preCheck`
+ + +PreCheckResult + + + +
+ +(Optional) + +

+Records the result of the preConditions check of the opsRequest, which determines subsequent steps. +

+ +
+ +`progressDetails`
+ + +[]ProgressStatusDetail + + + +
+ +(Optional) + +

+Describes the progress details of objects or actions associated with the Component. +

+ +
+ +`reason`
+ +string + + +
+ +(Optional) + +

+Provides an explanation for the Component being in its current state. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable message indicating details about this operation. +

+ +
+

+OpsRequestSpec + +

+ +

+ +(Appears on:OpsRequest) + +

+
+ +

+OpsRequestSpec defines the desired state of OpsRequest +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`cancel`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the current operation should be canceled and terminated gracefully if it’s in the +“Pending”, “Creating”, or “Running” state. +

+ +

+This field applies only to “VerticalScaling” and “HorizontalScaling” opsRequests. +

+ +

+Note: Setting `cancel` to true is irreversible; further modifications to this field are ineffective. +

+ +
+ +`force`
+ +bool + + +
+ +(Optional) + +

+Instructs the system to bypass pre-checks (including cluster state checks and customized pre-conditions hooks) +and immediately execute the opsRequest, except for the opsRequest of ‘Start’ type, which will still undergo +pre-checks even if `force` is true. +

+ +

+This is useful for concurrent execution of ‘VerticalScaling’ and ‘HorizontalScaling’ opsRequests. +By setting `force` to true, you can bypass the default checks and demand these opsRequests to run +simultaneously. +

+ +

+Note: Once set, the `force` field is immutable and cannot be updated. +

+ +
+ +`enqueueOnForce`
+ +bool + + +
+ +(Optional) + +

+Indicates whether opsRequest should continue to queue when ‘force’ is set to true. +

+ +
+ +`type`
+ + +OpsType + + + +
+ + +

+Specifies the type of this operation. Supported types include “Start”, “Stop”, “Restart”, “Switchover”, +“VerticalScaling”, “HorizontalScaling”, “VolumeExpansion”, “Reconfiguring”, “Upgrade”, “Backup”, “Restore”, +“Expose”, “RebuildInstance”, “Custom”. +

+ +

+Note: This field is immutable once set. +

+ +
+ +`ttlSecondsAfterSucceed`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after successfully completing +(when `opsRequest.status.phase` is “Succeed”) before automatic deletion. +

+ +
+ +`ttlSecondsAfterUnsuccessfulCompletion`
+ +int32 + + +
+ +(Optional) + +

+Specifies the duration in seconds that an OpsRequest will remain in the system after completion +for any phase other than “Succeed” (e.g., “Failed”, “Cancelled”, “Aborted”) before automatic deletion. +

+ +
+ +`preConditionDeadlineSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum time in seconds that the OpsRequest will wait for its pre-conditions to be met +before it aborts the operation. +If set to 0 (default), pre-conditions must be satisfied immediately for the OpsRequest to proceed. +

+ +
+ +`timeoutSeconds`
+ +int32 + + +
+ +(Optional) + +

+Specifies the maximum duration (in seconds) that an opsRequest is allowed to run. +If the opsRequest runs longer than this duration, its phase will be marked as Aborted. +If this value is not set or set to 0, the timeout will be ignored and the opsRequest will run indefinitely. +

+ +
+ +`SpecificOpsRequest`
+ + +SpecificOpsRequest + + + +
+ + +

+ +(Members of `SpecificOpsRequest` are embedded into this type.) + +

+ +

+Exactly one of its members must be set. +

+ +
+

+OpsRequestStatus + +

+ +

+ +(Appears on:OpsRequest) + +

+
+ +

+OpsRequestStatus represents the observed state of an OpsRequest. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterGeneration`
+ +int64 + + +
+ +(Optional) + +

+Records the cluster generation after the OpsRequest action has been handled. +

+ +
+ +`phase`
+ + +OpsPhase + + + +
+ + +

+Represents the phase of the OpsRequest. +Possible values include “Pending”, “Creating”, “Running”, “Cancelling”, “Cancelled”, “Failed”, “Succeed”. +

+ +
+ +`progress`
+ +string + + +
+ + +

+Represents the progress of the OpsRequest. +

+ +
+ +`lastConfiguration`
+ + +LastConfiguration + + + +
+ +(Optional) + +

+Records the configuration prior to any changes. +

+ +
+ +`components`
+ + +map[string]github.com/apecloud/kubeblocks/apis/operations/v1alpha1.OpsRequestComponentStatus + + + +
+ +(Optional) + +

+Records the status information of Components changed due to the OpsRequest. +

+ +
+ +`extras`
+ +[]string + + +
+ + +

+A collection of additional key-value pairs that provide supplementary information for the OpsRequest. +

+ +
+ +`startTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the OpsRequest started processing. +

+ +
+ +`completionTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the OpsRequest was completed. +

+ +
+ +`cancelTimestamp`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the time when the OpsRequest was cancelled. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Describes the detailed status of the OpsRequest. +Possible condition types include “Cancelled”, “WaitForProgressing”, “Validated”, “Succeed”, “Failed”, “Restarting”, +“VerticalScaling”, “HorizontalScaling”, “VolumeExpanding”, “Reconfigure”, “Switchover”, “Stopping”, “Starting”, +“VersionUpgrading”, “Exposing”, “Backup”, “InstancesRebuilding”, “CustomOperation”. +

+ +
+

+OpsRequestVolumeClaimTemplate + +

+ +

+ +(Appears on:InstanceVolumeClaimTemplate, LastComponentConfiguration, VolumeExpansion) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`storage`
+ + +Kubernetes resource.Quantity + + + +
+ + +

+Specifies the desired storage size for the volume. +

+ +
+ +`name`
+ +string + + +
+ + +

+Specify the name of the volumeClaimTemplate in the Component. +The specified name must match one of the volumeClaimTemplates defined +in the `clusterComponentSpec.volumeClaimTemplates` field. +

+ +
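+For example, a volume expansion OpsRequest might grow the `data` volumeClaimTemplate as sketched below
+(names are hypothetical, and the embedded `volumeExpansion` field name is assumed from `SpecificOpsRequest`):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-volume-expansion
+ spec:
+   clusterName: mycluster
+   type: VolumeExpansion
+   volumeExpansion:
+   - componentName: mysql
+     volumeClaimTemplates:
+     - name: data              # must match clusterComponentSpec.volumeClaimTemplates
+       storage: 40Gi           # desired storage size for the volume
+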
+

+OpsResourceModifierAction + +

+ +

+ +(Appears on:OpsAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`resource`
+ + +TypedObjectRef + + + +
+ + +

+Specifies the K8s object that is to be updated. +

+ +
+ +`jsonPatches`
+ + +[]JSONPatchOperation + + + +
+ + +

+Specifies a list of patches for modifying the object. +

+ +
+ +`completionProbe`
+ + +CompletionProbe + + + +
+ + +

+Specifies a method to determine if the action has been completed. +

+ +

+Note: This feature has not been implemented yet. +

+ +
+

+OpsService + +

+ +

+ +(Appears on:Expose) + +

+
+ +

+OpsService represents the parameters to dynamically create or remove a ClusterService in the `cluster.spec.services` array. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the Service. This name is used to set `clusterService.name`. +

+ +

+Note: This field cannot be updated. +

+ +
+ +`annotations`
+ +map[string]string + + +
+ +(Optional) + +

+Contains cloud provider related parameters if ServiceType is LoadBalancer. +

+ +

+More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer. +

+ +
+ +`ports`
+ + +[]Kubernetes core/v1.ServicePort + + + +
+ +(Optional) + +

+Specifies Port definitions that are to be exposed by a ClusterService. +

+ +

+If not specified, the Port definitions from non-NodePort and non-LoadBalancer type ComponentService +defined in the ComponentDefinition (`componentDefinition.spec.services`) will be used. +If no matching ComponentService is found, the expose operation will fail. +

+ +

+More info: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports +

+ +
+ +`roleSelector`
+ +string + + +
+ +(Optional) + +

+Specifies a role to target with the service. +If specified, the service will only be exposed to pods with the matching role. +

+ +

+Note: If the component has roles, at least one of ‘roleSelector’ or ‘podSelector’ must be specified. +If both are specified, a pod must match both conditions to be selected. +

+ +
+ +`podSelector`
+ +map[string]string + + +
+ +(Optional) + +

+Routes service traffic to pods with matching label keys and values. +If specified, the service will only be exposed to pods matching the selector. +

+ +

+Note: If the component has roles, at least one of ‘roleSelector’ or ‘podSelector’ must be specified. +If both are specified, a pod must match both conditions to be selected. +

+ +
+ +`serviceType`
+ + +Kubernetes core/v1.ServiceType + + + +
+ +(Optional) + +

+Determines how the Service is exposed. Defaults to ‘ClusterIP’. +Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. +

+
    +
  • +`ClusterIP`: allocates a cluster-internal IP address for load-balancing to endpoints. +Endpoints are determined by the selector or if that is not specified, +they are determined by manual construction of an Endpoints object or EndpointSlice objects. +
  • +
  • +`NodePort`: builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. +
  • +
  • +`LoadBalancer`: builds on NodePort and creates an external load-balancer (if supported in the current cloud) +which routes to the same endpoints as the clusterIP. +
  • +
+ +

+Note: although K8s Service type allows the ‘ExternalName’ type, it is not a valid option for the expose operation. +

+ +

+For more info, see: +https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types. +

+ +
+ +`ipFamilies`
+ + +[]Kubernetes core/v1.IPFamily + + + +
+ +(Optional) + +

+A list of IP families (e.g., IPv4, IPv6) assigned to this Service. +

+ +

+Usually assigned automatically based on the cluster configuration and the `ipFamilyPolicy` field. +If specified manually, the requested IP family must be available in the cluster and allowed by the `ipFamilyPolicy`. +If the requested IP family is not available or not allowed, the Service creation will fail. +

+ +

+Valid values: +

+
    +
  • +“IPv4” +
  • +
  • +“IPv6” +
  • +
+ +

+This field may hold a maximum of two entries (dual-stack families, in either order). +

+ +

+Common combinations of `ipFamilies` and `ipFamilyPolicy` are: +

+
    +
  • +ipFamilies=[] + ipFamilyPolicy=“PreferDualStack” : +The Service prefers dual-stack but can fall back to single-stack if the cluster does not support dual-stack. +The IP family is automatically assigned based on the cluster configuration. +
  • +
  • +ipFamilies=[“IPV4”,“IPV6”] + ipFamilyPolicy=“RequiredDualStack” : +The Service requires dual-stack and will only be created if the cluster supports both IPv4 and IPv6. +The primary IP family is IPV4. +
  • +
  • +ipFamilies=[“IPV6”,“IPV4”] + ipFamilyPolicy=“RequiredDualStack” : +The Service requires dual-stack and will only be created if the cluster supports both IPv4 and IPv6. +The primary IP family is IPV6. +
  • +
  • +ipFamilies=[“IPV4”] + ipFamilyPolicy=“SingleStack” : +The Service uses a single-stack with IPv4 only. +
  • +
  • +ipFamilies=[“IPV6”] + ipFamilyPolicy=“SingleStack” : +The Service uses a single-stack with IPv6 only. +
  • +
+ +
+ +`ipFamilyPolicy`
+ + +Kubernetes core/v1.IPFamilyPolicy + + + +
+ +(Optional) + +

+Specifies whether the Service should use a single IP family (SingleStack) or two IP families (DualStack). +

+ +

+Possible values: +

+
    +
  • +‘SingleStack’ (default) : The Service uses a single IP family. +If no value is provided, IPFamilyPolicy defaults to SingleStack. +
  • +
  • +‘PreferDualStack’ : The Service prefers to use two IP families on dual-stack configured clusters +or a single IP family on single-stack clusters. +
  • +
  • +‘RequiredDualStack’ : The Service requires two IP families on dual-stack configured clusters. +If the cluster is not configured for dual-stack, the Service creation fails. +
  • +
+ +
+

+OpsType +(`string` alias) +

+ +

+ +(Appears on:OpsRecorder, OpsRequestSpec) + +

+
+ +

+OpsType defines operation types. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Backup" +

+
+ +
+ +

+"Custom" +

+
+ +

+RebuildInstance rebuilding an instance is very useful when a node is offline or an instance is unrecoverable. +

+ +
+ +

+"Expose" +

+
+ +

+StartType the start operation will start the pods which is deleted in stop operation. +

+ +
+ +

+"HorizontalScaling" +

+
+ +
+ +

+"RebuildInstance" +

+
+ +
+ +

+"Reconfiguring" +

+
+ +
+ +

+"Restart" +

+
+ +
+ +

+"Restore" +

+
+ +
+ +

+"Start" +

+
+ +

+StopType the stop operation will delete all pods in a cluster concurrently. +

+ +
+ +

+"Stop" +

+
+ +

+RestartType the restart operation is a special case of the rolling update operation. +

+ +
+ +

+"Switchover" +

+
+ +
+ +

+"Upgrade" +

+
+ +
+ +

+"VerticalScaling" +

+
+ +
+ +

+"VolumeExpansion" +

+
+ +
+

+OpsVarSource + +

+ +

+ +(Appears on:OpsEnvVar) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`envRef`
+ + +EnvVarRef + + + +
+ +(Optional) + +

+Specifies a reference to a specific environment variable within a container. +Used to specify the source of the variable, which can be either “env” or “envFrom”. +

+ +
+ +`fieldPath`
+ + +Kubernetes core/v1.ObjectFieldSelector + + + +
+ +(Optional) + +

+Represents the JSONPath expression pointing to the specific data within the JSON structure of the target Pod. +It is used to extract precise data locations for operations on the Pod. +

+ +
+

+OpsWorkloadAction + +

+ +

+ +(Appears on:OpsAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`type`
+ + +OpsWorkloadType + + + +
+ + +

+Defines the workload type of the action. Valid values include “Job” and “Pod”. +

+
    +
  • +“Job”: Creates a Job to execute the action. +
  • +
  • +“Pod”: Creates a Pod to execute the action. +Note: unlike Jobs, manually deleting a Pod does not affect the `backoffLimit`. +
  • +
+ +
+ +`podInfoExtractorName`
+ +string + + +
+ + +

+Specifies a PodInfoExtractor defined in the `opsDefinition.spec.podInfoExtractors`. +

+ +
+ +`backoffLimit`
+ +int32 + + +
+ +(Optional) + +

+Specifies the number of retries allowed before marking the action as failed. +

+ +
+ +`podSpec`
+ + +Kubernetes core/v1.PodSpec + + + +
+ + +

+Specifies the PodSpec of the ‘workload’ action. +

+ +
+

+OpsWorkloadType +(`string` alias) +

+ +

+ +(Appears on:OpsWorkloadAction) + +

+
+ +

+OpsWorkloadType defines the type of workload used to execute an OpsAction.
+

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Job" +

+
+ +
+ +

+"Pod" +

+
+ +
+

+Parameter + +

+ +

+ +(Appears on:CustomOpsComponent) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the identifier of the parameter as defined in the OpsDefinition. +

+ +
+ +`value`
+ +string + + +
+ + +

+Holds the data associated with the parameter. +If the parameter type is an array, the format should be “v1,v2,v3”. +

+ +
+ +`valueFrom`
+ + +ParameterSource + + + +
+ + +

+Source for the parameter’s value. Cannot be used if value is not empty. +

+ +
+

+ParameterPair + +

+ +

+ +(Appears on:Reconfigure) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`key`
+ +string + + +
+ + +

+Represents the name of the parameter that is to be updated. +

+ +
+ +`value`
+ +string + + +
+ +(Optional) + +

+Represents the parameter values that are to be updated. +If set to nil, the parameter defined by the Key field will be removed from the configuration file. +

+ +
+

+ParameterSource + +

+ +

+ +(Appears on:Parameter) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`configMapKeyRef`
+ + +Kubernetes core/v1.ConfigMapKeySelector + + + +
+ +(Optional) + +

+Selects a key of a ConfigMap. +

+ +
+ +`secretKeyRef`
+ + +Kubernetes core/v1.SecretKeySelector + + + +
+ +(Optional) + +

+Selects a key of a Secret. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`openAPIV3Schema`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ +(Optional) + +

+Defines the schema for parameters using the OpenAPI v3 specification.
+The supported property types include:
+- string
+- number
+- integer
+- array: Note that only items of string type are supported.
+

+ +
+

+Phase +(`string` alias) +

+ +

+ +(Appears on:OpsDefinitionStatus) + +

+
+ +

+Phase represents the current status of the OpsDefinition CR.
+

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +

+AvailablePhase indicates that the object is in an available state. +

+ +
+ +

+"Unavailable" +

+
+ +

+UnavailablePhase indicates that the object is in an unavailable state. +

+ +
+

+PodInfoExtractor + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the PodInfoExtractor. +

+ +
+ +`env`
+ + +[]OpsEnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be extracted from a selected Pod, +and injected into the containers executing each OpsAction. +

+ +
+ +`podSelector`
+ + +PodSelector + + + +
+ + +

+Used to select the target Pod whose PodSpec provides the environment variables and volumes to be extracted.
+

+ +
+ +`volumeMounts`
+ + +[]Kubernetes core/v1.VolumeMount + + + +
+ +(Optional) + +

+Specifies a list of volumes, along with their respective mount points, that are to be extracted from a selected Pod, +and mounted onto the containers executing each OpsAction. +This allows the containers to access shared or persistent data necessary for the operation. +

+ +
+

+PodSelectionPolicy +(`string` alias) +

+ +

+ +(Appears on:PodSelector) + +

+
+ +

+PodSelectionPolicy defines the pod selection strategy.
+

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"All" +

+
+ +
+ +

+"Any" +

+
+ +
+

+PodSelector + +

+ +

+ +(Appears on:PodInfoExtractor) + +

+
+ +

+PodSelector selects the target Pod whose PodSpec provides the environment variables and volumes to be extracted.
+

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`role`
+ +string + + +
+ +(Optional) + +

+Specifies the role of the target Pod. +

+ +
+ +`multiPodSelectionPolicy`
+ + +PodSelectionPolicy + + + +
+ + +

+Defines the policy for selecting the target pod when multiple pods match the podSelector. +It can be either ‘Any’ (select any one pod that matches the podSelector) +or ‘All’ (select all pods that match the podSelector). +

+ +
+

+PointInTimeRefSpec + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`time`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Refers to the specific time point for restoration, with UTC as the time zone. +

+ +
+ +`ref`
+ + +RefNamespaceName + + + +
+ +(Optional) + +

+Refers to a reference source cluster that needs to be restored. +

+ +
+

+PreCheckResult + +

+ +

+ +(Appears on:OpsRequestComponentStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`pass`
+ +bool + + +
+ + +

+Indicates whether the preCheck operation passed or failed. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides explanations related to the preCheck result in a human-readable format. +

+ +
+

+PreCondition + +

+ +

+ +(Appears on:OpsDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`rule`
+ + +Rule + + + +
+ + +

+Specifies the conditions that must be met for the operation to execute. +

+ +
+

+ProgressStatus +(`string` alias) +

+ +

+ +(Appears on:ProgressStatusDetail) + +

+
+ +

+ProgressStatus defines the status of the opsRequest progress. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Failed" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Processing" +

+
+ +
+ +

+"Succeed" +

+
+ +
+

+ProgressStatusDetail + +

+ +

+ +(Appears on:OpsRequestComponentStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`group`
+ +string + + +
+ +(Optional) + +

+Specifies the group to which the current object belongs.
+

+ +
+ +`objectKey`
+ +string + + +
+ +(Optional) + +

+`objectKey` uniquely identifies the object, which can be any K8s object, like a Pod, Job, Component, or PVC. +Either `objectKey` or `actionName` must be provided. +

+ +
+ +`actionName`
+ +string + + +
+ +(Optional) + +

+Indicates the name of an OpsAction, as defined in `opsDefinition.spec.actions[*].name`. +Either `objectKey` or `actionName` must be provided. +

+ +
+ +`actionTasks`
+ + +[]ActionTask + + + +
+ +(Optional) + +

+Lists the tasks, such as Jobs or Pods, that carry out the action. +

+ +
+ +`status`
+ + +ProgressStatus + + + +
+ + +

+Represents the current processing state of the object, including “Processing”, “Pending”, “Failed”, “Succeed” +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a human-readable explanation of the object’s condition. +

+ +
+ +`startTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the start time of object processing. +

+ +
+ +`endTime`
+ + +Kubernetes meta/v1.Time + + + +
+ +(Optional) + +

+Records the completion time of object processing. +

+ +
+

+RebuildInstance + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`instances`
+ + +[]Instance + + + +
+ + +

+Specifies the instances (Pods) that need to be rebuilt, typically operating as standbys. +

+ +
+ +`inPlace`
+ +bool + + +
+ + +

+When it is set to true, the instance will be rebuilt in-place.
+If false, a new pod will be created. Once the new pod is ready to serve,
+the instance that requires rebuilding will be taken offline.
+

+ +
+ +`backupName`
+ +string + + +
+ +(Optional) + +

+Indicates the name of the Backup custom resource from which to recover the instance. +Defaults to an empty PersistentVolume if unspecified. +

+ +

+Note: +- Only full physical backups are supported for multi-replica Components (e.g., ‘xtrabackup’ for MySQL). +- Logical backups (e.g., ‘mysqldump’ for MySQL) are unsupported in the current version. +

+ +
+ +`sourceBackupTargetName`
+ +string + + +
+ +(Optional) + +

+When the backup has multiple source targets, you must specify which source target to restore from.
+

+ +
+ +`restoreEnv`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Defines container environment variables for the restore process,
+merged with the ones specified in the Backup and ActionSet resources.
+

+ +

+Merge priority: Restore env > Backup env > ActionSet env. +

+ +

+Purpose: Some databases require different configurations when being restored as a standby +compared to being restored as a primary. +For example, when restoring MySQL as a replica, you need to set `skip_slave_start="ON"` for 5.7 +or `skip_replica_start="ON"` for 8.0. +Allowing environment variables to be passed in makes it more convenient to control these behavioral differences +during the restore process. +

+ +
+
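+A minimal sketch of an instance rebuild OpsRequest built from these fields (the pod and backup names are
+hypothetical, and the embedded `rebuildFrom` field name is assumed from `SpecificOpsRequest`):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-rebuild
+ spec:
+   clusterName: mycluster
+   type: RebuildInstance
+   rebuildFrom:
+   - componentName: mysql
+     inPlace: true                             # rebuild in place instead of creating a new pod first
+     backupName: mycluster-backup-xtrabackup   # recover from a full physical backup
+     instances:
+     - name: mycluster-mysql-1
+       targetNodeName: node-2                  # optional; a random node is used when omitted
+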

+Reconfigure + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+Reconfigure defines the parameters for updating a Component’s configuration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`parameters`
+ + +[]ParameterPair + + + +
+ +(Optional) + +

+Specifies a list of key-value pairs representing parameters and their corresponding values +within a single configuration file. +This field is used to override or set the values of parameters without modifying the entire configuration file. +

+ +
+
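+A minimal sketch of a reconfiguring OpsRequest built from these fields (the parameter is hypothetical,
+and the embedded list field name, assumed here to be `reconfigures`, comes from `SpecificOpsRequest`):
+
+ apiVersion: operations.kubeblocks.io/v1alpha1
+ kind: OpsRequest
+ metadata:
+   name: mycluster-reconfigure
+ spec:
+   clusterName: mycluster
+   type: Reconfiguring
+   reconfigures:
+   - componentName: mysql
+     parameters:
+     - key: max_connections    # parameter name in the configuration file
+       value: "2000"           # set to null to remove the parameter
+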

+RefNamespaceName + +

+ +

+ +(Appears on:BackupRefSpec, PointInTimeRefSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ +(Optional) + +

+Refers to the specific name of the resource. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Refers to the specific namespace of the resource. +

+ +
+

+ReplicaChanger + +

+ +

+ +(Appears on:ScaleIn, ScaleOut) + +

+
+ +

+ReplicaChanger defines the parameters for changing the number of replicas. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`replicaChanges`
+ +int32 + + +
+ + +

+Specifies the replica changes for the component. +

+ +
+ +`instances`
+ + +[]InstanceReplicasTemplate + + + +
+ +(Optional) + +

+Modifies the desired replicas count for an existing InstanceTemplate. +

+ +
+

+Restore + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`backupName`
+ +string + + +
+ + +

+Specifies the name of the Backup custom resource. +

+ +
+ +`backupNamespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the backup custom resource. If not specified, the namespace of the opsRequest will be used. +

+ +
+ +`restorePointInTime`
+ +string + + +
+ + +

+Specifies the point in time to which the restore should be performed. +Supported time formats: +

+
    +
  • +RFC3339 format, e.g. “2023-11-25T18:52:53Z” +
  • +
  • +A human-readable date-time format, e.g. “Jul 25,2023 18:52:53 UTC+0800” +
  • +
+ +
+ +`env`
+ + +[]Kubernetes core/v1.EnvVar + + + +
+ +(Optional) + +

+Specifies a list of environment variables to be set in the container. +

+ +
+ +`volumeRestorePolicy`
+ +string + + +
+ + +

+Specifies the policy for restoring volume claims of a Component’s Pods. +It determines whether the volume claims should be restored sequentially (one by one) or in parallel (all at once). +Supported values: +

+
    +
  • +“Serial” +
  • +
  • +“Parallel” +
  • +
+ +
+ +`deferPostReadyUntilClusterRunning`
+ +bool + + +
+ + +

+Controls the timing of PostReady actions during the recovery process. +

+ +

+If false (default), PostReady actions execute when the Component reaches the “Running” state. +If true, PostReady actions are delayed until the entire Cluster is “Running,” +ensuring the cluster’s overall stability before proceeding. +

+ +

+This setting is useful for coordinating PostReady operations across the Cluster for optimal cluster conditions. +

+ +
+ +`parameters`
+ +[]github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1.ParameterPair + + +
+ +(Optional) + +

+Specifies a list of name-value pairs representing parameters and their corresponding values. +Parameters match the schema specified in the `actionset.spec.parametersSchema` +

+ +
+
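+
+For illustration, a restore `spec` combining these fields might be sketched as follows (names are hypothetical; the point-in-time uses the RFC3339 format listed above):
+
+spec:
+  clusterName: mycluster-restored
+  type: Restore
+  restore:
+    backupName: mysql-backup-20240101
+    restorePointInTime: "2023-11-25T18:52:53Z"
+    volumeRestorePolicy: Parallel
+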

+Rule + +

+ +

+ +(Appears on:PreCondition) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`expression`
+ +string + + +
+ + +

+Specifies a Go template expression that determines how the operation can be executed. +The return value must be either `true` or `false`. +Available built-in objects that can be referenced in the expression include: +

+
    +
  • +`params`: Input parameters. +
  • +
  • +`cluster`: The referenced Cluster object. +
  • +
  • +`component`: The referenced Component object. +
  • +
+ +
+ +`message`
+ +string + + +
+ + +

+Specifies the error or status message reported if the `expression` does not evaluate to `true`. +

+ +
+
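+
+For illustration, a precondition rule that gates an operation on the cluster phase might be sketched as follows (the expression and message are hypothetical; the built-in `cluster` object is referenced via `.cluster`):
+
+preConditions:
+- rule:
+    expression: '{{ eq .cluster.status.phase "Running" }}'
+    message: "Cluster is not in Running phase; the operation cannot proceed."
+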

+ScaleIn + +

+ +

+ +(Appears on:HorizontalScaling) + +

+
+ +

+ScaleIn defines the configuration for a scale-in operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ReplicaChanger`
+ + +ReplicaChanger + + + +
+ + +

+ +(Members of `ReplicaChanger` are embedded into this type.) + +

+ +

+Modifies the replicas of the component and instance templates. +

+ +
+ +`onlineInstancesToOffline`
+ +[]string + + +
+ +(Optional) + +

+Specifies the instance names that need to be taken offline. +

+ +
+

+ScaleOut + +

+ +

+ +(Appears on:HorizontalScaling) + +

+
+ +

+ScaleOut defines the configuration for a scale-out operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ReplicaChanger`
+ + +ReplicaChanger + + + +
+ + +

+ +(Members of `ReplicaChanger` are embedded into this type.) + +

+ +

+Modifies the replicas of the component and instance templates. +

+ +
+ +`newInstances`
+ +[]github.com/apecloud/kubeblocks/apis/apps/v1.InstanceTemplate + + +
+ +(Optional) + +

+Defines the configuration for new instances added during scaling, including resource requirements, labels, annotations, etc. +New instances are created based on the provided instance templates. +

+ +
+ +`offlineInstancesToOnline`
+ +[]string + + +
+ +(Optional) + +

+Specifies the instances in the offline list to bring back online. +

+ +
+
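+
+For illustration, a horizontal-scaling `spec` that scales one component out and takes a specific instance of another component offline might be sketched as follows (all names are hypothetical):
+
+spec:
+  clusterName: mycluster
+  type: HorizontalScaling
+  horizontalScaling:
+  - componentName: mysql
+    scaleOut:
+      replicaChanges: 2
+  - componentName: proxy
+    scaleIn:
+      replicaChanges: 1
+      onlineInstancesToOffline:
+      - mycluster-proxy-2
+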

+SpecificOpsRequest + +

+ +

+ +(Appears on:OpsRequestSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`upgrade`
+ + +Upgrade + + + +
+ +(Optional) + +

+Specifies the desired new version of the Cluster. +

+ +

+Note: This field is immutable once set. +

+ +
+ +`horizontalScaling`
+ + +[]HorizontalScaling + + + +
+ +(Optional) + +

+Lists HorizontalScaling objects, each specifying scaling requirements for a Component, +including desired replica changes, configurations for new instances, modifications for existing instances, +and taking specified instances offline or online. +

+ +
+ +`volumeExpansion`
+ + +[]VolumeExpansion + + + +
+ +(Optional) + +

+Lists VolumeExpansion objects, each specifying a component and its corresponding volumeClaimTemplates +that require storage expansion. +

+ +
+ +`start`
+ + +[]ComponentOps + + + +
+ +(Optional) + +

+Lists Components to be started. If empty, all components will be started. +

+ +
+ +`stop`
+ + +[]ComponentOps + + + +
+ +(Optional) + +

+Lists Components to be stopped. If empty, all components will be stopped. +

+ +
+ +`restart`
+ + +[]ComponentOps + + + +
+ +(Optional) + +

+Lists Components to be restarted. +

+ +
+ +`switchover`
+ + +[]Switchover + + + +
+ +(Optional) + +

+Lists Switchover objects, each specifying a Component to perform the switchover operation. +

+ +
+ +`verticalScaling`
+ + +[]VerticalScaling + + + +
+ +(Optional) + +

+Lists VerticalScaling objects, each specifying a component and its desired compute resources for vertical scaling. +

+ +
+ +`reconfigures`
+ + +[]Reconfigure + + + +
+ +(Optional) + +

+Lists Reconfigure objects, each specifying a Component and its configuration updates. +

+ +
+ +`expose`
+ + +[]Expose + + + +
+ +(Optional) + +

+Lists Expose objects, each specifying a Component and its services to be exposed. +

+ +
+ +`backup`
+ + +Backup + + + +
+ +(Optional) + +

+Specifies the parameters to back up a Cluster. +

+ +
+ +`restore`
+ + +Restore + + + +
+ +(Optional) + +

+Specifies the parameters to restore a Cluster. +Note that this restore operation will roll back cluster services. +

+ +
+ +`rebuildFrom`
+ + +[]RebuildInstance + + + +
+ +(Optional) + +

+Specifies the parameters to rebuild some instances. +Rebuilding an instance involves restoring its data from a backup or another database replica. +The instances being rebuilt usually serve as standby in the cluster. +Hence, rebuilding instances is often also referred to as “standby reconstruction”. +

+ +
+ +`custom`
+ + +CustomOps + + + +
+ +(Optional) + +

+Specifies a custom operation defined by OpsDefinition. +

+ +
+

+Switchover + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component as defined in the cluster.spec. +

+ +
+ +`componentObjectName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Component object. +

+ +
+ +`instanceName`
+ +string + + +
+ + +

+Specifies the instance whose role will be transferred. A typical usage is to transfer the leader role +in a consensus system. +

+ +
+ +`candidateName`
+ +string + + +
+ +(Optional) + +

+If CandidateName is specified, the role will be transferred to this instance. +The name must match one of the pods in the component. +Refer to ComponentDefinition’s Switchover lifecycle action for more details. +

+ +
+
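+
+For illustration, a switchover `spec` that transfers the leader role to a named candidate might be sketched as follows (names are hypothetical):
+
+spec:
+  clusterName: mycluster
+  type: Switchover
+  switchover:
+  - componentName: mysql
+    instanceName: mycluster-mysql-0
+    candidateName: mycluster-mysql-1
+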

+TypedObjectRef + +

+ +

+ +(Appears on:OpsResourceModifierAction) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiGroup`
+ +string + + +
+ + +

+Specifies the group for the resource being referenced. +If not specified, the referenced Kind must belong to the core API group. +For all third-party types, this is mandatory. +

+ +
+ +`kind`
+ +string + + +
+ + +

+Specifies the type of resource being referenced. +

+ +
+ +`name`
+ +string + + +
+ + +

+Indicates the name of the resource being referenced. +

+ +
+

+UpdatedParameters + +

+
+ +

+UpdatedParameters holds details about the modifications made to configuration parameters. +Example: +

+
+
+updatedParameters:
+	updatedKeys:
+	  my.cnf: '{"mysqld":{"max_connections":"100"}}'
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`addedKeys`
+ +map[string]string + + +
+ +(Optional) + +

+Maps newly added configuration files to their content. +

+ +
+ +`deletedKeys`
+ +map[string]string + + +
+ +(Optional) + +

+Lists the names of configuration files that have been deleted. +

+ +
+ +`updatedKeys`
+ +map[string]string + + +
+ +(Optional) + +

+Maps the name of configuration files to their updated content, detailing the changes made. +

+ +
+

+Upgrade + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+Upgrade defines the parameters for an upgrade operation. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`components`
+ + +[]UpgradeComponent + + + +
+ +(Optional) + +

+Lists components to be upgraded based on desired ComponentDefinition and ServiceVersion. +From the perspective of cluster API, the reasonable combinations should be (see the sketch after this table): +1. (comp-def, service-ver) - upgrade to the specified service version and component definition, the user takes the responsibility to ensure that they are compatible. +2. (“”, service-ver) - upgrade to the specified service version, let the operator choose the latest compatible component definition. +3. (comp-def, “”) - upgrade to the specified component definition, let the operator choose the latest compatible service version. +4. (“”, “”) - upgrade to the latest service version and component definition, the operator will ensure the compatibility between the selected versions. +

+ +
+
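+
+For illustration, an upgrade `spec` using combination 1 above, specifying both a component definition and a service version, might be sketched as follows (the names and versions are hypothetical):
+
+spec:
+  clusterName: mycluster
+  type: Upgrade
+  upgrade:
+    components:
+    - componentName: mysql
+      componentDefinitionName: mysql-8.0-1.0.0
+      serviceVersion: 8.0.35
+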

+UpgradeComponent + +

+ +

+ +(Appears on:Upgrade) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`componentDefinitionName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the ComponentDefinition, only exact matches are supported. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+Specifies the version of the Service expected to be provisioned by this Component. +It refers to the ServiceVersion defined by the ComponentDefinition and ComponentVersion. +ServiceVersion in ClusterComponentSpec is optional; when no version is specified, +the latest available version in ComponentVersion is used. +

+ +
+

+VerticalScaling + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+VerticalScaling refers to the process of adjusting compute resources (e.g., CPU, memory) allocated to a Component. +It defines the parameters required for the operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`ResourceRequirements`
+ + +Kubernetes core/v1.ResourceRequirements + + + +
+ + +

+ +(Members of `ResourceRequirements` are embedded into this type.) + +

+ +

+Defines the desired compute resources of the Component’s instances. +

+ +
+ +`instances`
+ + +[]InstanceResourceTemplate + + + +
+ +(Optional) + +

+Specifies the desired compute resources of the instance templates that need vertical scaling. +

+ +
+
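+
+For illustration, a vertical-scaling `spec` with the embedded `ResourceRequirements` might be sketched as follows (names and sizes are hypothetical):
+
+spec:
+  clusterName: mycluster
+  type: VerticalScaling
+  verticalScaling:
+  - componentName: mysql
+    requests:
+      cpu: "1"
+      memory: 2Gi
+    limits:
+      cpu: "2"
+      memory: 4Gi
+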

+VolumeExpansion + +

+ +

+ +(Appears on:SpecificOpsRequest) + +

+
+ +

+VolumeExpansion encapsulates the parameters required for a volume expansion operation. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ComponentOps`
+ + +ComponentOps + + + +
+ + +

+ +(Members of `ComponentOps` are embedded into this type.) + +

+ +

+Specifies the name of the Component. +

+ +
+ +`volumeClaimTemplates`
+ + +[]OpsRequestVolumeClaimTemplate + + + +
+ + +

+Specifies a list of OpsRequestVolumeClaimTemplate objects, defining the volumeClaimTemplates +that are used to expand the storage and the desired storage size for each one. +

+ +
+
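+
+For illustration, a volume-expansion `spec` might be sketched as follows (names and the target size are hypothetical):
+
+spec:
+  clusterName: mycluster
+  type: VolumeExpansion
+  volumeExpansion:
+  - componentName: mysql
+    volumeClaimTemplates:
+    - name: data
+      storage: 40Gi
+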
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/api-reference/parameters.mdx b/docs/zh/preview/user_docs/references/api-reference/parameters.mdx new file mode 100644 index 00000000..9db568a9 --- /dev/null +++ b/docs/zh/preview/user_docs/references/api-reference/parameters.mdx @@ -0,0 +1,5770 @@ +--- +title: Parameters API Reference +description: Parameters API Reference +keywords: [parameters, api] +sidebar_position: 3 +sidebar_label: Parameters +--- +
+ +

+Packages: +

+ +

parameters.kubeblocks.io/v1alpha1

+Resource Types: + +

+ComponentParameter + +

+
+ +

+ComponentParameter is the Schema for the componentparameters API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ComponentParameter` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ComponentParameterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + +
+ +`clusterName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigTemplateItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigTemplateItemDetail objects. +

+ +

+Each ConfigTemplateItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigTemplateItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+ +
+ +`status`
+ + +ComponentParameterStatus + + + +
+ + +
+

+ParamConfigRenderer + +

+
+ +

+ParamConfigRenderer is the Schema for the paramconfigrenderers API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ParamConfigRenderer` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ParamConfigRendererSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`componentDef`
+ +string + + +
+ + +

+Specifies the ComponentDefinition custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`parametersDefs`
+ +[]string + + +
+ +(Optional) + +

+Specifies the ParametersDefinition custom resource (CR) that defines the Component parameter’s schema and behavior. +

+ +
+ +`configs`
+ + +[]ComponentConfigDescription + + + +
+ +(Optional) + +

+Specifies the configuration files. +

+ +
+ +
+ +`status`
+ + +ParamConfigRendererStatus + + + +
+ + +
+

+Parameter + +

+
+ +

+Parameter is the Schema for the parameters API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`Parameter` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ParameterSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`componentParameters`
+ + +[]ComponentParametersSpec + + + +
+ + +

+Lists ComponentParametersSpec objects, each specifying a Component and its parameters and template updates. +

+ +
+ +
+ +`status`
+ + +ParameterStatus + + + +
+ + +
+
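+
+For illustration, a minimal Parameter manifest might be sketched as follows (the object, cluster, and component names and the parameter value are hypothetical):
+
+apiVersion: parameters.kubeblocks.io/v1alpha1
+kind: Parameter
+metadata:
+  name: mycluster-update-params
+spec:
+  clusterName: mycluster
+  componentParameters:
+  - componentName: mysql
+    parameters:
+      max_connections: "600"
+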

+ParametersDefinition + +

+
+ +

+ParametersDefinition is the Schema for the parametersdefinitions API +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`apiVersion`
+string +
+ +`parameters.kubeblocks.io/v1alpha1` + +
+ +`kind`
+string + +
+`ParametersDefinition` +
+ +`metadata`
+ + +Kubernetes meta/v1.ObjectMeta + + + +
+ +Refer to the Kubernetes API documentation for the fields of the +`metadata` field. + +
+ +`spec`
+ + +ParametersDefinitionSpec + + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`fileName`
+ +string + + +
+ +(Optional) + +

+Specifies the config file name in the config template. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadAction` is set. +
  4. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+dynamicReloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`deletedPolicy`
+ + +ParameterDeletedPolicy + + + +
+ +(Optional) + +

+Specifies the policy to apply when a parameter is removed. +

+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+ +
+ +`status`
+ + +ParametersDefinitionStatus + + + +
+ + +
+

+AutoTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+AutoTrigger automatically performs the reload when specified conditions are met. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`processName`
+ +string + + +
+ +(Optional) + +

+The name of the process. +

+ +
+

+CfgFileFormat +(`string` alias) +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+CfgFileFormat defines formatter of configuration files. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"dotenv" +

+
+ +
+ +

+"hcl" +

+
+ +
+ +

+"ini" +

+
+ +
+ +

+"json" +

+
+ +
+ +

+"properties" +

+
+ +
+ +

+"props-plus" +

+
+ +
+ +

+"redis" +

+
+ +
+ +

+"toml" +

+
+ +
+ +

+"xml" +

+
+ +
+ +

+"yaml" +

+
+ +
+

+ComponentConfigDescription + +

+ +

+ +(Appears on:ParamConfigRendererSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the config file name in the config template. +

+ +
+ +`templateName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the referenced componentTemplateSpec. +

+ +
+ +`fileFormatConfig`
+ + +FileFormatConfig + + + +
+ +(Optional) + +

+Specifies the format of the configuration file and any associated parameters that are specific to the chosen format. +Supported formats include `ini`, `xml`, `yaml`, `json`, `hcl`, `dotenv`, `properties`, and `toml`. +

+ +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +

+Example: +

+
+
+fileFormatConfig:
+ format: ini
+ iniConfig:
+   sectionName: mysqld
+
+
+ +
+ +`reRenderResourceTypes`
+ + +[]RerenderResourceType + + + +
+ +(Optional) + +

+Specifies whether the configuration needs to be re-rendered after v-scale or h-scale operations to reflect changes. +

+ +

+In some scenarios, the configuration may need to be updated to reflect the changes in resource allocation +or cluster topology. Examples: +

+
    +
  • +Redis: adjust maxmemory after v-scale operation. +
  • +
  • +MySQL: increase max connections after v-scale operation. +
  • +
  • +Zookeeper: update zoo.cfg with new node addresses after h-scale operation. +
  • +
+ +
+

+ComponentParameterSpec + +

+ +

+ +(Appears on:ComponentParameter) + +

+
+ +

+ComponentParameterSpec defines the desired state of ComponentConfiguration +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterName`
+ +string + + +
+ +(Optional) + +

+Specifies the name of the Cluster that this configuration is associated with. +

+ +
+ +`componentName`
+ +string + + +
+ + +

+Represents the name of the Component that this configuration pertains to. +

+ +
+ +`configItemDetails`
+ + +[]ConfigTemplateItemDetail + + + +
+ +(Optional) + +

+ConfigItemDetails is an array of ConfigTemplateItemDetail objects. +

+ +

+Each ConfigTemplateItemDetail corresponds to a configuration template, +which is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+The ConfigTemplateItemDetail includes information such as: +

+
    +
  • +The configuration template (a ConfigMap) +
  • +
  • +The corresponding ConfigConstraint (constraints and validation rules for the configuration) +
  • +
  • +Volume mounts (for mounting the configuration files) +
  • +
+ +
+

+ComponentParameterStatus + +

+ +

+ +(Appears on:ComponentParameter) + +

+
+ +

+ComponentParameterStatus defines the observed state of ComponentConfiguration +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation observed for this +ClusterDefinition. It corresponds to the ConfigConstraint’s generation, which is +updated by the API Server. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Provides detailed status information for opsRequest. +

+ +
+ +`configurationStatus`
+ + +[]ConfigTemplateItemDetailStatus + + + +
+ + +

+Provides the status of each component undergoing reconfiguration. +

+ +
+

+ComponentParameters +(`map[string]*string` alias) +

+ +

+ +(Appears on:ComponentParametersSpec) + +

+
+
+

+ComponentParametersSpec + +

+ +

+ +(Appears on:ParameterSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component. +

+ +
+ +`parameters`
+ + +ComponentParameters + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template or parameters. +

+ +
+ +`userConfigTemplates`
+ + +map[string]github.com/apecloud/kubeblocks/apis/parameters/v1alpha1.ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+

+ComponentReconfiguringStatus + +

+ +

+ +(Appears on:ParameterStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentName`
+ +string + + +
+ + +

+Specifies the name of the Component. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`parameterStatus`
+ + +[]ReconfiguringStatus + + + +
+ + +

+Describes the status of the component reconfiguring. +

+ +
+

+ConfigTemplateExtension + +

+ +

+ +(Appears on:ComponentParametersSpec, ConfigTemplateItemDetail, ReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`templateRef`
+ +string + + +
+ + +

+Specifies the name of the referenced configuration template ConfigMap object. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace of the referenced configuration template ConfigMap object. +An empty namespace is equivalent to the “default” namespace. +

+ +
+ +`policy`
+ + +MergedPolicy + + + +
+ +(Optional) + +

+Defines the strategy for merging externally imported templates into component templates. +

+ +
+

+ConfigTemplateItemDetail + +

+ +

+ +(Appears on:ComponentParameterSpec) + +

+
+ +

+ConfigTemplateItemDetail corresponds to settings of a configuration template (a ConfigMap). +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Defines the unique identifier of the configuration template. +

+ +

+It must be a string of maximum 63 characters, and can only include lowercase alphanumeric characters, +hyphens, and periods. +The name must start and end with an alphanumeric character. +

+ +
+ +`payload`
+ + +Payload + + + +
+ +(Optional) + +

+External controllers can trigger a configuration rerender by modifying this field. +

+ +

+Note: Currently, the `payload` field is opaque and its content is not interpreted by the system. +Modifying this field will cause a rerender, regardless of the specific content of this field. +

+ +
+ +`configSpec`
+ +github.com/apecloud/kubeblocks/apis/apps/v1.ComponentFileTemplate + + +
+ +(Optional) + +

+Specifies the name of the configuration template (a ConfigMap), ConfigConstraint, and other miscellaneous options. +

+ +

+The configuration template is a ConfigMap that contains multiple configuration files. +Each configuration file is stored as a key-value pair within the ConfigMap. +

+ +

+ConfigConstraint allows defining constraints and validation rules for configuration parameters. +It ensures that the configuration adheres to certain requirements and limitations. +

+ +
+ +`userConfigTemplates`
+ + +ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+ +`configFileParams`
+ + +map[string]github.com/apecloud/kubeblocks/apis/parameters/v1alpha1.ParametersInFile + + + +
+ +(Optional) + +

+Specifies the user-defined configuration parameters. +

+ +

+When provided, the parameter values in `configFileParams` override the default configuration parameters. +This allows users to override the default configuration according to their specific needs. +

+ +
+

+ConfigTemplateItemDetailStatus + +

+ +

+ +(Appears on:ComponentParameterStatus, ReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the configuration template. It is a required field and must be a string of maximum 63 characters. +The name should only contain lowercase alphanumeric characters, hyphens, or periods. It should start and end with an alphanumeric character. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`lastDoneRevision`
+ +string + + +
+ +(Optional) + +

+Represents the last completed revision of the configuration item. This field is optional. +

+ +
+ +`updateRevision`
+ +string + + +
+ +(Optional) + +

+Represents the updated revision of the configuration item. This field is optional. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. This field is optional. +

+ +
+ +`reconcileDetail`
+ + +ReconcileDetail + + + +
+ +(Optional) + +

+Provides detailed information about the execution of the configuration change. This field is optional. +

+ +
+

+DownwardAPIChangeTriggeredAction + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+ +

+DownwardAPIChangeTriggeredAction defines an action that triggers specific commands in response to changes in Pod labels. +For example, a command might be executed when the ‘role’ label of the Pod is updated. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the field. It must be a string of maximum length 63. +The name should match the regex pattern `^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$`. +

+ +
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the mount point of the Downward API volume. +

+ +
+ +`items`
+ + +[]Kubernetes core/v1.DownwardAPIVolumeFile + + + +
+ + +

+Represents a list of files under the Downward API volume. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be triggered when changes are detected in Downward API volume files. +It relies on the inotify mechanism in the config-manager sidecar to monitor file changes. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod. +The scripts are mounted as volumes and can be referenced and executed by the DownwardAction to perform specific tasks or configurations. +

+ +
+
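+
+For illustration, an action that runs a script when the Pod’s role label changes might be sketched as follows (the action name, mount point, and script path are hypothetical; `kubeblocks.io/role` is the role label used by KubeBlocks):
+
+downwardAPIChangeTriggeredActions:
+- name: role-changed
+  mountPoint: /etc/pod-info
+  items:
+  - path: role
+    fieldRef:
+      fieldPath: metadata.labels['kubeblocks.io/role']
+  command:
+  - /scripts/switch-role.sh
+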

+DynamicParameterSelectedPolicy +(`string` alias) +

+
+ +

+DynamicParameterSelectedPolicy determines how to select the parameters of dynamic reload actions +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"all" +

+
+ +
+ +

+"dynamic" +

+
+ +
+

+DynamicReloadType +(`string` alias) +

+
+ +

+DynamicReloadType defines reload method. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"auto" +

+
+ +
+ +

+"http" +

+
+ +
+ +

+"sql" +

+
+ +
+ +

+"exec" +

+
+ +
+ +

+"tpl" +

+
+ +
+ +

+"signal" +

+
+ +
+

+FileFormatConfig + +

+ +

+ +(Appears on:ComponentConfigDescription) + +

+
+ +

+FileFormatConfig specifies the format of the configuration file and any associated parameters +that are specific to the chosen format. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`FormatterAction`
+ + +FormatterAction + + + +
+ + +

+ +(Members of `FormatterAction` are embedded into this type.) + +

+(Optional) + +

+Each format may have its own set of parameters that can be configured. +For instance, when using the `ini` format, you can specify the section name. +

+ +
+ +`format`
+ + +CfgFileFormat + + + +
+ + +

+The config file format. Valid values are `ini`, `xml`, `yaml`, `json`, +`hcl`, `dotenv`, `properties` and `toml`. Each format has its own characteristics and use cases. +

+ + +
+

+FormatterAction + +

+ +

+ +(Appears on:FileFormatConfig) + +

+
+ +

+FormatterAction configures format-specific options for different configuration file formats. +Note: Only one of its members should be specified at any given time. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`iniConfig`
+ + +IniConfig + + + +
+ +(Optional) + +

+Holds options specific to the ‘ini’ file format. +

+ +
+

+IniConfig + +

+ +

+ +(Appears on:FormatterAction) + +

+
+ +

+IniConfig holds options specific to the ‘ini’ file format. +

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+ +`sectionName`
+ +string + + +
+ +(Optional) + +

+A string that describes the name of the ini section. +

+ +
+

+MergedPolicy +(`string` alias) +

+ +

+ +(Appears on:ConfigTemplateExtension) + +

+
+ +

+MergedPolicy defines how to merge externally imported templates into component templates. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"none" +

+
+ +
+ +

+"add" +

+
+ +
+ +

+"patch" +

+
+ +
+ +

+"replace" +

+
+ +
+

+ParamConfigRendererSpec + +

+ +

+ +(Appears on:ParamConfigRenderer) + +

+
+ +

+ParamConfigRendererSpec defines the desired state of ParamConfigRenderer +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`componentDef`
+ +string + + +
+ + +

+Specifies the ComponentDefinition custom resource (CR) that defines the Component’s characteristics and behavior. +

+ +
+ +`serviceVersion`
+ +string + + +
+ +(Optional) + +

+ServiceVersion specifies the version of the Service expected to be provisioned by this Component. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/). +If no version is specified, the latest available version will be used. +

+ +
+ +`parametersDefs`
+ +[]string + + +
+ +(Optional) + +

+Specifies the ParametersDefinition custom resource (CR) that defines the Component parameter’s schema and behavior. +

+ +
+ +`configs`
+ + +[]ComponentConfigDescription + + + +
+ +(Optional) + +

+Specifies the configuration files. +

+ +
+

+ParamConfigRendererStatus + +

+ +

+ +(Appears on:ParamConfigRenderer) + +

+
+ +

+ParamConfigRendererStatus defines the observed state of ParamConfigRenderer +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the ParamsDesc object that has been observed by the controller. +

+ +
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides additional information about the current phase. +

+ +
+ +`phase`
+ + +ParametersDescPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to PDAvailablePhase, the ParamsDesc can be referenced by ComponentDefinition. +

+ +
+

+ParameterDeletedMethod +(`string` alias) +

+ +

+ +(Appears on:ParameterDeletedPolicy) + +

+
+ +

+ParameterDeletedMethod defines how to handle parameter removal. +

+
+ + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"RestoreToDefault" +

+
+ +
+ +

+"Reset" +

+
+ +
+

+ParameterDeletedPolicy + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`deletedMethod`
+ + +ParameterDeletedMethod + + + +
+ + +

+Specifies the method to handle the deletion of a parameter. +If set to “RestoreToDefault”, the parameter will be restored to its default value, +which requires engine support, such as PostgreSQL. +If set to “Reset”, the parameter will be re-rendered through the configuration template. +

+ +
+ +`defaultValue`
+ +string + + +
+ +(Optional) + +

+Specifies the value to use if DeletedMethod is RestoreToDefault. +Example (PostgreSQL): +SET configuration_parameter TO DEFAULT; +

+ +
+
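+
+For illustration, a deleted-parameter policy might be sketched as follows (choose the method your engine supports):
+
+deletedPolicy:
+  deletedMethod: RestoreToDefault   # or "Reset" to re-render from the configuration template
+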

+ParameterPhase +(`string` alias) +

+ +

+ +(Appears on:ComponentParameterStatus, ComponentReconfiguringStatus, ConfigTemplateItemDetailStatus, ParameterStatus) + +

+
+ +

+ParameterPhase defines the Configuration FSM phase +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Creating" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"FailedAndPause" +

+
+ +
+ +

+"FailedAndRetry" +

+
+ +
+ +

+"Finished" +

+
+ +
+ +

+"Init" +

+
+ +
+ +

+"MergeFailed" +

+
+ +
+ +

+"Merged" +

+
+ +
+ +

+"Pending" +

+
+ +
+ +

+"Running" +

+
+ +
+ +

+"Upgrading" +

+
+ +
+

+ParameterSpec + +

+ +

+ +(Appears on:Parameter) + +

+
+ +

+ParameterSpec defines the desired state of Parameter +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`clusterName`
+ +string + + +
+ + +

+Specifies the name of the Cluster resource that this operation is targeting. +

+ +
+ +`componentParameters`
+ + +[]ComponentParametersSpec + + + +
+ + +

+Lists ComponentParametersSpec objects, each specifying a Component and its parameters and template updates. +

+ +
+

+ParameterStatus + +

+ +

+ +(Appears on:Parameter) + +

+
+ +

+ParameterStatus defines the observed state of Parameter +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`message`
+ +string + + +
+ +(Optional) + +

+Provides a description of any abnormal status. +

+ +
+ +`phase`
+ + +ParameterPhase + + + +
+ +(Optional) + +

+Indicates the current status of the configuration item. +

+ +

+Possible values include “Creating”, “Init”, “Running”, “Pending”, “Merged”, “MergeFailed”, “FailedAndPause”, +“Upgrading”, “Deleting”, “FailedAndRetry”, “Finished”. +

+ +
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+Represents the latest generation observed for this +ClusterDefinition. It corresponds to the ConfigConstraint’s generation, which is +updated by the API Server. +

+ +
+ +`componentReconfiguringStatus`
+ + +[]ComponentReconfiguringStatus + + + +
+ +(Optional) + +

+Records the status of a reconfiguring operation if `opsRequest.spec.type` equals “Reconfiguring”. +

+ +
+

+ParametersDefinitionSpec + +

+ +

+ +(Appears on:ParametersDefinition) + +

+
+ +

+ParametersDefinitionSpec defines the desired state of ParametersDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`fileName`
+ +string + + +
+ +(Optional) + +

+Specifies the config file name in the config template. +

+ +
+ +`parametersSchema`
+ + +ParametersSchema + + + +
+ +(Optional) + +

+Defines a list of parameters including their names, default values, descriptions, +types, and constraints (permissible values or the range of valid values). +

+ +
+ +`reloadAction`
+ + +ReloadAction + + + +
+ +(Optional) + +

+Specifies the dynamic reload (dynamic reconfiguration) actions supported by the engine. +When set, the controller executes the scripts defined in these actions to handle dynamic parameter updates. +

+ +

+Dynamic reloading is triggered only if both of the following conditions are met: +

+
    +
  1. +The modified parameters are listed in the `dynamicParameters` field. +If `dynamicParameterSelectedPolicy` is set to “all”, modifications to `staticParameters` +can also trigger a reload. +
  2. +
  3. +`reloadAction` is set. +
  4. +
+ +

+If `reloadAction` is not set or the modified parameters are not listed in `dynamicParameters`, +dynamic reloading will not be triggered. +

+ +

+Example: +

+
+
+dynamicReloadAction:
+ tplScriptTrigger:
+   namespace: kb-system
+   scriptConfigMapRef: mysql-reload-script
+   sync: true
+
+
+ +
+ +`downwardAPIChangeTriggeredActions`
+ + +[]DownwardAPIChangeTriggeredAction + + + +
+ +(Optional) + +

+Specifies a list of actions to execute specified commands based on Pod labels. +

+ +

+It utilizes the K8s Downward API to mount label information as a volume into the pod. +The ‘config-manager’ sidecar container watches for changes in the role label and dynamically invokes +registered commands (usually executing some SQL statements) when a change is detected. +

+ +

+It is designed for scenarios where: +

+
    +
  • +Replicas with different roles have different configurations, such as Redis primary & secondary replicas. +
  • +
  • +After a role switch (e.g., from secondary to primary), some changes in configuration are needed +to reflect the new role. +
  • +
+ +
+ +`deletedPolicy`
+ + +ParameterDeletedPolicy + + + +
+ +(Optional) + +

+Specifies the policy to apply when a parameter is removed. +

+ +
+ +`mergeReloadAndRestart`
+ +bool + + +
+ +(Optional) + +

+Indicates whether to consolidate dynamic reload and restart actions into a single restart. +

+
    +
  • +If true, updates requiring both actions will result in only a restart, merging the actions. +
  • +
  • +If false, updates will trigger both actions executed sequentially: first dynamic reload, then restart. +
  • +
+ +

+This flag allows for more efficient handling of configuration changes by potentially eliminating +an unnecessary reload step. +

+ +
+ +`reloadStaticParamsBeforeRestart`
+ +bool + + +
+ +(Optional) + +

+Configures whether the dynamic reload specified in `reloadAction` applies only to dynamic parameters or +to all parameters (including static parameters). +

+
    +
  • +false (default): Only modifications to the dynamic parameters listed in `dynamicParameters` +will trigger a dynamic reload. +
  • +
  • +true: Modifications to both dynamic parameters listed in `dynamicParameters` and static parameters +listed in `staticParameters` will trigger a dynamic reload. +The “all” option is for certain engines that require static parameters to be set +via SQL statements before they can take effect on restart. +
  • +
+ +
+ +`staticParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists static parameters. +Modifications to any of these parameters require a restart of the process to take effect. +

+ +
+ +`dynamicParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists dynamic parameters. +Modifications to these parameters trigger a configuration reload without requiring a process restart. +

+ +
+ +`immutableParameters`
+ +[]string + + +
+ +(Optional) + +

+Lists the parameters that cannot be modified once set. +Attempting to change any of these parameters will be ignored. +

+ +
+

+ParametersDefinitionStatus + +

+ +

+ +(Appears on:ParametersDefinition) + +

+
+ +

+ParametersDefinitionStatus defines the observed state of ParametersDefinition +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`observedGeneration`
+ +int64 + + +
+ +(Optional) + +

+The most recent generation number of the ParamsDesc object that has been observed by the controller. +

+ +
+ +`phase`
+ + +ParametersDescPhase + + + +
+ +(Optional) + +

+Specifies the status of the configuration template. +When set to PDAvailablePhase, the ParamsDesc can be referenced by ComponentDefinition. +

+ +
+ +`conditions`
+ + +[]Kubernetes meta/v1.Condition + + + +
+ +(Optional) + +

+Represents a list of detailed status of the ParametersDescription object. +

+ +

+This field is crucial for administrators and developers to monitor and respond to changes within the ParametersDescription. +It provides a history of state transitions and a snapshot of the current state that can be used for +automated logic or direct inspection. +

+ +
+

+ParametersDescPhase +(`string` alias) +

+ +

+ +(Appears on:ParamConfigRendererStatus, ParametersDefinitionStatus) + +

+
+ +

+ParametersDescPhase defines the ParametersDescription CR .status.phase +

+
+ + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"Available" +

+
+ +
+ +

+"Deleting" +

+
+ +
+ +

+"Unavailable" +

+
+ +
+

+ParametersInFile + +

+ +

+ +(Appears on:ConfigTemplateItemDetail, ReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`content`
+ +string + + +
+ +(Optional) + +

+Holds the configuration keys and values. This field is a workaround for issues found in kubebuilder and code-generator. +Refer to https://github.com/kubernetes-sigs/kubebuilder/issues/528 and https://github.com/kubernetes/code-generator/issues/50 for more details. +

+ +

+Represents the content of the configuration file. +

+ +
+ +`parameters`
+ +map[string]*string + + +
+ +(Optional) + +

+Represents the updated parameters for a single configuration file. +

+ +
+

+ParametersSchema + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+ +

+ParametersSchema Defines a list of configuration items with their names, default values, descriptions, +types, and constraints. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`topLevelKey`
+ +string + + +
+ +(Optional) + +

+Specifies the top-level key in the ‘configSchema.cue’ that organizes the validation rules for parameters. +This key must exist within the CUE script defined in ‘configSchema.cue’. +

+ +
+ +`cue`
+ +string + + +
+ +(Optional) + +

+Holds a string that contains a script written in the CUE language that defines a list of configuration items. +Each item is detailed with its name, default value, description, type (e.g. string, integer, float), +and constraints (permissible values or the valid range of values). +

+ +

+CUE (Configure, Unify, Execute) is a declarative language designed for defining and validating +complex data configurations. +It is particularly useful in environments like K8s where complex configurations and validation rules are common. +

+ +

+This script functions as a validator for user-provided configurations, ensuring compliance with +the established specifications and constraints. +

+ +
+ +`schemaInJSON`
+ + +Kubernetes api extensions v1.JSONSchemaProps + + + +
+ + +

+Generated from the ‘cue’ field and transformed into a JSON format. +

+ +
+
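+
+For illustration, a schema that validates a single parameter via CUE might be sketched as follows (the top-level key, parameter name, and constraint are hypothetical):
+
+parametersSchema:
+  topLevelKey: MysqlParameter
+  cue: |
+    #MysqlParameter: {
+      max_connections?: int & >=1 & <=100000
+      ...
+    }
+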

+Payload +(`map[string]encoding/json.RawMessage` alias) +

+ +

+ +(Appears on:ConfigTemplateItemDetail) + +

+
+ +

+Payload holds the payload data. This field is optional and can contain any type of data. +Not included in the JSON representation of the object. +

+
+

+ReconcileDetail + +

+ +

+ +(Appears on:ConfigTemplateItemDetailStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`policy`
+ +string + + +
+ +(Optional) + +

+Represents the policy applied during the most recent execution. +

+ +
+ +`execResult`
+ +string + + +
+ +(Optional) + +

+Represents the outcome of the most recent execution. +

+ +
+ +`currentRevision`
+ +string + + +
+ +(Optional) + +

+Represents the current revision of the configuration item. +

+ +
+ +`succeedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the number of pods where configuration changes were successfully applied. +

+ +
+ +`expectedCount`
+ +int32 + + +
+ +(Optional) + +

+Represents the total number of pods that require execution of configuration changes. +

+ +
+ +`errMessage`
+ +string + + +
+ +(Optional) + +

+Represents the error message generated when the execution of configuration changes fails. +

+ +
+

+ReconfiguringStatus + +

+ +

+ +(Appears on:ComponentReconfiguringStatus) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ConfigTemplateItemDetailStatus`
+ + +ConfigTemplateItemDetailStatus + + + +
+ + +

+ +(Members of `ConfigTemplateItemDetailStatus` are embedded into this type.) + +

+ +
+ +`updatedParameters`
+ + +map[string]github.com/apecloud/kubeblocks/apis/parameters/v1alpha1.ParametersInFile + + + +
+ +(Optional) + +

+Contains the updated parameters. +

+ +
+ +`userConfigTemplates`
+ + +ConfigTemplateExtension + + + +
+ +(Optional) + +

+Specifies the user-defined configuration template. +

+ +

+When provided, the `importTemplateRef` overrides the default configuration template +specified in `configSpec.templateRef`. +This allows users to customize the configuration template according to their specific requirements. +

+ +
+

+ReloadAction + +

+ +

+ +(Appears on:ParametersDefinitionSpec) + +

+
+ +

+ReloadAction defines the mechanisms available for dynamically reloading a process within K8s without requiring a restart. +

+ +

+Only one of the mechanisms can be specified at a time. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`unixSignalTrigger`
+ + +UnixSignalTrigger + + + +
+ +(Optional) + +

+Used to trigger a reload by sending a specific Unix signal to the process. +

+ +
+ +`shellTrigger`
+ + +ShellTrigger + + + +
+ +(Optional) + +

+Allows executing a custom shell script to reload the process. +

+ +
+ +`tplScriptTrigger`
+ + +TPLScriptTrigger + + + +
+ +(Optional) + +

+Enables reloading the process using a Go template script. +

+ +
+ +`autoTrigger`
+ + +AutoTrigger + + + +
+ +(Optional) + +

+Automatically perform the reload when specified conditions are met. +

+ +
+ +`targetPodSelector`
+ + +Kubernetes meta/v1.LabelSelector + + + +
+ +(Optional) + +

+Used to match labels on the pod to determine whether a dynamic reload should be performed. +

+ +

+In some scenarios, only specific pods (e.g., primary replicas) need to undergo a dynamic reload. +The `reloadedPodSelector` allows you to specify label selectors to target the desired pods for the reload process. +

+ +

+If the `reloadedPodSelector` is not specified or is nil, all pods managed by the workload will be considered for the dynamic +reload. +

+ +
+
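+
+For illustration, a reload action that sends SIGHUP to the server process, limited to primary pods via `targetPodSelector`, might be sketched as follows (the process name and label value are hypothetical):
+
+reloadAction:
+  unixSignalTrigger:
+    signal: SIGHUP
+    processName: mysqld
+  targetPodSelector:
+    matchLabels:
+      kubeblocks.io/role: primary
+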

+ReloadPolicy +(`string` alias) +

+
+ +

+ReloadPolicy defines the policy of reconfiguring. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"asyncReload" +

+
+ +
+ +

+"dynamicReloadBeginRestart" +

+
+ +
+ +

+"none" +

+
+ +
+ +

+"restartContainer" +

+
+ +
+ +

+"restart" +

+
+ +
+ +

+"rolling" +

+
+ +
+ +

+"syncReload" +

+
+ +
+

+RerenderResourceType +(`string` alias) +

+ +

+ +(Appears on:ComponentConfigDescription) + +

+
+ +

+RerenderResourceType defines the types of operations that trigger re-rendering of a component’s configuration. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"hscale" +

+
+ +
+ +

+"tls" +

+
+ +
+ +

+"vscale" +

+
+ +
+ +

+"shardingHScale" +

+
+ +
+

+ScriptConfig + +

+ +

+ +(Appears on:DownwardAPIChangeTriggeredAction, ShellTrigger, TPLScriptTrigger) + +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`scriptConfigMapRef`
+ +string + + +
+ + +

+Specifies the reference to the ConfigMap containing the scripts. +

+ +
+ +`namespace`
+ +string + + +
+ +(Optional) + +

+Specifies the namespace for the ConfigMap. +If not specified, it defaults to the “default” namespace. +

+ +
+

+ShellTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+ShellTrigger allows executing a custom shell script to reload the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`command`
+ +[]string + + +
+ + +

+Specifies the command to execute in order to reload the process. It should be a valid shell command. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines the synchronization mode of parameter updates with “config-manager”. +

+
    +
  • +‘True’: Executes reload actions synchronously, pausing until completion. +
  • +
  • +‘False’: Executes reload actions asynchronously, without waiting for completion. +
  • +
+ +
+ +`batchReload`
+ +bool + + +
+ +(Optional) + +

+Controls whether parameter updates are processed individually or collectively in a batch: +

+
    +
  • +‘True’: Processes all changes in one batch reload. +
  • +
  • +‘False’: Processes each change individually. +
  • +
+ +

+Defaults to ‘False’ if unspecified. +

+ +
+ +`batchParamsFormatterTemplate`
+ +string + + +
+ +(Optional) + +

+Specifies a Go template string for formatting batch input data. +It’s used when `batchReload` is ‘True’ to format data passed into STDIN of the script. +The template accesses key-value pairs of updated parameters via the ‘$’ variable. +This allows for custom formatting of the input data. +

+ +

+Example template: +

+
+
+batchParamsFormatterTemplate: |-
+{{- range $pKey, $pValue := $ }}
+{{ printf "%s:%s" $pKey $pValue }}
+{{- end }}
+
+
+ +

+This example generates batch input data in a key:value format, sorted by keys. +

+
+
+key1:value1
+key2:value2
+key3:value3
+
+
+ +

+If not specified, the default format is key=value, sorted by keys, for each updated parameter. +

+
+
+key1=value1
+key2=value2
+key3=value3
+
+
+ +
+ +`toolsSetup`
+ + +ToolsSetup + + + +
+ +(Optional) + +

+Specifies the tools container image used by ShellTrigger for dynamic reload. +If the dynamic reload action is triggered by a ShellTrigger, this field is required. +This image must contain all necessary tools for executing the ShellTrigger scripts. +

+ +

+Usually the specified image is referenced by the init container, +which is then responsible for copying the tools from the image to a bin volume. +This ensures that the tools are available to the ‘config-manager’ sidecar. +

+ +
+ +`scriptConfig`
+ + +ScriptConfig + + + +
+ +(Optional) + +

+ScriptConfig object specifies a ConfigMap that contains script files that should be mounted inside the pod.
+The scripts are mounted as volumes and can be referenced and executed by the dynamic reload process.

+ +
+
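+
+Putting the fields together, a hedged sketch of a ShellTrigger inside a reload
+action (the command and ConfigMap name are illustrative assumptions, not a
+definitive configuration; the tools image is reused from the ToolsSetup example below):
+
+shellTrigger:
+  command:                     # illustrative reload command
+    - sh
+    - /scripts/reload.sh
+  sync: true                   # execute the reload synchronously
+  batchReload: false           # apply each change individually
+  toolsSetup:
+    mountPoint: /kb_tools
+    toolConfigs:
+      - name: kb-tools
+        image: docker.io/apecloud/obtools   # image reused from the ToolsSetup example below
+        command:
+          - cp
+          - /bin/ob-tools
+          - /kb_tools/obtools
+  scriptConfig:
+    scriptConfigMapRef: reload-scripts      # hypothetical ConfigMap name
+    namespace: kb-system
+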

+SignalType +(`string` alias) +

+ +

+ +(Appears on:UnixSignalTrigger) + +

+
+ +

+SignalType defines which signals are valid. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueDescription
+ +

+"SIGABRT" +

+
+ +
+ +

+"SIGALRM" +

+
+ +
+ +

+"SIGBUS" +

+
+ +
+ +

+"SIGCHLD" +

+
+ +
+ +

+"SIGCONT" +

+
+ +
+ +

+"SIGFPE" +

+
+ +
+ +

+"SIGHUP" +

+
+ +
+ +

+"SIGILL" +

+
+ +
+ +

+"SIGINT" +

+
+ +
+ +

+"SIGIO" +

+
+ +
+ +

+"SIGKILL" +

+
+ +
+ +

+"SIGPIPE" +

+
+ +
+ +

+"SIGPROF" +

+
+ +
+ +

+"SIGPWR" +

+
+ +
+ +

+"SIGQUIT" +

+
+ +
+ +

+"SIGSEGV" +

+
+ +
+ +

+"SIGSTKFLT" +

+
+ +
+ +

+"SIGSTOP" +

+
+ +
+ +

+"SIGSYS" +

+
+ +
+ +

+"SIGTERM" +

+
+ +
+ +

+"SIGTRAP" +

+
+ +
+ +

+"SIGTSTP" +

+
+ +
+ +

+"SIGTTIN" +

+
+ +
+ +

+"SIGTTOU" +

+
+ +
+ +

+"SIGURG" +

+
+ +
+ +

+"SIGUSR1" +

+
+ +
+ +

+"SIGUSR2" +

+
+ +
+ +

+"SIGVTALRM" +

+
+ +
+ +

+"SIGWINCH" +

+
+ +
+ +

+"SIGXCPU" +

+
+ +
+ +

+"SIGXFSZ" +

+
+ +
+

+TPLScriptTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+TPLScriptTrigger enables the reload process using a Go template script.

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`ScriptConfig`
+ + +ScriptConfig + + + +
+ + +

+ +(Members of `ScriptConfig` are embedded into this type.) + +

+ +

+Specifies the ConfigMap that contains the script to be executed for reload. +

+ +
+ +`sync`
+ +bool + + +
+ +(Optional) + +

+Determines whether parameter updates should be synchronized with the “config-manager”. +Specifies the controller’s reload strategy: +

+
    +
+  • If set to ‘True’, the controller executes the reload action in synchronous mode,
+    pausing execution until the reload completes.
+  • If set to ‘False’, the controller executes the reload action in asynchronous mode,
+    updating the ConfigMap without waiting for the reload process to finish.
+ +
+
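+
+A hedged sketch of a TPLScriptTrigger (the ConfigMap name is hypothetical; the
+embedded ScriptConfig members appear inline):
+
+tplScriptTrigger:
+  scriptConfigMapRef: mysql-reload-tpl   # hypothetical ConfigMap name
+  namespace: kb-system
+  sync: true                             # wait for the reload to complete
+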

+ToolConfig + +

+ +

+ +(Appears on:ToolsSetup) + +

+
+ +

+ToolConfig specifies the settings of an init container that prepares tools for dynamic reload.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`name`
+ +string + + +
+ + +

+Specifies the name of the init container. +

+ +
+ +`asContainerImage`
+ +bool + + +
+ +(Optional) + +

+Indicates whether the tool image should be used as the container image for a sidecar. +This is useful for large tool images, such as those for C++ tools, which may depend on +numerous libraries (e.g., *.so files). +

+ +

+If enabled, the tool image is deployed as a sidecar container image. +

+ +

+Examples: +

+
+
+ toolsSetup:
+   mountPoint: /kb_tools
+   toolConfigs:
+     - name: kb-tools
+       asContainerImage: true
+       image: apecloud/oceanbase:4.2.0.0-100010032023083021
+
+
+ +

+Generated containers:

+
+
+initContainers:
+ - name: install-config-manager-tool
+   image: apecloud/kubeblocks-tools:${version}
+   command:
+   - cp
+   - /bin/config_render
+   - /opt/tools
+   volumeMounts:
+   - name: kb-tools
+     mountPath: /opt/tools
+containers:
+ - name: config-manager
+   image: apecloud/oceanbase:4.2.0.0-100010032023083021
+   imagePullPolicy: IfNotPresent
+   command:
+   - /opt/tools/reloader
+   - --log-level
+   - info
+   - --operator-update-enable
+   - --tcp
+   - "9901"
+   - --config
+   - /opt/config-manager/config-manager.yaml
+   volumeMounts:
+   - name: kb-tools
+     mountPath: /opt/tools
+
+
+ +
+ +`image`
+ +string + + +
+ +(Optional) + +

+Specifies the tool container image. +

+ +
+ +`command`
+ +[]string + + +
+ +(Optional) + +

+Specifies the command to be executed by the init container. +

+ +
+

+ToolsSetup + +

+ +

+ +(Appears on:ShellTrigger) + +

+
+ +

+ToolsSetup prepares the tools that ShellTrigger uses for dynamic reload from a specified container image.

+ +

+Example: +

+
+
+
+toolsSetup:
+  mountPoint: /kb_tools
+  toolConfigs:
+    - name: kb-tools
+      command:
+        - cp
+        - /bin/ob-tools
+        - /kb_tools/obtools
+      image: docker.io/apecloud/obtools
+
+
+ +

+This example copies the “/bin/ob-tools” binary from the image to “/kb_tools/obtools”. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`mountPoint`
+ +string + + +
+ + +

+Specifies the directory path in the container where the tools-related files are to be copied. +This field is typically used with an emptyDir volume to ensure a temporary, empty directory is provided at pod creation. +

+ +
+ +`toolConfigs`
+ + +[]ToolConfig + + + +
+ +(Optional) + +

+Specifies a list of init container settings used to prepare tools for dynamic reload.

+ +
+

+UnixSignalTrigger + +

+ +

+ +(Appears on:ReloadAction) + +

+
+ +

+UnixSignalTrigger is used to trigger a reload by sending a specific Unix signal to the process. +

+
+ + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ +`signal`
+ + +SignalType + + + +
+ + +

+Specifies a valid Unix signal to be sent. +For a comprehensive list of all Unix signals, see: ../../pkg/configuration/configmap/handler.go:allUnixSignals +

+ +
+ +`processName`
+ +string + + +
+ + +

+Identifies the name of the process to which the Unix signal will be sent. +

+ +
+
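+
+A minimal sketch of a UnixSignalTrigger (the process name is illustrative):
+
+unixSignalTrigger:
+  signal: SIGHUP                 # any value from the SignalType table above
+  processName: postgres          # illustrative process name
+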
+ +

+ +Generated with `gen-crd-api-reference-docs` + +

\ No newline at end of file
diff --git a/docs/zh/preview/user_docs/references/install-addons.mdx b/docs/zh/preview/user_docs/references/install-addons.mdx
new file mode 100644
index 00000000..c9c7312d
--- /dev/null
+++ b/docs/zh/preview/user_docs/references/install-addons.mdx
@@ -0,0 +1,170 @@
+---
+description: 使用 Helm 安装 KubeBlocks 插件
+keywords:
+- addon
+- helm
+- KubeBlocks
+sidebar_label: 安装插件
+sidebar_position: 3
+title: 安装插件
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 安装插件
+
+随着 KubeBlocks v0.8.0 的发布,插件已从 KubeBlocks 中解耦,部分插件默认不再安装。如需使用这些插件,需先通过索引安装。若您卸载了某些插件,可按照本教程步骤重新安装。
+
+本教程以 elasticsearch 为例,您可将 elasticsearch 替换为您需要的插件名称。
+
+官方索引仓库为 [KubeBlocks index](https://github.com/apecloud/block-index),插件维护在 [KubeBlocks Addon repo](https://github.com/apecloud/kubeblocks-addons)。
+
+:::important
+
+请确保插件与 KubeBlocks 的主版本号一致。
+
+例如,您可以在 KubeBlocks v0.9.2 上安装 v0.9.0 版本的插件,但若主版本不匹配(如在 KubeBlocks v0.9.2 上使用 v0.8.0 版插件)可能导致错误。
+
+:::
+
+
+
+
+
+1. (可选)添加 KubeBlocks 仓库。若已通过 Helm 安装 KubeBlocks,直接运行 `helm repo update` 即可。
+
+    ```bash
+    helm repo add kubeblocks https://apecloud.github.io/helm-charts
+    helm repo update
+    ```
+
+2. 查看插件版本。
+
+    ```bash
+    helm search repo kubeblocks/elasticsearch --devel --versions
+    ```
+
+3. 安装插件(以 elasticsearch 为例)。通过 `--version` 指定版本。
+
+    ```bash
+    helm install kb-addon-es kubeblocks/elasticsearch --namespace kb-system --create-namespace --version
+    ```
+
+4. 验证插件是否安装成功。
+
+    当 STATUS 显示为 `deployed` 时表示安装成功。
+
+    ```bash
+    helm list -A
+    >
+    NAME          NAMESPACE   REVISION   UPDATED                                STATUS     CHART                 APP VERSION
+    ...
+    kb-addon-es   kb-system   1          2024-11-27 10:04:59.730127 +0800 CST   deployed   elasticsearch-0.9.0   8.8.2
+    ```
+
+5. (可选)可通过以下命令卸载插件。
+
+    若已创建相关集群,请先删除集群。
+
+    ```bash
+    helm uninstall kb-addon-es --namespace kb-system
+    ```
+
+
+
+
+
+1. 查看索引。
+
+    kbcli 默认创建名为 `kubeblocks` 的索引,可通过 `kbcli addon index list` 确认索引是否存在。
+
+    ```bash
+    kbcli addon index list
+    >
+    INDEX        URL
+    kubeblocks   https://github.com/apecloud/block-index.git
+    ```
+
+    若列表为空或需添加自定义索引,可通过 `kbcli addon index add <索引名称> <源地址>` 手动添加。例如:
+
+    ```bash
+    kbcli addon index add kubeblocks https://github.com/apecloud/block-index.git
+    ```
+
+    若不确定索引是否为最新版本,可执行更新:
+
+    ```bash
+    kbcli addon index update kubeblocks
+    ```
+
+2. (可选)搜索插件是否存在于索引中。
+
+    ```bash
+    kbcli addon search elasticsearch
+    >
+    ADDON           VERSION   INDEX
+    elasticsearch   0.7.0     kubeblocks
+    elasticsearch   0.7.1     kubeblocks
+    elasticsearch   0.7.2     kubeblocks
+    elasticsearch   0.8.0     kubeblocks
+    elasticsearch   0.9.0     kubeblocks
+    ```
+
+3. 安装插件。
+
+    若插件存在多个索引源或版本,可通过 `--index` 指定索引,`--version` 指定版本。默认会安装 `kubeblocks` 索引中的最新版本。
+
+    ```bash
+    kbcli addon install elasticsearch --index kubeblocks --version
+    ```
+
+4. 验证插件是否安装成功。
+
+    当 STATUS 显示为 `Enabled` 时表示安装成功。
+
+    ```bash
+    kbcli addon list
+    >
+    NAME            VERSION   PROVIDER   STATUS    AUTO-INSTALL
+    elasticsearch   0.9.0     apecloud   Enabled   true
+    ```
+
+5. (可选)可通过以下命令禁用插件。
+
+    若已创建相关集群,请先删除集群。
+
+    ```bash
+    kbcli addon disable elasticsearch
+    ```
+
+    或通过以下命令完全卸载插件:
+
+    ```bash
+    kbcli addon uninstall elasticsearch
+    ```
+
+:::note
+
+kbcli 支持启用/禁用插件。您可根据实际需求操作。
+此外,通过 kbcli 安装 KubeBlocks 时,部分插件默认安装但处于禁用状态(状态显示为 `Disabled`)。可通过 kbcli 启用它们。例如:
+
+:::
+
+ 6. 
禁用/启用插件 + + * 启用插件 + + ```bash + kbcli addon enable qdrant + ``` + + * 禁用插件 + + ```bash + kbcli addon disable qdrant + ``` + +操作完成后,可通过插件列表确认状态是否按预期变更。 + + + + \ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/install-kbcli.mdx b/docs/zh/preview/user_docs/references/install-kbcli.mdx new file mode 100644 index 00000000..f7ce342a --- /dev/null +++ b/docs/zh/preview/user_docs/references/install-kbcli.mdx @@ -0,0 +1,285 @@ +--- +description: KubeBlocks集群管理工具kbcli的安装、验证与配置完整指南 +keywords: +- kbcli +- KubeBlocks CLI +- installation +- configuration +- command-line +sidebar_label: 安装 kbcli +sidebar_position: 4 +title: 安装并配置 KubeBlocks CLI (kbcli) +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# KubeBlocks 命令行工具 (kbcli) + +kbcli 是管理 KubeBlocks 集群的官方命令行工具,提供以下功能: +- 集群生命周期管理(创建、扩缩容、删除) +- 配置和故障排查工具 +- 版本兼容性检查 +- Shell 自动补全支持 + +## 先决条件 + +安装 kbcli 前,请确保系统满足以下要求: + +- **所有平台**: + - 可访问网络以下载安装包 + - 管理员/sudo 权限 +- **Windows**: + - PowerShell 5.0 或更高版本 +- **macOS/Linux**: + - 已安装 curl 或 wget + - macOS 需安装 Homebrew(用于 brew 安装方式) + +## 安装 kbcli + +**支持平台** + +kbcli 支持以下平台: +- **macOS**(Intel 和 Apple Silicon 芯片) +- **Windows**(x86-64 架构) +- **Linux**(x86-64 和 ARM64 架构) + + + + +选择您偏好的安装方式: + +- **curl**(推荐大多数用户使用) +- **Homebrew**(macOS 包管理器) + +**选项 1:使用 curl 安装** + +安装最新稳定版: + +```bash +curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash +``` + +安装特定版本: + +1. 查看 [kbcli 发布页](https://github.com/apecloud/kbcli/releases/) 获取可用版本 +2. 使用 `-s` 参数指定版本后运行以下命令 + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s + ``` + +:::note +**版本兼容性说明** + +- kbcli 版本需与 KubeBlocks 部署版本匹配 +- 新安装建议使用最新稳定版 +- 现有部署需严格版本匹配: + - KubeBlocks v1.0.0 → kbcli v1.0.x + - KubeBlocks v0.9.x → kbcli v0.9.x +- 版本不匹配可能导致操作异常 +::: + +2. 运行 `kbcli version` 检查版本号,确认安装成功 + +:::tip +**故障排查** +若安装失败: +1. 检查网络连接 +2. 验证防火墙/代理设置 +::: + +**选项 2:使用 Homebrew 安装** + +1. 添加 ApeCloud 的 Homebrew 仓库 + + ```bash + brew tap apecloud/tap + ``` + +2. 安装 kbcli + + ```bash + brew install kbcli + ``` + + 如需安装指定版本: + + ```bash + # 查看可用版本 + brew search kbcli + + # 指定版本安装 + brew install kbcli@ + ``` + +3. 验证安装是否成功 + + ```bash + kbcli -h + ``` + + + + + +选择安装方式: + +**选项 1:脚本安装(推荐)** + +:::note + +默认安装路径为 C:\Program Files\kbcli-windows-amd64 且不可修改 + +如需自定义路径,请使用 zip 安装包 + +::: + +1. 以**管理员身份**运行 PowerShell 并执行 `Set-ExecutionPolicy Unrestricted` +2. 安装 kbcli + + 以下脚本会自动在 C:\Program Files\kbcli-windows-amd64 添加环境变量 + + ```bash + powershell -Command " & ([scriptblock]::Create((iwr https://www.kubeblocks.io/installer/install_cli.ps1)))" + ``` + + 安装指定版本时,在命令后添加 `-v` 参数: + + ```bash + powershell -Command " & ([scriptblock]::Create((iwr https://www.kubeblocks.io/installer/install_cli.ps1))) -v 0.5.2" + ``` + +**选项 2:手动安装** + +1. 从 [kbcli 发布页](https://github.com/apecloud/kbcli/releases/) 下载 zip 安装包 +2. 解压后添加至环境变量: + 1. 点击 Windows 图标选择**系统设置** + 2. 进入**设置** -> **相关设置** -> **高级系统设置** + 3. 在**高级**选项卡点击**环境变量** + 4. 在用户变量和系统变量中**新建**添加 kbcli 安装路径 + 5. 点击**应用**并**确定** + + + + + +使用 `curl` 安装: + +1. 安装 kbcli + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash + ``` + + 安装特定版本: + + 1. 查看 [kbcli 发布页](https://github.com/apecloud/kbcli/releases/) 获取可用版本 + 2. 使用 `-s` 参数指定版本: + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s + ``` + +2. 
运行 `kbcli version` 检查版本号,确认安装成功 + +:::note + +- 如安装过程出现超时异常,请检查网络设置后重试 + +::: + + + + +## 卸载 + +完全移除 kbcli 及其配置: + + + + + +`curl` 安装方式执行: + +```bash +sudo rm /usr/local/bin/kbcli +``` + +`brew` 安装方式执行: + +```bash +brew uninstall kbcli +``` + +kbcli 会在 HOME 目录下创建隐藏文件夹 `~/.kbcli` 存储配置信息和临时文件,卸载后可删除此文件夹 + + + + + +1. 前往 `kbcli` 安装路径删除文件夹: + * 脚本安装方式:删除 `C:\Program Files\kbcli-windows-amd64` + * 自定义路径:删除对应安装文件夹 + +2. 删除环境变量: + 1. 点击 Windows 图标进入**系统** + 2. 进入**设置** -> **相关设置** -> **高级系统设置** + 3. 在**高级**选项卡点击**环境变量** + 4. 在**用户变量**或**系统变量**列表中双击 **Path**: + * 脚本安装方式:双击**用户变量**中的 Path + * 自定义路径:根据之前创建位置选择对应 Path + 5. 选择 `C:\Program Files\kbcli-windows-amd64` 或自定义路径删除(需二次确认) + +3. 删除 `.kbcli` 文件夹: + kbcli 会在 C:\Users\用户名 目录下创建 `.kbcli` 文件夹存储配置信息,卸载后可删除 + + + + + +使用 `curl` 安装方式的卸载命令: + +```bash +sudo rm /usr/local/bin/kbcli +``` + +kbcli 会在 HOME 目录下创建隐藏文件夹 `~/.kbcli` 存储配置信息,卸载后可删除此文件夹 + + + + + +## Shell 自动补全 + +kbcli 支持以下 shell 的自动补全: +- bash +- zsh +- fish +- PowerShell + +```bash +# 将 SHELL-TYPE 替换为 bash/fish/PowerShell/zsh 之一 +kbcli completion SHELL-TYPE -h +``` + +例如为 zsh 启用自动补全: + +***操作步骤:*** + +1. 查看使用指南 + + ```bash + kbcli completion zsh -h + ``` + +2. 先启用终端的补全功能 + + ```bash + echo "autoload -U compinit; compinit" >> ~/.zshrc + ``` + +3. 启用 `kbcli` 自动补全功能 + + ```bash + echo "source <(kbcli completion zsh); compdef _kbcli kbcli" >> ~/.zshrc + ``` \ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/install-minio.mdx b/docs/zh/preview/user_docs/references/install-minio.mdx new file mode 100644 index 00000000..91dc5ee6 --- /dev/null +++ b/docs/zh/preview/user_docs/references/install-minio.mdx @@ -0,0 +1,55 @@ +--- +description: KubeBlocks集群管理之MinIO安装完整指南 +keywords: +- MinIO +- installation +- configuration +- command-line +sidebar_label: 安装 MinIO +sidebar_position: 10 +title: 安装 MinIO +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + +# 安装 MinIO + +如果您没有云服务商提供的对象存储服务,可以在 Kubernetes 中部署开源服务 MinIO 并用于配置 BackupRepo。 + +1. 在 `kb-system` 命名空间安装 MinIO。 + + ```bash + helm install minio oci://registry-1.docker.io/bitnamicharts/minio --namespace kb-system --create-namespace --set "extraEnvVars[0].name=MINIO_BROWSER_LOGIN_ANIMATION" --set "extraEnvVars[0].value=off" --version 14.10.5 + ``` + + 获取初始用户名和密码: + + ```bash + # 初始用户名 + echo $(kubectl get secret --namespace kb-system minio -o jsonpath="{.data.root-user}" | base64 -d) + + # 初始密码 + echo $(kubectl get secret --namespace kb-system minio -o jsonpath="{.data.root-password}" | base64 -d) + ``` + +2. 生成访问凭证。 + + 执行 `kubectl port-forward --namespace kb-system svc/minio 9001:9001` 后访问 `127.0.0.1:9001` 进入登录页面。 + + 登录仪表板后,可以生成 `access key` 和 `secret key`。 + + ![backup-and-restore-backup-repo-1](/img/docs/en/backup-and-restore-backup-repo-1.png) + +3. 
创建存储桶。 + + 为测试创建一个名为 `test-minio` 的存储桶。 + + ![backup-and-restore-backup-repo-2](/img/docs/en/backup-and-restore-backup-repo-2.png) + ![backup-and-restore-backup-repo-3](/img/docs/en/backup-and-restore-backup-repo-3.png) + + :::note + + 安装的 MinIO 访问地址(endpoint)为 `http://minio.kb-system.svc.cluster.local:9000`。其中 `kb-system` 是安装 MinIO 的命名空间名称。 + + ::: \ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/install-snapshot-controller.mdx b/docs/zh/preview/user_docs/references/install-snapshot-controller.mdx new file mode 100644 index 00000000..108e65c5 --- /dev/null +++ b/docs/zh/preview/user_docs/references/install-snapshot-controller.mdx @@ -0,0 +1,78 @@ +--- +description: 安装快照控制器 +keywords: +- kbcli +- kubeblocks +- SnapshotController +- K8s +- CSI +sidebar_label: 快照控制器 +sidebar_position: 5 +title: 安装快照控制器 +--- +# 安装快照控制器 + +快照控制器(Snapshot Controller)用于管理CSI存储卷快照,支持创建、恢复和删除持久卷(PV)快照。KubeBlocks的数据保护控制器(DataProtection Controller)依赖该组件实现数据库快照操作。 + +**步骤1:检查先决条件** +验证所需CRD是否存在: + +```bash +kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io +kubectl get crd volumesnapshots.snapshot.storage.k8s.io +kubectl get crd volumesnapshotcontents.snapshot.storage.k8s.io +``` + +如果集群中缺少这些CRD,需要先安装: + +```bash +# v8.2.0是external-snapshotter的最新版本,可按需替换为其他版本 +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml +``` + +:::note + +**可选安装** + +如果不需要快照备份功能,可以仅安装CRD并跳过后续步骤。 + +::: + +**步骤2:部署快照控制器** + +通过Helm执行安装: + +```bash +helm repo add piraeus-charts https://piraeus.io/helm-charts/ +helm repo update +# 将命名空间修改为适合您环境的值(例如kb-system) +helm install snapshot-controller piraeus-charts/snapshot-controller -n kb-system --create-namespace +``` + +高级配置选项请参考[快照控制器文档](https://artifacthub.io/packages/helm/piraeus-charts/snapshot-controller#configuration)。 + +**步骤3:验证部署** + +检查快照控制器Pod是否正常运行: + +```bash +kubectl get pods -n kb-system | grep snapshot-controller +``` + +
+ +预期输出 + +```bash +snapshot-controller-xxxx-yyyy 1/1 Running 0 30s +``` + +
+ +如果Pod处于CrashLoopBackOff状态,可查看日志: + +```bash +kubectl logs -n kb-system deployment/snapshot-controller +``` \ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/kubeblocks_options.mdx b/docs/zh/preview/user_docs/references/kubeblocks_options.mdx new file mode 100644 index 00000000..68cdfa66 --- /dev/null +++ b/docs/zh/preview/user_docs/references/kubeblocks_options.mdx @@ -0,0 +1,211 @@ +--- +description: KubeBlocks 选项与角色 +keywords: +- kubeblocks +- options +- roles +sidebar_position: 8 +title: KubeBlocks 选项与角色 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# KubeBlocks 选项与角色 + +## KubeBlocks 配置选项 + +### KubeBlocks 基础选项 +| 参数 | 描述 | 默认值 | +|----------|------|--------| +| image.registry | KubeBlocks 镜像仓库地址 | apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com | +| image.repository | KubeBlocks 镜像仓库名称 | apecloud/kubeblocks | +| image.pullPolicy | 镜像拉取策略 | IfNotPresent | +| image.tag | 镜像标签,默认使用 chart 的 appVersion | "" | +| image.imagePullSecrets | 镜像拉取密钥 | [] | +| image.tools.repository | 工具镜像仓库名称 | apecloud/kubeblocks-tools | +| replicaCount | 副本数量 | 1 | +| reconcileWorkers | 协调工作线程数 | "" | + +### 数据保护选项 +| 参数 | 描述 | 默认值 | +|----------|------|--------| +| dataProtection.enabled | 启用数据保护控制器 | true | +| dataProtection.leaderElectId | 数据保护领导者选举ID | "" | +| dataProtection.encryptionKey | 备份加密密钥 | "" | +| dataProtection.encryptionKeySecretKeyRef.name | 加密密钥 Secret 名称 | "" | +| dataProtection.encryptionKeySecretKeyRef.key | 加密密钥 Secret 键名 | "" | +| dataProtection.encryptionKeySecretKeyRef.skipValidation | 跳过密钥验证 | false | +| dataProtection.enableBackupEncryption | 启用备份加密 | false | +| dataProtection.backupEncryptionAlgorithm | 备份加密算法,可选 "AES-128-CFB"、"AES-192-CFB"、"AES-256-CFB" | "" | +| dataProtection.gcFrequencySeconds | 垃圾回收频率(秒) | 3600 | +| dataProtection.reconcileWorkers | 备份控制器并发数 | "" | +| dataProtection.image.registry | 数据保护镜像仓库地址 | "" | +| dataProtection.image.repository | 数据保护镜像仓库名称 | | +| dataProtection.image.pullPolicy | 镜像拉取策略 | IfNotPresent | +| dataProtection.image.tag | 镜像标签 | "" | +| dataProtection.image.imagePullSecrets | 镜像拉取密钥 | [] | +| dataProtection.image.datasafed.repository | Datasafed 镜像仓库名称 | apecloud/datasafed | +| dataProtection.image.datasafed.tag | Datasafed 镜像标签 | 0.2.0 | + +### 备份仓库选项 +| 参数 | 描述 | 默认值 | +|----------|------|--------| +| backupRepo.create | 安装时创建备份仓库 | false | +| backupRepo.default | 将创建的仓库设为默认 | true | +| backupRepo.accessMethod | 备份仓库访问方式,可选 [Mount, Tool] | Tool | +| backupRepo.storageProvider | 仓库使用的存储提供商,可选 [s3, oss, minio] | "" | +| backupRepo.pvReclaimPolicy | PV 回收策略,可选 [Retain, Delete] | Retain | +| backupRepo.volumeCapacity | 创建 PVC 的容量 | "" | +| backupRepo.config.bucket | 存储桶名称 | "" | +| backupRepo.config.endpoint | 存储端点地址 | "" | +| backupRepo.config.region | 存储区域 | "" | +| backupRepo.secrets.accessKeyId | 存储访问密钥ID | "" | +| backupRepo.secrets.secretAccessKey | 存储访问密钥 | "" | + +### 插件选项 +| 参数 | 描述 | 默认值 | +|----------|------|--------| +| addonController.enabled | 启用插件控制器(需要 `cluster-admin` ClusterRole) | true | +| addonController.jobTTL | 插件作业的存活时间(time.Duration 格式) | 5m | +| addonController.jobImagePullPolicy | 插件安装作业的镜像拉取策略 | IfNotPresent | +| keepAddons | 卸载 chart 时保留 Addon CR 对象 | true | +| addonChartLocationBase | KubeBlocks 官方插件 chart 基础路径。离线环境下,若 URL 以 "file://" 开头,KubeBlocks 将使用从 addonChartsImage 复制的 Helm charts | file:// | +| addonChartsImage.registry | 插件 charts 镜像仓库地址(未指定时使用 image.registry) | "" | +| addonChartsImage.repository | 插件 charts 镜像仓库名称 | apecloud/kubeblocks-charts 
| +| addonChartsImage.pullPolicy | 镜像拉取策略 | IfNotPresent | +| addonChartsImage.tag | 镜像标签 | "" | +| addonChartsImage.chartsPath | 插件 charts 镜像中的 Helm charts 路径 | /charts | +| addonChartsImage.pullSecrets | 镜像拉取密钥 | [] | +| addonHelmInstallOptions | 插件 Helm 安装选项 | ["--atomic", "--cleanup-on-fail", "--wait", "--insecure-skip-tls-verify"] | +| upgradeAddons | 升级 chart 时升级插件。设为 false 可防止 chart 升级时插件 CR 被更新 | false | +| autoInstalledAddons | 安装和升级时自动安装的插件列表 | ["apecloud-mysql", "etcd", "kafka", "mongodb", "mysql", "postgresql", "qdrant", "redis", "rabbitmq"] | + +### 控制器选项 +| 参数 | 描述 | 默认值 | +|----------|------|--------| +| controllers.apps.enabled | 启用应用控制器 | true | +| controllers.workloads.enabled | 启用工作负载控制器 | true | +| controllers.operations.enabled | 启用操作控制器 | true | +| controllers.experimental.enabled | 启用实验性控制器 | false | +| controllers.trace.enabled | 启用追踪控制器 | false | + +### 特性开关选项 +| 参数 | 描述 | 默认值 | +|----------|------|--------| +| featureGates.inPlacePodVerticalScaling.enabled | 启用原地 Pod 垂直扩缩容 | false | + +更新配置选项可使用以下命令: + + + + +1. 安装 +```bash +helm install kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --create-namespace \ + --version {{VERSION}} \ + --set optionName=optionValue +``` + +1. 升级 +```bash +helm upgrade kubeblocks kubeblocks/kubeblocks \ + --namespace kb-system \ + --version {{VERSION}} \ + --set optionName=optionValue +``` + + + + + +1. 安装 +```bash +kbcli kubeblocks install \ + --version={{VERSION}} \ + --create-namespace \ + --set optionName=optionValue +``` + +1. 升级 +```bash +kbcli kubeblocks upgrade \ + --version={{VERSION}} \ + --set optionName=optionValue +``` + + + + +## KubeBlocks Operator RBAC 权限说明 +KubeBlocks operator 需要以下权限才能正常工作。 + +### 1. Kubernetes 资源权限 +**主要权限包括:** + +#### 核心集群权限: +- **节点(Node)**: `list`, `watch` +- **Pod**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch`, `exec`, `log` +- **服务(Service)**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **ConfigMap**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **Secret**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **ServiceAccount**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` +- **PersistentVolumeClaim**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` +- **PersistentVolume**: `get`, `list`, `patch`, `update`, `watch` +- **事件(Event)**: `create`, `get`, `list`, `patch`, `watch` + +#### 应用资源权限: +- **部署(Deployment)**: `get`, `list`, `watch` +- **StatefulSet**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **Job**: `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` +- **CronJob**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` + +#### 存储相关权限: +- **StorageClass**: `create`, `delete`, `get`, `list`, `watch` +- **CSIDriver**: `get`, `list`, `watch` +- **VolumeSnapshot**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` +- **VolumeSnapshotClass**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` + +#### RBAC 权限: +- **Role**: `get`, `list`, `watch` +- **RoleBinding**: `create`, `delete`, `get`, `list`, `patch`, `update`, `watch` + +#### 协调机制权限: +- **Lease**: `create`, `get`, `list`, `patch`, `update`, `watch` + +#### 认证代理权限 +- **TokenReview**: `create` +- **SubjectAccessReview**: `create` + +### 2. 
KubeBlocks 自定义资源权限 + +- **apps.kubeblocks.io** API 组: **ClusterDefinition**, **Cluster**, **ComponentDefinition**, **Component**, **ComponentVersion**, **Rollout**, - **ServiceDescriptor**, **ShardingDefinition**, **SidecarDefinition** +- **dataprotection.kubeblocks.io** API 组: **ActionSet**, **BackupPolicy**, **BackupPolicyTemplate**, **BackupRepo**, **Backup**, **BackupSchedule**, - **Restore**, **StorageProvider** +- **operations.kubeblocks.io** API 组: **OpsDefinition**, **OpsRequest** +- **parameters.kubeblocks.io** API 组: **ComponentParameter**, **ParamConfigRenderer**, **Parameter**, **ParameterDefinition** +- **experimental.kubeblocks.io** API 组: **NodeCountScaler** +- **extensions.kubeblocks.io** API 组: **Addon** +- **trace.kubeblocks.io** API 组: **ReconciliationTrace** +- **workloads.kubeblocks.io** API 组: **InstanceSet** + +### 3. 条件性权限 + +**数据保护功能 (dataProtection.enabled=true):** +- 备份相关权限 + +**Webhook 转换功能 (webhooks.conversionEnabled=true):** +- **CustomResourceDefinition**: `create`, `get`, `list`, `patch`, `update`, `watch` +- **Deployment**: 额外的部署管理权限 + +**插件控制器 (addonControllerEnabled=true):** +- **cluster-admin**: 完整的集群管理员权限 + +:::note + +插件控制器需要 `cluster-admin` ClusterRole。 +如果不想授予此权限,可以在安装 KubeBlocks 时设置 `addonController.enabled=false`。 + +禁用后,仍可以通过 `helm` 方式[安装插件](../install_addons)。 + +::: \ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/kubernetes_and_operator_101.mdx b/docs/zh/preview/user_docs/references/kubernetes_and_operator_101.mdx new file mode 100644 index 00000000..57f37eba --- /dev/null +++ b/docs/zh/preview/user_docs/references/kubernetes_and_operator_101.mdx @@ -0,0 +1,135 @@ +--- +description: 关于Kubernetes你需要了解的事项 +keywords: +- K8s +- operator +- concept +sidebar_position: 7 +title: Kubernetes 与 Operator 基础入门 +--- +# Kubernetes 与 Operator 基础入门 + +## Kubernetes + +什么是Kubernetes?有人说它是一个容器编排系统,也有人将其描述为分布式操作系统,还有人视其为多云PaaS(平台即服务)平台,更有观点认为它是构建PaaS解决方案的基础平台。 + +本文将介绍Kubernetes中的核心概念与基础组件。 + + + +## Kubernetes 控制平面 + +Kubernetes 控制平面是 Kubernetes 的大脑和心脏。它负责管理集群的整体运行,包括处理 API 请求、存储配置数据以及确保集群达到期望状态。核心组件包括: + +- **API Server**:处理所有通信请求 +- **etcd**:存储所有集群数据 +- **Controller Manager**:确保系统维持期望状态 +- **Scheduler**:将工作负载分配到各个节点 +- **Cloud Controller Manager**:管理云平台特定的集成功能(如负载均衡器、存储和网络) + +这些组件协同工作,共同完成集群中容器的部署、扩缩容和管理任务。 + +## 节点 + +有人将Kubernetes比作分布式操作系统,能够管理众多节点。节点(Node)是集群中的工作单元,可以是物理机或虚拟机。每个节点都运行着关键服务,包括容器运行时(如Docker或containerd)、kubelet和kube-proxy。其中,kubelet确保容器按照Pod(Kubernetes中最小的可部署单元)中的定义运行;kube-proxy负责网络路由维护,管理网络规则,并实现Pod与服务之间的通信。节点为运行容器化应用提供所需的计算资源,由Kubernetes主节点(Master)统一管理,包括任务分发、节点健康状态监控以及维护集群的期望状态。 + +:::note + +在某些场景下,当同时讨论Kubernetes(K8s)和数据库时,"节点"这一术语可能引发混淆。在Kubernetes中,"节点"指的是参与集群的物理机或虚拟机,作为运行容器化应用的工作单元。但当数据库运行于Kubernetes内部时,"数据库节点"通常特指承载数据库实例的Pod。 + +在KubeBlocks文档中,"节点"一般指代数据库节点。若需指代Kubernetes节点,我们会明确标注为"K8s节点"以避免歧义。 + +::: + +## kubelet + +kubelet 是 Kubernetes 控制平面用来管理集群中每个节点的代理组件。它确保容器按照 Kubernetes 控制平面定义的规范在 Pod 中运行。kubelet 持续监控容器的状态,确保它们健康且按预期运行。如果某个容器发生故障,kubelet 会根据指定的策略尝试重启该容器。 + + + +## Pod + +在Kubernetes中,Pod可以类比为虚拟机,但更加轻量级且专用化。它是Kubernetes中最小的可部署单元。 + +Pod代表一个或多个紧密耦合、需要协同工作的容器,这些容器共享存储(存储卷)、网络资源以及运行容器的配置规范。这些容器可以通过localhost相互通信,并共享内存和存储等资源。 + +Kubernetes会动态管理Pod,确保它们按照规范运行,并在发生故障时自动重启或替换。Pod可以跨节点(Node)分布以实现冗余,这使得它成为在Kubernetes中部署和管理容器化应用(包括数据库)的基础单元。 + +## 存储类 + +当为 Pod 内部的工作负载(如数据库)创建磁盘时,您可能需要指定磁盘介质的类型,例如是 HDD 还是 SSD。在云环境中,通常会有更多选项可供选择。例如,AWS EBS 提供多种卷类型,如通用型 SSD(gp2/gp3)、预配置 IOPS SSD(io1/io2)和吞吐优化型 HDD(st1)。在 Kubernetes 中,您可以通过 StorageClass 来选择所需的磁盘类型。 + +## PVC + +在Kubernetes中,持久卷声明(Persistent Volume 
Claim,PVC)是用户对存储资源的请求。PVC本质上是一种申请具有特定属性存储的方式,这些属性包括存储类(storage class)、容量大小以及访问模式(例如读写或只读)。通过PVC,Pod可以使用存储资源而无需了解底层基础设施的具体细节。 + +在K8s中,用户通过创建PVC来使用存储资源。当PVC被创建时,Kubernetes会寻找与该请求匹配的存储类(StorageClass)。如果找到匹配的StorageClass,系统将根据定义的参数(无论是SSD、HDD、EBS还是NAS)自动配置存储资源。若PVC未指定StorageClass,Kubernetes则会使用默认的StorageClass(如果已配置)来分配存储空间。 + +## CSI(容器存储接口) + +在Kubernetes中,各类存储类(StorageClass)通过容器存储接口(CSI)提供,该接口负责为应用程序配置底层存储"磁盘"。CSI的功能类似于Kubernetes中的"磁盘驱动程序",使平台能够适配并整合多种存储系统,例如本地磁盘、AWS EBS和Ceph等。这些存储类及其关联的存储资源,均由特定的CSI驱动进行配置,这些驱动负责与底层存储基础设施的交互操作。 + +CSI是一种标准API,使得Kubernetes能够以统一且可扩展的方式与各类存储系统交互。由存储供应商或Kubernetes社区开发的CSI驱动,向Kubernetes暴露了动态配置、挂载、卸载和快照等核心存储功能。 + +当您在Kubernetes中定义存储类时,通常会指定一个CSI驱动作为其配置器(provisioner)。该驱动会根据存储类中的参数及关联的持久卷声明(PVC),自动配置持久卷(PV),确保为您的应用提供适当类型和配置的存储——无论是SSD、HDD还是其他类型。 + +## 持久卷(PV) + +在Kubernetes中,持久卷(Persistent Volume,简称PV)代表一种存储资源,其底层可由多种系统支持,包括本地磁盘、NFS或云存储(如AWS EBS、Google Cloud Persistent Disks等),通常由不同的CSI驱动管理。 + +PV拥有独立于Pod的生命周期,由Kubernetes控制平面管理。即使关联的Pod被删除,PV也能确保数据持久化。持久卷会与持久卷声明(PVC)进行绑定,PVC用于申请特定的存储特性(如容量大小和访问模式),从而保证应用程序获得所需的存储资源。 + +简而言之,PV是实际的存储资源,而PVC是对存储资源的申请。通过PVC中指定的StorageClass,可以将其与不同CSI驱动提供的PV进行绑定。 + +## 服务 + +在Kubernetes中,服务(Service)充当负载均衡器的角色。它定义了一组逻辑上的Pod,并提供了访问这些Pod的策略。由于Pod是临时性的,可以被动态创建和销毁,其IP地址并不稳定。服务通过提供一个稳定的网络端点(虚拟IP地址,称为ClusterIP)来解决这个问题,该端点始终保持不变,使得其他Pod或外部客户端无需知道具体Pod的IP地址就能与服务背后的Pod集合通信。 + +服务支持多种类型:ClusterIP(集群内部访问)、NodePort(通过`<节点IP>:<节点端口>`实现外部访问)、LoadBalancer(使用云提供商的负载均衡器对外暴露服务)以及ExternalName(将服务映射到外部DNS)。 + +## ConfigMap + +ConfigMap 用于以键值对的形式存储配置数据,使您能够将配置与应用程序代码解耦。通过这种方式,您可以独立管理应用设置,并在多个环境中复用这些配置。ConfigMap 可用于将配置数据以环境变量、命令行参数或配置文件的形式注入到 Pod 中。它提供了一种灵活便捷的方式来管理应用配置,无需将配置值硬编码到应用容器内部。 + + + +## Secret + +Secret 用于存储敏感数据,如密码、令牌或加密密钥。通过 Secret,您可以将机密信息与应用代码分离管理,避免在容器镜像中暴露敏感数据。Kubernetes Secret 可以以环境变量形式注入到 Pod 中,或作为文件挂载,确保敏感信息以安全可控的方式处理。 + +需要注意的是,Secret 默认并不加密——它们仅经过 base64 编码,这并不提供真正的加密保护。使用时仍需谨慎,确保配置适当的访问控制措施。 + +## CRD(自定义资源定义) + +若要通过 Kubernetes 管理数据库对象,您需要扩展 Kubernetes API 来描述所管理的数据库对象。这正是 CRD(Custom Resource Definition)机制的作用所在——它允许您定义与特定用例(如数据库集群或备份)相关的自定义资源,并像管理原生 Kubernetes 资源一样对其进行管理。 + + + +## 自定义资源(CR) + +自定义资源(Custom Resource,简称CR)是自定义资源定义(CRD)的一个实例。它代表了一种扩展Kubernetes API的特定配置或对象。通过CR,您可以使用Kubernetes原生工具来定义和管理自定义资源(如数据库或应用程序)。当CR创建后,Kubernetes控制器或Operator会持续监控该资源,并执行相应操作以维持其期望状态。 + +CRD与CR是开发Kubernetes Operator的基础。CRD通常用于实现自定义控制器或Operator,这些控制器会持续监听CR的变化(例如代表数据库集群的CR),并自动执行相应操作。 + +## 什么是 Kubernetes Operator? 
+
+Kubernetes Operator 是一种软件,通常由一个或多个控制器组成,它通过将对自定义资源(CR)的修改转化为对原生 Kubernetes 对象(如 Pod、服务、PVC、ConfigMap 和 Secret)的操作,来自动化管理复杂应用。
+
+- 输入:用户对 CR 的修改。
+- 输出:根据被管理应用的需求,对底层 Kubernetes 资源进行相应变更或与外部系统交互(例如写入数据库或调用 API)。
+
+Operator 持续监控这些 Kubernetes 对象的状态。当发生变更时(例如 Pod 崩溃),Operator 会自动采取纠正措施,例如重新创建 Pod 或调整流量(例如更新服务端点)。
+
+本质上,Kubernetes Operator 将复杂的运维知识封装为软件,自动化执行部署、扩缩容、升级和备份等任务,确保应用始终维持其期望状态而无需人工干预。
+
+## Helm 与 Helm Chart
+
+Helm 是 Kubernetes 上流行的包管理工具,用于简化和统一应用程序的管理与部署。它将所有必需的 Kubernetes 资源打包成一个 Helm Chart,用户只需一条命令(`helm install`)即可完成应用安装。同时,Helm 还支持配置管理和版本更新(`helm upgrade`),使得应用程序的整个生命周期管理更加便捷。
+
+Helm Chart 的核心组件包括:
+
+- **Templates(模板)**:包含占位符的 YAML 文件,用于定义 Kubernetes 资源(如 Pod、服务和 ConfigMap)。
+- **values.yaml**:用户在此文件中指定模板的默认值,实现快速定制。Helm 允许基于现有 Chart,通过 `values.yaml` 或命令行参数覆盖默认值,从而无需修改底层模板即可为不同环境提供特定配置。
+- **Chart.yaml**:描述 Chart 的元数据信息,包括名称、版本和说明等。
+
+Helm 能够与 Jenkins、GitLab CI 和 GitHub Actions 等 CI/CD 工具无缝集成。通过将其纳入持续交付流水线,可实现自动化部署和回滚,确保应用程序在不同环境中保持一致的部署状态。
\ No newline at end of file
diff --git a/docs/zh/preview/user_docs/references/prepare-a-local-k8s-cluster.mdx b/docs/zh/preview/user_docs/references/prepare-a-local-k8s-cluster.mdx
new file mode 100644
index 00000000..609f7395
--- /dev/null
+++ b/docs/zh/preview/user_docs/references/prepare-a-local-k8s-cluster.mdx
@@ -0,0 +1,259 @@
+---
+description: 创建一个测试用Kubernetes集群
+keywords:
+- kbcli
+- kubeblocks
+- addons
+- installation
+sidebar_label: 创建一个测试用Kubernetes集群
+sidebar_position: 5
+title: 创建一个测试用Kubernetes集群
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 创建测试用 Kubernetes 集群
+
+本教程介绍如何使用 Minikube、K3d 和 Kind 创建本地 Kubernetes 测试集群。这些工具能帮助您在本地主机轻松体验 KubeBlocks,为开发、测试和实验提供便捷方案,无需搭建完整生产级集群的复杂度。
+
+## 准备工作
+
+确保本地主机已安装以下工具:
+- Docker:所有三种工具都依赖 Docker 来创建容器化的 Kubernetes 集群
+- kubectl:用于与集群交互的 Kubernetes 命令行工具。参考 [kubectl 安装指南](https://kubernetes.io/docs/tasks/tools/)
+
+
+
+
+
+## 使用 Kind 创建 Kubernetes 集群
+
+Kind(Kubernetes IN Docker)在 Docker 容器内运行 Kubernetes 集群,是理想的本地测试工具。
+
+1. 安装 Kind。详情参考 [Kind 快速入门](https://kind.sigs.k8s.io/docs/user/quick-start/)。
+
+
+
+
+    ```bash
+    brew install kind
+    ```
+
+
+
+
+    ```bash
+    # 适用于 AMD64 / x86_64
+    [ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-amd64
+    # 适用于 ARM64
+    [ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-arm64
+    chmod +x ./kind
+    sudo cp ./kind /usr/local/bin/kind
+    rm -rf kind
+    ```
+
+
+
+
+    可使用 chocolatey 安装 Kind。
+
+    ```bash
+    choco install kind
+    ```
+
+
+
+
+2. 创建 Kind 集群。
+
+    ```bash
+    kind create cluster --name mykindcluster
+    ```
+
+    该命令会在 Docker 容器中创建单节点 Kubernetes 集群。
+
+3. 检查集群是否启动运行。
+
+    ```bash
+    kubectl get nodes
+    >
+    NAME                          STATUS   ROLES           AGE   VERSION
+    mykindcluster-control-plane   Ready    control-plane   25s   v1.31.0
+    ```
+
+    从输出可见名为 `mykindcluster-control-plane` 的节点,表示集群创建成功。
+
+4. (可选)配置多节点集群。
+
+    Kind 支持多节点集群,可通过配置文件创建。
+
+    ```yaml
+    kind: Cluster
+    apiVersion: kind.x-k8s.io/v1alpha4
+    nodes:
+    - role: control-plane
+    - role: worker
+    - role: worker
+    ```
+
+    使用配置文件创建多节点集群:
+
+    ```bash
+    kind create cluster --name multinode-cluster --config kind-config.yaml
+    ```
+
+5. 如需删除 Kind 集群,运行以下命令:
+
+    ```bash
+    kind delete cluster --name mykindcluster
+    ```
+
+
+
+
+
+## 使用 Minikube 创建 Kubernetes 集群
+
+Minikube 在本地机器上运行单节点 Kubernetes 集群,支持虚拟机或容器两种方式。
+
+1. 
安装 Minikube。详情参考 [Minikube 快速入门](https://minikube.sigs.k8s.io/docs/start/)。 + + + + + + ```bash + brew install minikube + ``` + + + + + + ```bash + curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm + sudo rpm -Uvh minikube-latest.x86_64.rpm + ``` + + + + + + 可使用 chocolatey 安装 Minikube。 + + ```bash + choco install minikube + ``` + + + + + +2. 启动 Minikube。该命令将创建本地 Kubernetes 集群。 + + ```bash + minikube start + ``` + + 也可指定其他驱动(如 Docker、Hyperkit、KVM)启动: + + ```bash + minikube start --driver=docker + ``` + +3. 验证 Minikube 和 K8s 集群是否正常运行。 + + 检查 Minikube 状态: + + ```bash + minikube status + > + minikube + type: Control Plane + host: Running + kubelet: Running + apiserver: Running + kubeconfig: Configured + ``` + + 检查 K8s 集群状态: + + ```bash + kubectl get nodes + > + NAME STATUS ROLES AGE VERSION + minikube Ready control-plane 1d v1.26.3 + ``` + + 输出显示 Minikube 节点已就绪。 + + + + + +## 使用 k3d 创建 Kubernetes 集群 + +k3d 是轻量级工具,可在 Docker 容器中运行 k3s(轻量级 Kubernetes 发行版)。 + +1. 安装 k3d。详情参考 [k3d 快速入门](https://k3d.io/v5.7.4/#releases)。 + + + + + + ```bash + brew install k3d + ``` + + + + + + ```bash + curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash + ``` + + + + + + 可使用 chocolatey 安装 k3d。 + + ```bash + choco install k3d + ``` + + + + + +2. 创建 k3s 集群。 + + ```bash + k3d cluster create myk3s + ``` + + 该命令将创建名为 `myk3s` 的单节点 Kubernetes 集群。 + +3. 验证集群是否正常运行。 + + ```bash + kubectl get nodes + > + NAME STATUS ROLES AGE VERSION + k3d-myk3s-server-0 Ready control-plane,master 31s v1.30.4+k3s1 + ``` + +4. 如需删除 k3s 集群,运行以下命令: + + ```bash + k3d cluster delete myk3s + ``` + + + + \ No newline at end of file diff --git a/docs/zh/preview/user_docs/references/terminology.mdx b/docs/zh/preview/user_docs/references/terminology.mdx new file mode 100644 index 00000000..5bbd51c8 --- /dev/null +++ b/docs/zh/preview/user_docs/references/terminology.mdx @@ -0,0 +1,99 @@ +--- +description: KubeBlocks 核心术语解析 +keywords: +- terminology +sidebar_label: 术语 +sidebar_position: 2 +title: 术语 +--- +# 术语表 + +### 插件(Addon) + +插件是一种高效开放的扩展机制。通过KubeBlocks插件,开发者可以快速将新的数据库引擎添加到KubeBlocks中,并获取该数据库引擎特定的基础管理功能,包括但不限于生命周期管理、数据备份恢复、指标与日志采集等。 + +### 操作集(ActionSet) + +操作集声明了一组使用特定工具执行备份和恢复操作的命令,例如使用xtrabackup备份MySQL的命令,以及从备份中恢复数据的命令。 + +### 备份策略(BackupPolicy) + +备份策略表示集群的备份方案,包含备份仓库(BackupRepo)、备份目标和备份方法等详细信息。一个备份策略中可以定义多种备份方法,每种方法引用对应的操作集。创建备份时,可以指定备份策略和备份方法进行备份操作。 + +### 备份仓库(BackupRepo) + +备份仓库是备份数据的存储库,其原理是使用CSI驱动将备份数据上传到各类存储系统,例如S3、GCS等对象存储系统,以及FTP、NFS等存储服务器。 + +### 备份计划(BackupSchedule) + +备份计划声明了集群自动备份的配置,包括备份频率、保留期限、备份策略和备份方法。备份计划控制器(BackupSchedule Controller)会根据自定义资源(CR)中的配置创建CronJob来自动备份集群。 + +### 集群(Cluster) + +集群由[组件](#component-is-the-fundamental-assembly-component-used-to-build-a-data-storage-and-processing-system-a-component-utilizes-a-statefulset-either-native-to-kubernetes-or-specified-by-the-customer-such-as-openkruise-to-manage-one-to-multiple-pods)构成。 + +### 组件(Component) + +组件是构建数据存储和处理系统的基础组装单元。一个组件使用StatefulSet(可以是Kubernetes原生的,也可以是客户指定的,如OpenKruise)来管理一个到多个Pod。 + +### 组件引用(ComponentRef) + +组件引用用于选择要引用的组件及其字段。 + +### 配置约束(ConfigConstraint) + +KubeBlocks将引擎配置文件抽象为配置约束,以更好地支持配置变更。配置约束中抽象的信息包括以下内容: + - 配置文件格式; + - 动态参数和静态参数以及不可变参数; + - 动态变化的参数; + - 参数校验规则。 + +### 自定义资源定义(CRD) + +自定义资源定义(CRD)扩展了Kubernetes API,使开发者能够引入称为自定义资源的新数据类型和对象。 + +### 操作器(Operator) + +操作器是一种自定义资源,可自动化通常由人工操作员在管理一个或多个应用或服务时执行的任务。通过确保资源的定义状态始终与其观察状态一致,操作器支持Kubernetes履行其管理职责。 + +### 运维定义(OpsDefinition) + +运维(Ops)是"Operations"的缩写,代表数据库维护操作。它定义了与数据库管理相关的运维任务,指定集群和组件支持哪些操作。 + +### 运维请求(OpsRequest) + 
+运维请求表示单个操作请求。 + +### 基于角色的访问控制(RBAC) + +基于角色的访问控制(RBAC),也称为基于角色的安全,是计算机系统安全中用于限制对系统网络和资源的访问仅限授权用户的方法。Kubernetes内置了用于管理命名空间和集群中角色的API,支持将其与特定资源和个体关联。 + +### 服务描述符(ServiceDescriptor) + +服务描述符是用于描述引用存储服务的API对象的自定义资源(CR)对象。它允许用户抽象出由Kubernetes或非Kubernetes环境提供的服务,使其可供KubeBlocks中的其他集群对象引用。"服务描述符"可用于解决KubeBlocks中的服务依赖、组件依赖和组件共享等问题。 + +KubeBlocks对容器化分布式数据库的管理映射为四个层次的对象:集群(Cluster)、组件(Component)、实例集(InstanceSet)和实例(Instance),形成分层架构: + +### 集群层 + +集群对象代表一个完整的分布式数据库集群。集群是顶层抽象,包含数据库的所有组件和服务。 + +### 组件层 + +组件代表构成集群对象的逻辑组件,如元数据管理、数据存储、查询引擎等。每个组件对象都有其特定的任务和功能。一个集群对象包含一个或多个组件对象。 + +### 实例集层 + +实例集对象管理组件对象内部多个副本所需的工作负载,感知副本的角色。一个组件对象包含一个实例集对象。 + +### 实例层 + +实例对象代表实例集对象中实际运行的实例,对应Kubernetes中的一个Pod。一个实例集对象可以管理零到多个实例对象。 + +### 组件定义(ComponentDefinition) + +组件定义是用于定义分布式数据库组件的API,描述组件的实现细节和行为。通过组件定义,可以定义组件的关键信息,如容器镜像、配置模板、启动脚本、存储卷等。还可以设置组件对不同事件(例如节点加入、节点离开、组件添加、组件移除、角色切换等)的行为和逻辑。每个组件可以有自己的独立组件定义,也可以共享相同的组件定义。 + +### 集群定义(ClusterDefinition) + +集群定义是用于定义分布式数据库集群整体结构和拓扑的API。在集群定义中,可以引用其包含组件的组件定义,并定义组件之间的依赖和引用关系。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/_category_.yml b/docs/zh/preview/user_docs/release_notes/_category_.yml new file mode 100644 index 00000000..ecac5d94 --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 版本说明 +position: 101 diff --git a/docs/zh/preview/user_docs/release_notes/release-09/090.mdx b/docs/zh/preview/user_docs/release_notes/release-09/090.mdx new file mode 100644 index 00000000..42dd7ec3 --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-09/090.mdx @@ -0,0 +1,126 @@ +--- +description: 版本说明 v0.9.0 +keywords: +- kubeblocks +- release notes +sidebar_position: 10 +title: v0.9.0 +--- +# KubeBlocks 0.9.0 (2024-07-09) + +我们非常高兴地宣布 KubeBlocks v0.9.0 版本发布,这标志着我们距离备受期待的 v1.0 正式版又近了一步。该版本带来了多项重大改进和新功能,全面提升了 KubeBlocks 平台的功能性和用户体验。 + +主要更新亮点包括: + + +## API 亮点 + +- 在 KubeBlocks v0.9 中,随着对拓扑结构的支持引入,集群构建体验变得更加灵活直观,就像用积木搭建集群一样。ClusterDefinition API 新增了 `topologies` 字段,允许开发者提供具有不同拓扑结构的多种部署模式。数据库用户可以通过 `topology` 字段在创建 Cluster 时选择拓扑结构。例如,Redis 插件提供了三种拓扑:单机模式(Standalone)、主从复制(Replication)和代理模式(Proxy)。单机拓扑仅包含 RedisServer 组件,主从复制拓扑包含 RedisServer 和 Sentinel 组件,而代理拓扑则额外添加了第三个组件(如 Twemproxy)。 + +- KubeBlocks 现已支持管理分布式数据库的水平扩展(Reshard)。您可以用一个组件(Component)表示一个水平分片,并通过增减组件来实现该水平分片的扩缩容。这一扩展能力也将应用于 Redis 和 Pika 的分布式部署场景。 + +- KubeBlocks 现在使用 InstanceSet 替代 StatefulSet 来管理 Pod。InstanceSet 支持指定 Pod 下线及原地更新,在数据库主从架构中还能为主从节点采用不同的 pod 规格(这些特性是 StatefulSet 所不具备的)。 + +- 开发者现在可以为组件添加更多自定义事件处理器!v0.8 引入的 ComponentDefinition API 包含 `lifeCycleActions` 字段,允许定义各类自定义事件处理器。在此基础上,KubeBlocks v0.9 为插件实现提供了更多处理器类型,包括角色探测(roleprobe)、节点下线(memberLeave)、组件下线前(preTerminate)和组件上线后(postProvision)。事件处理器的扩展增强了 KubeBlocks 的表达能力。例如,preTerminate 和 postProvision 可用于在分布式数据库中执行跨分片数据重平衡(Rebalance),或向 Sentinel、Orchestrator 等第三方高可用管理器发起注册。 + +## 插件亮点 + +- KubeBlocks 支持 Redis 集群模式(分片模式)[#5833](https://github.com/apecloud/kubeblocks/issues/5833) + Redis Cluster 除了出色的故障转移能力外,还旨在提供水平写入扩展能力和智能客户端高可用策略。Redis Cluster 将数据分布在多个 Redis 节点上,显著提升了系统容量、性能和可用性。 + +- KubeBlocks 引入 MySQL 主从复制模式 [#1330](https://github.com/apecloud/kubeblocks/issues/1330) + 相比 MGR 集群,MySQL 主从复制拓扑所需资源更少(仅需两个数据库副本),数据复制开销更低。当对服务可用性和数据可靠性没有极端要求时,主从复制拓扑是更具性价比的选择。您可以通过 kbcli 主动切换 MySQL 副本角色,或通过 kubectl 删除指定 Kubernetes pod 触发被动故障转移。若不存在长事务和大表 DDL 操作,故障转移通常可在 30 秒内完成。 + +## 变更内容 + +### 新功能 + +**KubeBlocks** +- ClusterDefinition API + - 支持拓扑API,允许开发者自定义多种拓扑结构。 [#6582](https://github.com/apecloud/kubeblocks/pull/6582) +- Cluster API + - 支持ShardingSpec API。 
[#6437](https://github.com/apecloud/kubeblocks/pull/6437) + - 支持分片扩缩容。 [#6774](https://github.com/apecloud/kubeblocks/pull/6774) +- ComponentDefinition API + - lifecycleActions API支持用户自定义操作动作,包括角色探测(roleprobe)、成员离开(memberLeave)、终止前(preTerminate)、供应后(postProvision)。 [#6037](https://github.com/apecloud/kubeblocks/pull/6037) [#6582](https://github.com/apecloud/kubeblocks/pull/6582) [#6720](https://github.com/apecloud/kubeblocks/pull/6720) [#6774](https://github.com/apecloud/kubeblocks/pull/6774) + - 新增Vars API用于引用实例相关的动态资源和信息,包括Secret、Service和服务引用。 + - 支持Vars API。 [#5919](https://github.com/apecloud/kubeblocks/pull/5919) + - 支持跨组件Vars引用。 [#7155](https://github.com/apecloud/kubeblocks/pull/7155) + - 优化ServiceRef引用机制。 [#7006](https://github.com/apecloud/kubeblocks/pull/7006) + - 支持动态配置,在垂直扩缩容或水平扩缩容后重新生成指定变量。 [#6273](https://github.com/apecloud/kubeblocks/issues/6273) [#6690](https://github.com/apecloud/kubeblocks/issues/6690) +- 组件(Component) + - 支持删除组件。 [#6774](https://github.com/apecloud/kubeblocks/pull/6774) + - 支持ComponentVersion。 [#6582](https://github.com/apecloud/kubeblocks/pull/6582) +- InstanceSet API + - InstanceSet替代StatefulSet管理Pod。 [#7084](https://github.com/apecloud/kubeblocks/pull/7084) + - 支持实例模板(Instance Template)。 [#5799](https://github.com/apecloud/kubeblocks/issues/5799) + - 支持指定实例缩容。 [#6958](https://github.com/apecloud/kubeblocks/pull/6958) + - 支持原地更新(In-place Update)。 [#7000](https://github.com/apecloud/kubeblocks/pull/7000) +- OpsRequest API + - 支持重建故障备份实例。 [#6872](https://github.com/apecloud/kubeblocks/pull/6872) + - 支持通过force标志控制并发操作。 [#6828](https://github.com/apecloud/kubeblocks/pull/6828) + - 支持自定义多任务顺序执行。 [#6735](https://github.com/apecloud/kubeblocks/pull/6735) +- 支持NodeCountScaler。 [#7258](https://github.com/apecloud/kubeblocks/pull/7258) +- 支持时间点恢复(PITR)。 [#6779](https://github.com/apecloud/kubeblocks/pull/6779) +- 支持跨命名空间恢复。 [#6778](https://github.com/apecloud/kubeblocks/pull/6778) + +**kbcli** +- 支持PostgreSQL时间点恢复。 [#329](https://github.com/apecloud/kbcli/pull/329) +- cluster命令新增rebuild-instance子命令用于重建实例。 [#285](https://github.com/apecloud/kbcli/pull/295) +- cluster create子命令支持elasticsearch。 [#389](https://github.com/apecloud/kbcli/pull/389) +- 支持在创建备份仓库时指定路径前缀。 [#294](https://github.com/apecloud/kbcli/pull/294) + +**插件(Addons)** +*Redis* +- 支持官方Redis Cluster拓扑结构。 [#301](https://github.com/apecloud/kubeblocks-addons/pull/301) +- 增强Redis功能与稳定性: + - 适配ComponentDefinition、ComponentVersion等新API,支持多种拓扑形态。 [#501](https://github.com/apecloud/kubeblocks-addons/pull/501) + - 优化Redis Replication Cluster初始化逻辑,移除对DownwardAPI的依赖。 [#462](https://github.com/apecloud/kubeblocks-addons/pull/462) [#616](https://github.com/apecloud/kubeblocks-addons/pull/616) + - 支持Redis v7.2.4。 [#571](https://github.com/apecloud/kubeblocks-addons/pull/571) +- Redis分片集群支持备份与恢复。 [#442](https://github.com/apecloud/kubeblocks-addons/pull/442) +*MySQL* +- 新增开源组件Orchestrator Addon用于管理MySQL。 [#625](https://github.com/apecloud/kubeblocks-addons/pull/625) [#567](https://github.com/apecloud/kubeblocks-addons/pull/567) +*PostgreSQL* +- 支持PostgreSQL时间点恢复(PITR)。 [#361](https://github.com/apecloud/kubeblocks-addons/pull/361) +- 支持PostgreSQL v15.7。 [#361](https://github.com/apecloud/kubeblocks-addons/pull/361) +*Qdrant* +- Qdrant分片集群支持备份与恢复。 [#442](https://github.com/apecloud/kubeblocks-addons/pull/442) +*MogDB* +- 支持v5.0.5版本MogDB复制集群的创建、扩缩容、备份和切换。 [#343](https://github.com/apecloud/kubeblocks-addons/pull/343) [#350](https://github.com/apecloud/kubeblocks-addons/pull/350) +*ElasticSearch* +- 支持Elasticsearch 
v7.7.1、v7.10.1和v8.8.2版本。 [#767](https://github.com/apecloud/kubeblocks-addons/pull/767) +*Pulsar* +- 支持v3.0.2版本。 [#340](https://github.com/apecloud/kubeblocks-addons/pull/40) +- 支持NodePort。 [#358](https://github.com/apecloud/kubeblocks-addons/pull/358) +*VictoriaMetrics + +## 版本 0.9 的 API 弃用及其他变更说明 + +- **ConfigConstraint API** 进入稳定阶段,从 v1alpha1 升级至 v1beta1。 +- **StorageProvider** 的组别变更,从 `storage.kubeblocks.io` 迁移至 `dataprotection.kubeblocks.io`。 +- **ClusterVersion v1alpha1 CRD** 将在版本 1.0 中移除。 +- **ComponentClassDefinition v1alpha1 CRD** 将在版本 1.0 中移除。 +- **ComponentResourceConstraint v1alpha1 CRD** 将在版本 1.0 中移除。 +- **ClusterDefinition API** + - `type`、`componentDefs`、`connectionCredential` 字段将在版本 1.0 中移除。 +- **Cluster API** + - **调度相关**:`tenancy` 和 `availabilityPolicy` 字段将在版本 1.0 中移除。 + - **API 简化**:`replicas`、`resources`、`storage` 和 `network` 字段将在版本 1.0 中移除。 +- **ComponentDefinition API** + - `switchPolicy` 字段将在版本 1.0 中移除,相同功能可通过 `componentDefinition.spec.lifecycleActions.switchover` API 实现。 +- **ServiceRef API** + - `Cluster` 字段将在版本 1.0 中移除,相同功能可通过 `serviceRef.clusterServiceSelector` 实现。 + +此外,所有引用上述 API 的字段均被标记为**弃用**,并将在版本 1.0 中移除: +- `clusterVersionRef` +- `componentDefRef` +- `classDefRef` + +KubeBlocks 版本 0.9 仍保持对已弃用 API 的兼容性。 + +### 已弃用功能 +**kbcli 弃用功能** +- 移除了 `bench`、`fault` 和 `migration` 子命令以精简功能。 + +## 升级至 v0.9 版本 +请参阅 [升级至 KubeBlocks v0.9](../upgrade/upgrade-to-0_9_0) 文档。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-09/091.mdx b/docs/zh/preview/user_docs/release_notes/release-09/091.mdx new file mode 100644 index 00000000..1a315274 --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-09/091.mdx @@ -0,0 +1,98 @@ +--- +description: 版本说明 v0.9.1 +keywords: +- kubeblocks +- release notes +sidebar_position: 9 +title: v0.9.1 +--- +# KubeBlocks 0.9.1 (2024-10-18) + +我们很高兴地宣布 KubeBlocks v0.9.1 正式发布! + +在此版本中,KubeBlocks 进一步优化了其 API 和插件系统,为您带来新功能和更好的用户体验。本次更新包含通过 Cluster API 启停集群、OpsRequest 中的实例重建能力、Redis 的时间点恢复(PITR)和基于键的恢复等新特性。我们还修复了一些错误并进行了多项改进以增强整体功能。 + +阅读完整的[版本说明](../upgrade/upgrade-to-v09-version)并升级到 KubeBlocks v0.9.1,探索更多功能! 
+ +## 亮点功能 + +### KubeBlocks 核心 + +- 支持通过 Cluster API 启停集群 + + 该特性提供了新的操作选项,以满足不同场景下的需求。 + +- 增强 OpsRequest 中的实例重建能力 + + 结合 KubeBlocks 的 [InstanceSet](https://kubeblocks.io/blog/instanceset-introduction),该功能大幅提升了系统在故障场景下的恢复能力。 + +### 插件系统 + +- Redis + + 支持时间点恢复(PITR)和基于键的恢复。 + +- ZooKeeper + + 新增备份支持。 + +- 新版本支持 + + MySQL 和 PostgreSQL 插件支持更多版本。有关插件的最新版本信息,请参阅[插件列表](https://github.com/apecloud/kubeblocks-addons?tab=readme-ov-file#supported-add-ons)。 + +## 变更内容 + +### 新特性 + +#### KubeBlocks 核心 + +- OpsDefinition 和 BackupPolicyTemplate 支持组件名前缀和正则匹配 [#8174](https://github.com/apecloud/kubeblocks/pull/8174) + + 现在支持通过组件名前缀和正则表达式进行匹配,提供更高的灵活性。 + +- 高可用性(HA)记录 [#8089](https://github.com/apecloud/kubeblocks/pull/8089) + + KubeBlocks 新增高可用性记录功能,增强系统容错能力和可靠性。 + +- 支持通过 Cluster API 启停集群 [#7783](https://github.com/apecloud/kubeblocks/pull/7783) + + 简化集群管理操作。 + +- 支持实例重建时的水平扩展 [#7710](https://github.com/apecloud/kubeblocks/pull/7710) + + 通过 OpsRequest API 中的 inPlace 字段,可选择原地重建或远程重建实例。 + +- 自动清理失败的 OpsRequests [#7796](https://github.com/apecloud/kubeblocks/pull/7796) + + 新增自动清理机制,优化资源管理。 + +- 备份失败时的日志收集 [#8208](https://github.com/apecloud/kubeblocks/pull/8208) + + 支持在备份操作失败时收集相关日志。 + +#### 插件系统 + +- Redis 插件 + - 单机副本支持时间点恢复(PITR)[#7998](https://github.com/apecloud/kubeblocks/pull/7998) + - 支持基于键的恢复 [#8129](https://github.com/apecloud/kubeblocks/pull/8129) +- 新增 Loki 支持 [#707](https://github.com/apecloud/kubeblocks-addons/pull/707) +- 新增 MinIO 支持 [#926](https://github.com/apecloud/kubeblocks-addons/pull/926) +- 新增 RabbitMQ 支持 [#746](https://github.com/apecloud/kubeblocks-addons/pull/746) +- 支持 MySQL 8.4 [#987](https://github.com/apecloud/kubeblocks-addons/pull/987) +- 支持 PostgreSQL 16 [#973](https://github.com/apecloud/kubeblocks-addons/pull/973) +- ZooKeeper 插件 + - 新增备份支持 [#794](https://github.com/apecloud/kubeblocks-addons/pull/794), [#851](https://github.com/apecloud/kubeblocks-addons/pull/851) + +### 其他改进 + +- ComponentDefinition 不可变性检查 + + 新增默认的不可变性检查机制,防止意外修改,提升系统稳定性。 + +- 移除 Application 插件 (#7866) + + 该插件已从代码库中移除且不再默认安装,但仍可手动安装使用。 + +## 升级至 v0.9.1 + +请参考[升级至 KubeBlocks v0.9.x](../upgrade/upgrade-to-v09-version)指南。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-09/092.mdx b/docs/zh/preview/user_docs/release_notes/release-09/092.mdx new file mode 100644 index 00000000..f75b05ca --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-09/092.mdx @@ -0,0 +1,58 @@ +--- +description: 版本说明 v0.9.2 +keywords: +- kubeblocks +- release notes +sidebar_position: 8 +title: v0.9.2 +--- +# KubeBlocks 0.9.2 (2024-12-03) + +我们很高兴地宣布 KubeBlocks v0.9.2 版本正式发布。本次更新包含多项新功能、错误修复以及各类改进。 + +阅读完整发布说明并[升级至 KubeBlocks v0.9.2](../upgrade/upgrade-to-v09-version) 以探索更多功能!v0.9.2 的升级流程与 v0.9.1 完全一致,只需按照 v0.9.1 的教程操作,在需要时更新版本号即可完成升级至 v0.9.2。 + +## KubeBlocks 核心功能 + +- 新增容器镜像滚动更新支持,实现最小停机时间的无缝更新 ([#8389](https://github.com/apecloud/kubeblocks/pull/8389)) +- 引入组件级停止/启动能力,实现对集群组件的细粒度控制 ([#8480](https://github.com/apecloud/kubeblocks/pull/8480)) +- 增强分片集群的主机网络支持 ([#8517](https://github.com/apecloud/kubeblocks/pull/8517), [#8502](https://github.com/apecloud/kubeblocks/pull/8502)) +- 改进分片集群的水平扩缩容 OpeRequest 操作 ([#8530](https://github.com/apecloud/kubeblocks/pull/8530)) +- 新增 Pod 重建更新策略支持,增强更新策略灵活性 ([#8466](https://github.com/apecloud/kubeblocks/pull/8466)) +- KubeBlocks 安装优化:支持定义额外注解和环境变量 ([#8454](https://github.com/apecloud/kubeblocks/pull/8454)) + +## 插件增强 + +### MySQL + +- 新增 Jemalloc 支持以优化内存管理 ([#1158](https://github.com/apecloud/kubeblocks-addons/pull/1158)) + +### Redis + +- 为 
Redis Sentinel 新增 NodePort 通告模式支持 ([#1227](https://github.com/apecloud/kubeblocks-addons/pull/1227)) +- 引入固定 Pod IP、自定义主节点名称和完整 FQDN 域名支持 ([#1222](https://github.com/apecloud/kubeblocks-addons/pull/1222)) +- 优化 PITR 备份中的用户 ACL 备份频率 ([#1180](https://github.com/apecloud/kubeblocks-addons/pull/1180)) + +### RabbitMQ + +- 新增成员移除操作支持,实现集群缩容场景 ([#1229](https://github.com/apecloud/kubeblocks-addons/pull/1229)) +- 通过配置约束和基于文件的日志增强 RabbitMQ 配置管理 ([#1199](https://github.com/apecloud/kubeblocks-addons/pull/1199)) + +### MongoDB + +- 新增主机网络支持 ([#1152](https://github.com/apecloud/kubeblocks-addons/pull/1152)) + +### PostgreSQL + +- 增强原生 PostgreSQL 集成并新增 PostgreSQL 15 支持 ([#1092](https://github.com/apecloud/kubeblocks-addons/pull/1092)) +- 新增 Supabase PostgreSQL 支持 ([#1154](https://github.com/apecloud/kubeblocks-addons/pull/1154)) + +### Xinference + +- 新增 Xinference v0.15.4 支持 ([#1248](https://github.com/apecloud/kubeblocks-addons/pull/1248)) + +您可以查看[完整变更日志](https://github.com/apecloud/kubeblocks/compare/v0.9.1...v0.9.2)。 + +## 升级至 v0.9.2 + +请参考[升级至 KubeBlocks v0.9.x](../upgrade/upgrade-to-v09-version)指南。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-09/093.mdx b/docs/zh/preview/user_docs/release_notes/release-09/093.mdx new file mode 100644 index 00000000..3073d00f --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-09/093.mdx @@ -0,0 +1,77 @@ +--- +description: 版本说明 v0.9.3 +keywords: +- kubeblocks +- release notes +sidebar_position: 7 +title: v0.9.3 +--- +# KubeBlocks 0.9.3 (2025-02-19) + +## KubeBlocks 核心功能 + +### 新特性 + +- **成员加入动作**:生命周期动作新增支持 `memberjoin` 操作。 +- **增量备份**:数据保护功能现支持增量备份 ([#8757](https://github.com/apecloud/kubeblocks/pull/8757))。 +- **分片集群优化** + - 支持从备份重建分片集群实例 ([#8777](https://github.com/apecloud/kubeblocks/pull/8777))。 + - 在运维请求(OpsRequest)中支持分片组件切换 ([#8786](https://github.com/apecloud/kubeblocks/pull/8786))。 + +## KubeBlocks 插件生态 + +### MySQL + +- **功能增强** + - 支持配置 MySQL 的 `lower_case_table_names` 参数 ([#1335](https://github.com/apecloud/kubeblocks-addons/pull/1335))。 + - 更新 MySQL 配置以支持 `default_time_zone` 参数动态更新 ([#1377](https://github.com/apecloud/kubeblocks-addons/pull/1377))。 +- **问题修复** + - 修复 MySQL 备份策略未生效的问题 ([#1310](https://github.com/apecloud/kubeblocks-addons/pull/1310))。 + +### PostgreSQL + +- **问题修复** + - 修复因 WAL 日志备份缺失导致的 PITR(时间点恢复)失败问题 ([#1280](https://github.com/apecloud/kubeblocks-addons/pull/1280))。 + - 解决 PITR 恢复失败问题 ([#1290](https://github.com/apecloud/kubeblocks-addons/pull/1290))。 + - 优化 WAL 日志归档机制。 + +### Redis + +- **功能增强** + - 更新 Redis 版本以修复 [CVE-2024-46981](https://access.redhat.com/security/cve/cve-2024-46981) 漏洞 ([#1405](https://github.com/apecloud/kubeblocks-addons/pull/1405))。 + - 支持 Redis 自定义密钥密码 ([#1406](https://github.com/apecloud/kubeblocks-addons/pull/1406))。 + - 为 Redis Sentinel 支持 `storageClassName` 参数配置 ([#1418](https://github.com/apecloud/kubeblocks-addons/pull/1418))。 +- **问题修复** + - 修复 Redis 集群分片扩容时的 FQDN 解析问题 ([#1283](https://github.com/apecloud/kubeblocks-addons/pull/1283))。 + +### MongoDB + +- **功能增强** + - 新增 MongoDB 版本支持:v5.0.30/6.0.20/7.0.16/8.0.4 ([#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431))。 + - 更新 MongoDB 集群创建示例 ([#1363](https://github.com/apecloud/kubeblocks-addons/pull/1363))。 + +### ClickHouse + +- **功能增强** + - 新增 ClickHouse 重配置示例 ([#1401](https://github.com/apecloud/kubeblocks-addons/pull/1401))。 + - 修复 ClickHouse 分片初始化问题 ([#1402](https://github.com/apecloud/kubeblocks-addons/pull/1402))。 + - 新增存储配置,修复水平扩展失败问题,并禁用分片功能 
([#1450](https://github.com/apecloud/kubeblocks-addons/pull/1450))。 +- **问题修复** + - 解决分布式表跨分片查询失败的问题 ([#1411](https://github.com/apecloud/kubeblocks-addons/pull/1411))。 + +### Zookeeper + +- **功能增强** + - 使用 `nc` 工具替代 Java 实现 Zookeeper 健康检查探针。 + +- **问题修复** + - 修复备份大小显示异常问题。 + - 解决数据恢复失败问题。 + - 修复快照日志挂载错误。 + +### TiDB + +- **功能增强** + - 新增 TiDB v8.4 版本支持 ([#1275](https://github.com/apecloud/kubeblocks-addons/pull/1275))。 + +## 升级至 v0.9.3 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-09/094.mdx b/docs/zh/preview/user_docs/release_notes/release-09/094.mdx new file mode 100644 index 00000000..d7c23cfd --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-09/094.mdx @@ -0,0 +1,71 @@ +--- +description: 版本说明 v0.9.4 +keywords: +- kubeblocks +- release notes +sidebar_position: 6 +title: v0.9.4 +--- +# KubeBlocks 0.9.4 版本发布 (2025-06-30) + +我们很高兴地宣布 KubeBlocks v0.9.4 版本正式发布。本次更新包含多项新功能、错误修复及各类改进。以下是详细的更新内容。 + +## KubeBlocks 核心功能 + +### 新增特性 + +* **集群支持指定 PVC 注解和标签** 通过为不同类型的 PVC 应用不同的注解/标签,CSI 驱动可以为不同存储卷设置不同的 IO 配额 [(#8799)](https://github.com/apecloud/kubeblocks/pull/8799) + +* **新增跳过预终止操作的注解键** 注解键:`apps.kubeblocks.io/skip-pre-terminate` [(#9121)](https://github.com/apecloud/kubeblocks/pull/9121) + +* **支持分片组件水平扩缩容时重新渲染配置** 在分片组件水平扩展后重新渲染配置文件 [(#9195)](https://github.com/apecloud/kubeblocks/pull/9195) + +## KubeBlocks 插件生态 + +### MySQL + +* 更新 `innodb_redo_log_capacity` 和 `secure_file_priv` 参数的适用范围 [(#1510)](https://github.com/apecloud/kubeblocks-addons/pull/1510) [(#1585)](https://github.com/apecloud/kubeblocks-addons/pull/1585) + +### PostgreSQL + +* 修复 PostgreSQL 15 & 16 版本缺失 `backupPolicy` 的问题 [(#1546)](https://github.com/apecloud/kubeblocks-addons/pull/1546) + +### Redis + +* 优化 Redis 集群的停止-启动流程 [(#1554)](https://github.com/apecloud/kubeblocks-addons/pull/1554) +* 改进成员退出逻辑(不再依赖公告地址) [(#1548)](https://github.com/apecloud/kubeblocks-addons/pull/1548) +* 完善 Redis 主机网络变量配置 [(#1603)](https://github.com/apecloud/kubeblocks-addons/pull/1603) + +### MongoDB + +* 新增对 MongoDB 导出器的支持 [(#1721)](https://github.com/apecloud/kubeblocks-addons/pull/1721) + +### RabbitMQ + +* 修复 RabbitMQ 启动失败问题 [(#1479)](https://github.com/apecloud/kubeblocks-addons/pull/1479) +* 解决 RabbitMQ 成员退出异常 [(#1657)](https://github.com/apecloud/kubeblocks-addons/pull/1657) + +### ZooKeeper + +* 为 ZooKeeper 新增 CMPD 定义 [(#1514)](https://github.com/apecloud/kubeblocks-addons/pull/1514) +* 修复快照日志丢失问题 [(#1509)](https://github.com/apecloud/kubeblocks-addons/pull/1509) +* 新增 `minSessionTimeout` 参数支持 [(#1535)](https://github.com/apecloud/kubeblocks-addons/pull/1535) +* 修复备份恢复逻辑 [(#1550)](https://github.com/apecloud/kubeblocks-addons/pull/1550) +* 优化 ZooKeeper 角色探测机制 [(#1542)](https://github.com/apecloud/kubeblocks-addons/pull/1542) +* 增强 ZooKeeper 跟踪日志 [(#1693)](https://github.com/apecloud/kubeblocks-addons/pull/1693) + +### VictoriaMetrics + +* 支持创建 VictoriaMetrics 集群时配置环境变量 [(#1622)](https://github.com/apecloud/kubeblocks-addons/pull/1622) + +### ClickHouse + +* 持久化 ClickHouse 和 Keeper 的日志文件 [(#1560)](https://github.com/apecloud/kubeblocks-addons/pull/1560) + +## 升级至 v0.9.4 + +请参考[升级至 KubeBlocks v0.9.x 指南](https://kubeblocks.io/docs/preview/user_docs/upgrade/upgrade-to-v09-version)。 + +## 完整变更日志 + +您可以通过[完整变更日志](https://github.com/apecloud/kubeblocks/compare/v0.9.3...v0.9.4)查看所有变更细节。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-09/_category_.yml b/docs/zh/preview/user_docs/release_notes/release-09/_category_.yml new file mode 100644 index 
00000000..acfd5355 --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-09/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 发布 v0.9 版本 +position: 2 diff --git a/docs/zh/preview/user_docs/release_notes/release-10/100-cn.mdx b/docs/zh/preview/user_docs/release_notes/release-10/100-cn.mdx new file mode 100644 index 00000000..9a81674e --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-10/100-cn.mdx @@ -0,0 +1,116 @@ +--- +description: 版本说明 v1.0.0 +hidden: true +keywords: +- kubeblocks +- release notes +sidebar_position: 1 +title: v1.0.0-中文版 +--- +# KubeBlocks 1.0.0 (2025-05-28) + +我们很高兴宣布 KubeBlocks 1.0.0 版本正式发布。 + +KubeBlocks 1.0.0 标志着项目发展的重要里程碑,核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现重大增强,为生产环境带来更高的灵活性和可靠性。 + +## 核心亮点 + +### API 进入稳定阶段 + +在 KubeBlocks v0.9 中引入的诸多核心能力(如灵活拓扑、InstanceSet、生命周期扩展)已在 KubeBlocks 1.0 中正式稳定。 + +以下 CRD 现在为 **`v1` 且已稳定**,将长期支持: + +**`apps.kubeblocks.io` API 组:** + +* `ClusterDefinition` +* `Cluster` +* `ComponentDefinition` +* `Component` +* `ComponentVersion` +* `ServiceDescriptor` +* `ShardingDefinition` +* `SidecarDefinition` + +**`workloads.kubeblocks.io` API 组:** + +* `InstanceSet` + +### KubeBlocks 核心功能 + +* **滚动更新**:通过 Cluster API 支持滚动升级,最大限度减少更新期间的停机时间 +* **增量备份**:新增增量备份支持,提升性能并减少存储占用 + +### 插件增强特性 + +* **MySQL 改进**:新增 TLS 支持、基于 ProxySQL 的组复制和 WAL-G 实现的 PITR 功能,显著提升安全性和恢复能力 +* **MongoDB PITR 与版本支持**:为 MongoDB 引入时间点恢复功能并支持新版本 +* **Kafka 优化**:支持外部 ZooKeeper、自定义 Prometheus 指标和多网络访问,提升灵活性和可观测性 +* **Redis 增强**:新增集群切换、实例重建和外部配置支持,提高运维健壮性 + +--- + +## 变更详情 + +### KubeBlocks 核心 + +#### 集群管理 + +* **滚动升级**:通过 Cluster API 实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) +* **动态镜像仓库**:支持动态替换镜像仓库实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) +* **分片 Pod 反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) +* **Pod 标签/注解更新**:可更新底层 Pod 标签与注解,增强运维能力 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) +* **PVC 卷属性**:支持为 PVC 设置 volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) +* **组件定义策略规则**:新增细粒度策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) +* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) + +#### 数据保护 + +* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) +* **备份参数一致性**:支持备份与恢复参数,确保一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) +* **保留最近备份**:支持保留最新备份,提升恢复能力 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) + +#### 运维管理 + +* **OpsRequest 验证**:引入验证策略确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) + +--- + +### KubeBlocks 插件 + +#### MySQL + +* **TLS 支持**:新增安全连接支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) +* **组复制 + ProxySQL**:支持高可用架构 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) +* **PITR 恢复**:使用 WAL-G 实现时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) +* **持续与增量备份**:通过 WAL-G 改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) + +#### Redis + +* **集群切换与哨兵优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) +* **实例重建**:支持 Redis 实例重建 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) + +#### MongoDB + +* **PITR 恢复**:新增时间点恢复功能 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) +* **新版本支持**:新增 MongoDB 8.0.8 和 8.0.6 版本支持 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) + +#### Kafka + +* **外部 
ZooKeeper**:为 Kafka 2.7 添加外部 ZooKeeper 支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) +* **自定义 Prometheus 指标**:支持配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) +* **跳过端口解析**:提升使用 Pod IP 时的灵活性 [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) +* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) + +#### RabbitMQ + +* **新版本支持**:新增 RabbitMQ 4.0.9 版本支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) + +#### ClickHouse + +* **22.9.4 支持**:新增 ClickHouse 22.9.4 兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) + +#### TiDB + +* **8.4 版本支持**:新增 TiDB 8.4 支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) +* **升级至 6.5.12**:将 TiDB 6 更新至 v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/p \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-10/100.mdx b/docs/zh/preview/user_docs/release_notes/release-10/100.mdx new file mode 100644 index 00000000..b32b5785 --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-10/100.mdx @@ -0,0 +1,189 @@ +--- +description: 版本说明 v1.0.0 +keywords: +- kubeblocks +- release notes +sidebar_position: 1 +title: v1.0.0 +--- +# KubeBlocks 1.0.0 (2025-05-28) + +我们很高兴宣布 KubeBlocks 1.0.0 版本正式发布。 + +KubeBlocks 1.0.0 标志着一个重要里程碑,其核心 API 已升级至稳定版本(v1),并在集群管理、数据保护和运行稳定性方面实现了重大改进,为生产环境带来了更高的灵活性和可靠性。 + +## 核心亮点 + +### API 进入稳定阶段 + +KubeBlocks 0.9 版本引入了灵活的拓扑结构、高级 Pod 管理(InstanceSet)和生命周期钩子功能。这些特性现已**正式进入稳定阶段**。 + +以下 CRD 已升级至 **`v1` 版本并视为稳定**,将获得长期支持: + +**`apps.kubeblocks.io` API 组:** + +* `ClusterDefinition` +* `Cluster` +* `ComponentDefinition` +* `Component` +* `ComponentVersion` +* `ServiceDescriptor` +* `ShardingDefinition` +* `SidecarDefinition` + +**`workloads.kubeblocks.io` API 组:** + +* `InstanceSet` + +### KubeBlocks 功能特性 + +* **滚动更新**:通过 Cluster API 支持滚动升级,最大程度减少更新期间的停机时间 +* **增量备份**:新增增量备份支持,提升性能并降低存储消耗 + +### KubeBlocks 插件亮点 + +* **MySQL 增强**:新增 TLS 支持、通过 ProxySQL 实现的组复制以及基于 WAL-G 的时间点恢复(PITR),显著提升安全性和恢复能力 +* **MongoDB PITR 与多版本支持**:为 MongoDB 引入时间点恢复功能并新增多版本支持 +* **Kafka 改进**:支持外部 ZooKeeper、自定义 Prometheus 指标以及多网络访问,提升灵活性和可观测性 +* **Redis 增强**:新增集群切换、实例重建和外部配置支持,强化运维健壮性 + +--- + + + +## 变更内容 + +### KubeBlocks + +#### 集群管理 + +* **滚动升级**:通过Cluster API实现零停机升级 [#8973](https://github.com/apecloud/kubeblocks/pull/8973) +* **动态镜像仓库**:支持动态替换镜像仓库以实现更灵活的部署 [#8018](https://github.com/apecloud/kubeblocks/pull/8018) +* **分片Pod反亲和性**:为分片组件添加反亲和性规则 [#8705](https://github.com/apecloud/kubeblocks/pull/8705) +* **Pod元数据更新**:允许更新底层Pod的标签和注解 [#8571](https://github.com/apecloud/kubeblocks/pull/8571) +* **PVC存储卷属性**:支持为PVC设置volumeAttributesClass [#8783](https://github.com/apecloud/kubeblocks/pull/8783) +* **组件定义策略规则**:添加细粒度的策略控制 [#8328](https://github.com/apecloud/kubeblocks/pull/8328) +* **组件角色重构**:改进组件管理的角色定义 [#8416](https://github.com/apecloud/kubeblocks/pull/8416) + +#### 数据保护 + +* **增量备份**:新增高效增量备份支持 [#8693](https://github.com/apecloud/kubeblocks/pull/8693) +* **备份/恢复参数**:确保备份/恢复过程中的配置一致性 [#8472](https://github.com/apecloud/kubeblocks/pull/8472) +* **保留最新备份**:新增保留最新备份的选项 [#9088](https://github.com/apecloud/kubeblocks/pull/9088) + +#### 运维 + +* **OpsRequest验证**:引入验证策略以确保操作正确性 [#8232](https://github.com/apecloud/kubeblocks/pull/8232) + +--- + +### KubeBlocks 插件 + +#### MySQL + +* **TLS支持**:添加TLS安全连接支持 [#1462](https://github.com/apecloud/kubeblocks-addons/pull/1462) +* **ProxySQL组复制**:通过ProxySQL集成增强高可用性 [#1467](https://github.com/apecloud/kubeblocks-addons/pull/1467) +* 
**时间点恢复(PITR)**:支持使用WAL-G进行时间点恢复 [#1451](https://github.com/apecloud/kubeblocks-addons/pull/1451) +* **持续与增量备份**:使用WAL-G改进备份策略 [#1456](https://github.com/apecloud/kubeblocks-addons/pull/1456) + +#### Redis + +* **集群切换与Sentinel优化**:增强故障转移能力 [#1414](https://github.com/apecloud/kubeblocks-addons/pull/1414) +* **实例重建**:支持重建Redis实例 [#1417](https://github.com/apecloud/kubeblocks-addons/pull/1417) + +#### MongoDB + +* **时间点恢复(PITR)**:新增时间点恢复支持 [#1487](https://github.com/apecloud/kubeblocks-addons/pull/1487) +* **新版本支持**:新增MongoDB 8.0.8和8.0.6版本支持 [#1431](https://github.com/apecloud/kubeblocks-addons/pull/1431), [#1590](https://github.com/apecloud/kubeblocks-addons/pull/1590) + +#### Kafka + +* **外部ZooKeeper**:为Kafka 2.7添加外部ZooKeeper支持 [#1297](https://github.com/apecloud/kubeblocks-addons/pull/1297) +* **自定义Prometheus指标**:支持配置自定义指标 [#1544](https://github.com/apecloud/kubeblocks-addons/pull/1544) +* **跳过广告端口解析**:使用Pod IP时避免解析 [#1569](https://github.com/apecloud/kubeblocks-addons/pull/1569) +* **自定义安全上下文**:支持自定义安全设置 [#1337](https://github.com/apecloud/kubeblocks-addons/pull/1337) + +#### RabbitMQ + +* **新版本支持**:新增RabbitMQ 4.0.9版本支持 [#1596](https://github.com/apecloud/kubeblocks-addons/pull/1596) + +#### ClickHouse + +* **22.9.4支持**:新增ClickHouse 22.9.4兼容性 [#1376](https://github.com/apecloud/kubeblocks-addons/pull/1376) + +#### TiDB + +* **8.4版本支持**:新增TiDB 8.4支持 [#1384](https://github.com/apecloud/kubeblocks-addons/pull/1384) +* **升级至6.5.12**:将TiDB 6更新至v6.5.12 [#1664](https://github.com/apecloud/kubeblocks-addons/pull/1664) + +--- + +### API版本演进、弃用与移除 + +#### 正式发布(v1)API晋升 + +以下CRD现已**晋升至`v1`版本并视为稳定**,将获得长期支持: + +**`apps.kubeblocks.io` API组** + +- `ClusterDefinition` +- `Cluster` +- `ComponentDefinition` +- `Component` +- `ComponentVersion` +- `ServiceDescriptor` +- `ShardingDefinition` +- `SidecarDefinition` + +**`workloads.kubeblocks.io` API组** + +- `InstanceSet` + +> 这些资源的`v1alpha1`和`v1beta1`版本现已弃用,可能在未来的版本中移除。 + +#### 弃用声明 + +以下CRD已被弃用,将在**后续版本中移除**,请相应迁移您的配置: + +* `ConfigConstraint` +* `Configuration` + +> 这些资源不再维护或接收更新。 + +#### 新Alpha API(实验性) + +新的**`parameters.kubeblocks.io`** API组引入了细粒度配置API: + +* `ComponentParameter` +* `ParamConfigRenderer` +* `Parameter` +* `ParametersDefinition` + +> 这些API旨在替代已弃用的`ConfigConstraint`和`Configuration`。 + +#### API组重组 + +部分API已迁移至新组以更好地体现其领域职责,请相应更新您的清单: + +| 资源 | 原API组 | 新API组 | +| ----------------------------- | --------------------- | ---------------------------- | +| `OpsDefinition`/`OpsRequest` | `apps.kubeblocks.io` | `operations.kubeblocks.io` | +| `BackupPolicyTemplate` | `apps.kubeblocks.io` | `dataprotection.kubeblocks.io` | + +## 升级公告 + +:::note +请注意,目前**不支持**从 0.9 版本直接升级到 1.0 版本。 +::: + +我们正在积极开发一个经过充分测试的可靠升级方案,该方案将包含在即将发布的版本中。 + +## 致谢 + +在庆祝 KubeBlocks 1.0.0 发布之际,我们要向所有帮助塑造这一项目的工程师、贡献者和合作伙伴致以诚挚的谢意。 + +特别感谢快手、中国移动云、唯品会、腾讯和 360 的技术贡献、深度反馈及真实场景用例,这些极大地推动了项目架构、性能和生产就绪度的进步。 + +我们同样深切感激社区贡献者们——你们的代码提交、问题报告、讨论和评审,对推动项目在质量与创新方面的发展起到了关键作用。 + +1.0.0 版本标志着我们在构建健壮的云原生数据库平台道路上迈出了重要一步。期待与各位继续携手同行——共同扩展生态、应对新挑战、突破数据基础设施的边界。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/release_notes/release-10/_category_.yml b/docs/zh/preview/user_docs/release_notes/release-10/_category_.yml new file mode 100644 index 00000000..a424ddc5 --- /dev/null +++ b/docs/zh/preview/user_docs/release_notes/release-10/_category_.yml @@ -0,0 +1,4 @@ +collapsed: true +collapsible: true +label: 发布 v1.0 版本 +position: 1 diff --git a/docs/zh/preview/user_docs/troubleshooting/_category_.yml b/docs/zh/preview/user_docs/troubleshooting/_category_.yml new file mode 100644 index 
00000000..ee02de77
--- /dev/null
+++ b/docs/zh/preview/user_docs/troubleshooting/_category_.yml
@@ -0,0 +1,4 @@
+collapsed: true
+collapsible: true
+label: 故障排除
+position: 61
diff --git a/docs/zh/preview/user_docs/troubleshooting/handle-a-cluster-exception.mdx b/docs/zh/preview/user_docs/troubleshooting/handle-a-cluster-exception.mdx
new file mode 100644
index 00000000..ec925f6d
--- /dev/null
+++ b/docs/zh/preview/user_docs/troubleshooting/handle-a-cluster-exception.mdx
@@ -0,0 +1,197 @@
+---
+description: 如何处理集群中的异常
+keywords:
+- cluster exception
+sidebar_label: 常见问题
+sidebar_position: 1
+title: 常见问题
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 常见问题解答
+
+### 创建集群时KubeBlocks生成的K8s资源列表
+
+获取KubeBlocks为指定集群创建的所有关联资源列表(`<cluster-name>` 请替换为实际集群名称):
+
+```bash
+kubectl get cmp,its,po -l app.kubernetes.io/instance=<cluster-name> -n demo                           # 集群和工作负载
+kubectl get backuppolicy,backupschedule,backup -l app.kubernetes.io/instance=<cluster-name> -n demo   # 数据保护资源
+kubectl get componentparameter,parameter -l app.kubernetes.io/instance=<cluster-name> -n demo         # 配置资源
+kubectl get opsrequest -l app.kubernetes.io/instance=<cluster-name> -n demo                           # 运维请求资源
+kubectl get svc,secret,cm,pvc -l app.kubernetes.io/instance=<cluster-name> -n demo                    # K8s原生资源
+```
+
+故障排查时:
+
+1. 描述资源状态(如Cluster、Component等)
+```bash
+kubectl describe TYPE NAME
+```
+
+2. 检查数据库实例日志
+```bash
+kubectl logs <pod-name> -c <container-name>
+```
+
+3. 检查KubeBlocks日志
+```bash
+kubectl -n kb-system logs deployments/kubeblocks -f
+```
+
+### 如何获取各备份方法的详细信息
+
+每个备份方法的细节定义在KubeBlocks的`ActionSet`中。
+
+例如获取PostgreSQL中名为`wal-g-archive`的备份方法对应的`ActionSet`:
+
+```bash
+kubectl -n demo get bp pg-cluster-postgresql-backup-policy -oyaml | yq '.spec.backupMethods[] | select(.name=="wal-g-archive") | .actionSetName'
+```
+
+ActionSet定义了:
+- 备份类型
+- 备份和恢复流程
+- 流程中使用的环境变量
+
+通过查看每个ActionSet的细节可以了解具体的备份和恢复执行方式。
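+
+例如,拿到上一步返回的 `ActionSet` 名称后,可以直接查看其中定义的备份类型、执行流程和环境变量。以下为示意命令(`<actionset-name>` 请替换为实际名称;`.spec` 下的字段路径基于常见的 ActionSet 结构,请以集群中实际的 CRD 定义为准):
+
+```bash
+# 备份类型(如 Full/Continuous/Incremental)
+kubectl get actionset <actionset-name> -oyaml | yq '.spec.backupType'
+
+# 备份与恢复流程中实际执行的动作
+kubectl get actionset <actionset-name> -oyaml | yq '.spec.backup'
+kubectl get actionset <actionset-name> -oyaml | yq '.spec.restore'
+
+# 流程中使用的环境变量
+kubectl get actionset <actionset-name> -oyaml | yq '.spec.env'
+```
+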
+### 如何检查兼容版本
+
+版本及其兼容性规则内置于KubeBlocks的`ComponentVersion` CR中。
+查看兼容版本列表:
+
+```bash
+kubectl get cmpv postgresql -ojson | jq '.spec.compatibilityRules'
+```
+
+示例输出:
+
+```json
+[
+  {
+    "compDefs": [
+      "postgresql-12-"
+    ],
+    "releases": [
+      "12.14.0",
+      "12.14.1",
+      "12.15.0"
+    ]
+  },
+  {
+    "compDefs": [
+      "postgresql-14-"
+    ],
+    "releases": [
+      "14.7.2",
+      "14.8.0"
+    ]
+  }
+]
+```
+
+版本按组件定义分组,每个组包含兼容的版本列表。
+此示例显示可以从`12.14.0`升级到`12.14.1`或`12.15.0`,以及从`14.7.2`升级到`14.8.0`。
+但不能从`12.14.0`直接升级到`14.8.0`。
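+
+若只关注某个组件定义前缀对应的兼容版本,也可以在上述输出上用 `jq` 直接过滤。以下为示意写法(`postgresql-14` 前缀仅作示例,请替换为实际的 compDef 名称前缀):
+
+```bash
+kubectl get cmpv postgresql -ojson \
+  | jq '.spec.compatibilityRules[] | select(any(.compDefs[]; startswith("postgresql-14"))) | .releases'
+```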
+
+### ComponentDefinition状态显示不可用
+
+如果修改了ComponentDefinition,其状态可能变为`Unavailable`。
+KubeBlocks将ComponentDefinition设为`Unavailable`以防止变更影响现有集群。
+
+通过describe命令可以看到如下信息:
+
+```text
+Status:
+  Message:               不可变字段不允许更新
+  Observed Generation:   3
+  Phase:                 Unavailable
+```
+
+如果是预期变更,可以通过以下命令添加注解(`<componentdefinition-name>` 请替换为实际名称):
+
+```bash
+kubectl annotate componentdefinition <componentdefinition-name> apps.kubeblocks.io/skip-immutable-check=true
+```
+
+### 在K8s 1.23及以下版本安装KubeBlocks失败
+
+使用K8s 1.23及以下版本时可能遇到以下错误:
+
+```bash
+unknown field "x-kubernetes-validations" .... 如需忽略此错误,请使用--validate=false关闭验证
+```
+
+这是因为K8s 1.23及以下版本不支持`x-kubernetes-validations`字段。
+
+解决方案:
+
+```bash
+kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v1.0.0/kubeblocks_crds.yaml --validate=false
+```
+
+### 如何取消正在执行的运维请求
+
+KubeBlocks支持取消满足以下条件的运维请求:
+- 处于`Running`状态
+- 类型为`VerticalScaling`或`HorizontalScaling`
+
+取消命令(`<opsrequest-name>` 请替换为实际名称):
+
+```bash
+kubectl patch opsrequest <opsrequest-name> -p '{"spec":{"cancel":true}}' --type=merge
+```
+
+### 集群/组件卡在`Updating`状态
+
+排查步骤:
+1. 检查所有Pod是否处于`Running`状态
+2. 检查Pod日志是否有错误
+3. 检查各Pod是否具有预期角色(如需):
+```bash
+kubectl get po -L kubeblocks.io/role
+```
+4. 对比Pod配置中的容器镜像与实际运行镜像是否一致:
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: <pod-name>
+spec:
+  containers:
+  - image: repo/image:tag   # <==== 配置中的镜像
+    name: c1
+status:
+  containerStatuses:
+  - containerID: containerd://123456
+    image: repo/image:tag  # <====== 实际运行的镜像
+    imageID: repo/image:tag@sha256:123456
+    name: c1
+```
+若不一致,请检查节点上是否存在多个相同IMAGE ID但不同TAG的镜像。如有,请清理节点镜像后重建集群。
+
+### 集群卡在`Deleting`状态且日志报错"has no pods to running the pre-terminate action"
+
+删除集群时可能出现此情况,查看KubeBlocks日志:
+
+```bash
+kubectl -n kb-system logs deployments/kubeblocks -f
+```
+
+可见如下错误信息:
+
+```bash
+> INFO build error: has no pods to running the pre-terminate action
+```
+
+这是因为KubeBlocks会执行`ComponentDefinition`中定义的`pre-terminate`生命周期动作。当没有Pod可执行该动作时,集群会一直处于删除状态。
+
+解决方案(跳过pre-terminate动作,`<component-name>` 请替换为实际组件名称):
+
+```bash
+kubectl annotate component <component-name> apps.kubeblocks.io/skip-pre-terminate-action=true
+```
+
+该问题通常发生在集群创建失败(如镜像拉取失败/资源不足等)导致没有Pod生成的情况下。
\ No newline at end of file
diff --git a/docs/zh/preview/user_docs/troubleshooting/known-issues.mdx b/docs/zh/preview/user_docs/troubleshooting/known-issues.mdx
new file mode 100644
index 00000000..651702eb
--- /dev/null
+++ b/docs/zh/preview/user_docs/troubleshooting/known-issues.mdx
@@ -0,0 +1,92 @@
+---
+description: KubeBlocks 已知问题
+keywords:
+- KubeBlocks
+- Known Issues
+sidebar_label: 已知问题
+sidebar_position: 2
+title: 已知问题
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# 已知问题
+
+## 问题1:KubeBlocks 创建大量 Secret
+
+### 问题描述
+KubeBlocks 持续为每个集群创建大量 Secret 且不会停止。您可能在 **KubeBlocks** 日志中看到以下信息:
+
+```bash
+INFO reconcile object *v1.ServiceAccount with action UPDATE OK
+```
+
+### 受影响版本
+- KubeBlocks v1.0.0 及 Kubernetes 版本 ≤ 1.24
+
+### 根本原因
+在 Kubernetes 1.24 版本之前,Kubernetes 会自动为 ServiceAccount 生成基于 Secret 的令牌,详见 [Kubernetes 服务账户令牌文档](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/)。
+
+### 解决方案
+将 KubeBlocks 升级至 `v1.0.1-beta.3` 或更高版本。
+
+---
+
+## 问题2:PostgreSQL 因密码含特殊字符启动失败
+
+### 问题描述
+当密码包含某些特殊字符时,PostgreSQL 可能启动失败。通过检查 Pod 日志可见:
+```bash
+File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 116, in check_token
+    self.fetch_more_tokens()
+  File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 231, in fetch_more_tokens
+    return self.fetch_anchor()
+  File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 621,
in fetch_anchor + self.tokens.append(self.scan_anchor(AnchorToken)) + File "/usr/lib/python3/dist-packages/yaml/scanner.py", line 929, in scan_anchor + raise ScannerError("while scanning an %s" % name, start_mark, +yaml.scanner.ScannerError: while scanning an anchor + in "", line 45, column 17: + password: &JgE#F5x&eNwis*2dW!7& ... + ^ +``` + +### 受影响版本 +- KubeBlocks v0.9.4 及之前版本 +- KubeBlocks v1.0.0 + +### 解决方案 + +升级 KubeBlocks 至 `v1.0.1-beta.6` 或 `v0.9.5-beta.4` 及以上版本。 + +可通过显式设置密码生成策略中允许的符号列表来解决此问题: + +```yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: +spec: + componentSpecs: + - name: postgresql + systemAccounts: + - name: postgres + passwordConfig: + length: 20 # 密码长度:20个字符 + numDigits: 4 # 至少4位数字 + numSymbols: 2 # 至少2个符号 + letterCase: MixedCases # 大小写字母混合 + symbolCharacters: '!' # 设置生成密码时允许使用的符号 +# 为简洁起见,省略了Cluster清单中的其他字段 +``` + +## 如何报告问题 + +您可以通过以下方式报告问题: + +1. 在 [KubeBlocks GitHub 仓库](https://github.com/apecloud/kubeblocks/issues/) 创建 issue +2. [可选] 提供通过 `kbcli report` 命令生成的压缩文件: +```bash +kbcli report cluster --with-logs --mask # 打包集群清单、版本和日志 +kbcli report kubeblocks --with-logs --mask # 打包kubeblocks日志 +``` +其中 `clusterName` 是您要报告问题的集群名称,`--mask` 参数会对 Secret 和 ConfigMap 中的敏感信息进行脱敏处理。 \ No newline at end of file diff --git a/docs/zh/preview/user_docs/upgrade/_category_.yml b/docs/zh/preview/user_docs/upgrade/_category_.yml new file mode 100644 index 00000000..2fdfdbd2 --- /dev/null +++ b/docs/zh/preview/user_docs/upgrade/_category_.yml @@ -0,0 +1,5 @@ +collapsed: true +collapsible: true +hidden: true +label: 升级 KubeBlocks +position: 100 diff --git a/docs/zh/preview/user_docs/upgrade/upgrade-to-0_8.mdx b/docs/zh/preview/user_docs/upgrade/upgrade-to-0_8.mdx new file mode 100644 index 00000000..28ca3d4c --- /dev/null +++ b/docs/zh/preview/user_docs/upgrade/upgrade-to-0_8.mdx @@ -0,0 +1,115 @@ +--- +description: 升级至 KubeBlocks v0.8:操作指南、技巧与注意事项 +keywords: +- upgrade +- 0.8 +title: 升级至 v0.8 版本 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 升级至 KubeBlocks v0.8 + +本教程将指导您如何升级至 KubeBlocks v0.8 版本。 + +:::note + +执行 `helm -n kb-system list | grep kubeblocks` 或 `kbcli version` 命令可查看当前运行的 KubeBlocks 版本,再进行升级操作。 + +::: + +## 从 KubeBlocks v0.7 升级 + + + + + +1. 设置 keepAddons 参数 + + KubeBlocks v0.8 精简了默认安装的数据库引擎,并将 greptime、influxdb、neon、oracle-mysql、orioledb、tdengine、mariadb、nebula、risingwave、starrocks、tidb 和 zookeeper 等插件从 KubeBlocks Operator 分离至 KubeBlocks-Addons 仓库。为避免升级过程中删除正在使用的插件资源,请执行以下操作: + +- 检查当前 KubeBlocks 版本 + + ```bash + helm -n kb-system list | grep kubeblocks + ``` + +- 将 keepAddons 参数设为 true + + ```bash + helm repo add kubeblocks https://apecloud.github.io/helm-charts + helm repo update kubeblocks + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version \{VERSION\} --set keepAddons=true + ``` + + 请将 \{VERSION\} 替换为您当前的 KubeBlocks 版本号(例如 0.7.2)。 + +- 验证插件配置 + + 执行以下命令确保插件注解中包含 `"helm.sh/resource-policy": "keep"` 标记。 + + ```bash + kubectl get addon -o json | jq '.items[] | {name: .metadata.name, annotations: .metadata.annotations}' + ``` + +2. 安装 CRD + + 为减小 Helm Chart 体积,KubeBlocks v0.8 移除了 Chart 中的 CRD 定义。升级前需手动安装 CRD: + + ```bash + kubectl replace -f https://github.com/apecloud/kubeblocks/releases/download/v0.8.1/kubeblocks_crds.yaml + ``` + +3. 
执行升级 + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.8.1 --set dataProtection.image.datasafed.tag=0.1.0 + ``` + +:::note + +为避免影响现有数据库集群,升级至 KubeBlocks v0.8 时默认不会升级已安装插件的版本。如需将插件升级至 v0.8 内置版本,可执行以下命令。请注意此操作可能导致集群重启并影响服务可用性,请谨慎操作。 + +```bash +helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.8.1 --set upgradeAddons=true +``` + +::: + + + + + +1. 下载 kbcli v0.8 + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.8.1 + ``` + +2. 执行升级 + + ```bash + kbcli kb upgrade --version 0.8.1 --set dataProtection.image.datasafed.tag=0.1.0 + ``` + + kbcli 会自动添加 `"helm.sh/resource-policy": "keep"` 注解,确保升级过程中不会删除现有插件。 + + + + + +## 从 KubeBlocks v0.6 升级 + +若当前运行的是 KubeBlocks v0.6 版本,请先升级至 v0.7.2 版本。 + +1. 下载 kbcli v0.7.2 + + ```shell + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.7.2 + ``` + +2. 升级至 KubeBlocks v0.7.2 + + ```shell + kbcli kb upgrade --version 0.7.2 + ``` \ No newline at end of file diff --git a/docs/zh/preview/user_docs/upgrade/upgrade-to-0_9_0.mdx b/docs/zh/preview/user_docs/upgrade/upgrade-to-0_9_0.mdx new file mode 100644 index 00000000..e571ff55 --- /dev/null +++ b/docs/zh/preview/user_docs/upgrade/upgrade-to-0_9_0.mdx @@ -0,0 +1,171 @@ +--- +description: 升级至 KubeBlocks v0.9.0:操作指南、技巧与注意事项 +keywords: +- upgrade +- 0.9.0 +title: 升级至 v0.9.0 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 升级至 KubeBlocks v0.9.0 + +本教程将指导您如何升级至 KubeBlocks v0.9.0 版本。 + +:::note + +执行 `helm -n kb-system list | grep kubeblocks` 或 `kbcli version` 命令可查看当前运行的 KubeBlocks 版本,再进行升级操作。 + +::: + +## 兼容性说明 + +KubeBlocks 0.9.0 兼容 KubeBlocks 0.8 版本的 API,但不保证与 v0.8 之前版本的 API 兼容。若您正在使用 KubeBlocks 0.7 或更早版本(0.6 等)的 Addons,请务必[先升级至 v0.8 版本](../upgrade/upgrade-to-0_8),确保服务可用性后再升级至 v0.9.0。 + +## 从 KubeBlocks v0.8 升级 + + + + + +1. 为 Addons 添加 `"helm.sh/resource-policy": "keep"` 注解 + + KubeBlocks v0.8 精简了默认安装的引擎。为避免升级过程中删除正在使用的 Addon 资源,请先执行以下命令: + + - 为 Addons 添加注解(可将 `-l app.kubernetes.io/name=kubeblocks` 替换为实际筛选条件): + + ```bash + kubectl annotate addons.extensions.kubeblocks.io -l app.kubernetes.io/name=kubeblocks helm.sh/resource-policy=keep + ``` + + - 验证 Addons 注解 + + 执行以下命令确保 Addon 注解中包含 `"helm.sh/resource-policy": "keep"`: + + ```bash + kubectl get addon -o json | jq '.items[] | {name: .metadata.name, annotations: .metadata.annotations}' + ``` + +2. 删除不兼容的 OpsDefinition + + ```bash + kubectl delete opsdefinitions.apps.kubeblocks.io kafka-quota kafka-topic kafka-user-acl switchover + ``` + +3. 升级前安装 StorageProvider CRD + + 若网络较慢,建议提前下载 CRD YAML 文件到本地: + + ```bash + kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v0.9.0/dataprotection.kubeblocks.io_storageproviders.yaml + ``` + +4. 升级 KubeBlocks + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.0 + ``` + + :::note + + 为避免影响现有数据库集群,升级至 KubeBlocks v0.9.0 时默认不会升级已安装的 Addons 版本。如需将 Addons 升级至 v0.9.0 内置版本,请执行以下命令(注意:此操作可能导致集群重启并影响可用性,请谨慎操作): + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.0 \ + --set upgradeAddons=true + ``` + + ::: + + + + + +1. 下载 kbcli v0.9.0 + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.9.0 + ``` + +2. 
升级 KubeBlocks + + ```bash + kbcli kb upgrade --version 0.9.0 + ``` + + :::note + + 为避免影响现有数据库集群,升级至 KubeBlocks v0.9.0 时默认不会升级已安装的 Addons 版本。如需将 Addons 升级至 v0.9.0 内置版本,请执行以下命令(注意:此操作可能导致集群重启并影响可用性,请谨慎操作): + + ```bash + kbcli kb upgrade --version 0.9.0 --set upgradeAddons=true + ``` + + ::: + + kbcli 会自动添加 `"helm.sh/resource-policy": "keep"` 注解,确保升级过程中不会删除现有 Addons。 + + + + + +## 升级 Addons + +若未设置 `upgradeAddons=true` 或您的 Addon 不在默认安装列表中,可通过以下命令升级 Addons 以使用 v0.9.0 API。 + +:::note + +特殊注意事项: +- 若需升级 `mysql` Addon,必须升级并重启集群,否则 v0.8 创建的集群将无法在 v0.9.0 中使用 +- 对于 `clickhouse/milvus/elasticsearch/llm` Addons,需先升级 KubeBlocks 再升级 Addon,否则这些 Addon 在 v0.9.0 中将无法正常使用 + +::: + + + + + +```bash +# 添加 Helm 仓库 +helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + +# 若 GitHub 访问困难,可使用以下镜像仓库 +helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + +# 更新仓库索引 +helm repo update + +# 更新 Addon 版本 +helm upgrade -i {addon-release-name} kubeblocks-addons/{addon-name} --version x.y.z -n kb-system +``` + + + + + +```bash +# 查看 Addon 索引列表 +kbcli addon index list + +# 更新索引(默认索引为 kubeblocks) +kbcli addon index update kubeblocks + +# 搜索可用 Addon 版本 +kbcli addon search + +# 安装 Addon +kbcli addon install --version x.y.z + +# 升级至指定版本 +kbcli addon upgrade --version x.y.z + +# 强制升级至指定版本 +kbcli addon upgrade --version x.y.z --force + +# 查看可用 Addon 版本 +kbcli addon list | grep +``` + + + + \ No newline at end of file diff --git a/docs/zh/preview/user_docs/upgrade/upgrade-to-v09-version.mdx b/docs/zh/preview/user_docs/upgrade/upgrade-to-v09-version.mdx new file mode 100644 index 00000000..614166ed --- /dev/null +++ b/docs/zh/preview/user_docs/upgrade/upgrade-to-v09-version.mdx @@ -0,0 +1,298 @@ +--- +description: 升级至 KubeBlocks v0.9.x:操作指南、技巧与注意事项 +keywords: +- upgrade +- 0.9.3 +sidebar_label: 升级至 v0.9.x +sidebar_position: 1 +title: 升级至 v0.9.x +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 升级至 KubeBlocks v0.9.x 版本 + +:::note + +- 升级前请确认当前 KubeBlocks 版本: + + 执行命令 `helm -n kb-system list | grep kubeblocks` 或 `kbcli version`。 +- 不同版本的升级指南: + + - 如需升级至 v0.9.2 或 v0.9.1 版本,请参照本升级教程,将版本号相应替换为 v0.9.2 或 v0.9.1。 + - [v0.9.0 版本升级指南](./upgrade-to-0_9_0) + - [v0.8.x 版本升级指南](./upgrade-to-0_8)。 + + 建议安装最新版本以获得更优的性能和功能特性。 + +::: + +## 兼容性说明 + +KubeBlocks v0.9.3 兼容 KubeBlocks v0.8 版本的 API,但不保证与 v0.8 之前版本的 API 兼容。如果您正在使用 KubeBlocks v0.7 或更早版本(如 v0.6 等)的插件(Addons),请务必[先将 KubeBlocks 及所有插件升级至 v0.8 版本](./upgrade-to-0_8),确保服务可用性后再升级至 v0.9 版本。 + +若您正从 v0.8 版本升级至 v0.9,建议启用 webhook 功能以确保服务可用性。 + +## 从 KubeBlocks v0.9.x 升级 + + + + + +1. 查看 Addon 并检查是否存在 `"helm.sh/resource-policy": "keep"` 注解。 + + KubeBlocks 精简了默认安装的引擎。添加 `"helm.sh/resource-policy": "keep"` 注解可避免升级过程中删除正在使用的 Addon 资源。 + + 检查是否已添加 `"helm.sh/resource-policy": "keep"` 注解。 + + ```bash + kubectl get addon -o json | jq '.items[] | {name: .metadata.name, resource_policy: .metadata.annotations["helm.sh/resource-policy"]}' + ``` + + 如果注解不存在,运行以下命令添加。您可以将 `-l app.kubernetes.io/name=kubeblocks` 替换为实际的过滤名称。 + + ```bash + kubectl annotate addons.extensions.kubeblocks.io -l app.kubernetes.io/name=kubeblocks helm.sh/resource-policy=keep + ``` + +2. 安装 CRD。 + + 为减小 Helm Chart 体积,KubeBlocks v0.8 从 Helm Chart 中移除了 CRD。升级前需要先安装 CRD。 + + ```bash + kubectl replace -f https://github.com/apecloud/kubeblocks/releases/download/v0.9.3/kubeblocks_crds.yaml + ``` + +3. 
升级 KubeBlocks。 + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.3 --set crd.enabled=false + ``` + + 从 v0.9.0/v0.9.1/v0.9.2 升级到 v0.9.3 不涉及 API 变更,因此可以设置 `--set crd.enabled=false` 跳过 API 升级任务。 + + :::warning + + 为避免影响现有数据库集群,升级到 KubeBlocks v0.9.3 时默认不会升级已安装 Addons 的版本。如需将 Addons 升级至 KubeBlocks v0.9.3 内置版本,请执行以下命令。请注意这可能会重启现有集群并影响可用性,请谨慎操作。 + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.3 \ + --set upgradeAddons=true \ + --set crd.enabled=false + ``` + + ::: + + + + + +1. 下载 kbcli v0.9.3。 + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.9.3 + ``` + +2. 升级 KubeBlocks。 + + ```bash + kbcli kb upgrade --version 0.9.3 + ``` + + :::warning + + 为避免影响现有数据库集群,升级到 KubeBlocks v0.9.3 时默认不会升级已安装 Addons 的版本。如需将 Addons 升级至 KubeBlocks v0.9.3 内置版本,请执行以下命令。请注意这可能会重启现有集群并影响可用性,请谨慎操作。 + + ```bash + kbcli kb upgrade --version 0.9.3 --set upgradeAddons=true + ``` + + ::: + + `kbcli` 会自动添加 `"helm.sh/resource-policy": "keep"` 注解以确保升级过程中不会删除现有 Addons。 + + + + + +## 从 KubeBlocks v0.8.x 升级 + + + + + +1. 查看 Addon 并检查是否存在 `"helm.sh/resource-policy": "keep"` 注解。 + + KubeBlocks 精简了默认安装的引擎。添加 `"helm.sh/resource-policy": "keep"` 注解可避免升级过程中删除正在使用的 Addon 资源。 + + 检查是否已添加该注解: + + ```bash + kubectl get addon -o json | jq '.items[] | {name: .metadata.name, resource_policy: .metadata.annotations["helm.sh/resource-policy"]}' + ``` + + 如果注解不存在,运行以下命令添加。可将 `-l app.kubernetes.io/name=kubeblocks` 替换为实际的过滤名称。 + + ```bash + kubectl annotate addons.extensions.kubeblocks.io -l app.kubernetes.io/name=kubeblocks helm.sh/resource-policy=keep + ``` + +2. 删除不兼容的 OpsDefinition。 + + ```bash + kubectl delete opsdefinitions.apps.kubeblocks.io kafka-quota kafka-topic kafka-user-acl switchover + ``` + +3. 安装 CRD。 + + 为减小 Helm Chart 体积,KubeBlocks v0.8 从 Helm Chart 中移除了 CRD 并更改了 StorageProvider 的分组。升级前需先安装 StorageProvider CRD。 + + 若网络较慢,建议先下载 CRD YAML 文件到本地再操作。 + + ```bash + kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v0.9.3/dataprotection.kubeblocks.io_storageproviders.yaml + ``` + +4. 升级 KubeBlocks。 + + 升级前需注意以下选项: + + - 设置 `admissionWebhooks.enabled=true` 启用 webhook,支持 ConfigConstraint API 的多版本转换 + - 设置 `admissionWebhooks.ignoreReplicasCheck=true` 后,默认仅在 KubeBlocks 部署为 3 副本时启用 webhook。若仅部署单副本,可通过此配置绕过检查 + - 若当前运行的 KubeBlocks 使用 `infracreate-registry` 开头的镜像仓库,建议在升级时显式配置镜像仓库 + + ```bash + helm repo add kubeblocks https://apecloud.github.io/helm-charts + + helm repo update kubeblocks + + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.3 \ + --set admissionWebhooks.enabled=true \ + --set admissionWebhooks.ignoreReplicasCheck=true + ``` + + :::warning + + 为避免影响现有数据库集群,升级到 KubeBlocks v0.9.3 时默认不会升级已安装 Addon 的版本。如需将 Addon 升级至 KubeBlocks v0.9.3 内置版本,请执行以下命令。注意此操作可能导致现有集群重启并影响可用性,请谨慎操作。 + + ```bash + helm -n kb-system upgrade kubeblocks kubeblocks/kubeblocks --version 0.9.3 \ + --set upgradeAddons=true \ + --set admissionWebhooks.enabled=true \ + --set admissionWebhooks.ignoreReplicasCheck=true + ``` + + ::: + + + + + +1. 下载 kbcli v0.9.3。 + + ```bash + curl -fsSL https://kubeblocks.io/installer/install_cli.sh | bash -s 0.9.3 + ``` + +2. 
升级 KubeBlocks。 + + 检查 kbcli 版本确保使用 v0.9.3: + + ```bash + kbcli version + ``` + + 升级前需注意以下选项: + + - 设置 `admissionWebhooks.enabled=true` 启用 webhook,支持 ConfigConstraint API 的多版本转换 + - 设置 `admissionWebhooks.ignoreReplicasCheck=true` 后,默认仅在 KubeBlocks 部署为 3 副本时启用 webhook。若仅部署单副本,可通过此配置绕过检查 + - 若当前运行的 KubeBlocks 使用 `infracreate-registry` 开头的镜像仓库,建议在升级时显式配置镜像仓库 + + ```bash + kbcli kb upgrade --version 0.9.3 \ + --set admissionWebhooks.enabled=true \ + --set admissionWebhooks.ignoreReplicasCheck=true + ``` + + :::warning + + 为避免影响现有数据库集群,升级到 KubeBlocks v0.9.3 时默认不会升级已安装 Addon 的版本。如需将 Addon 升级至 KubeBlocks v0.9.3 内置版本,请执行以下命令。注意此操作可能导致现有集群重启并影响可用性,请谨慎操作。 + + ```bash + kbcli kb upgrade --version 0.9.3 \ + --set upgradeAddons=true \ + --set admissionWebhooks.enabled=true \ + --set admissionWebhooks.ignoreReplicasCheck=true + ``` + + ::: + + `kbcli` 会自动添加 `"helm.sh/resource-policy": "keep"` 注解确保升级时不删除现有 Addon。 + + + + + +## 升级插件 + +如果您未将 `upgradeAddons` 设置为 `true` 或您的插件未包含在默认安装的插件列表中,可以通过运行以下提供的命令来升级插件,以使用 v0.9.x 版本的 API。 + +:::note + +- 如果您要升级的插件是 `mysql`,则需要升级该插件并重启集群。否则,在 KubeBlocks v0.8.x 中创建的集群将无法在 v0.9.x 版本中使用。 + +- 如果您要使用的插件是 `clickhouse/milvus/elasticsearch/llm`,则需要先升级 KubeBlocks,然后再升级该插件。否则,这些插件将无法在 KubeBlocks v0.9.x 中正常使用。 + +::: + + + + + +```bash +# 添加 Helm 仓库 +helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts + +# 如果无法访问 github 或网络非常缓慢,请使用以下仓库替代 +helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable + +# 更新 Helm 仓库 +helm repo update + +# 搜索可用的插件版本 +helm search repo kubeblocks-addons/{addon-name} --versions --devel + +# 更新插件版本 +helm upgrade -i {addon-release-name} kubeblocks-addons/{addon-name} --version x.y.z -n kb-system +``` + + + + + +```bash +# 查看插件索引列表 +kbcli addon index list + +# 更新一个索引,默认索引为 kubeblocks +kbcli addon index update kubeblocks + +# 搜索可用的插件版本 +kbcli addon search {addon-name} + +# 安装一个插件 +kbcli addon install {addon-name} --version x.y.z + +# 将插件升级到指定版本 +kbcli addon upgrade {addon-name} --version x.y.z + +# 强制升级到指定版本 +kbcli addon upgrade {addon-name} --version x.y.z --force + +# 查看可用的插件版本 +kbcli addon list | grep {addon-name} +``` + + + + \ No newline at end of file diff --git a/scripts/python/transalate_mdx.py b/scripts/python/transalate_mdx.py index 485f3daa..c2ea6502 100644 --- a/scripts/python/transalate_mdx.py +++ b/scripts/python/transalate_mdx.py @@ -263,22 +263,17 @@ def get_translation_prompt(self, is_frontmatter: bool = False) -> str: return f"""You are a professional technical documentation translation assistant. Please translate the following {source_name} text to {dest_name} with these requirements: 1. This is frontmatter metadata, so keep it concise and clear 2. Maintain technical term accuracy -3. For technical terms, use standard {dest_name} translations if available, otherwise keep {source_name} -4. The result should be natural and fluent {dest_name} -5. Do not add any extra formatting or markdown{glossary_prompt} +3. The result should be natural and fluent {dest_name} +4. For technical terms, use standard {dest_name} translations if available, otherwise keep {source_name}. If you don't know the translation, use the given glossary{glossary_prompt} """ else: return f"""You are a professional technical documentation translation assistant. Please translate the following {source_name} technical documentation to {dest_name} with these requirements: 1. Maintain technical term accuracy -2. Keep Markdown formatting unchanged, including line breaks and spacing -3. 
Do not translate content in import statements -4. Do not touch the code block content -5. Do not translate links or images -6. Preserve all formatting, spacing, and line breaks exactly as in the original -7. Translation should be natural and fluent, conforming to {dest_name} expression habits -8. For technical terms, use standard {dest_name} translations if available, otherwise keep {source_name} and add {dest_name} explanations on first occurrence{glossary_prompt} - -IMPORTANT: Preserve exact spacing and line breaks between text and code blocks.""" +2. Keep Markdown structure unchanged +3. Do not translate content in import statements, code blocks, links, images, etc. +4. Translation should be natural and fluent, conforming to {dest_name} expression habits +5. For technical terms, use standard {dest_name} translations if available, otherwise keep {source_name}. If you don't know the translation, use the given glossary{glossary_prompt} +""" def translate_frontmatter_field(self, text: str) -> str: """