From 352296c6cd1f5437a9a4fc6b882411fab9e5ed42 Mon Sep 17 00:00:00 2001 From: Yunus M Date: Tue, 5 Nov 2024 22:13:12 +0530 Subject: [PATCH 01/30] fix: initialize target to 3 in anomaly detection alert (#6362) --- frontend/src/container/CreateAlertRule/defaults.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/src/container/CreateAlertRule/defaults.ts b/frontend/src/container/CreateAlertRule/defaults.ts index 44dee01d31..34058a06f6 100644 --- a/frontend/src/container/CreateAlertRule/defaults.ts +++ b/frontend/src/container/CreateAlertRule/defaults.ts @@ -94,6 +94,7 @@ export const anamolyAlertDefaults: AlertDef = { matchType: defaultMatchType, algorithm: defaultAlgorithm, seasonality: defaultSeasonality, + target: 3, }, labels: { severity: 'warning', From 7086470ce2992f3397b90e611e62202a07e60e20 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:23:51 +0530 Subject: [PATCH 02/30] feat: added healthcheck and attribute checklist component for Kafka (#6371) * feat: added healthcheck and attribute checklist component for Kafka * feat: corrected the onboardingapi payload * feat: added missing configuration button at overview and onboarding flow --- .../onboarding/getOnboardingStatus.ts | 6 +- ...ot-kubernetes-runApplication-producers.md} | 0 .../ConnectionStatus/ConnectionStatus.tsx | 52 +++-- .../Steps/DataSource/DataSource.tsx | 12 +- .../constants/apmDocFilePaths.ts | 4 +- .../ onboarding/useOnboardingStatus.tsx | 11 +- .../AttributeCheckList.tsx | 206 ++++++++++++++++++ .../MessagingQueueHealthCheck.styles.scss | 165 ++++++++++++++ .../MessagingQueueHealthCheck.tsx | 133 +++++++++++ .../MessagingQueues.styles.scss | 30 ++- .../pages/MessagingQueues/MessagingQueues.tsx | 19 +- .../MessagingQueues/MessagingQueuesUtils.ts | 6 + 12 files changed, 603 insertions(+), 41 deletions(-) rename frontend/src/container/OnboardingContainer/Modules/APM/Java/md-docs/SpringBoot/Kubernetes/{springBoot-kubernetes-runApplication-producer.md => springBoot-kubernetes-runApplication-producers.md} (100%) create mode 100644 frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx create mode 100644 frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss create mode 100644 frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.tsx diff --git a/frontend/src/api/messagingQueues/onboarding/getOnboardingStatus.ts b/frontend/src/api/messagingQueues/onboarding/getOnboardingStatus.ts index b5da83aa13..da82e70134 100644 --- a/frontend/src/api/messagingQueues/onboarding/getOnboardingStatus.ts +++ b/frontend/src/api/messagingQueues/onboarding/getOnboardingStatus.ts @@ -16,11 +16,13 @@ export interface OnboardingStatusResponse { const getOnboardingStatus = async (props: { start: number; end: number; + endpointService?: string; }): Promise | ErrorResponse> => { + const { endpointService, ...rest } = props; try { const response = await ApiBaseInstance.post( - '/messaging-queues/kafka/onboarding/consumers', - props, + `/messaging-queues/kafka/onboarding/${endpointService || 'consumers'}`, + rest, ); return { diff --git a/frontend/src/container/OnboardingContainer/Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication-producer.md b/frontend/src/container/OnboardingContainer/Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication-producers.md similarity index 100% rename from 
frontend/src/container/OnboardingContainer/Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication-producer.md rename to frontend/src/container/OnboardingContainer/Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication-producers.md diff --git a/frontend/src/container/OnboardingContainer/Steps/ConnectionStatus/ConnectionStatus.tsx b/frontend/src/container/OnboardingContainer/Steps/ConnectionStatus/ConnectionStatus.tsx index 97238b6553..0fc81c0533 100644 --- a/frontend/src/container/OnboardingContainer/Steps/ConnectionStatus/ConnectionStatus.tsx +++ b/frontend/src/container/OnboardingContainer/Steps/ConnectionStatus/ConnectionStatus.tsx @@ -14,6 +14,7 @@ import { useQueryService } from 'hooks/useQueryService'; import useResourceAttribute from 'hooks/useResourceAttribute'; import { convertRawQueriesToTraceSelectedTags } from 'hooks/useResourceAttribute/utils'; import useUrlQuery from 'hooks/useUrlQuery'; +import MessagingQueueHealthCheck from 'pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck'; import { getAttributeDataFromOnboardingStatus } from 'pages/MessagingQueues/MessagingQueuesUtils'; import { useEffect, useMemo, useState } from 'react'; import { useDispatch, useSelector } from 'react-redux'; @@ -33,6 +34,9 @@ export default function ConnectionStatus(): JSX.Element { const urlQuery = useUrlQuery(); const getStartedSource = urlQuery.get(QueryParams.getStartedSource); + const getStartedSourceService = urlQuery.get( + QueryParams.getStartedSourceService, + ); const { serviceName, @@ -74,10 +78,14 @@ export default function ConnectionStatus(): JSX.Element { data: onbData, error: onbErr, isFetching: onbFetching, - } = useOnboardingStatus({ - enabled: getStartedSource === 'kafka', - refetchInterval: pollInterval, - }); + } = useOnboardingStatus( + { + enabled: getStartedSource === 'kafka', + refetchInterval: pollInterval, + }, + getStartedSourceService || '', + 'query-key-onboarding-status', + ); const [ shouldRetryOnboardingCall, @@ -326,18 +334,30 @@ export default function ConnectionStatus(): JSX.Element {
{isQueryServiceLoading && } - {!isQueryServiceLoading && isReceivingData && ( - <> - - Success - - )} - {!isQueryServiceLoading && !isReceivingData && ( - <> - - Failed - - )} + {!isQueryServiceLoading && + isReceivingData && + (getStartedSource !== 'kafka' ? ( + <> + + Success + + ) : ( + + ))} + {!isQueryServiceLoading && + !isReceivingData && + (getStartedSource !== 'kafka' ? ( + <> + + Failed + + ) : ( + + ))}
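For context, a minimal usage sketch of the endpoint-aware onboarding-status hook wired up above. The component name, the 'producers' value, and the query key are illustrative assumptions, not part of this patch; per the API change above, the backend falls back to the consumers endpoint when no service is passed, and the hook derives its default query key from the endpoint.

import { useOnboardingStatus } from 'hooks/messagingQueue / onboarding/useOnboardingStatus';

// Hypothetical consumer, mirroring the ConnectionStatus usage above.
function KafkaOnboardingStatusPoll(): JSX.Element {
	const { data, isFetching } = useOnboardingStatus(
		{ enabled: true, refetchInterval: 10_000 },
		'producers', // endpointService -> /messaging-queues/kafka/onboarding/producers
		'query-key-onboarding-status-producers', // queryKey; defaults to `onboardingStatus-${endpointService}`
	);
	return <pre>{isFetching ? 'checking...' : JSON.stringify(data)}</pre>;
}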
diff --git a/frontend/src/container/OnboardingContainer/Steps/DataSource/DataSource.tsx b/frontend/src/container/OnboardingContainer/Steps/DataSource/DataSource.tsx index f31fad1aa2..0936c4754d 100644 --- a/frontend/src/container/OnboardingContainer/Steps/DataSource/DataSource.tsx +++ b/frontend/src/container/OnboardingContainer/Steps/DataSource/DataSource.tsx @@ -9,7 +9,10 @@ import cx from 'classnames'; import { QueryParams } from 'constants/query'; import ROUTES from 'constants/routes'; import { useOnboardingContext } from 'container/OnboardingContainer/context/OnboardingContext'; -import { useCases } from 'container/OnboardingContainer/OnboardingContainer'; +import { + ModulesMap, + useCases, +} from 'container/OnboardingContainer/OnboardingContainer'; import { getDataSources, getSupportedFrameworks, @@ -49,6 +52,9 @@ export default function DataSource(): JSX.Element { updateSelectedFramework, } = useOnboardingContext(); + const isKafkaAPM = + getStartedSource === 'kafka' && selectedModule?.id === ModulesMap.APM; + const [supportedDataSources, setSupportedDataSources] = useState< DataSourceType[] >([]); @@ -155,14 +161,14 @@ export default function DataSource(): JSX.Element { className={cx( 'supported-language', selectedDataSource?.name === dataSource.name ? 'selected' : '', - getStartedSource === 'kafka' && + isKafkaAPM && !messagingQueueKakfaSupportedDataSources.includes(dataSource?.id || '') ? 'disabled' : '', )} key={dataSource.name} onClick={(): void => { - if (getStartedSource !== 'kafka') { + if (!isKafkaAPM) { updateSelectedFramework(null); updateSelectedEnvironment(null); updateSelectedDataSource(dataSource); diff --git a/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts b/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts index df5e296722..1e6909fca7 100644 --- a/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts +++ b/frontend/src/container/OnboardingContainer/constants/apmDocFilePaths.ts @@ -252,7 +252,7 @@ import APM_java_springBoot_docker_recommendedSteps_runApplication from '../Modul import APM_java_springBoot_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-installOtelCollector.md'; import APM_java_springBoot_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-instrumentApplication.md'; import APM_java_springBoot_kubernetes_recommendedSteps_runApplication from '../Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication.md'; -import APM_java_springBoot_kubernetes_recommendedSteps_runApplication_producer from '../Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication-producer.md'; +import APM_java_springBoot_kubernetes_recommendedSteps_runApplication_producers from '../Modules/APM/Java/md-docs/SpringBoot/Kubernetes/springBoot-kubernetes-runApplication-producers.md'; // SpringBoot-LinuxAMD64-quickstart import APM_java_springBoot_linuxAMD64_quickStart_instrumentApplication from '../Modules/APM/Java/md-docs/SpringBoot/LinuxAMD64/QuickStart/springBoot-linuxamd64-quickStart-instrumentApplication.md'; import APM_java_springBoot_linuxAMD64_quickStart_runApplication from '../Modules/APM/Java/md-docs/SpringBoot/LinuxAMD64/QuickStart/springBoot-linuxamd64-quickStart-runApplication.md'; @@ -1054,7 +1054,7 @@ export const ApmDocFilePaths = { APM_java_springBoot_kubernetes_recommendedSteps_setupOtelCollector, 
APM_java_springBoot_kubernetes_recommendedSteps_instrumentApplication, APM_java_springBoot_kubernetes_recommendedSteps_runApplication, - APM_java_springBoot_kubernetes_recommendedSteps_runApplication_producer, + APM_java_springBoot_kubernetes_recommendedSteps_runApplication_producers, // SpringBoot-LinuxAMD64-recommended APM_java_springBoot_linuxAMD64_recommendedSteps_setupOtelCollector, diff --git a/frontend/src/hooks/messagingQueue / onboarding/useOnboardingStatus.tsx b/frontend/src/hooks/messagingQueue / onboarding/useOnboardingStatus.tsx index 897b0d7e33..13ecd15b8b 100644 --- a/frontend/src/hooks/messagingQueue / onboarding/useOnboardingStatus.tsx +++ b/frontend/src/hooks/messagingQueue / onboarding/useOnboardingStatus.tsx @@ -8,15 +8,22 @@ type UseOnboardingStatus = ( options?: UseQueryOptions< SuccessResponse | ErrorResponse >, + endpointService?: string, + queryKey?: string, ) => UseQueryResult | ErrorResponse>; -export const useOnboardingStatus: UseOnboardingStatus = (options) => +export const useOnboardingStatus: UseOnboardingStatus = ( + options, + endpointService, + queryKey, +) => useQuery | ErrorResponse>({ - queryKey: ['onboardingStatus'], + queryKey: [queryKey || `onboardingStatus-${endpointService}`], queryFn: () => getOnboardingStatus({ start: (Date.now() - 15 * 60 * 1000) * 1_000_000, end: Date.now() * 1_000_000, + endpointService, }), ...options, }); diff --git a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx new file mode 100644 index 0000000000..88e2147a0e --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx @@ -0,0 +1,206 @@ +import './MessagingQueueHealthCheck.styles.scss'; + +import { CaretDownOutlined, LoadingOutlined } from '@ant-design/icons'; +import { + Modal, + Select, + Spin, + Tooltip, + Tree, + TreeDataNode, + Typography, +} from 'antd'; +import { OnboardingStatusResponse } from 'api/messagingQueues/onboarding/getOnboardingStatus'; +import { Bolt, Check, OctagonAlert, X } from 'lucide-react'; +import { ReactNode, useEffect, useState } from 'react'; +import { v4 as uuid } from 'uuid'; + +interface AttributeCheckListProps { + visible: boolean; + onClose: () => void; + onboardingStatusResponses: { + title: string; + data: OnboardingStatusResponse['data']; + errorMsg?: string; + }[]; + loading: boolean; +} + +export enum AttributesFilters { + ALL = 'all', + SUCCESS = 'success', + ERROR = 'error', +} + +function ErrorTitleAndKey({ + title, + errorMsg, + isLeaf, +}: { + title: string; + errorMsg?: string; + isLeaf?: boolean; +}): TreeDataNode { + return { + key: `${title}-key-${uuid()}`, + title: ( +
+
+				{title}
+
+
+
+					Fix
+
+
+
+		),
+		isLeaf,
+	};
+}
+
+function AttributeLabels({ title }: { title: ReactNode }): JSX.Element {
+	return (
+
+			{title}
+
+	);
+}
+
+function treeTitleAndKey({
+	title,
+	isLeaf,
+}: {
+	title: string;
+	isLeaf?: boolean;
+}): TreeDataNode {
+	return {
+		key: `${title}-key-${uuid()}`,
+		title: (
+
+				{title}
+
+				{isLeaf && (
+
+
+
+				)}
+
+		),
+		isLeaf,
+	};
+}
+
+function generateTreeDataNodes(
+	response: OnboardingStatusResponse['data'],
+): TreeDataNode[] {
+	return response
+		.map((item) => {
+			if (item.attribute) {
+				if (item.status === '1') {
+					return treeTitleAndKey({ title: item.attribute, isLeaf: true });
+				}
+				if (item.status === '0') {
+					return ErrorTitleAndKey({
+						title: item.attribute,
+						errorMsg: item.error_message || '',
+					});
+				}
+			}
+			return null;
+		})
+		.filter(Boolean) as TreeDataNode[];
+}
+
+function AttributeCheckList({
+	visible,
+	onClose,
+	onboardingStatusResponses,
+	loading,
+}: AttributeCheckListProps): JSX.Element {
+	const [filter, setFilter] = useState(AttributesFilters.ALL);
+	const [treeData, setTreeData] = useState<TreeDataNode[]>([]);
+
+	const handleFilterChange = (value: AttributesFilters): void => {
+		setFilter(value);
+	};
+
+	useEffect(() => {
+		const filteredData = onboardingStatusResponses.map((response) => {
+			if (response.errorMsg) {
+				return ErrorTitleAndKey({
+					title: response.title,
+					errorMsg: response.errorMsg,
+					isLeaf: true,
+				});
+			}
+			let filteredData = response.data;
+
+			if (filter === AttributesFilters.SUCCESS) {
+				filteredData = response.data.filter((item) => item.status === '1');
+			} else if (filter === AttributesFilters.ERROR) {
+				filteredData = response.data.filter((item) => item.status === '0');
+			}
+
+			return {
+				...treeTitleAndKey({ title: response.title }),
+				children: generateTreeDataNodes(filteredData),
+			};
+		});
+
+		setTreeData(filteredData);
+	}, [filter, onboardingStatusResponses]);
+
+	return (
+
+			}
+		>
+			{loading ? (
+
+					} size="large" />
+
+			) : (
+
+
+	);
+}
+
+function EvaluationTimeSelector({
+	setInterval,
+}: {
+	setInterval: Dispatch<SetStateAction<string>>;
+}): JSX.Element {
+	const [inputValue, setInputValue] = useState('');
+	const [selectedInterval, setSelectedInterval] = useState('5ms');
+	const [dropdownOpen, setDropdownOpen] = useState(false);
+
+	const handleInputChange = (e: React.ChangeEvent): void => {
+		setInputValue(e.target.value);
+	};
+
+	const handleSelectChange = (value: string): void => {
+		setSelectedInterval(value);
+		setInputValue('');
+		setDropdownOpen(false);
+	};
+
+	const handleAddCustomValue = (): void => {
+		setSelectedInterval(inputValue);
+		setInputValue(inputValue);
+		setDropdownOpen(false);
+	};
+
+	const handleKeyDown = (e: React.KeyboardEvent): void => {
+		if (e.key === 'Enter') {
+			e.preventDefault();
+			e.stopPropagation();
+			handleAddCustomValue();
+		}
+	};
+
+	const renderDropdown = (menu: React.ReactNode): JSX.Element => (
+
+	);
+
+	useEffect(() => {
+		if (selectedInterval) {
+			setInterval(() => selectedInterval);
+		}
+	}, [selectedInterval, setInterval]);
+
+	return (
+
+
+				Evaluation Interval:
+
+
+ ); +} + +export default EvaluationTimeSelector; diff --git a/frontend/src/pages/MessagingQueues/MQDetails/DropRateView/dropRateViewUtils.ts b/frontend/src/pages/MessagingQueues/MQDetails/DropRateView/dropRateViewUtils.ts new file mode 100644 index 0000000000..49d751e722 --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MQDetails/DropRateView/dropRateViewUtils.ts @@ -0,0 +1,46 @@ +export function convertToMilliseconds(timeInput: string): number { + if (!timeInput.trim()) { + return 0; + } + + const match = timeInput.match(/^(\d+)(ms|s|ns)?$/); // Match number and optional unit + if (!match) { + throw new Error(`Invalid time format: ${timeInput}`); + } + + const value = parseInt(match[1], 10); + const unit = match[2] || 'ms'; // Default to 'ms' if no unit is provided + + switch (unit) { + case 's': + return value * 1e3; + case 'ms': + return value; + case 'ns': + return value / 1e6; + default: + throw new Error('Invalid time format'); + } +} + +export interface DropRateResponse { + timestamp: string; + data: { + breach_percentage: number; + breached_spans: number; + consumer_service: string; + producer_service: string; + top_traceIDs: string[]; + total_spans: number; + }; +} +export interface DropRateAPIResponse { + status: string; + data: { + resultType: string; + result: { + queryName: string; + list: DropRateResponse[]; + }[]; + }; +} diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.style.scss b/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.style.scss index c4995a1812..5a746bbcae 100644 --- a/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.style.scss +++ b/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.style.scss @@ -17,13 +17,20 @@ background: var(--bg-ink-500); .mq-overview-title { - color: var(--bg-vanilla-200); + display: flex; + justify-content: space-between; + align-items: center; + width: 100%; - font-family: Inter; - font-size: 18px; - font-style: normal; - font-weight: 500; - line-height: 28px; + .drop-rat-title { + color: var(--bg-vanilla-200); + + font-family: Inter; + font-size: 18px; + font-style: normal; + font-weight: 500; + line-height: 28px; + } } .mq-details-options { @@ -43,3 +50,69 @@ } } } + +.droprate-view { + .mq-table { + width: 100%; + + .ant-table-content { + border-radius: 6px; + border: 1px solid var(--bg-slate-500); + box-shadow: 0px 4px 12px 0px rgba(0, 0, 0, 0.1); + } + + .ant-table-tbody { + .ant-table-cell { + max-width: 250px; + border-bottom: none; + } + } + + .ant-table-thead { + .ant-table-cell { + background-color: var(--bg-ink-500); + border-bottom: 1px solid var(--bg-slate-500); + } + } + } + + .trace-id-list { + display: flex; + flex-direction: column; + gap: 4px; + width: max-content; + + .traceid-style { + display: flex; + gap: 8px; + align-items: center; + + .traceid-text { + border-radius: 2px; + border: 1px solid var(--bg-slate-400); + background: var(--bg-slate-400); + padding: 2px; + cursor: pointer; + } + + .remaing-count { + cursor: pointer; + color: var(--bg-vanilla-100); + font-family: Inter; + font-size: 12px; + font-style: normal; + font-weight: 400; + line-height: normal; + letter-spacing: -0.06px; + } + } + } +} + +.pagination-left { + &.mq-table { + .ant-pagination { + justify-content: flex-start; + } + } +} diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx index 34ab160553..e1e1791f32 100644 --- a/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx +++ 
b/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx @@ -249,7 +249,7 @@ function MessagingQueuesTable({ , -): Promise< - SuccessResponse | ErrorResponse -> => { +): Promise | ErrorResponse> => { const { start, end, evalTime } = props; const response = await axios.post(`messaging-queues/kafka/span/evaluation`, { start, From fdc54a62a90348143629b3376da0a9bd60a6c3ad Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Thu, 7 Nov 2024 23:49:47 +0530 Subject: [PATCH 13/30] fix: kafka - misc fix and features (#6379) * feat: fixed multiple fixes and chores in kafka 2.0 * feat: fixed producer latency - producer-detail call * feat: fixed mq-detail page layout and pagination * feat: resolved comments --- .../MessagingQueues/MQDetails/MQDetails.tsx | 7 +- .../MQDetails/MQTables/MQTables.tsx | 6 +- .../AttributeCheckList.tsx | 68 ++++++++++++++++++- .../MessagingQueueHealthCheck.styles.scss | 3 + .../MessagingQueues.styles.scss | 28 +++----- .../pages/MessagingQueues/MessagingQueues.tsx | 4 +- 6 files changed, 87 insertions(+), 29 deletions(-) diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx index 40a943172f..3609b2d226 100644 --- a/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx +++ b/frontend/src/pages/MessagingQueues/MQDetails/MQDetails.tsx @@ -116,12 +116,7 @@ const checkValidityOfDetailConfigs = ( return false; } - if (currentTab === MessagingQueueServiceDetailType.ConsumerDetails) { - return Boolean(configDetails?.topic && configDetails?.partition); - } - return Boolean( - configDetails?.group && configDetails?.topic && configDetails?.partition, - ); + return Boolean(configDetails?.topic && configDetails?.partition); } if (selectedView === MessagingQueuesViewType.producerLatency.value) { diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx index e1e1791f32..73fd1b2f41 100644 --- a/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx +++ b/frontend/src/pages/MessagingQueues/MQDetails/MQTables/MQTables.tsx @@ -33,6 +33,8 @@ import { MessagingQueuesPayloadProps, } from './getConsumerLagDetails'; +const INITIAL_PAGE_SIZE = 10; + // eslint-disable-next-line sonarjs/cognitive-complexity export function getColumns( data: MessagingQueuesPayloadProps['payload'], @@ -155,8 +157,8 @@ function MessagingQueuesTable({ const paginationConfig = useMemo( () => - tableData?.length > 20 && { - pageSize: 20, + tableData?.length > INITIAL_PAGE_SIZE && { + pageSize: INITIAL_PAGE_SIZE, showTotal: showPaginationItem, showSizeChanger: false, hideOnSinglePage: true, diff --git a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx index 88e2147a0e..08b2ce6cfa 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx +++ b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/AttributeCheckList.tsx @@ -1,3 +1,5 @@ +/* eslint-disable jsx-a11y/no-static-element-interactions */ +/* eslint-disable jsx-a11y/click-events-have-key-events */ import './MessagingQueueHealthCheck.styles.scss'; import { CaretDownOutlined, LoadingOutlined } from '@ant-design/icons'; @@ -11,10 +13,20 @@ import { Typography, } from 'antd'; import { OnboardingStatusResponse } from 
 'api/messagingQueues/onboarding/getOnboardingStatus';
+import { QueryParams } from 'constants/query';
+import ROUTES from 'constants/routes';
+import { History } from 'history';
 import { Bolt, Check, OctagonAlert, X } from 'lucide-react';
 import { ReactNode, useEffect, useState } from 'react';
+import { useHistory } from 'react-router-dom';
+import { isCloudUser } from 'utils/app';
 import { v4 as uuid } from 'uuid';
+import {
+	KAFKA_SETUP_DOC_LINK,
+	MessagingQueueHealthCheckService,
+} from '../MessagingQueuesUtils';
+
 interface AttributeCheckListProps {
 	visible: boolean;
 	onClose: () => void;
@@ -34,13 +46,42 @@ export enum AttributesFilters {
 
 function ErrorTitleAndKey({
 	title,
+	parentTitle,
+	history,
+	isCloudUserVal,
 	errorMsg,
 	isLeaf,
 }: {
 	title: string;
+	parentTitle: string;
+	isCloudUserVal: boolean;
+	history: History;
 	errorMsg?: string;
 	isLeaf?: boolean;
 }): TreeDataNode {
+	const handleRedirection = (): void => {
+		let link = '';
+
+		switch (parentTitle) {
+			case 'Consumers':
+				link = `${ROUTES.GET_STARTED_APPLICATION_MONITORING}?${QueryParams.getStartedSource}=kafka&${QueryParams.getStartedSourceService}=${MessagingQueueHealthCheckService.Consumers}`;
+				break;
+			case 'Producers':
+				link = `${ROUTES.GET_STARTED_APPLICATION_MONITORING}?${QueryParams.getStartedSource}=kafka&${QueryParams.getStartedSourceService}=${MessagingQueueHealthCheckService.Producers}`;
+				break;
+			case 'Kafka':
+				link = `${ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING}?${QueryParams.getStartedSource}=kafka&${QueryParams.getStartedSourceService}=${MessagingQueueHealthCheckService.Kafka}`;
+				break;
+			default:
+				link = '';
+		}
+
+		if (isCloudUserVal && !!link) {
+			history.push(link);
+		} else {
+			window.open(KAFKA_SETUP_DOC_LINK, '_blank');
+		}
+	};
 	return {
 		key: `${title}-key-${uuid()}`,
 		title: (
+
{ + e.preventDefault(); + handleRedirection(); + }} + > Fix
@@ -98,6 +145,9 @@ function treeTitleAndKey({ function generateTreeDataNodes( response: OnboardingStatusResponse['data'], + parentTitle: string, + isCloudUserVal: boolean, + history: History, ): TreeDataNode[] { return response .map((item) => { @@ -109,6 +159,9 @@ function generateTreeDataNodes( return ErrorTitleAndKey({ title: item.attribute, errorMsg: item.error_message || '', + parentTitle, + history, + isCloudUserVal, }); } } @@ -129,6 +182,8 @@ function AttributeCheckList({ const handleFilterChange = (value: AttributesFilters): void => { setFilter(value); }; + const isCloudUserVal = isCloudUser(); + const history = useHistory(); useEffect(() => { const filteredData = onboardingStatusResponses.map((response) => { @@ -137,6 +192,9 @@ function AttributeCheckList({ title: response.title, errorMsg: response.errorMsg, isLeaf: true, + parentTitle: response.title, + history, + isCloudUserVal, }); } let filteredData = response.data; @@ -149,11 +207,17 @@ function AttributeCheckList({ return { ...treeTitleAndKey({ title: response.title }), - children: generateTreeDataNodes(filteredData), + children: generateTreeDataNodes( + filteredData, + response.title, + isCloudUserVal, + history, + ), }; }); setTreeData(filteredData); + // eslint-disable-next-line react-hooks/exhaustive-deps }, [filter, onboardingStatusResponses]); return ( diff --git a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss index 00b3ad8df8..22a1bed584 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss +++ b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss @@ -68,6 +68,8 @@ .ant-tree { .ant-tree-title { + cursor: default; + .attribute-error-title { display: flex; align-items: center; @@ -88,6 +90,7 @@ font-style: normal; font-weight: 400; line-height: 16px; + cursor: pointer; } } diff --git a/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss b/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss index bcfd62c773..9959bebe26 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss +++ b/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss @@ -45,28 +45,22 @@ border-bottom: 1px solid var(--bg-slate-500); - .header-content { + .header-config { display: flex; gap: 12px; align-items: center; - .header-config { - display: flex; - gap: 10px; - align-items: center; + .messaging-queue-options { + .ant-select-selector { + display: flex; + height: 32px; + padding: 6px 6px 6px 8px; + align-items: center; + gap: 4px; - .messaging-queue-options { - .ant-select-selector { - display: flex; - height: 32px; - padding: 6px 6px 6px 8px; - align-items: center; - gap: 4px; - - border-radius: 2px; - border: 1px solid var(--bg-slate-400); - background: var(--bg-ink-300); - } + border-radius: 2px; + border: 1px solid var(--bg-slate-400); + background: var(--bg-ink-300); } } } diff --git a/frontend/src/pages/MessagingQueues/MessagingQueues.tsx b/frontend/src/pages/MessagingQueues/MessagingQueues.tsx index 9e2c630bb0..34063fc3b8 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueues.tsx +++ b/frontend/src/pages/MessagingQueues/MessagingQueues.tsx @@ -60,8 +60,8 @@ function MessagingQueues(): JSX.Element { {t('breadcrumb')}
-
-
{t('header')}
+
+ {t('header')} / Date: Fri, 8 Nov 2024 12:22:39 +0530 Subject: [PATCH 14/30] chore: update events for onboarding part 2 (#6397) --- .../AboutSigNozQuestions.tsx | 2 +- .../InviteTeamMembers/InviteTeamMembers.tsx | 53 +++++++++------- .../OptimiseSignozNeeds.tsx | 8 +-- .../OrgQuestions/OrgQuestions.tsx | 18 +++++- .../OnboardingQuestionaire/index.tsx | 62 +++++++++++++++++-- 5 files changed, 107 insertions(+), 36 deletions(-) diff --git a/frontend/src/container/OnboardingQuestionaire/AboutSigNozQuestions/AboutSigNozQuestions.tsx b/frontend/src/container/OnboardingQuestionaire/AboutSigNozQuestions/AboutSigNozQuestions.tsx index ee7606ff3f..1c061803be 100644 --- a/frontend/src/container/OnboardingQuestionaire/AboutSigNozQuestions/AboutSigNozQuestions.tsx +++ b/frontend/src/container/OnboardingQuestionaire/AboutSigNozQuestions/AboutSigNozQuestions.tsx @@ -82,7 +82,7 @@ export function AboutSigNozQuestions({ otherInterestInSignoz, }); - logEvent('User Onboarding: About SigNoz Questions Answered', { + logEvent('Org Onboarding: Answered', { hearAboutSignoz, otherAboutSignoz, interestInSignoz, diff --git a/frontend/src/container/OnboardingQuestionaire/InviteTeamMembers/InviteTeamMembers.tsx b/frontend/src/container/OnboardingQuestionaire/InviteTeamMembers/InviteTeamMembers.tsx index fef689de3a..def1cf979d 100644 --- a/frontend/src/container/OnboardingQuestionaire/InviteTeamMembers/InviteTeamMembers.tsx +++ b/frontend/src/container/OnboardingQuestionaire/InviteTeamMembers/InviteTeamMembers.tsx @@ -161,6 +161,13 @@ function InviteTeamMembers({ setInviteUsersSuccessResponse(successfulInvites); + logEvent('Org Onboarding: Invite Team Members Success', { + teamMembers: teamMembersToInvite, + totalInvites: inviteUsersResponse.summary.total_invites, + successfulInvites: inviteUsersResponse.summary.successful_invites, + failedInvites: inviteUsersResponse.summary.failed_invites, + }); + setTimeout(() => { setDisableNextButton(false); onNext(); @@ -172,6 +179,13 @@ function InviteTeamMembers({ setInviteUsersSuccessResponse(successfulInvites); + logEvent('Org Onboarding: Invite Team Members Partial Success', { + teamMembers: teamMembersToInvite, + totalInvites: inviteUsersResponse.summary.total_invites, + successfulInvites: inviteUsersResponse.summary.successful_invites, + failedInvites: inviteUsersResponse.summary.failed_invites, + }); + if (inviteUsersResponse.failed_invites.length > 0) { setHasErrors(true); @@ -182,27 +196,21 @@ function InviteTeamMembers({ } }; - const { - mutate: sendInvites, - isLoading: isSendingInvites, - data: inviteUsersApiResponseData, - } = useMutation(inviteUsers, { - onSuccess: (response: SuccessResponse): void => { - logEvent('User Onboarding: Invite Team Members Sent', { - teamMembers: teamMembersToInvite, - }); - - handleInviteUsersSuccess(response); + const { mutate: sendInvites, isLoading: isSendingInvites } = useMutation( + inviteUsers, + { + onSuccess: (response: SuccessResponse): void => { + handleInviteUsersSuccess(response); + }, + onError: (error: AxiosError): void => { + logEvent('Org Onboarding: Invite Team Members Failed', { + teamMembers: teamMembersToInvite, + }); + + handleError(error); + }, }, - onError: (error: AxiosError): void => { - logEvent('User Onboarding: Invite Team Members Failed', { - teamMembers: teamMembersToInvite, - error, - }); - - handleError(error); - }, - }); + ); const handleNext = (): void => { if (validateAllUsers()) { @@ -254,9 +262,8 @@ function InviteTeamMembers({ }; const handleDoLater = (): void => { - logEvent('User Onboarding: 
Invite Team Members Skipped', { - teamMembers: teamMembersToInvite, - apiResponse: inviteUsersApiResponseData, + logEvent('Org Onboarding: Clicked Do Later', { + currentPageID: 4, }); onNext(); diff --git a/frontend/src/container/OnboardingQuestionaire/OptimiseSignozNeeds/OptimiseSignozNeeds.tsx b/frontend/src/container/OnboardingQuestionaire/OptimiseSignozNeeds/OptimiseSignozNeeds.tsx index f1be6fb8ee..dc499c9308 100644 --- a/frontend/src/container/OnboardingQuestionaire/OptimiseSignozNeeds/OptimiseSignozNeeds.tsx +++ b/frontend/src/container/OnboardingQuestionaire/OptimiseSignozNeeds/OptimiseSignozNeeds.tsx @@ -122,7 +122,7 @@ function OptimiseSignozNeeds({ }, [services, hostsPerDay, logsPerDay]); const handleOnNext = (): void => { - logEvent('User Onboarding: Optimise SigNoz Needs Answered', { + logEvent('Org Onboarding: Answered', { logsPerDay, hostsPerDay, services, @@ -144,10 +144,8 @@ function OptimiseSignozNeeds({ onWillDoLater(); - logEvent('User Onboarding: Optimise SigNoz Needs Skipped', { - logsPerDay: 0, - hostsPerDay: 0, - services: 0, + logEvent('Org Onboarding: Clicked Do Later', { + currentPageID: 3, }); }; diff --git a/frontend/src/container/OnboardingQuestionaire/OrgQuestions/OrgQuestions.tsx b/frontend/src/container/OnboardingQuestionaire/OrgQuestions/OrgQuestions.tsx index e0376a6559..7569e0fa81 100644 --- a/frontend/src/container/OnboardingQuestionaire/OrgQuestions/OrgQuestions.tsx +++ b/frontend/src/container/OnboardingQuestionaire/OrgQuestions/OrgQuestions.tsx @@ -94,6 +94,13 @@ function OrgQuestions({ organisationName === '' || orgDetails.organisationName === organisationName ) { + logEvent('Org Onboarding: Answered', { + usesObservability, + observabilityTool, + otherTool, + familiarity, + }); + onNext({ organisationName, usesObservability, @@ -121,10 +128,17 @@ function OrgQuestions({ }, }); - logEvent('User Onboarding: Org Name Updated', { + logEvent('Org Onboarding: Org Name Updated', { organisationName: orgDetails.organisationName, }); + logEvent('Org Onboarding: Answered', { + usesObservability, + observabilityTool, + otherTool, + familiarity, + }); + onNext({ organisationName, usesObservability, @@ -133,7 +147,7 @@ function OrgQuestions({ familiarity, }); } else { - logEvent('User Onboarding: Org Name Update Failed', { + logEvent('Org Onboarding: Org Name Update Failed', { organisationName: orgDetails.organisationName, }); diff --git a/frontend/src/container/OnboardingQuestionaire/index.tsx b/frontend/src/container/OnboardingQuestionaire/index.tsx index 3b3ed59354..390ac00212 100644 --- a/frontend/src/container/OnboardingQuestionaire/index.tsx +++ b/frontend/src/container/OnboardingQuestionaire/index.tsx @@ -1,6 +1,7 @@ import './OnboardingQuestionaire.styles.scss'; import { NotificationInstance } from 'antd/es/notification/interface'; +import logEvent from 'api/common/logEvent'; import updateProfileAPI from 'api/onboarding/updateProfile'; import getAllOrgPreferences from 'api/preferences/getAllOrgPreferences'; import updateOrgPreferenceAPI from 'api/preferences/updateOrgPreference'; @@ -61,6 +62,10 @@ const INITIAL_OPTIMISE_SIGNOZ_DETAILS: OptimiseSignozDetails = { services: 0, }; +const BACK_BUTTON_EVENT_NAME = 'Org Onboarding: Back Button Clicked'; +const NEXT_BUTTON_EVENT_NAME = 'Org Onboarding: Next Button Clicked'; +const ONBOARDING_COMPLETE_EVENT_NAME = 'Org Onboarding: Complete'; + function OnboardingQuestionaire(): JSX.Element { const { notifications } = useNotifications(); const { org } = useSelector((state) => state.app); @@ -98,6 +103,13 @@ 
function OnboardingQuestionaire(): JSX.Element { // eslint-disable-next-line react-hooks/exhaustive-deps }, [org]); + useEffect(() => { + logEvent('Org Onboarding: Started', { + org_id: org?.[0]?.id, + }); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []); + const { refetch: refetchOrgPreferences } = useQuery({ queryFn: () => getAllOrgPreferences(), queryKey: ['getOrgPreferences'], @@ -120,6 +132,8 @@ function OnboardingQuestionaire(): JSX.Element { setUpdatingOrgOnboardingStatus(false); + logEvent('Org Onboarding: Redirecting to Get Started', {}); + history.push(ROUTES.GET_STARTED); }, onError: () => { @@ -156,6 +170,11 @@ function OnboardingQuestionaire(): JSX.Element { }); const handleUpdateProfile = (): void => { + logEvent(NEXT_BUTTON_EVENT_NAME, { + currentPageID: 3, + nextPageID: 4, + }); + updateProfile({ familiarity_with_observability: orgDetails?.familiarity as string, has_existing_observability_tool: orgDetails?.usesObservability as boolean, @@ -180,6 +199,10 @@ function OnboardingQuestionaire(): JSX.Element { }; const handleOnboardingComplete = (): void => { + logEvent(ONBOARDING_COMPLETE_EVENT_NAME, { + currentPageID: 4, + }); + setUpdatingOrgOnboardingStatus(true); updateOrgPreference({ preferenceID: 'ORG_ONBOARDING', @@ -199,6 +222,11 @@ function OnboardingQuestionaire(): JSX.Element { currentOrgData={currentOrgData} orgDetails={orgDetails} onNext={(orgDetails: OrgDetails): void => { + logEvent(NEXT_BUTTON_EVENT_NAME, { + currentPageID: 1, + nextPageID: 2, + }); + setOrgDetails(orgDetails); setCurrentStep(2); }} @@ -209,8 +237,20 @@ function OnboardingQuestionaire(): JSX.Element { setCurrentStep(1)} - onNext={(): void => setCurrentStep(3)} + onBack={(): void => { + logEvent(BACK_BUTTON_EVENT_NAME, { + currentPageID: 2, + prevPageID: 1, + }); + setCurrentStep(1); + }} + onNext={(): void => { + logEvent(NEXT_BUTTON_EVENT_NAME, { + currentPageID: 2, + nextPageID: 3, + }); + setCurrentStep(3); + }} /> )} @@ -220,9 +260,15 @@ function OnboardingQuestionaire(): JSX.Element { isUpdatingProfile={isUpdatingProfile} optimiseSignozDetails={optimiseSignozDetails} setOptimiseSignozDetails={setOptimiseSignozDetails} - onBack={(): void => setCurrentStep(2)} + onBack={(): void => { + logEvent(BACK_BUTTON_EVENT_NAME, { + currentPageID: 3, + prevPageID: 2, + }); + setCurrentStep(2); + }} onNext={handleUpdateProfile} - onWillDoLater={(): void => setCurrentStep(4)} // This is temporary, only to skip gateway api call as it's not setup on staging yet + onWillDoLater={(): void => setCurrentStep(4)} /> )} @@ -231,7 +277,13 @@ function OnboardingQuestionaire(): JSX.Element { isLoading={updatingOrgOnboardingStatus} teamMembers={teamMembers} setTeamMembers={setTeamMembers} - onBack={(): void => setCurrentStep(3)} + onBack={(): void => { + logEvent(BACK_BUTTON_EVENT_NAME, { + currentPageID: 4, + prevPageID: 3, + }); + setCurrentStep(3); + }} onNext={handleOnboardingComplete} /> )} From 22c10f94795da2f37f702c747ef07334906859b5 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Fri, 8 Nov 2024 12:35:32 +0530 Subject: [PATCH 15/30] Issue 6367 (#6376) * fix: issue with orderby by materialized column * fix: tests * fix: order by issue in old explorer as well --- pkg/query-service/app/logs/v3/query_builder.go | 3 --- pkg/query-service/app/logs/v3/query_builder_test.go | 8 ++++---- pkg/query-service/app/logs/v4/query_builder.go | 3 --- pkg/query-service/app/logs/v4/query_builder_test.go | 10 ++++++---- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git 
a/pkg/query-service/app/logs/v3/query_builder.go b/pkg/query-service/app/logs/v3/query_builder.go index e0c88e3ac1..8f14fea99d 100644 --- a/pkg/query-service/app/logs/v3/query_builder.go +++ b/pkg/query-service/app/logs/v3/query_builder.go @@ -419,9 +419,6 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st } else if panelType == v3.PanelTypeList { attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn} name := getClickhouseColumnName(attr) - if item.IsColumn { - name = "`" + name + "`" - } orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order)) } } diff --git a/pkg/query-service/app/logs/v3/query_builder_test.go b/pkg/query-service/app/logs/v3/query_builder_test.go index 3191820dbb..958a3fa93f 100644 --- a/pkg/query-service/app/logs/v3/query_builder_test.go +++ b/pkg/query-service/app/logs/v3/query_builder_test.go @@ -788,14 +788,14 @@ var testBuildLogsQueryData = []struct { AggregateOperator: v3.AggregateOperatorNoOp, Expression: "A", Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}, - OrderBy: []v3.OrderBy{{ColumnName: "method", DataType: v3.AttributeKeyDataTypeString, Order: "ASC", IsColumn: true}}, + OrderBy: []v3.OrderBy{{ColumnName: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, Order: "ASC", IsColumn: true}}, }, ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string," + "CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," + "CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool," + "CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string," + "CAST((scope_string_key, scope_string_value), 'Map(String, String)') as scope " + - "from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by `method` ASC", + "from signoz_logs.distributed_logs where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) order by `attribute_string_method` ASC", }, { Name: "Test Noop with filter", @@ -1524,7 +1524,7 @@ var testPrepLogsQueryLimitOffsetData = []struct { PageSize: 5, }, TableName: "logs", - ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string,CAST((scope_string_key, scope_string_value), 'Map(String, String)') as scope from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by `timestamp` desc LIMIT 1", + ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, 
body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string,CAST((scope_string_key, scope_string_value), 'Map(String, String)') as scope from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by timestamp desc LIMIT 1", }, { Name: "Test limit greater than pageSize - order by ts", @@ -1545,7 +1545,7 @@ var testPrepLogsQueryLimitOffsetData = []struct { PageSize: 10, }, TableName: "logs", - ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string,CAST((scope_string_key, scope_string_value), 'Map(String, String)') as scope from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by `timestamp` desc LIMIT 10", + ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string,CAST((scope_string_key, scope_string_value), 'Map(String, String)') as scope from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by timestamp desc LIMIT 10", }, { Name: "Test limit less than pageSize - order by custom", diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go index 3952d0e7e1..42cb19befc 100644 --- a/pkg/query-service/app/logs/v4/query_builder.go +++ b/pkg/query-service/app/logs/v4/query_builder.go @@ -255,9 +255,6 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st } else if panelType == v3.PanelTypeList { attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn} name := getClickhouseKey(attr) - if item.IsColumn { - name = "`" + name + "`" - } orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order)) } } diff --git a/pkg/query-service/app/logs/v4/query_builder_test.go b/pkg/query-service/app/logs/v4/query_builder_test.go index cbc9a450f3..4ce3721e18 100644 --- 
a/pkg/query-service/app/logs/v4/query_builder_test.go +++ b/pkg/query-service/app/logs/v4/query_builder_test.go @@ -520,14 +520,16 @@ func Test_orderByAttributeKeyTags(t *testing.T) { { ColumnName: "bytes", Order: "asc", + IsColumn: true, + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, }, }, tags: []v3.AttributeKey{ {Key: "name"}, - {Key: "bytes"}, }, }, - want: "`name` asc,value asc,`bytes` asc", + want: "`name` asc,value asc,`attribute_string_bytes` asc", }, { name: "test 4", @@ -1016,7 +1018,7 @@ func TestPrepareLogsQuery(t *testing.T) { }, want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string from " + "signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + - "order by `timestamp` desc LIMIT 1", + "order by timestamp desc LIMIT 1", }, { name: "Test limit greater than pageSize - order by ts", @@ -1041,7 +1043,7 @@ func TestPrepareLogsQuery(t *testing.T) { }, want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string from " + "signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + - "AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by `timestamp` desc LIMIT 10", + "AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by timestamp desc LIMIT 10", }, { name: "Test limit less than pageSize - order by custom", From 831540eaf01fd3495ed7a3a5b5d22dcfe19a2539 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Fri, 8 Nov 2024 21:10:09 +0530 Subject: [PATCH 16/30] fix: test notification missing for anomaly alert (#6391) --- ee/query-service/app/server.go | 8 +- ee/query-service/rules/manager.go | 105 ++++++++++++++ pkg/query-service/model/response.go | 1 + pkg/query-service/rules/api_params.go | 10 -- pkg/query-service/rules/api_params_test.go | 18 +-- pkg/query-service/rules/db.go | 3 + pkg/query-service/rules/manager.go | 141 +++++++------------ pkg/query-service/rules/templates.go | 15 ++ pkg/query-service/rules/templates_test.go | 11 ++ pkg/query-service/rules/test_notification.go | 97 +++++++++++++ 10 files changed, 295 insertions(+), 114 deletions(-) create mode 100644 pkg/query-service/rules/test_notification.go diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index cf54693ba8..1c44338a77 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -31,7 +31,6 @@ import ( "go.signoz.io/signoz/ee/query-service/rules" baseauth "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/migrate" - "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" licensepkg "go.signoz.io/signoz/ee/query-service/license" @@ -348,7 +347,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler) (*http.Server, e } if user.User.OrgId == "" { - return nil, model.UnauthorizedError(errors.New("orgId is missing in the claims")) + return nil, basemodel.UnauthorizedError(errors.New("orgId is missing in the claims")) } return user, nil @@ -765,8 +764,9 @@ func makeRulesManager( Cache: cache, EvalDelay: 
baseconst.GetEvalDelay(), - PrepareTaskFunc: rules.PrepareTaskFunc, - UseLogsNewSchema: useLogsNewSchema, + PrepareTaskFunc: rules.PrepareTaskFunc, + PrepareTestRuleFunc: rules.TestNotification, + UseLogsNewSchema: useLogsNewSchema, } // create Manager diff --git a/ee/query-service/rules/manager.go b/ee/query-service/rules/manager.go index e44bbcf82b..9843d108d8 100644 --- a/ee/query-service/rules/manager.go +++ b/ee/query-service/rules/manager.go @@ -1,10 +1,15 @@ package rules import ( + "context" "fmt" "time" + "github.com/google/uuid" + basemodel "go.signoz.io/signoz/pkg/query-service/model" baserules "go.signoz.io/signoz/pkg/query-service/rules" + "go.signoz.io/signoz/pkg/query-service/utils/labels" + "go.uber.org/zap" ) func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) { @@ -79,6 +84,106 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) return task, nil } +// TestNotification prepares a dummy rule for given rule parameters and +// sends a test notification. returns alert count and error (if any) +func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.ApiError) { + + ctx := context.Background() + + if opts.Rule == nil { + return 0, basemodel.BadRequest(fmt.Errorf("rule is required")) + } + + parsedRule := opts.Rule + var alertname = parsedRule.AlertName + if alertname == "" { + // alertname is not mandatory for testing, so picking + // a random string here + alertname = uuid.New().String() + } + + // append name to indicate this is test alert + parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, baserules.TestAlertPostFix) + + var rule baserules.Rule + var err error + + if parsedRule.RuleType == baserules.RuleTypeThreshold { + + // add special labels for test alerts + parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target) + parsedRule.Labels[labels.RuleSourceLabel] = "" + parsedRule.Labels[labels.AlertRuleIdLabel] = "" + + // create a threshold rule + rule, err = baserules.NewThresholdRule( + alertname, + parsedRule, + opts.FF, + opts.Reader, + opts.UseLogsNewSchema, + baserules.WithSendAlways(), + baserules.WithSendUnmatched(), + ) + + if err != nil { + zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err)) + return 0, basemodel.BadRequest(err) + } + + } else if parsedRule.RuleType == baserules.RuleTypeProm { + + // create promql rule + rule, err = baserules.NewPromRule( + alertname, + parsedRule, + opts.Logger, + opts.Reader, + opts.ManagerOpts.PqlEngine, + baserules.WithSendAlways(), + baserules.WithSendUnmatched(), + ) + + if err != nil { + zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err)) + return 0, basemodel.BadRequest(err) + } + } else if parsedRule.RuleType == baserules.RuleTypeAnomaly { + // create anomaly rule + rule, err = NewAnomalyRule( + alertname, + parsedRule, + opts.FF, + opts.Reader, + opts.Cache, + baserules.WithSendAlways(), + baserules.WithSendUnmatched(), + ) + if err != nil { + zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", rule.Name()), zap.Error(err)) + return 0, basemodel.BadRequest(err) + } + } else { + return 0, basemodel.BadRequest(fmt.Errorf("failed to derive ruletype with given information")) + } + + // set timestamp to current utc time + ts := time.Now().UTC() + + count, err := 
rule.Eval(ctx, ts) + if err != nil { + zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err)) + return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed")) + } + alertsFound, ok := count.(int) + if !ok { + return 0, basemodel.InternalError(fmt.Errorf("something went wrong")) + } + rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc) + + return alertsFound, nil +} + // newTask returns an appropriate group for // rule type func newTask(taskType baserules.TaskType, name string, frequency time.Duration, rules []baserules.Rule, opts *baserules.ManagerOptions, notify baserules.NotifyFunc, ruleDB baserules.RuleDB) baserules.Task { diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index 3a720aed5e..61be36f170 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -617,6 +617,7 @@ type AlertsInfo struct { TotalAlerts int `json:"totalAlerts"` LogsBasedAlerts int `json:"logsBasedAlerts"` MetricBasedAlerts int `json:"metricBasedAlerts"` + AnomalyBasedAlerts int `json:"anomalyBasedAlerts"` TracesBasedAlerts int `json:"tracesBasedAlerts"` TotalChannels int `json:"totalChannels"` SlackChannels int `json:"slackChannels"` diff --git a/pkg/query-service/rules/api_params.go b/pkg/query-service/rules/api_params.go index b3c174b147..e4c76a6d71 100644 --- a/pkg/query-service/rules/api_params.go +++ b/pkg/query-service/rules/api_params.go @@ -42,16 +42,6 @@ var ( // this file contains api request and responses to be // served over http -// newApiErrorInternal returns a new api error object of type internal -func newApiErrorInternal(err error) *model.ApiError { - return &model.ApiError{Typ: model.ErrorInternal, Err: err} -} - -// newApiErrorBadData returns a new api error object of bad request type -func newApiErrorBadData(err error) *model.ApiError { - return &model.ApiError{Typ: model.ErrorBadData, Err: err} -} - // PostableRule is used to create alerting rule from HTTP api type PostableRule struct { AlertName string `yaml:"alert,omitempty" json:"alert,omitempty"` diff --git a/pkg/query-service/rules/api_params_test.go b/pkg/query-service/rules/api_params_test.go index 6a1245d0fe..9c3092ff90 100644 --- a/pkg/query-service/rules/api_params_test.go +++ b/pkg/query-service/rules/api_params_test.go @@ -8,7 +8,7 @@ import ( func TestIsAllQueriesDisabled(t *testing.T) { testCases := []*v3.CompositeQuery{ - &v3.CompositeQuery{ + { BuilderQueries: map[string]*v3.BuilderQuery{ "query1": { Disabled: true, @@ -20,10 +20,10 @@ func TestIsAllQueriesDisabled(t *testing.T) { QueryType: v3.QueryTypeBuilder, }, nil, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypeBuilder, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypeBuilder, BuilderQueries: map[string]*v3.BuilderQuery{ "query1": { @@ -34,10 +34,10 @@ func TestIsAllQueriesDisabled(t *testing.T) { }, }, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypePromQL, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypePromQL, PromQueries: map[string]*v3.PromQuery{ "query3": { @@ -45,7 +45,7 @@ func TestIsAllQueriesDisabled(t *testing.T) { }, }, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypePromQL, PromQueries: map[string]*v3.PromQuery{ "query3": { @@ -53,10 +53,10 @@ func TestIsAllQueriesDisabled(t *testing.T) { }, }, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypeClickHouseSQL, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypeClickHouseSQL, ClickHouseQueries: map[string]*v3.ClickHouseQuery{ "query4": { @@ -64,7 +64,7 @@ 
func TestIsAllQueriesDisabled(t *testing.T) { }, }, }, - &v3.CompositeQuery{ + { QueryType: v3.QueryTypeClickHouseSQL, ClickHouseQueries: map[string]*v3.ClickHouseQuery{ "query4": { diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go index 697ea63f92..c9db38201b 100644 --- a/pkg/query-service/rules/db.go +++ b/pkg/query-service/rules/db.go @@ -599,6 +599,9 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) { } } } + if rule.RuleType == RuleTypeAnomaly { + alertsInfo.AnomalyBasedAlerts = alertsInfo.AnomalyBasedAlerts + 1 + } } else if rule.AlertType == AlertTypeTraces { alertsInfo.TracesBasedAlerts = alertsInfo.TracesBasedAlerts + 1 } diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index c41d0bbe50..50ad7b5430 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -10,8 +10,6 @@ import ( "sync" "time" - "github.com/google/uuid" - "go.uber.org/zap" "errors" @@ -24,7 +22,6 @@ import ( "go.signoz.io/signoz/pkg/query-service/model" pqle "go.signoz.io/signoz/pkg/query-service/pqlEngine" "go.signoz.io/signoz/pkg/query-service/telemetry" - "go.signoz.io/signoz/pkg/query-service/utils/labels" ) type PrepareTaskOptions struct { @@ -41,6 +38,19 @@ type PrepareTaskOptions struct { UseLogsNewSchema bool } +type PrepareTestRuleOptions struct { + Rule *PostableRule + RuleDB RuleDB + Logger *zap.Logger + Reader interfaces.Reader + Cache cache.Cache + FF interfaces.FeatureLookup + ManagerOpts *ManagerOptions + NotifyFunc NotifyFunc + + UseLogsNewSchema bool +} + const taskNamesuffix = "webAppEditor" func RuleIdFromTaskName(n string) string { @@ -81,6 +91,8 @@ type ManagerOptions struct { PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error) + PrepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError) + UseLogsNewSchema bool } @@ -99,10 +111,11 @@ type Manager struct { logger *zap.Logger - featureFlags interfaces.FeatureLookup - reader interfaces.Reader - cache cache.Cache - prepareTaskFunc func(opts PrepareTaskOptions) (Task, error) + featureFlags interfaces.FeatureLookup + reader interfaces.Reader + cache cache.Cache + prepareTaskFunc func(opts PrepareTaskOptions) (Task, error) + prepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError) UseLogsNewSchema bool } @@ -123,6 +136,9 @@ func defaultOptions(o *ManagerOptions) *ManagerOptions { if o.PrepareTaskFunc == nil { o.PrepareTaskFunc = defaultPrepareTaskFunc } + if o.PrepareTestRuleFunc == nil { + o.PrepareTestRuleFunc = defaultTestNotification + } return o } @@ -203,17 +219,18 @@ func NewManager(o *ManagerOptions) (*Manager, error) { telemetry.GetInstance().SetAlertsInfoCallback(db.GetAlertsInfo) m := &Manager{ - tasks: map[string]Task{}, - rules: map[string]Rule{}, - notifier: notifier, - ruleDB: db, - opts: o, - block: make(chan struct{}), - logger: o.Logger, - featureFlags: o.FeatureFlags, - reader: o.Reader, - cache: o.Cache, - prepareTaskFunc: o.PrepareTaskFunc, + tasks: map[string]Task{}, + rules: map[string]Rule{}, + notifier: notifier, + ruleDB: db, + opts: o, + block: make(chan struct{}), + logger: o.Logger, + featureFlags: o.FeatureFlags, + reader: o.Reader, + cache: o.Cache, + prepareTaskFunc: o.PrepareTaskFunc, + prepareTestRuleFunc: o.PrepareTestRuleFunc, } return m, nil } @@ -788,78 +805,20 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m parsedRule, err := ParsePostableRule([]byte(ruleStr)) if err != nil { - return 0, 
newApiErrorBadData(err) - } - - var alertname = parsedRule.AlertName - if alertname == "" { - // alertname is not mandatory for testing, so picking - // a random string here - alertname = uuid.New().String() - } - - // append name to indicate this is test alert - parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, TestAlertPostFix) - - var rule Rule - - if parsedRule.RuleType == RuleTypeThreshold { - - // add special labels for test alerts - parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target) - parsedRule.Labels[labels.RuleSourceLabel] = "" - parsedRule.Labels[labels.AlertRuleIdLabel] = "" - - // create a threshold rule - rule, err = NewThresholdRule( - alertname, - parsedRule, - m.featureFlags, - m.reader, - m.opts.UseLogsNewSchema, - WithSendAlways(), - WithSendUnmatched(), - ) - - if err != nil { - zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err)) - return 0, newApiErrorBadData(err) - } - - } else if parsedRule.RuleType == RuleTypeProm { - - // create promql rule - rule, err = NewPromRule( - alertname, - parsedRule, - m.logger, - m.reader, - m.opts.PqlEngine, - WithSendAlways(), - WithSendUnmatched(), - ) - - if err != nil { - zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err)) - return 0, newApiErrorBadData(err) - } - } else { - return 0, newApiErrorBadData(fmt.Errorf("failed to derive ruletype with given information")) - } - - // set timestamp to current utc time - ts := time.Now().UTC() - - count, err := rule.Eval(ctx, ts) - if err != nil { - zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err)) - return 0, newApiErrorInternal(fmt.Errorf("rule evaluation failed")) - } - alertsFound, ok := count.(int) - if !ok { - return 0, newApiErrorInternal(fmt.Errorf("something went wrong")) - } - rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), m.prepareNotifyFunc()) + return 0, model.BadRequest(err) + } + + alertCount, apiErr := m.prepareTestRuleFunc(PrepareTestRuleOptions{ + Rule: parsedRule, + RuleDB: m.ruleDB, + Logger: m.logger, + Reader: m.reader, + Cache: m.cache, + FF: m.featureFlags, + ManagerOpts: m.opts, + NotifyFunc: m.prepareNotifyFunc(), + UseLogsNewSchema: m.opts.UseLogsNewSchema, + }) - return alertsFound, nil + return alertCount, apiErr } diff --git a/pkg/query-service/rules/templates.go b/pkg/query-service/rules/templates.go index b5487011ce..49db785cee 100644 --- a/pkg/query-service/rules/templates.go +++ b/pkg/query-service/rules/templates.go @@ -233,6 +233,7 @@ func AlertTemplateData(labels map[string]string, value string, threshold string) // consistent across the platform. // If there is a go template block, it won't be replaced. // The example for existing go template block is: {{$threshold}} or {{$value}} or any other valid go template syntax. +// See templates_test.go for examples. 
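// For example (mirroring the cases exercised in templates_test.go below, with
// "service.name" as a sample label; shown here for illustration only):
//
//	in:  test $service.name exceeds {{$threshold}} and observed at {{$value}}
//	out: test {{index .Labels "service.name"}} exceeds {{$threshold}} and observed at {{$value}}
//
// and, with the {{$variable}} handling added in this patch:
//
//	in:  test {{$service.name}} exceeds {{$threshold}}
//	out: test {{index .Labels "service.name"}} exceeds {{$threshold}}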
func (te *TemplateExpander) preprocessTemplate() { // Handle the $variable syntax reDollar := regexp.MustCompile(`({{.*?}})|(\$(\w+(?:\.\w+)*))`) @@ -256,6 +257,19 @@ func (te *TemplateExpander) preprocessTemplate() { rest := submatches[2] return fmt.Sprintf(`{{index .Labels "%s"%s}}`, path, rest) }) + + // Handle the {{$variable}} syntax + // skip the special case for {{$threshold}} and {{$value}} + reVariable := regexp.MustCompile(`{{\s*\$\s*([a-zA-Z0-9_.]+)\s*}}`) + te.text = reVariable.ReplaceAllStringFunc(te.text, func(match string) string { + if strings.HasPrefix(match, "{{$threshold}}") || strings.HasPrefix(match, "{{$value}}") { + return match + } + // get the variable name from {{$variable}} syntax + variable := strings.TrimPrefix(match, "{{$") + variable = strings.TrimSuffix(variable, "}}") + return fmt.Sprintf(`{{index .Labels "%s"}}`, variable) + }) } // Funcs adds the functions in fm to the Expander's function map. @@ -335,6 +349,7 @@ func (te TemplateExpander) ExpandHTML(templateFiles []string) (result string, re // ParseTest parses the templates and returns the error if any. func (te TemplateExpander) ParseTest() error { + te.preprocessTemplate() _, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text) if err != nil { return err diff --git a/pkg/query-service/rules/templates_test.go b/pkg/query-service/rules/templates_test.go index 66d958e8f3..0fc0bd779a 100644 --- a/pkg/query-service/rules/templates_test.go +++ b/pkg/query-service/rules/templates_test.go @@ -74,3 +74,14 @@ func TestTemplateExpander_WithLablesDotSyntax(t *testing.T) { } require.Equal(t, "test my-service exceeds 100 and observed at 200", result) } + +func TestTemplateExpander_WithVariableSyntax(t *testing.T) { + defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}" + data := AlertTemplateData(map[string]string{"service.name": "my-service"}, "200", "100") + expander := NewTemplateExpander(context.Background(), defs+"test {{$service.name}} exceeds {{$threshold}} and observed at {{$value}}", "test", data, times.Time(time.Now().Unix()), nil) + result, err := expander.Expand() + if err != nil { + t.Fatal(err) + } + require.Equal(t, "test my-service exceeds 100 and observed at 200", result) +} diff --git a/pkg/query-service/rules/test_notification.go b/pkg/query-service/rules/test_notification.go new file mode 100644 index 0000000000..37fb2e5f1b --- /dev/null +++ b/pkg/query-service/rules/test_notification.go @@ -0,0 +1,97 @@ +package rules + +import ( + "context" + "fmt" + "time" + + "github.com/google/uuid" + "go.signoz.io/signoz/pkg/query-service/model" + "go.signoz.io/signoz/pkg/query-service/utils/labels" + "go.uber.org/zap" +) + +// TestNotification prepares a dummy rule for given rule parameters and +// sends a test notification. 
returns alert count and error (if any) +func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError) { + + ctx := context.Background() + + if opts.Rule == nil { + return 0, model.BadRequest(fmt.Errorf("rule is required")) + } + + parsedRule := opts.Rule + var alertname = parsedRule.AlertName + if alertname == "" { + // alertname is not mandatory for testing, so picking + // a random string here + alertname = uuid.New().String() + } + + // append name to indicate this is test alert + parsedRule.AlertName = fmt.Sprintf("%s%s", alertname, TestAlertPostFix) + + var rule Rule + var err error + + if parsedRule.RuleType == RuleTypeThreshold { + + // add special labels for test alerts + parsedRule.Annotations[labels.AlertSummaryLabel] = fmt.Sprintf("The rule threshold is set to %.4f, and the observed metric value is {{$value}}.", *parsedRule.RuleCondition.Target) + parsedRule.Labels[labels.RuleSourceLabel] = "" + parsedRule.Labels[labels.AlertRuleIdLabel] = "" + + // create a threshold rule + rule, err = NewThresholdRule( + alertname, + parsedRule, + opts.FF, + opts.Reader, + opts.UseLogsNewSchema, + WithSendAlways(), + WithSendUnmatched(), + ) + + if err != nil { + zap.L().Error("failed to prepare a new threshold rule for test", zap.String("name", rule.Name()), zap.Error(err)) + return 0, model.BadRequest(err) + } + + } else if parsedRule.RuleType == RuleTypeProm { + + // create promql rule + rule, err = NewPromRule( + alertname, + parsedRule, + opts.Logger, + opts.Reader, + opts.ManagerOpts.PqlEngine, + WithSendAlways(), + WithSendUnmatched(), + ) + + if err != nil { + zap.L().Error("failed to prepare a new promql rule for test", zap.String("name", rule.Name()), zap.Error(err)) + return 0, model.BadRequest(err) + } + } else { + return 0, model.BadRequest(fmt.Errorf("failed to derive ruletype with given information")) + } + + // set timestamp to current utc time + ts := time.Now().UTC() + + count, err := rule.Eval(ctx, ts) + if err != nil { + zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err)) + return 0, model.InternalError(fmt.Errorf("rule evaluation failed")) + } + alertsFound, ok := count.(int) + if !ok { + return 0, model.InternalError(fmt.Errorf("something went wrong")) + } + rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc) + + return alertsFound, nil +} From 63872983c65c6673b9453ef82cef4560c30c4c11 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Sat, 9 Nov 2024 13:04:43 +0530 Subject: [PATCH 17/30] feat: added metric page in messaging queues (#6399) * feat: added metric page in messaging queues * feat: added misc fixes * feat: removed a class name from mqcards * feat: added lightMode styles for kafka 2.0 (#6400) * feat: resolved comments and used strings --- .../public/locales/en-GB/messagingQueues.json | 24 + .../en-GB/messagingQueuesKafkaOverview.json | 82 +- .../public/locales/en/messagingQueues.json | 24 + .../en/messagingQueuesKafkaOverview.json | 4 + .../MQDetailPage/MQDetailPage.tsx | 15 +- .../DropRateView/DropRateView.styles.scss | 13 + .../MQDetails/DropRateView/DropRateView.tsx | 4 +- .../MQDetails/MQDetails.style.scss | 80 +- .../MessagingQueues/MQDetails/MQDetails.tsx | 18 +- .../MetricPage/MetricColumnGraphs.tsx | 115 ++ .../MetricPage/MetricPage.styles.scss | 128 ++ .../MQDetails/MetricPage/MetricPage.tsx | 134 ++ .../MQDetails/MetricPage/MetricPageGraph.tsx | 59 + .../MQDetails/MetricPage/MetricPageUtil.ts | 1092 +++++++++++++++++ 
.../MessagingQueueHealthCheck.styles.scss | 74 ++ .../MessagingQueues.styles.scss | 10 + .../pages/MessagingQueues/MessagingQueues.tsx | 22 +- .../MessagingQueues/MessagingQueuesUtils.ts | 9 +- 18 files changed, 1843 insertions(+), 64 deletions(-) create mode 100644 frontend/public/locales/en-GB/messagingQueues.json create mode 100644 frontend/public/locales/en/messagingQueues.json create mode 100644 frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricColumnGraphs.tsx create mode 100644 frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.styles.scss create mode 100644 frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.tsx create mode 100644 frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageGraph.tsx create mode 100644 frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageUtil.ts diff --git a/frontend/public/locales/en-GB/messagingQueues.json b/frontend/public/locales/en-GB/messagingQueues.json new file mode 100644 index 0000000000..5c446d98d8 --- /dev/null +++ b/frontend/public/locales/en-GB/messagingQueues.json @@ -0,0 +1,24 @@ +{ + "metricGraphCategory": { + "brokerMetrics": { + "title": "Broker Metrics", + "description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics" + }, + "consumerMetrics": { + "title": "Consumer Metrics", + "description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed." + }, + "producerMetrics": { + "title": "Producer Metrics", + "description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery." + }, + "brokerJVMMetrics": { + "title": "Broker JVM Metrics", + "description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate." + }, + "partitionMetrics": { + "title": "Partition Metrics", + "description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas." 
+ } + } +} diff --git a/frontend/public/locales/en-GB/messagingQueuesKafkaOverview.json b/frontend/public/locales/en-GB/messagingQueuesKafkaOverview.json index 5061a5ddcb..f298f664d1 100644 --- a/frontend/public/locales/en-GB/messagingQueuesKafkaOverview.json +++ b/frontend/public/locales/en-GB/messagingQueuesKafkaOverview.json @@ -1,30 +1,54 @@ { - "breadcrumb": "Messaging Queues", - "header": "Kafka / Overview", - "overview": { - "title": "Start sending data in as little as 20 minutes", - "subtitle": "Connect and Monitor Your Data Streams" - }, - "configureConsumer": { - "title": "Configure Consumer", - "description": "Add consumer data sources to gain insights and enhance monitoring.", - "button": "Get Started" - }, - "configureProducer": { - "title": "Configure Producer", - "description": "Add producer data sources to gain insights and enhance monitoring.", - "button": "Get Started" - }, - "monitorKafka": { - "title": "Monitor kafka", - "description": "Add your Kafka source to gain insights and enhance activity tracking.", - "button": "Get Started" - }, - "summarySection": { - "viewDetailsButton": "View Details" - }, - "confirmModal": { - "content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.", - "okText": "Proceed" - } -} \ No newline at end of file + "breadcrumb": "Messaging Queues", + "header": "Kafka / Overview", + "overview": { + "title": "Start sending data in as little as 20 minutes", + "subtitle": "Connect and Monitor Your Data Streams" + }, + "configureConsumer": { + "title": "Configure Consumer", + "description": "Add consumer data sources to gain insights and enhance monitoring.", + "button": "Get Started" + }, + "configureProducer": { + "title": "Configure Producer", + "description": "Add producer data sources to gain insights and enhance monitoring.", + "button": "Get Started" + }, + "monitorKafka": { + "title": "Monitor kafka", + "description": "Add your Kafka source to gain insights and enhance activity tracking.", + "button": "Get Started" + }, + "summarySection": { + "viewDetailsButton": "View Details", + "consumer": { + "title": "Consumer lag view", + "description": "Connect and Monitor Your Data Streams" + }, + "producer": { + "title": "Producer latency view", + "description": "Connect and Monitor Your Data Streams" + }, + "partition": { + "title": "Partition Latency view", + "description": "Connect and Monitor Your Data Streams" + }, + "dropRate": { + "title": "Drop Rate view", + "description": "Connect and Monitor Your Data Streams" + }, + "metricPage": { + "title": "Metric View", + "description": "Connect and Monitor Your Data Streams" + } + }, + "confirmModal": { + "content": "Before navigating to the details page, please make sure you have configured all the required setup to ensure correct data monitoring.", + "okText": "Proceed" + }, + "overviewSummarySection": { + "title": "Monitor Your Data Streams", + "subtitle": "Monitor key Kafka metrics like consumer lag and latency to ensure efficient data flow and troubleshoot in real time." 
+ } +} diff --git a/frontend/public/locales/en/messagingQueues.json b/frontend/public/locales/en/messagingQueues.json new file mode 100644 index 0000000000..5c446d98d8 --- /dev/null +++ b/frontend/public/locales/en/messagingQueues.json @@ -0,0 +1,24 @@ +{ + "metricGraphCategory": { + "brokerMetrics": { + "title": "Broker Metrics", + "description": "The Kafka Broker metrics here inform you of data loss/delay through unclean leader elections and network throughputs, as well as request fails through request purgatories and timeouts metrics" + }, + "consumerMetrics": { + "title": "Consumer Metrics", + "description": "Kafka Consumer metrics provide insights into lag between message production and consumption, success rates and latency of message delivery, and the volume of data consumed." + }, + "producerMetrics": { + "title": "Producer Metrics", + "description": "Kafka Producers send messages to brokers for storage and distribution by topic. These metrics inform you of the volume and rate of data sent, and the success rate of message delivery." + }, + "brokerJVMMetrics": { + "title": "Broker JVM Metrics", + "description": "Kafka brokers are Java applications that expose JVM metrics to inform on the broker's system health. Garbage collection metrics like those below provide key insights into free memory, broker performance, and heap size. You need to enable new_gc_metrics for this section to populate." + }, + "partitionMetrics": { + "title": "Partition Metrics", + "description": "Kafka partitions are the unit of parallelism in Kafka. These metrics inform you of the number of partitions per topic, the current offset of each partition, the oldest offset, and the number of in-sync replicas." + } + } +} diff --git a/frontend/public/locales/en/messagingQueuesKafkaOverview.json b/frontend/public/locales/en/messagingQueuesKafkaOverview.json index de67d0b3e9..f298f664d1 100644 --- a/frontend/public/locales/en/messagingQueuesKafkaOverview.json +++ b/frontend/public/locales/en/messagingQueuesKafkaOverview.json @@ -37,6 +37,10 @@ "dropRate": { "title": "Drop Rate view", "description": "Connect and Monitor Your Data Streams" + }, + "metricPage": { + "title": "Metric View", + "description": "Connect and Monitor Your Data Streams" } }, "confirmModal": { diff --git a/frontend/src/pages/MessagingQueues/MQDetailPage/MQDetailPage.tsx b/frontend/src/pages/MessagingQueues/MQDetailPage/MQDetailPage.tsx index 5ee98249f6..5793d40b7b 100644 --- a/frontend/src/pages/MessagingQueues/MQDetailPage/MQDetailPage.tsx +++ b/frontend/src/pages/MessagingQueues/MQDetailPage/MQDetailPage.tsx @@ -18,6 +18,7 @@ import { } from '../MessagingQueuesUtils'; import DropRateView from '../MQDetails/DropRateView/DropRateView'; import MessagingQueueOverview from '../MQDetails/MessagingQueueOverview'; +import MetricPage from '../MQDetails/MetricPage/MetricPage'; import MessagingQueuesDetails from '../MQDetails/MQDetails'; import MessagingQueuesConfigOptions from '../MQGraph/MQConfigOptions'; import MessagingQueuesGraph from '../MQGraph/MQGraph'; @@ -60,6 +61,10 @@ function MQDetailPage(): JSX.Element { }); }; + const showMessagingQueueDetails = + selectedView !== MessagingQueuesViewType.dropRate.value && + selectedView !== MessagingQueuesViewType.metricPage.value; + return (
@@ -82,7 +87,7 @@ function MQDetailPage(): JSX.Element { setSelectedView(value); updateUrlQuery({ [QueryParams.mqServiceView]: value }); }} - value={mqServiceView} + value={selectedView} options={[ { label: MessagingQueuesViewType.consumerLag.label, @@ -100,6 +105,10 @@ function MQDetailPage(): JSX.Element { label: MessagingQueuesViewType.dropRate.label, value: MessagingQueuesViewType.dropRate.value, }, + { + label: MessagingQueuesViewType.metricPage.label, + value: MessagingQueuesViewType.metricPage.value, + }, ]} />
@@ -112,6 +121,8 @@ function MQDetailPage(): JSX.Element {
) : selectedView === MessagingQueuesViewType.dropRate.value ? ( + ) : selectedView === MessagingQueuesViewType.metricPage.value ? ( + ) : ( )} - {selectedView !== MessagingQueuesViewType.dropRate.value && ( + {showMessagingQueueDetails && (
-
- {MessagingQueuesViewType.dropRate.label} -
+ {MessagingQueuesViewType.dropRate.label}
( - + {ConsumerLagDetailTitle[detailType]} - {detailType === MessagingQueueServiceDetailType.PartitionHostMetrics && ( - - )} )); }; diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricColumnGraphs.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricColumnGraphs.tsx new file mode 100644 index 0000000000..a88db1efc7 --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricColumnGraphs.tsx @@ -0,0 +1,115 @@ +import { Typography } from 'antd'; +import { CardContainer } from 'container/GridCardLayout/styles'; +import { useIsDarkMode } from 'hooks/useDarkMode'; +import { useTranslation } from 'react-i18next'; +import { Widgets } from 'types/api/dashboard/getAll'; + +import MetricPageGridGraph from './MetricPageGraph'; +import { + averageRequestLatencyWidgetData, + brokerCountWidgetData, + brokerNetworkThroughputWidgetData, + bytesConsumedWidgetData, + consumerFetchRateWidgetData, + consumerGroupMemberWidgetData, + consumerLagByGroupWidgetData, + consumerOffsetWidgetData, + ioWaitTimeWidgetData, + kafkaProducerByteRateWidgetData, + messagesConsumedWidgetData, + producerFetchRequestPurgatoryWidgetData, + requestResponseWidgetData, + requestTimesWidgetData, +} from './MetricPageUtil'; + +interface MetricSectionProps { + title: string; + description: string; + graphCount: Widgets[]; +} + +function MetricSection({ + title, + description, + graphCount, +}: MetricSectionProps): JSX.Element { + const isDarkMode = useIsDarkMode(); + + return ( +
+ +
+ {title} +
+
+ + {description} + +
+ {graphCount.map((widgetData) => ( + + ))} +
+
+ ); +} + +function MetricColumnGraphs(): JSX.Element { + const { t } = useTranslation('messagingQueues'); + + const metricsData = [ + { + title: t('metricGraphCategory.brokerMetrics.title'), + description: t('metricGraphCategory.brokerMetrics.description'), + graphCount: [ + brokerCountWidgetData, + requestTimesWidgetData, + producerFetchRequestPurgatoryWidgetData, + brokerNetworkThroughputWidgetData, + ], + id: 'broker-metrics', + }, + { + title: t('metricGraphCategory.producerMetrics.title'), + description: t('metricGraphCategory.producerMetrics.description'), + graphCount: [ + ioWaitTimeWidgetData, + requestResponseWidgetData, + averageRequestLatencyWidgetData, + kafkaProducerByteRateWidgetData, + bytesConsumedWidgetData, + ], + id: 'producer-metrics', + }, + { + title: t('metricGraphCategory.consumerMetrics.title'), + description: t('metricGraphCategory.consumerMetrics.description'), + graphCount: [ + consumerOffsetWidgetData, + consumerGroupMemberWidgetData, + consumerLagByGroupWidgetData, + consumerFetchRateWidgetData, + messagesConsumedWidgetData, + ], + id: 'consumer-metrics', + }, + ]; + + return ( +
+ {metricsData.map((metric) => ( + + ))} +
+ ); +} + +export default MetricColumnGraphs; diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.styles.scss b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.styles.scss new file mode 100644 index 0000000000..cc955c0739 --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.styles.scss @@ -0,0 +1,128 @@ +.metric-page { + padding: 20px; + display: flex; + flex-direction: column; + gap: 32px; + + .metric-page-container { + display: flex; + flex-direction: column; + + .row-panel { + padding-left: 10px; + } + + .metric-page-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + align-items: flex-start; + gap: 10px; + + .metric-graph { + height: 320px; + padding: 10px; + width: 100%; + box-sizing: border-box; + } + } + + @media (max-width: 768px) { + .metric-page-grid { + grid-template-columns: 1fr; + } + } + + .graph-description { + padding: 16px 10px 16px 10px; + } + } + + .row-panel { + border-radius: 4px; + background: rgba(18, 19, 23, 0.4); + padding: 8px; + display: flex; + gap: 6px; + align-items: center; + height: 48px !important; + + .ant-typography { + font-size: 14px; + font-weight: 500; + } + + .row-panel-section { + display: flex; + gap: 6px; + align-items: center; + + .row-icon { + color: var(--bg-vanilla-400); + cursor: pointer; + } + + .section-title { + color: var(--bg-vanilla-400); + font-family: Inter; + font-size: 14px; + font-style: normal; + font-weight: 400; + line-height: 20px; + letter-spacing: -0.07px; + } + } + } + + .metric-column-graph-container { + display: grid; + grid-template-columns: 1fr 1fr 1fr; + gap: 10px; + + .metric-column-graph { + display: flex; + flex-direction: column; + gap: 10px; + + .row-panel { + justify-content: center; + } + + .metric-page-grid { + display: flex; + flex-direction: column; + flex: 1; + min-width: 0; + gap: 10px; + + .metric-graph { + height: 320px; + padding: 10px; + width: 100%; + box-sizing: border-box; + } + } + } + } + + @media (max-width: 768px) { + .metric-column-graph-container { + grid-template-columns: 1fr; + } + } +} + +.lightMode { + .metric-page { + .row-panel { + .row-panel-section { + .row-icon { + color: var(--bg-ink-300); + } + + .section-title { + color: var(--bg-ink-300); + } + } + } + } +} diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.tsx new file mode 100644 index 0000000000..3c997da459 --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPage.tsx @@ -0,0 +1,134 @@ +import './MetricPage.styles.scss'; + +import { Typography } from 'antd'; +import cx from 'classnames'; +import { CardContainer } from 'container/GridCardLayout/styles'; +import { useIsDarkMode } from 'hooks/useDarkMode'; +import { ChevronDown, ChevronUp } from 'lucide-react'; +import { useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Widgets } from 'types/api/dashboard/getAll'; + +import MetricColumnGraphs from './MetricColumnGraphs'; +import MetricPageGridGraph from './MetricPageGraph'; +import { + cpuRecentUtilizationWidgetData, + currentOffsetPartitionWidgetData, + insyncReplicasWidgetData, + jvmGcCollectionsElapsedWidgetData, + jvmGCCountWidgetData, + jvmMemoryHeapWidgetData, + oldestOffsetWidgetData, + partitionCountPerTopicWidgetData, +} from './MetricPageUtil'; + +interface CollapsibleMetricSectionProps { + title: string; + description: string; + graphCount: Widgets[]; + isCollapsed: 
boolean; + onToggle: () => void; +} + +function CollapsibleMetricSection({ + title, + description, + graphCount, + isCollapsed, + onToggle, +}: CollapsibleMetricSectionProps): JSX.Element { + const isDarkMode = useIsDarkMode(); + + return ( +
+ +
+
+ {title} + {isCollapsed ? ( + + ) : ( + + )} +
+
+
+ {!isCollapsed && ( + <> + + {description} + +
+ {graphCount.map((widgetData) => ( + + ))} +
+ + )} +
+	);
+}
+
+function MetricPage(): JSX.Element {
+	const [collapsedSections, setCollapsedSections] = useState<{
+		[key: string]: boolean;
+	}>({
+		brokerJVMMetrics: false,
+		partitionMetrics: false,
+	});
+
+	const toggleCollapse = (key: string): void => {
+		setCollapsedSections((prev) => ({
+			...prev,
+			[key]: !prev[key],
+		}));
+	};
+
+	const { t } = useTranslation('messagingQueues');
+
+	const metricSections = [
+		{
+			key: 'brokerJVMMetrics',
+			title: t('metricGraphCategory.brokerJVMMetrics.title'),
+			description: t('metricGraphCategory.brokerJVMMetrics.description'),
+			graphCount: [
+				jvmGCCountWidgetData,
+				jvmGcCollectionsElapsedWidgetData,
+				cpuRecentUtilizationWidgetData,
+				jvmMemoryHeapWidgetData,
+			],
+		},
+		{
+			key: 'partitionMetrics',
+			title: t('metricGraphCategory.partitionMetrics.title'),
+			description: t('metricGraphCategory.partitionMetrics.description'),
+			graphCount: [
+				partitionCountPerTopicWidgetData,
+				currentOffsetPartitionWidgetData,
+				oldestOffsetWidgetData,
+				insyncReplicasWidgetData,
+			],
+		},
+	];
+
+	return (
+
+ + {metricSections.map(({ key, title, description, graphCount }) => ( + toggleCollapse(key)} + /> + ))} +
+ ); +} + +export default MetricPage; diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageGraph.tsx b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageGraph.tsx new file mode 100644 index 0000000000..248dc35178 --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageGraph.tsx @@ -0,0 +1,59 @@ +import './MetricPage.styles.scss'; + +import { QueryParams } from 'constants/query'; +import { PANEL_TYPES } from 'constants/queryBuilder'; +import { ViewMenuAction } from 'container/GridCardLayout/config'; +import GridCard from 'container/GridCardLayout/GridCard'; +import { Card } from 'container/GridCardLayout/styles'; +import { useIsDarkMode } from 'hooks/useDarkMode'; +import useUrlQuery from 'hooks/useUrlQuery'; +import { useCallback } from 'react'; +import { useDispatch } from 'react-redux'; +import { useHistory, useLocation } from 'react-router-dom'; +import { UpdateTimeInterval } from 'store/actions'; +import { Widgets } from 'types/api/dashboard/getAll'; + +function MetricPageGridGraph({ + widgetData, +}: { + widgetData: Widgets; +}): JSX.Element { + const history = useHistory(); + const { pathname } = useLocation(); + const dispatch = useDispatch(); + const urlQuery = useUrlQuery(); + const isDarkMode = useIsDarkMode(); + + const onDragSelect = useCallback( + (start: number, end: number) => { + const startTimestamp = Math.trunc(start); + const endTimestamp = Math.trunc(end); + + urlQuery.set(QueryParams.startTime, startTimestamp.toString()); + urlQuery.set(QueryParams.endTime, endTimestamp.toString()); + const generatedUrl = `${pathname}?${urlQuery.toString()}`; + history.push(generatedUrl); + + if (startTimestamp !== endTimestamp) { + dispatch(UpdateTimeInterval('custom', [startTimestamp, endTimestamp])); + } + }, + [dispatch, history, pathname, urlQuery], + ); + + return ( + + + + ); +} + +export default MetricPageGridGraph; diff --git a/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageUtil.ts b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageUtil.ts new file mode 100644 index 0000000000..144b573c5f --- /dev/null +++ b/frontend/src/pages/MessagingQueues/MQDetails/MetricPage/MetricPageUtil.ts @@ -0,0 +1,1092 @@ +/* eslint-disable sonarjs/no-duplicate-string */ +import { PANEL_TYPES } from 'constants/queryBuilder'; +import { GetWidgetQueryBuilderProps } from 'container/MetricsApplication/types'; +import { Widgets } from 'types/api/dashboard/getAll'; +import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse'; +import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData'; +import { EQueryType } from 'types/common/dashboard'; +import { DataSource } from 'types/common/queryBuilder'; +import { v4 as uuid } from 'uuid'; + +interface GetWidgetQueryProps { + title: string; + description: string; + queryData: IBuilderQuery[]; +} + +interface GetWidgetQueryPropsReturn extends GetWidgetQueryBuilderProps { + description?: string; + nullZeroValues: string; +} + +export const getWidgetQueryBuilder = ({ + query, + title = '', + panelTypes, + yAxisUnit = '', + fillSpans = false, + id, + nullZeroValues, + description, +}: GetWidgetQueryPropsReturn): Widgets => ({ + description: description || '', + id: id || uuid(), + isStacked: false, + nullZeroValues: nullZeroValues || '', + opacity: '1', + panelTypes, + query, + timePreferance: 'GLOBAL_TIME', + title, + yAxisUnit, + softMax: null, + softMin: null, + selectedLogFields: [], + selectedTracesFields: [], + fillSpans, +}); 
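// Illustrative sketch (annotation, not part of the patch): every widget
// constant below composes the two helpers in this file — getWidgetQuery
// (defined next) builds the query-builder envelope and getWidgetQueryBuilder
// (above) wraps it into a renderable Widgets object. For a hypothetical gauge
// metric named 'kafka_example_metric', the wiring would look like:

const exampleWidgetData = getWidgetQueryBuilder(
	getWidgetQuery({
		queryData: [
			{
				aggregateAttribute: {
					dataType: DataTypes.Float64,
					id: 'kafka_example_metric--float64--Gauge--true',
					isColumn: true,
					isJSON: false,
					key: 'kafka_example_metric', // hypothetical metric name
					type: 'Gauge',
				},
				aggregateOperator: 'avg',
				dataSource: DataSource.METRICS,
				disabled: false,
				expression: 'A',
				filters: { items: [], op: 'AND' },
				functions: [],
				groupBy: [],
				having: [],
				legend: 'Example Metric',
				limit: null,
				orderBy: [],
				queryName: 'A',
				reduceTo: 'avg',
				spaceAggregation: 'avg',
				stepInterval: 60,
				timeAggregation: 'avg',
			},
		],
		title: 'Example Metric',
		description: 'Average of a hypothetical gauge metric, for illustration.',
	}),
);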
+ +export function getWidgetQuery( + props: GetWidgetQueryProps, +): GetWidgetQueryPropsReturn { + const { title, description } = props; + return { + title, + yAxisUnit: 'none', + panelTypes: PANEL_TYPES.TIME_SERIES, + fillSpans: false, + description, + nullZeroValues: 'zero', + query: { + queryType: EQueryType.QUERY_BUILDER, + promql: [], + builder: { + queryData: props.queryData, + queryFormulas: [], + }, + clickhouse_sql: [], + id: uuid(), + }, + }; +} + +export const requestTimesWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_request_time_avg--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_request_time_avg', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Request Times', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Request Times', + description: + 'This metric is used to measure the average latency experienced by requests across the Kafka broker.', + }), +); + +export const brokerCountWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_brokers--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_brokers', + type: 'Gauge', + }, + aggregateOperator: 'sum', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Broker count', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'sum', + }, + ], + title: 'Broker Count', + description: 'Total number of active brokers in the Kafka cluster.\n', + }), +); + +export const producerFetchRequestPurgatoryWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_purgatory_size--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_purgatory_size', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Producer and Fetch Request Purgatory', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Producer and Fetch Request Purgatory', + description: + 'Measures the number of requests that Kafka brokers have received but cannot immediately fulfill', + }), +); + +export const brokerNetworkThroughputWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: + 'kafka_server_brokertopicmetrics_bytesoutpersec_oneminuterate--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_server_brokertopicmetrics_bytesoutpersec_oneminuterate', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Broker Network Throughput', + limit: null, + 
orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Broker Network Throughput', + description: + 'Helps gauge the data throughput from the Kafka broker to consumer clients, focusing on the network usage associated with serving messages to consumers.', + }), +); + +export const ioWaitTimeWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_producer_io_waittime_total--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'kafka_producer_io_waittime_total', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'I/O Wait Time', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + title: 'I/O Wait Time', + description: + 'This metric measures the total time that producers are in an I/O wait state, indicating potential bottlenecks in data transmission from producers to Kafka brokers.', + }), +); + +export const requestResponseWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_producer_request_rate--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_producer_request_rate', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Request Rate', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_producer_response_rate--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_producer_response_rate', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'B', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Response Rate', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Request and Response Rate', + description: + "Indicates how many requests the producer is sending per second, reflecting the intensity of the producer's interaction with the Kafka cluster. 
Also, helps Kafka administrators gauge the responsiveness of brokers to producer requests.", + }), +); + +export const averageRequestLatencyWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_producer_request_latency_avg--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_producer_request_latency_avg', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Average Request Latency', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Average Request Latency', + description: + 'Helps Kafka administrators and developers understand the average latency experienced by producer requests.', + }), +); + +export const kafkaProducerByteRateWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_producer_byte_rate--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_producer_byte_rate', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'topic--string--tag--false', + isColumn: false, + isJSON: false, + key: 'topic', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'kafka_producer_byte_rate', + description: + 'Helps measure the data output rate from the producer, indicating the load a producer is placing on Kafka brokers.', + }), +); + +export const bytesConsumedWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_consumer_bytes_consumed_rate--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_consumer_bytes_consumed_rate', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Bytes Consumed', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Bytes Consumed', + description: + 'Helps Kafka administrators monitor the data consumption rate of a consumer group, showing how much data (in bytes) is being read from the Kafka cluster over time.', + }), +); + +export const consumerOffsetWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_consumer_group_offset--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_consumer_group_offset', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'group--string--tag--false', + isColumn: false, + isJSON: false, + key: 'group', + type: 'tag', + }, + { + dataType: 
DataTypes.String,
+					id: 'topic--string--tag--false',
+					isColumn: false,
+					isJSON: false,
+					key: 'topic',
+					type: 'tag',
+				},
+				{
+					dataType: DataTypes.String,
+					id: 'partition--string--tag--false',
+					isColumn: false,
+					isJSON: false,
+					key: 'partition',
+					type: 'tag',
+				},
+			],
+				having: [],
+				legend: '',
+				limit: null,
+				orderBy: [],
+				queryName: 'A',
+				reduceTo: 'avg',
+				spaceAggregation: 'avg',
+				stepInterval: 60,
+				timeAggregation: 'avg',
+			},
+		],
+		title: 'Consumer Offset',
+		description: 'Current offset of each consumer group for each topic partition',
+	}),
+);
+
+export const consumerGroupMemberWidgetData = getWidgetQueryBuilder(
+	getWidgetQuery({
+		queryData: [
+			{
+				aggregateAttribute: {
+					dataType: DataTypes.Float64,
+					id: 'kafka_consumer_group_members--float64--Gauge--true',
+					isColumn: true,
+					isJSON: false,
+					key: 'kafka_consumer_group_members',
+					type: 'Gauge',
+				},
+				aggregateOperator: 'sum',
+				dataSource: DataSource.METRICS,
+				disabled: false,
+				expression: 'A',
+				filters: {
+					items: [],
+					op: 'AND',
+				},
+				functions: [],
+				groupBy: [
+					{
+						dataType: DataTypes.String,
+						id: 'group--string--tag--false',
+						isColumn: false,
+						isJSON: false,
+						key: 'group',
+						type: 'tag',
+					},
+				],
+				having: [],
+				legend: '',
+				limit: null,
+				orderBy: [],
+				queryName: 'A',
+				reduceTo: 'avg',
+				spaceAggregation: 'sum',
+				stepInterval: 60,
+				timeAggregation: 'sum',
+			},
+		],
+		title: 'Consumer Group Members',
+		description: 'Number of active members in each group',
+	}),
+);
+
+export const consumerLagByGroupWidgetData = getWidgetQueryBuilder(
+	getWidgetQuery({
+		queryData: [
+			{
+				aggregateAttribute: {
+					dataType: DataTypes.Float64,
+					id: 'kafka_consumer_group_lag--float64--Gauge--true',
+					isColumn: true,
+					isJSON: false,
+					key: 'kafka_consumer_group_lag',
+					type: 'Gauge',
+				},
+				aggregateOperator: 'avg',
+				dataSource: DataSource.METRICS,
+				disabled: false,
+				expression: 'A',
+				filters: {
+					items: [],
+					op: 'AND',
+				},
+				functions: [],
+				groupBy: [
+					{
+						dataType: DataTypes.String,
+						id: 'group--string--tag--false',
+						isColumn: false,
+						isJSON: false,
+						key: 'group',
+						type: 'tag',
+					},
+					{
+						dataType: DataTypes.String,
+						id: 'topic--string--tag--false',
+						isColumn: false,
+						isJSON: false,
+						key: 'topic',
+						type: 'tag',
+					},
+					{
+						dataType: DataTypes.String,
+						id: 'partition--string--tag--false',
+						isColumn: false,
+						isJSON: false,
+						key: 'partition',
+						type: 'tag',
+					},
+				],
+				having: [],
+				legend: '',
+				limit: null,
+				orderBy: [],
+				queryName: 'A',
+				reduceTo: 'avg',
+				spaceAggregation: 'avg',
+				stepInterval: 60,
+				timeAggregation: 'avg',
+			},
+		],
+		title: 'Consumer Lag by Group',
+		description:
+			'Helps Kafka administrators assess whether consumer groups are keeping up with the incoming data stream or falling behind',
+	}),
+);
+
+export const consumerFetchRateWidgetData = getWidgetQueryBuilder(
+	getWidgetQuery({
+		queryData: [
+			{
+				aggregateAttribute: {
+					dataType: DataTypes.Float64,
+					id: 'kafka_consumer_fetch_rate--float64--Gauge--true',
+					isColumn: true,
+					isJSON: false,
+					key: 'kafka_consumer_fetch_rate',
+					type: 'Gauge',
+				},
+				aggregateOperator: 'avg',
+				dataSource: DataSource.METRICS,
+				disabled: false,
+				expression: 'A',
+				filters: {
+					items: [],
+					op: 'AND',
+				},
+				functions: [],
+				groupBy: [
+					{
+						dataType: DataTypes.String,
+						id: 'service_name--string--tag--false',
+						isColumn: false,
+						isJSON: false,
+						key: 'service_name',
+						type: 'tag',
+					},
+				],
+				having: [],
+				legend: '',
+				limit: null,
+				orderBy: [],
+				queryName: 'A',
+				reduceTo: 'avg',
+				spaceAggregation: 'avg',
+
stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Consumer Fetch Rate', + description: + 'Metric measures the rate at which fetch requests are made by a Kafka consumer to the broker, typically in requests per second.', + }), +); + +export const messagesConsumedWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_consumer_records_consumed_rate--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_consumer_records_consumed_rate', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'Messages Consumed', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Messages Consumed', + description: + 'Measures the rate at which a Kafka consumer is consuming records (messages) per second from Kafka brokers.', + }), +); + +export const jvmGCCountWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'jvm_gc_collections_count--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'jvm_gc_collections_count', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'JVM GC Count', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + title: 'JVM GC Count', + description: + 'Tracks the total number of garbage collection (GC) events that have occurred in the Java Virtual Machine (JVM).', + }), +); + +export const jvmGcCollectionsElapsedWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'jvm_gc_collections_elapsed--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'jvm_gc_collections_elapsed', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'garbagecollector', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + title: 'jvm_gc_collections_elapsed', + description: + 'Measures the total time (usually in milliseconds) spent on garbage collection (GC) events in the Java Virtual Machine (JVM).', + }), +); + +export const cpuRecentUtilizationWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'jvm_cpu_recent_utilization--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'jvm_cpu_recent_utilization', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'CPU utilization', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'CPU Recent Utilization', + 
description: + 'This metric measures the recent CPU usage by the Java Virtual Machine (JVM), typically expressed as a percentage.', + }), +); + +export const jvmMemoryHeapWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'jvm_memory_heap_max--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'jvm_memory_heap_max', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: 'JVM memory heap', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'JVM memory heap', + description: + 'The metric represents the maximum amount of heap memory available to the Java Virtual Machine (JVM)', + }), +); + +export const partitionCountPerTopicWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_topic_partitions--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_topic_partitions', + type: 'Gauge', + }, + aggregateOperator: 'sum', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'topic--string--tag--false', + isColumn: false, + isJSON: false, + key: 'topic', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'sum', + }, + ], + title: 'Partition Count per Topic', + description: 'Number of partitions for each topic', + }), +); + +export const currentOffsetPartitionWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_partition_current_offset--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_partition_current_offset', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'topic--string--tag--false', + isColumn: false, + isJSON: false, + key: 'topic', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'partition--string--tag--false', + isColumn: false, + isJSON: false, + key: 'partition', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Current Offset ( Partition )', + description: + 'Current offset of each partition, showing the latest position in each partition', + }), +); + +export const oldestOffsetWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_partition_oldest_offset--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_partition_oldest_offset', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 
'topic--string--tag--false', + isColumn: false, + isJSON: false, + key: 'topic', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'partition--string--tag--false', + isColumn: false, + isJSON: false, + key: 'partition', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'Oldest Offset (Partition)', + description: + 'Oldest offset of each partition to identify log retention and offset range.', + }), +); + +export const insyncReplicasWidgetData = getWidgetQueryBuilder( + getWidgetQuery({ + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'kafka_partition_replicas_in_sync--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'kafka_partition_replicas_in_sync', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'topic--string--tag--false', + isColumn: false, + isJSON: false, + key: 'topic', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'partition--string--tag--false', + isColumn: false, + isJSON: false, + key: 'partition', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'avg', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + title: 'In-Sync Replicas (ISR)', + description: + 'Count of in-sync replicas for each partition to ensure data availability.', + }), +); diff --git a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss index 22a1bed584..4e7bd9a64c 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss +++ b/frontend/src/pages/MessagingQueues/MessagingQueueHealthCheck/MessagingQueueHealthCheck.styles.scss @@ -166,3 +166,77 @@ padding-right: 8px; } } + +.lightMode { + .mq-health-check-modal { + .ant-modal-content { + border: 1px solid var(--bg-vanilla-400); + background: var(--bg-vanilla-200); + + .ant-modal-header { + border-bottom: 1px solid var(--bg-vanilla-400); + background: var(--bg-vanilla-200); + + .ant-modal-title { + color: var(--bg-ink-300); + } + } + + .modal-content { + background: var(--bg-vanilla-100); + + .attribute-select { + .ant-select-selector { + border: 1px solid var(--bg-vanilla-300); + background: var(--bg-vanilla-200); + } + } + + .tree-text { + color: var(--bg-ink-300); + } + + .ant-tree { + .ant-tree-title { + .attribute-error-title { + color: var(--bg-amber-500); + + .tree-text { + color: var(--bg-amber-500); + } + } + + .attribute-success-title { + .success-attribute-icon { + color: var(--bg-ink-300); + } + } + } + } + } + + .loader-container { + background: var(--bg-ink-300); + } + } + } + + .config-btn { + background: var(--bg-vanilla-300); + + &.missing-config-btn { + background: var(--bg-amber-100); + color: var(--bg-amber-500); + + &:hover { + color: var(--bg-amber-600) !important; + } + } + + .missing-config-btn { + .config-btn-content { + border-right: 1px solid var(--bg-amber-600); + } + } + } +} diff --git a/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss b/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss index 
9959bebe26..9edcd928a3 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss +++ b/frontend/src/pages/MessagingQueues/MessagingQueues.styles.scss @@ -222,6 +222,12 @@ } } + :nth-child(2), + :nth-child(4) { + border-left: none !important; + border-right: none !important; + } + &.summary-section { .overview-info-card { min-height: 144px; @@ -331,6 +337,10 @@ .messaging-breadcrumb { color: var(--bg-ink-400); border-bottom: 1px solid var(--bg-vanilla-300); + + .message-queue-text { + color: var(--bg-ink-400); + } } .messaging-header { color: var(--bg-ink-400); diff --git a/frontend/src/pages/MessagingQueues/MessagingQueues.tsx b/frontend/src/pages/MessagingQueues/MessagingQueues.tsx index 34063fc3b8..74bc1f3796 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueues.tsx +++ b/frontend/src/pages/MessagingQueues/MessagingQueues.tsx @@ -156,7 +156,7 @@ function MessagingQueues(): JSX.Element { -
+

{t('summarySection.producer.title')}

@@ -174,7 +174,7 @@ function MessagingQueues(): JSX.Element {

-
+

{t('summarySection.partition.title')}

@@ -210,6 +210,24 @@ function MessagingQueues(): JSX.Element {

+
+
+

{t('summarySection.metricPage.title')}

+

+ {t('summarySection.metricPage.description')} +

+
+
+ +
+
diff --git a/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts b/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts index 024c2dfb1c..ec7fadcdce 100644 --- a/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts +++ b/frontend/src/pages/MessagingQueues/MessagingQueuesUtils.ts @@ -222,7 +222,8 @@ export enum MessagingQueuesViewTypeOptions { ConsumerLag = 'consumerLag', PartitionLatency = 'partitionLatency', ProducerLatency = 'producerLatency', - ConsumerLatency = 'consumerLatency', + DropRate = 'dropRate', + MetricPage = 'metricPage', } export const MessagingQueuesViewType = { @@ -240,7 +241,11 @@ export const MessagingQueuesViewType = { }, dropRate: { label: 'Drop Rate view', - value: 'dropRate', + value: MessagingQueuesViewTypeOptions.DropRate, + }, + metricPage: { + label: 'Metric view', + value: MessagingQueuesViewTypeOptions.MetricPage, }, }; From b64326070cd71c564c985e61621ab9fc8ce7b43e Mon Sep 17 00:00:00 2001 From: Yunus M Date: Mon, 11 Nov 2024 12:15:43 +0530 Subject: [PATCH 18/30] [Snyk] Fix for 2 vulnerabilities (#6215) * fix: frontend/package.json & frontend/yarn.lock to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-JS-UPLOT-6209224 - https://snyk.io/vuln/SNYK-JS-VUETEMPLATECOMPILER-8219888 * chore: upgrade design tokens to 1.1.3 --------- Co-authored-by: snyk-bot Co-authored-by: ahmadshaheer --- frontend/package.json | 2 +- .../src/container/ExplorerOptions/utils.ts | 4 +- frontend/yarn.lock | 483 ++---------------- 3 files changed, 50 insertions(+), 439 deletions(-) diff --git a/frontend/package.json b/frontend/package.json index 09316d530e..320fa28f85 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -42,7 +42,7 @@ "@radix-ui/react-tooltip": "1.0.7", "@sentry/react": "7.102.1", "@sentry/webpack-plugin": "2.16.0", - "@signozhq/design-tokens": "0.0.8", + "@signozhq/design-tokens": "1.1.4", "@uiw/react-md-editor": "3.23.5", "@visx/group": "3.3.0", "@visx/shape": "3.5.0", diff --git a/frontend/src/container/ExplorerOptions/utils.ts b/frontend/src/container/ExplorerOptions/utils.ts index d94e64161e..2aa6c49b91 100644 --- a/frontend/src/container/ExplorerOptions/utils.ts +++ b/frontend/src/container/ExplorerOptions/utils.ts @@ -1,4 +1,4 @@ -import { Color } from '@signozhq/design-tokens'; +import { Color, ColorType } from '@signozhq/design-tokens'; import { showErrorNotification } from 'components/ExplorerCard/utils'; import { LOCALSTORAGE } from 'constants/localStorage'; import { QueryParams } from 'constants/query'; @@ -8,7 +8,7 @@ import { DataSource } from 'types/common/queryBuilder'; import { SaveNewViewHandlerProps } from './types'; -export const getRandomColor = (): Color => { +export const getRandomColor = (): ColorType => { const colorKeys = Object.keys(Color) as (keyof typeof Color)[]; const randomKey = colorKeys[Math.floor(Math.random() * colorKeys.length)]; return Color[randomKey]; diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 36b62c1282..1331655a2c 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -860,11 +860,6 @@ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.0.tgz#da950e622420bf96ca0d0f2909cdddac3acd8719" integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw== -"@babel/parser@^7.23.6": - version "7.23.6" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.6.tgz#ba1c9e512bda72a47e285ae42aff9d2a635a9e3b" - integrity 
sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ== - "@babel/parser@^7.24.0", "@babel/parser@^7.24.1": version "7.24.1" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.1.tgz#1e416d3627393fab1cb5b0f2f1796a100ae9133a" @@ -3099,48 +3094,6 @@ "@types/mdx" "^2.0.0" "@types/react" ">=16" -"@microsoft/api-extractor-model@7.28.3": - version "7.28.3" - resolved "https://registry.yarnpkg.com/@microsoft/api-extractor-model/-/api-extractor-model-7.28.3.tgz#f6a213e41a2274d5195366b646954daee39e8493" - integrity sha512-wT/kB2oDbdZXITyDh2SQLzaWwTOFbV326fP0pUwNW00WeliARs0qjmXBWmGWardEzp2U3/axkO3Lboqun6vrig== - dependencies: - "@microsoft/tsdoc" "0.14.2" - "@microsoft/tsdoc-config" "~0.16.1" - "@rushstack/node-core-library" "3.62.0" - -"@microsoft/api-extractor@7.39.0": - version "7.39.0" - resolved "https://registry.yarnpkg.com/@microsoft/api-extractor/-/api-extractor-7.39.0.tgz#41c25f7f522e8b9376debda07364ff234e602eff" - integrity sha512-PuXxzadgnvp+wdeZFPonssRAj/EW4Gm4s75TXzPk09h3wJ8RS3x7typf95B4vwZRrPTQBGopdUl+/vHvlPdAcg== - dependencies: - "@microsoft/api-extractor-model" "7.28.3" - "@microsoft/tsdoc" "0.14.2" - "@microsoft/tsdoc-config" "~0.16.1" - "@rushstack/node-core-library" "3.62.0" - "@rushstack/rig-package" "0.5.1" - "@rushstack/ts-command-line" "4.17.1" - colors "~1.2.1" - lodash "~4.17.15" - resolve "~1.22.1" - semver "~7.5.4" - source-map "~0.6.1" - typescript "5.3.3" - -"@microsoft/tsdoc-config@~0.16.1": - version "0.16.2" - resolved "https://registry.yarnpkg.com/@microsoft/tsdoc-config/-/tsdoc-config-0.16.2.tgz#b786bb4ead00d54f53839a458ce626c8548d3adf" - integrity sha512-OGiIzzoBLgWWR0UdRJX98oYO+XKGf7tiK4Zk6tQ/E4IJqGCe7dvkTvgDZV5cFJUzLGDOjeAXrnZoA6QkVySuxw== - dependencies: - "@microsoft/tsdoc" "0.14.2" - ajv "~6.12.6" - jju "~1.4.0" - resolve "~1.19.0" - -"@microsoft/tsdoc@0.14.2": - version "0.14.2" - resolved "https://registry.yarnpkg.com/@microsoft/tsdoc/-/tsdoc-0.14.2.tgz#c3ec604a0b54b9a9b87e9735dfc59e1a5da6a5fb" - integrity sha512-9b8mPpKrfeGRuhFH5iO1iwCLeIIsV6+H1sRfxbkoGXIyQE2BTsPd9zqSqQJ+pv5sJ/hT5M1zvOFL02MnEezFug== - "@monaco-editor/loader@^1.3.3": version "1.3.3" resolved "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.3.3.tgz" @@ -3546,46 +3499,6 @@ resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.20.0.tgz#03554155b45d8b529adf635b2f6ad1165d70d8b4" integrity sha512-mUnk8rPJBI9loFDZ+YzPGdeniYK+FTmRD1TMCz7ev2SNIozyKKpnGgsxO34u6Z4z/t0ITuu7voi/AshfsGsgFg== -"@rollup/pluginutils@^5.1.0": - version "5.1.0" - resolved "https://registry.yarnpkg.com/@rollup/pluginutils/-/pluginutils-5.1.0.tgz#7e53eddc8c7f483a4ad0b94afb1f7f5fd3c771e0" - integrity sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g== - dependencies: - "@types/estree" "^1.0.0" - estree-walker "^2.0.2" - picomatch "^2.3.1" - -"@rushstack/node-core-library@3.62.0": - version "3.62.0" - resolved "https://registry.yarnpkg.com/@rushstack/node-core-library/-/node-core-library-3.62.0.tgz#a30a44a740b522944165f0faa6644134eb95be1d" - integrity sha512-88aJn2h8UpSvdwuDXBv1/v1heM6GnBf3RjEy6ZPP7UnzHNCqOHA2Ut+ScYUbXcqIdfew9JlTAe3g+cnX9xQ/Aw== - dependencies: - colors "~1.2.1" - fs-extra "~7.0.1" - import-lazy "~4.0.0" - jju "~1.4.0" - resolve "~1.22.1" - semver "~7.5.4" - z-schema "~5.0.2" - -"@rushstack/rig-package@0.5.1": - version "0.5.1" - resolved "https://registry.yarnpkg.com/@rushstack/rig-package/-/rig-package-0.5.1.tgz#6c9c283cc96b5bb1eae9875946d974ac5429bb21" - integrity 
sha512-pXRYSe29TjRw7rqxD4WS3HN/sRSbfr+tJs4a9uuaSIBAITbUggygdhuG0VrO0EO+QqH91GhYMN4S6KRtOEmGVA== - dependencies: - resolve "~1.22.1" - strip-json-comments "~3.1.1" - -"@rushstack/ts-command-line@4.17.1": - version "4.17.1" - resolved "https://registry.yarnpkg.com/@rushstack/ts-command-line/-/ts-command-line-4.17.1.tgz#c78db928ce5b93f2e98fd9e14c24f3f3876e57f1" - integrity sha512-2jweO1O57BYP5qdBGl6apJLB+aRIn5ccIRTPDyULh0KMwVzFqWtw6IZWt1qtUoZD/pD2RNkIOosH6Cq45rIYeg== - dependencies: - "@types/argparse" "1.0.38" - argparse "~1.0.9" - colors "~1.2.1" - string-argv "~0.3.1" - "@sentry-internal/feedback@7.102.1": version "7.102.1" resolved "https://registry.yarnpkg.com/@sentry-internal/feedback/-/feedback-7.102.1.tgz#747f88c2881c76fddd16bce57cc4bc17b4c2af93" @@ -3750,13 +3663,10 @@ unplugin "1.0.1" uuid "^9.0.0" -"@signozhq/design-tokens@0.0.8": - version "0.0.8" - resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-0.0.8.tgz#368dc92cfe01d0cd893df140445c5d9dfd944a88" - integrity sha512-YUxQw6w7iyUMTBxj82nFZQNRsg7Boej3YM6K5bYfDMQg0MqvWQCWsP7EkyLHu/TiyOZwZWb++vzXG6m+YJX9bw== - dependencies: - style-dictionary "3.8.0" - vite-plugin-dts "^3.6.4" +"@signozhq/design-tokens@1.1.4": + version "1.1.4" + resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-1.1.4.tgz#5d5de5bd9d19b6a3631383db015cc4b70c3f7661" + integrity sha512-ICZz5szxTq8NcKAsk6LP+nSybPyEcyy8eu2zfxlPQCnJ1YjJP1PglaKLlF0N6+D60gAd3yC5he06BqR8/HxjNg== "@sinclair/typebox@^0.25.16": version "0.25.24" @@ -3876,11 +3786,6 @@ dependencies: "@types/estree" "*" -"@types/argparse@1.0.38": - version "1.0.38" - resolved "https://registry.yarnpkg.com/@types/argparse/-/argparse-1.0.38.tgz#a81fd8606d481f873a3800c6ebae4f1d768a56a9" - integrity sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA== - "@types/aria-query@^5.0.1": version "5.0.1" resolved "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.1.tgz" @@ -4859,68 +4764,7 @@ d3-time-format "4.1.0" internmap "2.0.3" -"@volar/language-core@1.11.1", "@volar/language-core@~1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@volar/language-core/-/language-core-1.11.1.tgz#ecdf12ea8dc35fb8549e517991abcbf449a5ad4f" - integrity sha512-dOcNn3i9GgZAcJt43wuaEykSluAuOkQgzni1cuxLxTV0nJKanQztp7FxyswdRILaKH+P2XZMPRp2S4MV/pElCw== - dependencies: - "@volar/source-map" "1.11.1" - -"@volar/source-map@1.11.1", "@volar/source-map@~1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@volar/source-map/-/source-map-1.11.1.tgz#535b0328d9e2b7a91dff846cab4058e191f4452f" - integrity sha512-hJnOnwZ4+WT5iupLRnuzbULZ42L7BWWPMmruzwtLhJfpDVoZLjNBxHDi2sY2bgZXCKlpU5XcsMFoYrsQmPhfZg== - dependencies: - muggle-string "^0.3.1" - -"@volar/typescript@~1.11.1": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@volar/typescript/-/typescript-1.11.1.tgz#ba86c6f326d88e249c7f5cfe4b765be3946fd627" - integrity sha512-iU+t2mas/4lYierSnoFOeRFQUhAEMgsFuQxoxvwn5EdQopw43j+J27a4lt9LMInx1gLJBC6qL14WYGlgymaSMQ== - dependencies: - "@volar/language-core" "1.11.1" - path-browserify "^1.0.1" - -"@vue/compiler-core@3.4.4": - version "3.4.4" - resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.4.4.tgz#ba1ca008e95f118449cab79bdab3f7506bab2892" - integrity sha512-U5AdCN+6skzh2bSJrkMj2KZsVkUpgK8/XlxjSRYQZhNPcvt9/kmgIMpFEiTyK+Dz5E1J+8o8//BEIX+bakgVSw== - dependencies: - "@babel/parser" "^7.23.6" - "@vue/shared" "3.4.4" - entities "^4.5.0" - estree-walker "^2.0.2" - source-map-js "^1.0.2" - 
-"@vue/compiler-dom@^3.3.0": - version "3.4.4" - resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.4.4.tgz#a11bba8af691b58700c479ce893b02bf71bb089a" - integrity sha512-iSwkdDULCN+Vr8z6uwdlL044GJ/nUmECxP9vu7MzEs4Qma0FwDLYvnvRcyO0ZITuu3Os4FptGUDnhi1kOLSaGw== - dependencies: - "@vue/compiler-core" "3.4.4" - "@vue/shared" "3.4.4" - -"@vue/language-core@1.8.27", "@vue/language-core@^1.8.26": - version "1.8.27" - resolved "https://registry.yarnpkg.com/@vue/language-core/-/language-core-1.8.27.tgz#2ca6892cb524e024a44e554e4c55d7a23e72263f" - integrity sha512-L8Kc27VdQserNaCUNiSFdDl9LWT24ly8Hpwf1ECy3aFb9m6bDhBGQYOujDm21N7EW3moKIOKEanQwe1q5BK+mA== - dependencies: - "@volar/language-core" "~1.11.1" - "@volar/source-map" "~1.11.1" - "@vue/compiler-dom" "^3.3.0" - "@vue/shared" "^3.3.0" - computeds "^0.0.1" - minimatch "^9.0.3" - muggle-string "^0.3.1" - path-browserify "^1.0.1" - vue-template-compiler "^2.7.14" - -"@vue/shared@3.4.4", "@vue/shared@^3.3.0": - version "3.4.4" - resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.4.4.tgz#26e4e62a5fbfb39c25e9e54d21eeb852f1c83a7a" - integrity sha512-abSgiVRhfjfl3JALR/cSuBl74hGJ3SePgf1mKzodf1eMWLwHZbfEGxT2cNJSsNiw44jEgrO7bNkhchaWA7RwNw== - -"@webassemblyjs/ast@1.12.1", "@webassemblyjs/ast@^1.12.1": +"@webassemblyjs/ast@1.12.1": version "1.12.1" resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.12.1.tgz#bb16a0e8b1914f979f45864c23819cc3e3f0d4bb" integrity sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg== @@ -4928,16 +4772,34 @@ "@webassemblyjs/helper-numbers" "1.11.6" "@webassemblyjs/helper-wasm-bytecode" "1.11.6" +"@webassemblyjs/ast@^1.12.1": + version "1.14.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.14.1.tgz#a9f6a07f2b03c95c8d38c4536a1fdfb521ff55b6" + integrity sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ== + dependencies: + "@webassemblyjs/helper-numbers" "1.13.2" + "@webassemblyjs/helper-wasm-bytecode" "1.13.2" + "@webassemblyjs/floating-point-hex-parser@1.11.6": version "1.11.6" resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz#dacbcb95aff135c8260f77fa3b4c5fea600a6431" integrity sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw== +"@webassemblyjs/floating-point-hex-parser@1.13.2": + version "1.13.2" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz#fcca1eeddb1cc4e7b6eed4fc7956d6813b21b9fb" + integrity sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA== + "@webassemblyjs/helper-api-error@1.11.6": version "1.11.6" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz#6132f68c4acd59dcd141c44b18cbebbd9f2fa768" integrity sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q== +"@webassemblyjs/helper-api-error@1.13.2": + version "1.13.2" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz#e0a16152248bc38daee76dd7e21f15c5ef3ab1e7" + integrity sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ== + "@webassemblyjs/helper-buffer@1.12.1": version "1.12.1" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz#6df20d272ea5439bf20ab3492b7fb70e9bfcb3f6" @@ 
-4952,11 +4814,25 @@ "@webassemblyjs/helper-api-error" "1.11.6" "@xtuc/long" "4.2.2" +"@webassemblyjs/helper-numbers@1.13.2": + version "1.13.2" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz#dbd932548e7119f4b8a7877fd5a8d20e63490b2d" + integrity sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA== + dependencies: + "@webassemblyjs/floating-point-hex-parser" "1.13.2" + "@webassemblyjs/helper-api-error" "1.13.2" + "@xtuc/long" "4.2.2" + "@webassemblyjs/helper-wasm-bytecode@1.11.6": version "1.11.6" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz#bb2ebdb3b83aa26d9baad4c46d4315283acd51e9" integrity sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA== +"@webassemblyjs/helper-wasm-bytecode@1.13.2": + version "1.13.2" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz#e556108758f448aae84c850e593ce18a0eb31e0b" + integrity sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA== + "@webassemblyjs/helper-wasm-section@1.12.1": version "1.12.1" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz#3da623233ae1a60409b509a52ade9bc22a37f7bf" @@ -5223,7 +5099,7 @@ ajv-keywords@^5.1.0: dependencies: fast-deep-equal "^3.1.3" -ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.12.6, ajv@~6.12.6: +ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.5, ajv@^6.12.6: version "6.12.6" resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz" integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== @@ -5382,7 +5258,7 @@ arg@^4.1.0: resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz" integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== -argparse@^1.0.7, argparse@~1.0.9: +argparse@^1.0.7: version "1.0.10" resolved "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz" integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== @@ -6298,15 +6174,6 @@ canvas-color-tracker@1: dependencies: tinycolor2 "^1.6.0" -capital-case@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/capital-case/-/capital-case-1.0.4.tgz#9d130292353c9249f6b00fa5852bee38a717e669" - integrity sha512-ds37W8CytHgwnhGGTi88pcPyR15qoNkOpYwmMMfnWqqWgESapLqvDx6huFjQ5vqWSn2Z06173XNA7LtMOeUh1A== - dependencies: - no-case "^3.0.4" - tslib "^2.0.3" - upper-case-first "^2.0.2" - cardboard-vr-display@^1.0.19: version "1.0.19" resolved "https://registry.npmjs.org/cardboard-vr-display/-/cardboard-vr-display-1.0.19.tgz" @@ -6353,24 +6220,6 @@ chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.1: ansi-styles "^4.1.0" supports-color "^7.1.0" -change-case@^4.1.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/change-case/-/change-case-4.1.2.tgz#fedfc5f136045e2398c0410ee441f95704641e12" - integrity sha512-bSxY2ws9OtviILG1EiY5K7NNxkqg/JnRnFxLtKQ96JaviiIxi7djMrSd0ECT9AC+lttClmYwKw53BWpOMblo7A== - dependencies: - camel-case "^4.1.2" - capital-case "^1.0.4" - constant-case "^3.0.4" - dot-case "^3.0.4" - header-case "^2.0.4" - no-case "^3.0.4" - param-case "^3.0.4" - pascal-case "^3.1.2" - path-case "^3.0.4" - sentence-case "^3.0.4" - snake-case "^3.0.4" - tslib "^2.0.3" - char-regex@^1.0.2: version "1.0.2" resolved 
"https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz" @@ -6622,11 +6471,6 @@ colorette@^2.0.10, colorette@^2.0.14, colorette@^2.0.16: resolved "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz" integrity sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w== -colors@~1.2.1: - version "1.2.5" - resolved "https://registry.yarnpkg.com/colors/-/colors-1.2.5.tgz#89c7ad9a374bc030df8013241f68136ed8835afc" - integrity sha512-erNRLao/Y3Fv54qUa0LBB+//Uf3YwMUmdJinN20yMXm9zdKKqH9wt7R9IIVZ+K7ShzfpLV/Zg8+VyrBJYB4lpg== - combined-stream@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" @@ -6649,11 +6493,6 @@ commander@2, commander@^2.20.0, commander@^2.20.3: resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz" integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== -commander@^10.0.0: - version "10.0.1" - resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" - integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== - commander@^7.0.0, commander@^7.2.0: version "7.2.0" resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz" @@ -6723,11 +6562,6 @@ compute-scroll-into-view@^3.0.2: resolved "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-3.0.3.tgz" integrity sha512-nadqwNxghAGTamwIqQSG433W6OADZx2vCo3UXHNrzTRHK/htu+7+L0zhjEoaeaQVNAi3YgqWDv8+tzf0hRfR+A== -computeds@^0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/computeds/-/computeds-0.0.1.tgz#215b08a4ba3e08a11ff6eee5d6d8d7166a97ce2e" - integrity sha512-7CEBgcMjVmitjYo5q8JTJVra6X5mQ20uTThdK+0kR7UEaDrAWEQcRiBtWJzga4eRpP6afNwwLsX2SET2JhVB1Q== - concat-map@0.0.1: version "0.0.1" resolved "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz" @@ -6743,15 +6577,6 @@ connect-history-api-fallback@^2.0.0: resolved "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz" integrity sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA== -constant-case@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/constant-case/-/constant-case-3.0.4.tgz#3b84a9aeaf4cf31ec45e6bf5de91bdfb0589faf1" - integrity sha512-I2hSBi7Vvs7BEuJDr5dDHfzb/Ruj3FyvFyh7KLilAjNQw3Be+xgqUBA2W6scVEcL0hL1dwPRtIqEPVUCKkSsyQ== - dependencies: - no-case "^3.0.4" - tslib "^2.0.3" - upper-case "^2.0.2" - content-disposition@0.5.4: version "0.5.4" resolved "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz" @@ -7394,11 +7219,6 @@ dayjs@^1.10.7, dayjs@^1.11.1: resolved "https://registry.npmjs.org/dayjs/-/dayjs-1.11.7.tgz" integrity sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ== -de-indent@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/de-indent/-/de-indent-1.0.2.tgz#b2038e846dc33baa5796128d0804b455b8c1e21d" - integrity sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg== - debounce@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/debounce/-/debounce-1.2.1.tgz#38881d8f4166a5c5848020c11827b834bcb3e0a5" @@ -7850,7 +7670,7 @@ entities@^2.0.0, entities@^2.2.0: resolved "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz" integrity 
sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== -entities@^4.2.0, entities@^4.4.0, entities@^4.5.0: +entities@^4.2.0, entities@^4.4.0: version "4.5.0" resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== @@ -8396,11 +8216,6 @@ estree-util-visit@^1.0.0: "@types/estree-jsx" "^1.0.0" "@types/unist" "^2.0.0" -estree-walker@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac" - integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w== - estree-walker@^3.0.0: version "3.0.3" resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-3.0.3.tgz#67c3e549ec402a487b4fc193d1953a524752340d" @@ -8819,15 +8634,6 @@ fs-extra@^10.0.0: jsonfile "^6.0.1" universalify "^2.0.0" -fs-extra@~7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9" - integrity sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw== - dependencies: - graceful-fs "^4.1.2" - jsonfile "^4.0.0" - universalify "^0.1.0" - fs-monkey@^1.0.3: version "1.0.3" resolved "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz" @@ -8982,7 +8788,7 @@ glob-to-regexp@^0.4.1: resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz" integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== -glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6, glob@^7.2.0: +glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6: version "7.2.3" resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz" integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== @@ -9380,14 +9186,6 @@ he@^1.2.0: resolved "https://registry.npmjs.org/he/-/he-1.2.0.tgz" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== -header-case@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/header-case/-/header-case-2.0.4.tgz#5a42e63b55177349cf405beb8d775acabb92c063" - integrity sha512-H/vuk5TEEVZwrR0lp2zed9OCo1uAILMlx0JEMgC26rzyJJ3N1v6XkwHHXJQdR2doSjcGPM6OKPYoJgf0plJ11Q== - dependencies: - capital-case "^1.0.4" - tslib "^2.0.3" - headers-polyfill@3.2.5: version "3.2.5" resolved "https://registry.yarnpkg.com/headers-polyfill/-/headers-polyfill-3.2.5.tgz#6e67d392c9d113d37448fe45014e0afdd168faed" @@ -9686,11 +9484,6 @@ import-fresh@^3.0.0, import-fresh@^3.2.1: parent-module "^1.0.0" resolve-from "^4.0.0" -import-lazy@~4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-4.0.0.tgz#e8eb627483a0a43da3c03f3e35548be5cb0cc153" - integrity sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw== - import-local@^3.0.2: version "3.1.0" resolved "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz" @@ -9911,13 +9704,6 @@ is-ci@^3.0.1: dependencies: ci-info "^3.2.0" -is-core-module@^2.1.0: - version "2.13.1" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" - integrity 
sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== - dependencies: - hasown "^2.0.0" - is-core-module@^2.11.0, is-core-module@^2.5.0, is-core-module@^2.9.0: version "2.12.0" resolved "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.0.tgz" @@ -10767,11 +10553,6 @@ jest@^27.5.1: import-local "^3.0.2" jest-cli "^27.5.1" -jju@~1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/jju/-/jju-1.4.0.tgz#a3abe2718af241a2b2904f84a625970f389ae32a" - integrity sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA== - js-base64@^3.7.2: version "3.7.5" resolved "https://registry.npmjs.org/js-base64/-/js-base64-3.7.5.tgz" @@ -10897,18 +10678,6 @@ json5@^1.0.2: dependencies: minimist "^1.2.0" -jsonc-parser@^3.0.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/jsonc-parser/-/jsonc-parser-3.2.0.tgz#31ff3f4c2b9793f89c67212627c51c6394f88e76" - integrity sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w== - -jsonfile@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" - integrity sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg== - optionalDependencies: - graceful-fs "^4.1.6" - jsonfile@^6.0.1: version "6.1.0" resolved "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz" @@ -10968,11 +10737,6 @@ klona@^2.0.4: resolved "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz" integrity sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA== -kolorist@^1.8.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/kolorist/-/kolorist-1.8.0.tgz#edddbbbc7894bc13302cdf740af6374d4a04743c" - integrity sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ== - language-subtag-registry@~0.3.2: version "0.3.22" resolved "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz" @@ -11199,12 +10963,7 @@ lodash.debounce@^4.0.8: resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz" integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== -lodash.get@^4.4.2: - version "4.4.2" - resolved "https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99" - integrity sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ== - -lodash.isequal@^4.0.0, lodash.isequal@^4.5.0: +lodash.isequal@^4.0.0: version "4.5.0" resolved "https://registry.yarnpkg.com/lodash.isequal/-/lodash.isequal-4.5.0.tgz#415c4478f2bcc30120c22ce10ed3226f7d3e18e0" integrity sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ== @@ -11234,7 +10993,7 @@ lodash.uniq@^4.5.0: resolved "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz" integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ== -lodash@4.17.21, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.7.0, lodash@~4.17.15: +lodash@4.17.21, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.7.0: version "4.17.21" resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz" integrity 
sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -12164,13 +11923,6 @@ minimatch@^8.0.2: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.3: - version "9.0.3" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" - integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg== - dependencies: - brace-expansion "^2.0.1" - minimist-options@4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz" @@ -12264,11 +12016,6 @@ msw@1.3.2: type-fest "^2.19.0" yargs "^17.3.1" -muggle-string@^0.3.1: - version "0.3.1" - resolved "https://registry.yarnpkg.com/muggle-string/-/muggle-string-0.3.1.tgz#e524312eb1728c63dd0b2ac49e3282e6ed85963a" - integrity sha512-ckmWDJjphvd/FvZawgygcUeQCxzvohjFO5RxTjj4eq8kw359gFF3E1brjfI+viLMxss5JrHTDRHZvu2/tuy0Qg== - multicast-dns@^7.2.5: version "7.2.5" resolved "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz" @@ -12944,19 +12691,6 @@ pascal-case@^3.1.2: no-case "^3.0.4" tslib "^2.0.3" -path-browserify@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd" - integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g== - -path-case@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/path-case/-/path-case-3.0.4.tgz#9168645334eb942658375c56f80b4c0cb5f82c6f" - integrity sha512-qO4qCFjXqVTrcbPt/hQfhTQ+VhFsqNKOPtytgNKkKxSoEp3XPUQ8ObFuePylOIok5gjn69ry8XiULxCwot3Wfg== - dependencies: - dot-case "^3.0.4" - tslib "^2.0.3" - path-exists@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz" @@ -12982,7 +12716,7 @@ path-key@^3.0.0, path-key@^3.1.0: resolved "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== -path-parse@^1.0.6, path-parse@^1.0.7: +path-parse@^1.0.7: version "1.0.7" resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== @@ -14854,23 +14588,6 @@ resolve@^2.0.0-next.4: path-parse "^1.0.7" supports-preserve-symlinks-flag "^1.0.0" -resolve@~1.19.0: - version "1.19.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.19.0.tgz#1af5bf630409734a067cae29318aac7fa29a267c" - integrity sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg== - dependencies: - is-core-module "^2.1.0" - path-parse "^1.0.6" - -resolve@~1.22.1: - version "1.22.8" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" - integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== - dependencies: - is-core-module "^2.13.0" - path-parse "^1.0.7" - supports-preserve-symlinks-flag "^1.0.0" - restore-cursor@^3.1.0: version "3.1.0" resolved "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz" @@ -15065,7 +14782,7 @@ selfsigned@^2.1.1: dependencies: node-forge "^1" -"semver@2 || 3 || 4 || 5", semver@7.3.7, semver@7.5.4, semver@7.x, semver@^5.5.0, semver@^5.6.0, semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0, semver@^6.3.1, semver@^7.2.1, semver@^7.3.2, 
semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.4, semver@~7.5.4: +"semver@2 || 3 || 4 || 5", semver@7.3.7, semver@7.5.4, semver@7.x, semver@^5.5.0, semver@^5.6.0, semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0, semver@^6.3.1, semver@^7.2.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== @@ -15091,15 +14808,6 @@ send@0.19.0: range-parser "~1.2.1" statuses "2.0.1" -sentence-case@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/sentence-case/-/sentence-case-3.0.4.tgz#3645a7b8c117c787fde8702056225bb62a45131f" - integrity sha512-8LS0JInaQMCRoQ7YUytAo/xUu5W2XnQxV2HI/6uM6U7CITS1RqPElr30V6uIqyMKM9lJGRVFy5/4CuzcixNYSg== - dependencies: - no-case "^3.0.4" - tslib "^2.0.3" - upper-case-first "^2.0.2" - serialize-javascript@^5.0.1: version "5.0.1" resolved "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz" @@ -15305,14 +15013,6 @@ slice-ansi@^5.0.0: ansi-styles "^6.0.0" is-fullwidth-code-point "^4.0.0" -snake-case@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/snake-case/-/snake-case-3.0.4.tgz#4f2bbd568e9935abdfd593f34c691dadb49c452c" - integrity sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg== - dependencies: - dot-case "^3.0.4" - tslib "^2.0.3" - sockjs@^0.3.24: version "0.3.24" resolved "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz" @@ -15541,11 +15241,6 @@ string-argv@^0.3.1: resolved "https://registry.npmjs.org/string-argv/-/string-argv-0.3.1.tgz" integrity sha512-a1uQGz7IyVy9YwhqjZIZu1c8JO8dNIe20xBmSS6qu9kv++k3JGzCVmprbNN5Kn+BgzD5E7YYwg1CcjuJMRNsvg== -string-argv@~0.3.1: - version "0.3.2" - resolved "https://registry.yarnpkg.com/string-argv/-/string-argv-0.3.2.tgz#2b6d0ef24b656274d957d54e0a4bbf6153dc02b6" - integrity sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q== - string-convert@^0.2.0: version "0.2.1" resolved "https://registry.npmjs.org/string-convert/-/string-convert-0.2.1.tgz" @@ -15690,26 +15385,11 @@ strip-indent@^3.0.0: dependencies: min-indent "^1.0.0" -strip-json-comments@^3.1.0, strip-json-comments@^3.1.1, strip-json-comments@~3.1.1: +strip-json-comments@^3.1.0, strip-json-comments@^3.1.1: version "3.1.1" resolved "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz" integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== -style-dictionary@3.8.0: - version "3.8.0" - resolved "https://registry.yarnpkg.com/style-dictionary/-/style-dictionary-3.8.0.tgz#7cb8d64360c53431f768d44def665f61e971a73e" - integrity sha512-wHlB/f5eO3mDcYv6WtOz6gvQC477jBKrwuIXe+PtHskTCBsJdAOvL8hCquczJxDui2TnwpeNE+2msK91JJomZg== - dependencies: - chalk "^4.0.0" - change-case "^4.1.2" - commander "^8.3.0" - fs-extra "^10.0.0" - glob "^7.2.0" - json5 "^2.2.2" - jsonc-parser "^3.0.0" - lodash "^4.17.15" - tinycolor2 "^1.4.1" - style-loader@1.3.0: version "1.3.0" resolved "https://registry.npmjs.org/style-loader/-/style-loader-1.3.0.tgz" @@ -16036,7 +15716,7 @@ tiny-warning@^1.0.0: resolved "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz" integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA== -tinycolor2@1, tinycolor2@1.6.0, 
tinycolor2@^1.4.1, tinycolor2@^1.6.0: +tinycolor2@1, tinycolor2@1.6.0, tinycolor2@^1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.6.0.tgz#f98007460169b0263b97072c5ae92484ce02d09e" integrity sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw== @@ -16359,11 +16039,6 @@ typescript-plugin-css-modules@5.0.1: stylus "^0.59.0" tsconfig-paths "^4.1.2" -typescript@5.3.3: - version "5.3.3" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.3.3.tgz#b3ce6ba258e72e6305ba66f5c9b452aaee3ffe37" - integrity sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw== - typescript@^4.0.5, typescript@^4.4.3: version "4.9.5" resolved "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz" @@ -16520,11 +16195,6 @@ unist-util-visit@^5.0.0: unist-util-is "^6.0.0" unist-util-visit-parents "^6.0.0" -universalify@^0.1.0: - version "0.1.2" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" - integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== - universalify@^0.2.0: version "0.2.0" resolved "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz" @@ -16571,20 +16241,6 @@ uplot@1.6.31: resolved "https://registry.yarnpkg.com/uplot/-/uplot-1.6.31.tgz#092a4b586590e9794b679e1df885a15584b03698" integrity sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w== -upper-case-first@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/upper-case-first/-/upper-case-first-2.0.2.tgz#992c3273f882abd19d1e02894cc147117f844324" - integrity sha512-514ppYHBaKwfJRK/pNC6c/OxfGa0obSnAl106u97Ed0I625Nin96KAjttZF6ZL3e1XLtphxnqrOi9iWgm+u+bg== - dependencies: - tslib "^2.0.3" - -upper-case@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-2.0.2.tgz#d89810823faab1df1549b7d97a76f8662bae6f7a" - integrity sha512-KgdgDGJt2TpuwBUIjgG6lzw2GWFRCW9Qkfkiv0DxqHHLYJHmtmdUIKcZd8rHgFSjopVTlw6ggzCm1b8MFQwikg== - dependencies: - tslib "^2.0.3" - uri-js@^4.2.2: version "4.4.1" resolved "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz" @@ -16693,11 +16349,6 @@ validate-npm-package-license@^3.0.1: spdx-correct "^3.0.0" spdx-expression-parse "^3.0.0" -validator@^13.7.0: - version "13.11.0" - resolved "https://registry.yarnpkg.com/validator/-/validator-13.11.0.tgz#23ab3fd59290c61248364eabf4067f04955fbb1b" - integrity sha512-Ii+sehpSfZy+At5nPdnyMhx78fEoPDkR2XW/zimHEL3MyGJQOCQ7WeP20jPYRz7ZCpcKLB21NxuXHF3bxjStBQ== - value-equal@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz" @@ -16759,40 +16410,11 @@ vfile@^6.0.0: unist-util-stringify-position "^4.0.0" vfile-message "^4.0.0" -vite-plugin-dts@^3.6.4: - version "3.7.0" - resolved "https://registry.yarnpkg.com/vite-plugin-dts/-/vite-plugin-dts-3.7.0.tgz#654ee7c38c0cdd4589b9bc198a264f34172bd870" - integrity sha512-np1uPaYzu98AtPReB8zkMnbjwcNHOABsLhqVOf81b3ol9b5M2wPcAVs8oqPnOpr6Us+7yDXVauwkxsk5+ldmRA== - dependencies: - "@microsoft/api-extractor" "7.39.0" - "@rollup/pluginutils" "^5.1.0" - "@vue/language-core" "^1.8.26" - debug "^4.3.4" - kolorist "^1.8.0" - vue-tsc "^1.8.26" - void-elements@3.1.0: version "3.1.0" resolved "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz" integrity sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w== 
-vue-template-compiler@^2.7.14: - version "2.7.16" - resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.7.16.tgz#c81b2d47753264c77ac03b9966a46637482bb03b" - integrity sha512-AYbUWAJHLGGQM7+cNTELw+KsOG9nl2CnSv467WobS5Cv9uk3wFcnr1Etsz2sEIHEZvw1U+o9mRlEO6QbZvUPGQ== - dependencies: - de-indent "^1.0.2" - he "^1.2.0" - -vue-tsc@^1.8.26: - version "1.8.27" - resolved "https://registry.yarnpkg.com/vue-tsc/-/vue-tsc-1.8.27.tgz#feb2bb1eef9be28017bb9e95e2bbd1ebdd48481c" - integrity sha512-WesKCAZCRAbmmhuGl3+VrdWItEvfoFIPXOvUJkjULi+x+6G/Dy69yO3TBRJDr9eUlmsNAwVmxsNZxvHKzbkKdg== - dependencies: - "@volar/typescript" "~1.11.1" - "@vue/language-core" "1.8.27" - semver "^7.5.4" - w3c-hr-time@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz" @@ -17392,17 +17014,6 @@ yocto-queue@^1.0.0: resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.0.0.tgz#7f816433fb2cbc511ec8bf7d263c3b58a1a3c251" integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g== -z-schema@~5.0.2: - version "5.0.6" - resolved "https://registry.yarnpkg.com/z-schema/-/z-schema-5.0.6.tgz#46d6a687b15e4a4369e18d6cb1c7b8618fc256c5" - integrity sha512-+XR1GhnWklYdfr8YaZv/iu+vY+ux7V5DS5zH1DQf6bO5ufrt/5cgNhVO5qyhsjFXvsqQb/f08DWE9b6uPscyAg== - dependencies: - lodash.get "^4.4.2" - lodash.isequal "^4.5.0" - validator "^13.7.0" - optionalDependencies: - commander "^10.0.0" - zwitch@^2.0.0, zwitch@^2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-2.0.4.tgz#c827d4b0acb76fc3e685a4c6ec2902d51070e9d7" From 939e2a3570280e7d9cf736a93595271b9cd6e0e4 Mon Sep 17 00:00:00 2001 From: Shaheer Kochai Date: Mon, 11 Nov 2024 12:04:20 +0430 Subject: [PATCH 19/30] fix: fix the issue of adding new query in new alert page changing the data source (#6286) Co-authored-by: Srikanth Chekuri --- frontend/src/container/FormAlertRules/index.tsx | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/frontend/src/container/FormAlertRules/index.tsx b/frontend/src/container/FormAlertRules/index.tsx index 05c4149d73..5572af8365 100644 --- a/frontend/src/container/FormAlertRules/index.tsx +++ b/frontend/src/container/FormAlertRules/index.tsx @@ -53,6 +53,7 @@ import { QueryFunctionProps, } from 'types/api/queryBuilder/queryBuilderData'; import { EQueryType } from 'types/common/dashboard'; +import { DataSource } from 'types/common/queryBuilder'; import { GlobalReducer } from 'types/reducer/globalTime'; import BasicInfo from './BasicInfo'; @@ -105,6 +106,11 @@ function FormAlertRules({ const location = useLocation(); const queryParams = new URLSearchParams(location.search); + const dataSource = useMemo( + () => urlQuery.get(QueryParams.alertType) as DataSource, + [urlQuery], + ); + // In case of alert the panel types should always be "Graph" only const panelType = PANEL_TYPES.TIME_SERIES; @@ -114,13 +120,12 @@ function FormAlertRules({ handleSetQueryData, handleRunQuery, handleSetConfig, - initialDataSource, redirectWithQueryBuilderData, } = useQueryBuilder(); useEffect(() => { - handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, initialDataSource); - }, [handleSetConfig, initialDataSource, panelType]); + handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, dataSource); + }, [handleSetConfig, dataSource, panelType]); // use query client const ruleCache = useQueryClient(); From 577a169508481a9214460db12b3e6274730ed9cc Mon Sep 17 00:00:00 2001 From: Shaheer Kochai Date: Mon, 11 Nov 2024 
23:43:07 +0430 Subject: [PATCH 20/30] feat: alert rename interaction (#6208) * feat: alert rename interaction * feat: add support for enter and escape shortcuts to submit and cancel rename * chore: add missing alert field * chore: update the style similar to dashboard rename * refactor: remove buttonProps * chore: add missing alert property to fix the build --------- Co-authored-by: Srikanth Chekuri --- frontend/src/constants/reactQueryKeys.ts | 1 + .../src/container/CreateAlertRule/defaults.ts | 5 + .../ActionButtons/ActionButtons.tsx | 161 ++++++++++-------- .../ActionButtons/RenameModal.styles.scss | 138 +++++++++++++++ .../AlertHeader/ActionButtons/RenameModal.tsx | 95 +++++++++++ .../AlertDetails/AlertHeader/AlertHeader.tsx | 16 +- frontend/src/pages/AlertDetails/hooks.tsx | 38 +++++ frontend/src/types/api/alerts/def.ts | 2 +- 8 files changed, 379 insertions(+), 77 deletions(-) create mode 100644 frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.styles.scss create mode 100644 frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.tsx diff --git a/frontend/src/constants/reactQueryKeys.ts b/frontend/src/constants/reactQueryKeys.ts index ec2353abbf..1dbacde963 100644 --- a/frontend/src/constants/reactQueryKeys.ts +++ b/frontend/src/constants/reactQueryKeys.ts @@ -18,4 +18,5 @@ export const REACT_QUERY_KEY = { GET_ALL_ALLERTS: 'GET_ALL_ALLERTS', REMOVE_ALERT_RULE: 'REMOVE_ALERT_RULE', DUPLICATE_ALERT_RULE: 'DUPLICATE_ALERT_RULE', + UPDATE_ALERT_RULE: 'UPDATE_ALERT_RULE', }; diff --git a/frontend/src/container/CreateAlertRule/defaults.ts b/frontend/src/container/CreateAlertRule/defaults.ts index 34058a06f6..bac7c90865 100644 --- a/frontend/src/container/CreateAlertRule/defaults.ts +++ b/frontend/src/container/CreateAlertRule/defaults.ts @@ -57,6 +57,7 @@ export const alertDefaults: AlertDef = { }, annotations: defaultAnnotations, evalWindow: defaultEvalWindow, + alert: '', }; export const anamolyAlertDefaults: AlertDef = { @@ -101,6 +102,7 @@ export const anamolyAlertDefaults: AlertDef = { }, annotations: defaultAnnotations, evalWindow: defaultEvalWindow, + alert: '', }; export const logAlertDefaults: AlertDef = { @@ -132,6 +134,7 @@ export const logAlertDefaults: AlertDef = { }, annotations: defaultAnnotations, evalWindow: defaultEvalWindow, + alert: '', }; export const traceAlertDefaults: AlertDef = { @@ -163,6 +166,7 @@ export const traceAlertDefaults: AlertDef = { }, annotations: defaultAnnotations, evalWindow: defaultEvalWindow, + alert: '', }; export const exceptionAlertDefaults: AlertDef = { @@ -194,6 +198,7 @@ export const exceptionAlertDefaults: AlertDef = { }, annotations: defaultAnnotations, evalWindow: defaultEvalWindow, + alert: '', }; export const ALERTS_VALUES_MAP: Record = { diff --git a/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/ActionButtons.tsx b/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/ActionButtons.tsx index 2f37c4fc9d..00987a0a66 100644 --- a/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/ActionButtons.tsx +++ b/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/ActionButtons.tsx @@ -2,82 +2,90 @@ import './ActionButtons.styles.scss'; import { Color } from '@signozhq/design-tokens'; import { Divider, Dropdown, MenuProps, Switch, Tooltip } from 'antd'; -import { QueryParams } from 'constants/query'; -import ROUTES from 'constants/routes'; import { useIsDarkMode } from 'hooks/useDarkMode'; -import useUrlQuery from 'hooks/useUrlQuery'; -import history from 'lib/history'; import { Copy, 
Ellipsis, PenLine, Trash2 } from 'lucide-react'; import { useAlertRuleDelete, useAlertRuleDuplicate, useAlertRuleStatusToggle, + useAlertRuleUpdate, } from 'pages/AlertDetails/hooks'; import CopyToClipboard from 'periscope/components/CopyToClipboard'; import { useAlertRule } from 'providers/Alert'; -import React, { useEffect, useState } from 'react'; +import { useCallback, useEffect, useState } from 'react'; import { CSSProperties } from 'styled-components'; import { AlertDef } from 'types/api/alerts/def'; import { AlertHeaderProps } from '../AlertHeader'; +import RenameModal from './RenameModal'; const menuItemStyle: CSSProperties = { fontSize: '14px', letterSpacing: '0.14px', }; + function AlertActionButtons({ ruleId, alertDetails, + setUpdatedName, }: { ruleId: string; alertDetails: AlertHeaderProps['alertDetails']; + setUpdatedName: (name: string) => void; }): JSX.Element { const { alertRuleState, setAlertRuleState } = useAlertRule(); - const { handleAlertStateToggle } = useAlertRuleStatusToggle({ ruleId }); + const [intermediateName, setIntermediateName] = useState( + alertDetails.alert, + ); + const [isRenameAlertOpen, setIsRenameAlertOpen] = useState(false); + const isDarkMode = useIsDarkMode(); + const { handleAlertStateToggle } = useAlertRuleStatusToggle({ ruleId }); const { handleAlertDuplicate } = useAlertRuleDuplicate({ alertDetails: (alertDetails as unknown) as AlertDef, }); const { handleAlertDelete } = useAlertRuleDelete({ ruleId: Number(ruleId) }); + const { handleAlertUpdate, isLoading } = useAlertRuleUpdate({ + alertDetails: (alertDetails as unknown) as AlertDef, + setUpdatedName, + intermediateName, + }); - const params = useUrlQuery(); - - const handleRename = React.useCallback(() => { - params.set(QueryParams.ruleId, String(ruleId)); - history.push(`${ROUTES.ALERT_OVERVIEW}?${params.toString()}`); - }, [params, ruleId]); - - const menu: MenuProps['items'] = React.useMemo( - () => [ - { - key: 'rename-rule', - label: 'Rename', - icon: , - onClick: (): void => handleRename(), - style: menuItemStyle, - }, - { - key: 'duplicate-rule', - label: 'Duplicate', - icon: , - onClick: (): void => handleAlertDuplicate(), - style: menuItemStyle, - }, - { type: 'divider' }, - { - key: 'delete-rule', - label: 'Delete', - icon: , - onClick: (): void => handleAlertDelete(), - style: { - ...menuItemStyle, - color: Color.BG_CHERRY_400, - }, + const handleRename = useCallback(() => { + setIsRenameAlertOpen(true); + }, []); + + const onNameChangeHandler = useCallback(() => { + handleAlertUpdate(); + setIsRenameAlertOpen(false); + }, [handleAlertUpdate]); + + const menuItems: MenuProps['items'] = [ + { + key: 'rename-rule', + label: 'Rename', + icon: , + onClick: handleRename, + style: menuItemStyle, + }, + { + key: 'duplicate-rule', + label: 'Duplicate', + icon: , + onClick: handleAlertDuplicate, + style: menuItemStyle, + }, + { + key: 'delete-rule', + label: 'Delete', + icon: , + onClick: handleAlertDelete, + style: { + ...menuItemStyle, + color: Color.BG_CHERRY_400, }, - ], - [handleAlertDelete, handleAlertDuplicate, handleRename], - ); - const isDarkMode = useIsDarkMode(); + }, + ]; // state for immediate UI feedback rather than waiting for onSuccess of handleAlertStateTiggle to updating the alertRuleState const [isAlertRuleDisabled, setIsAlertRuleDisabled] = useState< @@ -95,35 +103,48 @@ function AlertActionButtons({ // eslint-disable-next-line react-hooks/exhaustive-deps useEffect(() => (): void => setAlertRuleState(undefined), []); + const toggleAlertRule = useCallback(() => { + 
setIsAlertRuleDisabled((prev) => !prev);
+		handleAlertStateToggle();
+	}, [handleAlertStateToggle]);
+
 	return (
-
- - {isAlertRuleDisabled !== undefined && ( - { - setIsAlertRuleDisabled((prev) => !prev); - handleAlertStateToggle(); - }} - checked={!isAlertRuleDisabled} - /> - )} - - - - - - - - + <> +
+ + {isAlertRuleDisabled !== undefined && ( + + )} - -
+ + + + + + + + + +
+ + + ); } diff --git a/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.styles.scss b/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.styles.scss new file mode 100644 index 0000000000..d3552d8143 --- /dev/null +++ b/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.styles.scss @@ -0,0 +1,138 @@ +.rename-alert { + .ant-modal-content { + width: 384px; + flex-shrink: 0; + border-radius: 4px; + border: 1px solid var(--bg-slate-500); + background: var(--bg-ink-400); + box-shadow: 0px -4px 16px 2px rgba(0, 0, 0, 0.2); + padding: 0px; + + .ant-modal-header { + height: 52px; + padding: 16px; + background: var(--bg-ink-400); + border-bottom: 1px solid var(--bg-slate-500); + margin-bottom: 0px; + .ant-modal-title { + color: var(--bg-vanilla-100); + font-family: Inter; + font-size: 14px; + font-style: normal; + font-weight: 400; + line-height: 20px; /* 142.857% */ + width: 349px; + height: 20px; + } + } + + .ant-modal-body { + padding: 16px; + + .alert-content { + display: flex; + flex-direction: column; + gap: 8px; + + .name-text { + color: var(--bg-vanilla-100); + font-family: Inter; + font-size: 14px; + font-style: normal; + font-weight: 500; + line-height: 20px; /* 142.857% */ + } + + .alert-name-input { + display: flex; + padding: 6px 6px 6px 8px; + align-items: center; + gap: 4px; + align-self: stretch; + border-radius: 0px 2px 2px 0px; + border: 1px solid var(--bg-slate-400); + background: var(--bg-ink-300); + } + } + } + + .ant-modal-footer { + padding: 16px; + margin-top: 0px; + .alert-rename { + display: flex; + flex-direction: row-reverse; + gap: 12px; + + .cancel-btn { + display: flex; + padding: 4px 8px; + justify-content: center; + align-items: center; + gap: 4px; + border-radius: 2px; + background: var(--bg-slate-500); + + .ant-btn-icon { + margin-inline-end: 0px; + } + } + + .rename-btn { + display: flex; + align-items: center; + display: flex; + padding: 4px 8px; + justify-content: center; + align-items: center; + gap: 4px; + border-radius: 2px; + background: var(--bg-robin-500); + + .ant-btn-icon { + margin-inline-end: 0px; + } + } + } + } + } +} + +.lightMode { + .rename-alert { + .ant-modal-content { + border: 1px solid var(--bg-vanilla-300); + background: var(--bg-vanilla-100); + + .ant-modal-header { + background: var(--bg-vanilla-100); + border-bottom: 1px solid var(--bg-vanilla-300); + + .ant-modal-title { + color: var(--bg-ink-300); + } + } + + .ant-modal-body { + .alert-content { + .name-text { + color: var(--bg-ink-300); + } + + .alert-name-input { + border: 1px solid var(--bg-vanilla-300); + background: var(--bg-vanilla-100); + } + } + } + + .ant-modal-footer { + .alert-rename { + .cancel-btn { + background: var(--bg-vanilla-300); + } + } + } + } + } +} diff --git a/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.tsx b/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.tsx new file mode 100644 index 0000000000..ce73260fb3 --- /dev/null +++ b/frontend/src/pages/AlertDetails/AlertHeader/ActionButtons/RenameModal.tsx @@ -0,0 +1,95 @@ +import './RenameModal.styles.scss'; + +import { Button, Input, InputRef, Modal, Typography } from 'antd'; +import { Check, X } from 'lucide-react'; +import { useCallback, useEffect, useRef } from 'react'; + +type Props = { + isOpen: boolean; + setIsOpen: (isOpen: boolean) => void; + onNameChangeHandler: () => void; + isLoading: boolean; + intermediateName: string; + setIntermediateName: (name: string) => void; +}; + +function RenameModal({ + 
isOpen, + setIsOpen, + onNameChangeHandler, + isLoading, + intermediateName, + setIntermediateName, +}: Props): JSX.Element { + const inputRef = useRef(null); + + useEffect(() => { + if (isOpen && inputRef.current) { + inputRef.current.focus(); + } + }, [isOpen]); + + const handleClose = useCallback((): void => setIsOpen(false), [setIsOpen]); + + useEffect(() => { + const handleKeyDown = (e: KeyboardEvent): void => { + if (isOpen) { + if (e.key === 'Enter') { + onNameChangeHandler(); + } else if (e.key === 'Escape') { + handleClose(); + } + } + }; + + document.addEventListener('keydown', handleKeyDown); + + return (): void => { + document.removeEventListener('keydown', handleKeyDown); + }; + }, [isOpen, onNameChangeHandler, handleClose]); + + return ( + + + + + } + > +
+					<Typography.Text className="name-text">Enter a new name</Typography.Text>
+					<Input
+						ref={inputRef}
+						className="alert-name-input"
+						value={intermediateName}
+						onChange={(e): void => setIntermediateName(e.target.value)}
+					/>
+
+ ); +} + +export default RenameModal; diff --git a/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx b/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx index 04edd6a8b0..f617a6d78e 100644 --- a/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx +++ b/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx @@ -2,7 +2,7 @@ import './AlertHeader.styles.scss'; import LineClampedText from 'periscope/components/LineClampedText/LineClampedText'; import { useAlertRule } from 'providers/Alert'; -import { useMemo } from 'react'; +import { useMemo, useState } from 'react'; import AlertActionButtons from './ActionButtons/ActionButtons'; import AlertLabels from './AlertLabels/AlertLabels'; @@ -19,7 +19,9 @@ export type AlertHeaderProps = { }; }; function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element { - const { state, alert, labels } = alertDetails; + const { state, alert: alertName, labels } = alertDetails; + const { alertRuleState } = useAlertRule(); + const [updatedName, setUpdatedName] = useState(alertName); const labelsWithoutSeverity = useMemo( () => @@ -29,8 +31,6 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element { [labels], ); - const { alertRuleState } = useAlertRule(); - return (
@@ -38,7 +38,7 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
- +
@@ -54,7 +54,11 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
- +
); diff --git a/frontend/src/pages/AlertDetails/hooks.tsx b/frontend/src/pages/AlertDetails/hooks.tsx index b4d7674c67..c159d2169b 100644 --- a/frontend/src/pages/AlertDetails/hooks.tsx +++ b/frontend/src/pages/AlertDetails/hooks.tsx @@ -467,6 +467,44 @@ export const useAlertRuleDuplicate = ({ return { handleAlertDuplicate }; }; +export const useAlertRuleUpdate = ({ + alertDetails, + setUpdatedName, + intermediateName, +}: { + alertDetails: AlertDef; + setUpdatedName: (name: string) => void; + intermediateName: string; +}): { + handleAlertUpdate: () => void; + isLoading: boolean; +} => { + const { notifications } = useNotifications(); + const handleError = useAxiosError(); + + const { mutate: updateAlertRule, isLoading } = useMutation( + [REACT_QUERY_KEY.UPDATE_ALERT_RULE, alertDetails.id], + save, + { + onMutate: () => setUpdatedName(intermediateName), + onSuccess: () => + notifications.success({ message: 'Alert renamed successfully' }), + onError: (error) => { + setUpdatedName(alertDetails.alert); + handleError(error); + }, + }, + ); + + const handleAlertUpdate = (): void => { + updateAlertRule({ + data: { ...alertDetails, alert: intermediateName }, + id: alertDetails.id, + }); + }; + + return { handleAlertUpdate, isLoading }; +}; export const useAlertRuleDelete = ({ ruleId, diff --git a/frontend/src/types/api/alerts/def.ts b/frontend/src/types/api/alerts/def.ts index 40a6036411..3891c1b123 100644 --- a/frontend/src/types/api/alerts/def.ts +++ b/frontend/src/types/api/alerts/def.ts @@ -19,7 +19,7 @@ export const defaultSeasonality = 'hourly'; export interface AlertDef { id?: number; alertType?: string; - alert?: string; + alert: string; ruleType?: string; frequency?: string; condition: RuleCondition; From e974e9d47f1c1960d551d8c730bc293eb1eb77d1 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Tue, 12 Nov 2024 01:40:10 +0530 Subject: [PATCH 21/30] feat: consume the new licenses v3 structure. 
(#6341) * feat: setup for licenses v3 integration * feat: added some more logic * feat: validator changes * chore: added a couple of todos * feat: added config parameter for licenses v3 and the boot option * feat: some typo fix * feat: added refresh licenses handler * feat: handle the start manager license activation * chore: text updates * feat: added list licenses call * chore: refactor the entire code to cleanup interfaces * fix: nil pointer error * chore: some minor edits * feat: model changes * feat: model changes * fix: utilise factory pattern * feat: added default basic plan * chore: added test cases for new license function * feat: added more test cases * chore: make the licenses id not null * feat: cosmetic changes * feat: cosmetic changes * feat: update zeus URL * chore: license testing fixes * feat: added license status and category handling for query-service * chore: added v3 support in v2 endpoint * chore: http response codes and some code cleanup * chore: added detailed test cases * chore: address review comments * chore: some misc cleanup --- ee/query-service/app/api/api.go | 17 ++ ee/query-service/app/api/license.go | 101 +++++++++- ee/query-service/app/server.go | 4 +- ee/query-service/constants/constants.go | 1 + .../integrations/signozio/response.go | 5 + .../integrations/signozio/signozio.go | 61 +++++- ee/query-service/license/db.go | 118 +++++++++++ ee/query-service/license/manager.go | 185 +++++++++++++++++- ee/query-service/license/sqlite/init.go | 11 ++ ee/query-service/main.go | 3 + ee/query-service/model/errors.go | 7 + ee/query-service/model/license.go | 143 ++++++++++++++ ee/query-service/model/license_test.go | 170 ++++++++++++++++ ee/query-service/model/plans.go | 11 ++ pkg/query-service/app/http_handler.go | 5 + 15 files changed, 830 insertions(+), 12 deletions(-) create mode 100644 ee/query-service/model/license_test.go diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go index 82557705fd..4291b3f488 100644 --- a/ee/query-service/app/api/api.go +++ b/ee/query-service/app/api/api.go @@ -40,6 +40,7 @@ type APIHandlerOptions struct { // Querier Influx Interval FluxInterval time.Duration UseLogsNewSchema bool + UseLicensesV3 bool } type APIHandler struct { @@ -65,6 +66,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) { Cache: opts.Cache, FluxInterval: opts.FluxInterval, UseLogsNewSchema: opts.UseLogsNewSchema, + UseLicensesV3: opts.UseLicensesV3, }) if err != nil { @@ -173,10 +175,25 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew router.HandleFunc("/api/v1/dashboards/{uuid}/lock", am.EditAccess(ah.lockDashboard)).Methods(http.MethodPut) router.HandleFunc("/api/v1/dashboards/{uuid}/unlock", am.EditAccess(ah.unlockDashboard)).Methods(http.MethodPut) + // v2 router.HandleFunc("/api/v2/licenses", am.ViewAccess(ah.listLicensesV2)). Methods(http.MethodGet) + // v3 + router.HandleFunc("/api/v3/licenses", + am.ViewAccess(ah.listLicensesV3)). + Methods(http.MethodGet) + + router.HandleFunc("/api/v3/licenses", + am.AdminAccess(ah.applyLicenseV3)). + Methods(http.MethodPost) + + router.HandleFunc("/api/v3/licenses", + am.AdminAccess(ah.refreshLicensesV3)). 
+ Methods(http.MethodPut) + + // v4 router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost) // Gateway diff --git a/ee/query-service/app/api/license.go b/ee/query-service/app/api/license.go index 51cfddefb1..0cb7fa2bab 100644 --- a/ee/query-service/app/api/license.go +++ b/ee/query-service/app/api/license.go @@ -9,6 +9,7 @@ import ( "go.signoz.io/signoz/ee/query-service/constants" "go.signoz.io/signoz/ee/query-service/model" + "go.signoz.io/signoz/pkg/http/render" "go.uber.org/zap" ) @@ -59,6 +60,21 @@ type billingDetails struct { } `json:"data"` } +type ApplyLicenseRequest struct { + LicenseKey string `json:"key"` +} + +type ListLicenseResponse map[string]interface{} + +func convertLicenseV3ToListLicenseResponse(licensesV3 []*model.LicenseV3) []ListLicenseResponse { + listLicenses := []ListLicenseResponse{} + + for _, license := range licensesV3 { + listLicenses = append(listLicenses, license.Data) + } + return listLicenses +} + func (ah *APIHandler) listLicenses(w http.ResponseWriter, r *http.Request) { licenses, apiError := ah.LM().GetLicenses(context.Background()) if apiError != nil { @@ -88,6 +104,51 @@ func (ah *APIHandler) applyLicense(w http.ResponseWriter, r *http.Request) { ah.Respond(w, license) } +func (ah *APIHandler) listLicensesV3(w http.ResponseWriter, r *http.Request) { + licenses, apiError := ah.LM().GetLicensesV3(r.Context()) + + if apiError != nil { + RespondError(w, apiError, nil) + return + } + + ah.Respond(w, convertLicenseV3ToListLicenseResponse(licenses)) +} + +// this function is called by zeus when inserting licenses in the query-service +func (ah *APIHandler) applyLicenseV3(w http.ResponseWriter, r *http.Request) { + var licenseKey ApplyLicenseRequest + + if err := json.NewDecoder(r.Body).Decode(&licenseKey); err != nil { + RespondError(w, model.BadRequest(err), nil) + return + } + + if licenseKey.LicenseKey == "" { + RespondError(w, model.BadRequest(fmt.Errorf("license key is required")), nil) + return + } + + _, apiError := ah.LM().ActivateV3(r.Context(), licenseKey.LicenseKey) + if apiError != nil { + RespondError(w, apiError, nil) + return + } + + render.Success(w, http.StatusAccepted, nil) +} + +func (ah *APIHandler) refreshLicensesV3(w http.ResponseWriter, r *http.Request) { + + apiError := ah.LM().RefreshLicense(r.Context()) + if apiError != nil { + RespondError(w, apiError, nil) + return + } + + render.Success(w, http.StatusNoContent, nil) +} + func (ah *APIHandler) checkout(w http.ResponseWriter, r *http.Request) { type checkoutResponse struct { @@ -154,11 +215,45 @@ func (ah *APIHandler) getBilling(w http.ResponseWriter, r *http.Request) { ah.Respond(w, billingResponse.Data) } +func convertLicenseV3ToLicenseV2(licenses []*model.LicenseV3) []model.License { + licensesV2 := []model.License{} + for _, l := range licenses { + licenseV2 := model.License{ + Key: l.Key, + ActivationId: "", + PlanDetails: "", + FeatureSet: l.Features, + ValidationMessage: "", + IsCurrent: l.IsCurrent, + LicensePlan: model.LicensePlan{ + PlanKey: l.PlanName, + ValidFrom: l.ValidFrom, + ValidUntil: l.ValidUntil, + Status: l.Status}, + } + licensesV2 = append(licensesV2, licenseV2) + } + return licensesV2 +} + func (ah *APIHandler) listLicensesV2(w http.ResponseWriter, r *http.Request) { - licenses, apiError := ah.LM().GetLicenses(context.Background()) - if apiError != nil { - RespondError(w, apiError, nil) + var licenses []model.License + + if ah.UseLicensesV3 { + licensesV3, err := ah.LM().GetLicensesV3(r.Context()) + if err != nil { + 
RespondError(w, err, nil)
+			return
+		}
+		licenses = convertLicenseV3ToLicenseV2(licensesV3)
+	} else {
+		_licenses, apiError := ah.LM().GetLicenses(r.Context())
+		if apiError != nil {
+			RespondError(w, apiError, nil)
+			return
+		}
+		licenses = _licenses
 	}
 
 	resp := model.Licenses{
diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go
index 1c44338a77..a8acbc46e9 100644
--- a/ee/query-service/app/server.go
+++ b/ee/query-service/app/server.go
@@ -77,6 +77,7 @@ type ServerOptions struct {
 	Cluster          string
 	GatewayUrl       string
 	UseLogsNewSchema bool
+	UseLicensesV3    bool
 }
 
 // Server runs HTTP api service
@@ -133,7 +134,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}
 
 	// initiate license manager
-	lm, err := licensepkg.StartManager("sqlite", localDB)
+	lm, err := licensepkg.StartManager("sqlite", localDB, serverOptions.UseLicensesV3)
 	if err != nil {
 		return nil, err
 	}
@@ -269,6 +270,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		FluxInterval:     fluxInterval,
 		Gateway:          gatewayProxy,
 		UseLogsNewSchema: serverOptions.UseLogsNewSchema,
+		UseLicensesV3:    serverOptions.UseLicensesV3,
 	}
 
 	apiHandler, err := api.NewAPIHandler(apiOpts)
diff --git a/ee/query-service/constants/constants.go b/ee/query-service/constants/constants.go
index c1baa6320b..0931fd01fc 100644
--- a/ee/query-service/constants/constants.go
+++ b/ee/query-service/constants/constants.go
@@ -13,6 +13,7 @@ var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
 var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
 var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
 var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")
+var ZeusURL = GetOrDefaultEnv("ZEUS_URL", "ZeusURL")
 
 func GetOrDefaultEnv(key string, fallback string) string {
 	v := os.Getenv(key)
diff --git a/ee/query-service/integrations/signozio/response.go b/ee/query-service/integrations/signozio/response.go
index 67ad8aac88..f0b0132d1b 100644
--- a/ee/query-service/integrations/signozio/response.go
+++ b/ee/query-service/integrations/signozio/response.go
@@ -13,3 +13,8 @@ type ActivationResponse struct {
 	ActivationId string `json:"ActivationId"`
 	PlanDetails  string `json:"PlanDetails"`
 }
+
+type ValidateLicenseResponse struct {
+	Status status                 `json:"status"`
+	Data   map[string]interface{} `json:"data"`
+}
diff --git a/ee/query-service/integrations/signozio/signozio.go b/ee/query-service/integrations/signozio/signozio.go
index c18cfb6572..6c0b937c80 100644
--- a/ee/query-service/integrations/signozio/signozio.go
+++ b/ee/query-service/integrations/signozio/signozio.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"time"
 
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
@@ -23,12 +24,14 @@ const (
 )
 
 type Client struct {
-	Prefix string
+	Prefix     string
+	GatewayUrl string
 }
 
 func New() *Client {
 	return &Client{
-		Prefix: constants.LicenseSignozIo,
+		Prefix:     constants.LicenseSignozIo,
+		GatewayUrl: constants.ZeusURL,
 	}
 }
 
@@ -116,6 +119,60 @@ func ValidateLicense(activationId string) (*ActivationResponse, *model.ApiError)
 
 }
 
+func ValidateLicenseV3(licenseKey string) (*model.LicenseV3, *model.ApiError) {
+
+	// Creating an HTTP client with a timeout for better control
+	client := &http.Client{
+		Timeout: 10 * time.Second,
+	}
+
+	req, err := http.NewRequest("GET", C.GatewayUrl+"/v2/licenses/me", nil)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to create request: %v", err)))
+	}
+
+	// Setting the custom header
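+	// for illustration only: the call below is roughly equivalent to
+	//   curl -H "X-Signoz-Cloud-Api-Key: <license-key>" "$ZEUS_URL/v2/licenses/me"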
+	req.Header.Set("X-Signoz-Cloud-Api-Key", licenseKey)
+
+	response, err := client.Do(req)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to make request: %v", err)))
+	}
+
+	body, err := io.ReadAll(response.Body)
+	if err != nil {
+		return nil, model.BadRequest(errors.Wrap(err, fmt.Sprintf("failed to read validation response from %v", C.GatewayUrl)))
+	}
+
+	defer response.Body.Close()
+
+	switch response.StatusCode {
+	case 200:
+		a := ValidateLicenseResponse{}
+		err = json.Unmarshal(body, &a)
+		if err != nil {
+			return nil, model.BadRequest(errors.Wrap(err, "failed to unmarshal license validation response"))
+		}
+
+		license, err := model.NewLicenseV3(a.Data)
+		if err != nil {
+			return nil, model.BadRequest(errors.Wrap(err, "failed to generate new license v3"))
+		}
+
+		return license, nil
+	case 400:
+		return nil, model.BadRequest(errors.Wrap(fmt.Errorf(string(body)),
+			fmt.Sprintf("bad request error received from %v", C.GatewayUrl)))
+	case 401:
+		return nil, model.Unauthorized(errors.Wrap(fmt.Errorf(string(body)),
+			fmt.Sprintf("unauthorized request error received from %v", C.GatewayUrl)))
+	default:
+		return nil, model.InternalError(errors.Wrap(fmt.Errorf(string(body)),
+			fmt.Sprintf("internal request error received from %v", C.GatewayUrl)))
+	}
+
+}
+
 func NewPostRequestWithCtx(ctx context.Context, url string, contentType string, body io.Reader) (*http.Request, error) {
 	req, err := http.NewRequestWithContext(ctx, POST, url, body)
 	if err != nil {
diff --git a/ee/query-service/license/db.go b/ee/query-service/license/db.go
index f6ccc88426..12df69233d 100644
--- a/ee/query-service/license/db.go
+++ b/ee/query-service/license/db.go
@@ -3,6 +3,7 @@ package license
 import (
 	"context"
 	"database/sql"
+	"encoding/json"
 	"fmt"
 	"time"
 
@@ -48,6 +49,34 @@ func (r *Repo) GetLicenses(ctx context.Context) ([]model.License, error) {
 	return licenses, nil
 }
 
+func (r *Repo) GetLicensesV3(ctx context.Context) ([]*model.LicenseV3, error) {
+	licensesData := []model.LicenseDB{}
+	licenseV3Data := []*model.LicenseV3{}
+
+	query := "SELECT id,key,data FROM licenses_v3"
+
+	err := r.db.Select(&licensesData, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get licenses from db: %v", err)
+	}
+
+	for _, l := range licensesData {
+		var licenseData map[string]interface{}
+		err := json.Unmarshal([]byte(l.Data), &licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
+		}
+
+		license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
+		}
+		licenseV3Data = append(licenseV3Data, license)
+	}
+
+	return licenseV3Data, nil
+}
+
 // GetActiveLicense fetches the latest active license from DB.
 // If the license is not present, expect a nil license and a nil error in the output.
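 // The v3 variant below applies the same idea to licenses_v3 rows: among the
 // unexpired licenses, the one with the most recent ValidFrom wins.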
func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel.ApiError) {
@@ -79,6 +108,45 @@ func (r *Repo) GetActiveLicense(ctx context.Context) (*model.License, *basemodel
 	return active, nil
 }
 
+func (r *Repo) GetActiveLicenseV3(ctx context.Context) (*model.LicenseV3, error) {
+	var err error
+	licenses := []model.LicenseDB{}
+
+	query := "SELECT id,key,data FROM licenses_v3"
+
+	err = r.db.Select(&licenses, query)
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("failed to get active licenses from db: %v", err))
+	}
+
+	var active *model.LicenseV3
+	for _, l := range licenses {
+		var licenseData map[string]interface{}
+		err := json.Unmarshal([]byte(l.Data), &licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal data into licenseData : %v", err)
+		}
+
+		license, err := model.NewLicenseV3WithIDAndKey(l.ID, l.Key, licenseData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get licenses v3 schema : %v", err)
+		}
+
+		if active == nil &&
+			(license.ValidFrom != 0) &&
+			(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
+			active = license
+		}
+		if active != nil &&
+			license.ValidFrom > active.ValidFrom &&
+			(license.ValidUntil == -1 || license.ValidUntil > time.Now().Unix()) {
+			active = license
+		}
+	}
+
+	return active, nil
+}
+
 // InsertLicense inserts a new license in db
 func (r *Repo) InsertLicense(ctx context.Context, l *model.License) error {
 
@@ -204,3 +272,53 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
 	}
 	return nil
 }
+
+// InsertLicenseV3 inserts a new license v3 in db
+func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) error {
+
+	query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
+
+	// the license is an entity owned by zeus, so we store the entire payload here without defining a schema
+	licenseData, err := json.Marshal(l.Data)
+	if err != nil {
+		return fmt.Errorf("insert license failed: license marshal error")
+	}
+
+	_, err = r.db.ExecContext(ctx,
+		query,
+		l.ID,
+		l.Key,
+		string(licenseData),
+	)
+
+	if err != nil {
+		zap.L().Error("error in inserting license data: ", zap.Error(err))
+		return fmt.Errorf("failed to insert license in db: %v", err)
+	}
+
+	return nil
+}
+
+// UpdateLicenseV3 updates an existing license v3 in db
+func (r *Repo) UpdateLicenseV3(ctx context.Context, l *model.LicenseV3) error {
+
+	// the key and id for the license can't change so only update the data here!
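+	// e.g. a refreshed TEAMS license ends up stored as something like:
+	//   UPDATE licenses_v3 SET data='{"plan":{"name":"TEAMS"},"status":"ACTIVE",...}' WHERE id='<license-id>';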
+	query := `UPDATE licenses_v3 SET data=$1 WHERE id=$2;`
+
+	license, err := json.Marshal(l.Data)
+	if err != nil {
+		return fmt.Errorf("update license failed: license marshal error")
+	}
+	_, err = r.db.ExecContext(ctx,
+		query,
+		license,
+		l.ID,
+	)
+
+	if err != nil {
+		zap.L().Error("error in updating license data: ", zap.Error(err))
+		return fmt.Errorf("failed to update license in db: %v", err)
+	}
+
+	return nil
+}
diff --git a/ee/query-service/license/manager.go b/ee/query-service/license/manager.go
index 800f4b7ff3..13b869da8c 100644
--- a/ee/query-service/license/manager.go
+++ b/ee/query-service/license/manager.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/jmoiron/sqlx"
+	"github.com/pkg/errors"
 
 	"sync"
 
@@ -45,11 +46,12 @@ type Manager struct {
 	failedAttempts uint64
 
 	// keep track of active license and features
-	activeLicense  *model.License
-	activeFeatures basemodel.FeatureSet
+	activeLicense   *model.License
+	activeLicenseV3 *model.LicenseV3
+	activeFeatures  basemodel.FeatureSet
 }
 
-func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*Manager, error) {
+func StartManager(dbType string, db *sqlx.DB, useLicensesV3 bool, features ...basemodel.Feature) (*Manager, error) {
 	if LM != nil {
 		return LM, nil
 	}
@@ -65,7 +67,7 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
 		repo: &repo,
 	}
 
-	if err := m.start(features...); err != nil {
+	if err := m.start(useLicensesV3, features...); err != nil {
 		return m, err
 	}
 	LM = m
@@ -73,8 +75,14 @@ func StartManager(dbType string, db *sqlx.DB, features ...basemodel.Feature) (*M
 }
 
 // start loads active license in memory and initiates validator
-func (lm *Manager) start(features ...basemodel.Feature) error {
-	err := lm.LoadActiveLicense(features...)
+func (lm *Manager) start(useLicensesV3 bool, features ...basemodel.Feature) error {
+
+	var err error
+	if useLicensesV3 {
+		err = lm.LoadActiveLicenseV3(features...)
+	} else {
+		err = lm.LoadActiveLicense(features...)
+	}
 
 	return err
 }
@@ -108,6 +116,31 @@ func (lm *Manager) SetActive(l *model.License, features ...basemodel.Feature) {
 		go lm.Validator(context.Background())
 	}
 
+}
+func (lm *Manager) SetActiveV3(l *model.LicenseV3, features ...basemodel.Feature) {
+	lm.mutex.Lock()
+	defer lm.mutex.Unlock()
+
+	if l == nil {
+		return
+	}
+
+	lm.activeLicenseV3 = l
+	lm.activeFeatures = append(l.Features, features...)
+	// set default features
+	setDefaultFeatures(lm)
+
+	err := lm.InitFeatures(lm.activeFeatures)
+	if err != nil {
+		zap.L().Panic("Couldn't activate features", zap.Error(err))
+	}
+	if !lm.validatorRunning {
+		// we want to make sure only one validator runs,
+		// we already have lock() so good to go
+		lm.validatorRunning = true
+		go lm.ValidatorV3(context.Background())
+	}
+
 }
 
 func setDefaultFeatures(lm *Manager) {
@@ -137,6 +170,28 @@ func (lm *Manager) LoadActiveLicense(features ...basemodel.Feature) error {
 	return nil
 }
 
+func (lm *Manager) LoadActiveLicenseV3(features ...basemodel.Feature) error {
+	active, err := lm.repo.GetActiveLicenseV3(context.Background())
+	if err != nil {
+		return err
+	}
+	if active != nil {
+		lm.SetActiveV3(active, features...)
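+		// SetActiveV3 (defined earlier in this file) merges the license features
+		// with the defaults and starts the ValidatorV3 loop on first activation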
+	} else {
+		zap.L().Info("No active license found, defaulting to basic plan")
+		// if no active license is found, we default to basic(free) plan with all default features
+		lm.activeFeatures = model.BasicPlan
+		setDefaultFeatures(lm)
+		err := lm.InitFeatures(lm.activeFeatures)
+		if err != nil {
+			zap.L().Error("Couldn't initialize features", zap.Error(err))
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, apiError *model.ApiError) {
 
 	licenses, err := lm.repo.GetLicenses(ctx)
@@ -163,6 +218,23 @@ func (lm *Manager) GetLicenses(ctx context.Context) (response []model.License, a
 	return
 }
 
+func (lm *Manager) GetLicensesV3(ctx context.Context) (response []*model.LicenseV3, apiError *model.ApiError) {
+
+	licenses, err := lm.repo.GetLicensesV3(ctx)
+	if err != nil {
+		return nil, model.InternalError(err)
+	}
+
+	for _, l := range licenses {
+		if lm.activeLicenseV3 != nil && l.Key == lm.activeLicenseV3.Key {
+			l.IsCurrent = true
+		}
+		response = append(response, l)
+	}
+
+	return response, nil
+}
+
 // Validator validates license after an epoch of time
 func (lm *Manager) Validator(ctx context.Context) {
 	defer close(lm.terminated)
@@ -187,6 +259,30 @@ func (lm *Manager) Validator(ctx context.Context) {
 	}
 }
 
+// ValidatorV3 validates the v3 license after an epoch of time
+func (lm *Manager) ValidatorV3(ctx context.Context) {
+	defer close(lm.terminated)
+	tick := time.NewTicker(validationFrequency)
+	defer tick.Stop()
+
+	lm.ValidateV3(ctx)
+
+	for {
+		select {
+		case <-lm.done:
+			return
+		default:
+			select {
+			case <-lm.done:
+				return
+			case <-tick.C:
+				lm.ValidateV3(ctx)
+			}
+		}
+
+	}
+}
+
 // Validate validates the current active license
 func (lm *Manager) Validate(ctx context.Context) (reterr error) {
 	zap.L().Info("License validation started")
@@ -254,6 +350,54 @@ func (lm *Manager) Validate(ctx context.Context) (reterr error) {
 	return nil
 }
 
+// todo[vikrantgupta25]: check the comparison here between old and new license!
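+// a minimal sketch of such a comparison (hypothetical, assuming "reflect" were imported):
+//
+//	if lm.activeLicenseV3 != nil && reflect.DeepEqual(lm.activeLicenseV3.Data, license.Data) {
+//		return nil // nothing changed upstream, skip the db write
+//	}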
+func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError { + + license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key) + if apiError != nil { + zap.L().Error("failed to validate license", zap.Error(apiError.Err)) + return apiError + } + + err := lm.repo.UpdateLicenseV3(ctx, license) + if err != nil { + return model.BadRequest(errors.Wrap(err, "failed to update the new license")) + } + lm.SetActiveV3(license) + + return nil +} + +func (lm *Manager) ValidateV3(ctx context.Context) (reterr error) { + zap.L().Info("License validation started") + if lm.activeLicenseV3 == nil { + return nil + } + + defer func() { + lm.mutex.Lock() + + lm.lastValidated = time.Now().Unix() + if reterr != nil { + zap.L().Error("License validation completed with error", zap.Error(reterr)) + atomic.AddUint64(&lm.failedAttempts, 1) + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_CHECK_FAILED, + map[string]interface{}{"err": reterr.Error()}, "", true, false) + } else { + zap.L().Info("License validation completed with no errors") + } + + lm.mutex.Unlock() + }() + + err := lm.RefreshLicense(ctx) + + if err != nil { + return err + } + return nil +} + // Activate activates a license key with signoz server func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *model.License, errResponse *model.ApiError) { defer func() { @@ -298,6 +442,35 @@ func (lm *Manager) Activate(ctx context.Context, key string) (licenseResponse *m return l, nil } +func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseResponse *model.LicenseV3, errResponse *model.ApiError) { + defer func() { + if errResponse != nil { + userEmail, err := auth.GetEmailFromJwt(ctx) + if err == nil { + telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_LICENSE_ACT_FAILED, + map[string]interface{}{"err": errResponse.Err.Error()}, userEmail, true, false) + } + } + }() + + license, apiError := validate.ValidateLicenseV3(licenseKey) + if apiError != nil { + zap.L().Error("failed to get the license", zap.Error(apiError.Err)) + return nil, apiError + } + + // insert the new license to the sqlite db + err := lm.repo.InsertLicenseV3(ctx, license) + if err != nil { + zap.L().Error("failed to activate license", zap.Error(err)) + return nil, model.InternalError(err) + } + + // license is valid, activate it + lm.SetActiveV3(license) + return license, nil +} + // CheckFeature will be internally used by backend routines // for feature gating func (lm *Manager) CheckFeature(featureKey string) error { diff --git a/ee/query-service/license/sqlite/init.go b/ee/query-service/license/sqlite/init.go index c80bbd5a86..cd34081cc9 100644 --- a/ee/query-service/license/sqlite/init.go +++ b/ee/query-service/license/sqlite/init.go @@ -48,5 +48,16 @@ func InitDB(db *sqlx.DB) error { return fmt.Errorf("error in creating feature_status table: %s", err.Error()) } + table_schema = `CREATE TABLE IF NOT EXISTS licenses_v3 ( + id TEXT PRIMARY KEY, + key TEXT NOT NULL UNIQUE, + data TEXT + );` + + _, err = db.Exec(table_schema) + if err != nil { + return fmt.Errorf("error in creating licenses_v3 table: %s", err.Error()) + } + return nil } diff --git a/ee/query-service/main.go b/ee/query-service/main.go index 41cc69aa49..55e70893e6 100644 --- a/ee/query-service/main.go +++ b/ee/query-service/main.go @@ -94,6 +94,7 @@ func main() { var cluster string var useLogsNewSchema bool + var useLicensesV3 bool var cacheConfigPath, fluxInterval string var enableQueryServiceLogOTLPExport bool var preferSpanMetrics bool @@ 
-104,6 +105,7 @@ func main() { var gatewayUrl string flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") + flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses") flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") @@ -143,6 +145,7 @@ func main() { Cluster: cluster, GatewayUrl: gatewayUrl, UseLogsNewSchema: useLogsNewSchema, + UseLicensesV3: useLicensesV3, } // Read the jwt secret key diff --git a/ee/query-service/model/errors.go b/ee/query-service/model/errors.go index 7e7b8410e2..efc780be95 100644 --- a/ee/query-service/model/errors.go +++ b/ee/query-service/model/errors.go @@ -46,6 +46,13 @@ func BadRequest(err error) *ApiError { } } +func Unauthorized(err error) *ApiError { + return &ApiError{ + Typ: basemodel.ErrorUnauthorized, + Err: err, + } +} + // BadRequestStr returns a ApiError object of bad request for string input func BadRequestStr(s string) *ApiError { return &ApiError{ diff --git a/ee/query-service/model/license.go b/ee/query-service/model/license.go index 7ad349c9b7..2f9a0feeda 100644 --- a/ee/query-service/model/license.go +++ b/ee/query-service/model/license.go @@ -3,6 +3,8 @@ package model import ( "encoding/base64" "encoding/json" + "fmt" + "reflect" "time" "github.com/pkg/errors" @@ -104,3 +106,144 @@ type SubscriptionServerResp struct { Status string `json:"status"` Data Licenses `json:"data"` } + +type Plan struct { + Name string `json:"name"` +} + +type LicenseDB struct { + ID string `json:"id"` + Key string `json:"key"` + Data string `json:"data"` +} +type LicenseV3 struct { + ID string + Key string + Data map[string]interface{} + PlanName string + Features basemodel.FeatureSet + Status string + IsCurrent bool + ValidFrom int64 + ValidUntil int64 +} + +func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) { + var zeroValue T + if val, ok := data[key]; ok { + if value, ok := val.(T); ok { + return value, nil + } + return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue)) + } + return zeroValue, fmt.Errorf("%s key is missing", key) +} + +func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) { + var features basemodel.FeatureSet + + // extract id from data + licenseID, err := extractKeyFromMapStringInterface[string](data, "id") + if err != nil { + return nil, err + } + delete(data, "id") + + // extract key from data + licenseKey, err := extractKeyFromMapStringInterface[string](data, "key") + if err != nil { + return nil, err + } + delete(data, "key") + + // extract status from data + status, err := extractKeyFromMapStringInterface[string](data, "status") + if err != nil { + return nil, err + } + + planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan") + if err != nil { + return nil, err + } + + planName, err := extractKeyFromMapStringInterface[string](planMap, "name") + if err != nil { + return nil, err + } + // if license status is inactive then default it to basic + if status == LicenseStatusInactive { + planName = PlanNameBasic + } + + featuresFromZeus := basemodel.FeatureSet{} + if _features, ok := data["features"]; ok { + featuresData, err := json.Marshal(_features) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal 
features data") + } + + if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal features data") + } + } + + switch planName { + case PlanNameTeams: + features = append(features, ProPlan...) + case PlanNameEnterprise: + features = append(features, EnterprisePlan...) + case PlanNameBasic: + features = append(features, BasicPlan...) + default: + features = append(features, BasicPlan...) + } + + if len(featuresFromZeus) > 0 { + for _, feature := range featuresFromZeus { + exists := false + for i, existingFeature := range features { + if existingFeature.Name == feature.Name { + features[i] = feature // Replace existing feature + exists = true + break + } + } + if !exists { + features = append(features, feature) // Append if it doesn't exist + } + } + } + data["features"] = features + + _validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from") + if err != nil { + _validFrom = 0 + } + validFrom := int64(_validFrom) + + _validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until") + if err != nil { + _validUntil = 0 + } + validUntil := int64(_validUntil) + + return &LicenseV3{ + ID: licenseID, + Key: licenseKey, + Data: data, + PlanName: planName, + Features: features, + ValidFrom: validFrom, + ValidUntil: validUntil, + Status: status, + }, nil + +} + +func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) { + licenseDataWithIdAndKey := data + licenseDataWithIdAndKey["id"] = id + licenseDataWithIdAndKey["key"] = key + return NewLicenseV3(licenseDataWithIdAndKey) +} diff --git a/ee/query-service/model/license_test.go b/ee/query-service/model/license_test.go new file mode 100644 index 0000000000..1c6150c8ac --- /dev/null +++ b/ee/query-service/model/license_test.go @@ -0,0 +1,170 @@ +package model + +import ( + "encoding/json" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.signoz.io/signoz/pkg/query-service/model" +) + +func TestNewLicenseV3(t *testing.T) { + testCases := []struct { + name string + data []byte + pass bool + expected *LicenseV3 + error error + }{ + { + name: "Error for missing license id", + data: []byte(`{}`), + pass: false, + error: errors.New("id key is missing"), + }, + { + name: "Error for license id not being a valid string", + data: []byte(`{"id": 10}`), + pass: false, + error: errors.New("id key is not a valid string"), + }, + { + name: "Error for missing license key", + data: []byte(`{"id":"does-not-matter"}`), + pass: false, + error: errors.New("key key is missing"), + }, + { + name: "Error for invalid string license key", + data: []byte(`{"id":"does-not-matter","key":10}`), + pass: false, + error: errors.New("key key is not a valid string"), + }, + { + name: "Error for missing license status", + data: []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`), + pass: false, + error: errors.New("status key is missing"), + }, + { + name: "Error for invalid string license status", + data: []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`), + pass: false, + error: errors.New("status key is not a valid string"), + }, + { + name: "Error for missing license plan", + data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`), + pass: false, + error: errors.New("plan key is missing"), + }, + { + name: "Error for invalid json license plan", + 
data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`), + pass: false, + error: errors.New("plan key is not a valid map[string]interface {}"), + }, + { + name: "Error for invalid license plan", + data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`), + pass: false, + error: errors.New("name key is missing"), + }, + { + name: "Parse the entire license properly", + data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`), + pass: true, + expected: &LicenseV3{ + ID: "does-not-matter", + Key: "does-not-matter-key", + Data: map[string]interface{}{ + "plan": map[string]interface{}{ + "name": "TEAMS", + }, + "category": "FREE", + "status": "ACTIVE", + "valid_from": float64(1730899309), + "valid_until": float64(-1), + }, + PlanName: PlanNameTeams, + ValidFrom: 1730899309, + ValidUntil: -1, + Status: "ACTIVE", + IsCurrent: false, + Features: model.FeatureSet{}, + }, + }, + { + name: "Fallback to basic plan if license status is inactive", + data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`), + pass: true, + expected: &LicenseV3{ + ID: "does-not-matter", + Key: "does-not-matter-key", + Data: map[string]interface{}{ + "plan": map[string]interface{}{ + "name": "TEAMS", + }, + "category": "FREE", + "status": "INACTIVE", + "valid_from": float64(1730899309), + "valid_until": float64(-1), + }, + PlanName: PlanNameBasic, + ValidFrom: 1730899309, + ValidUntil: -1, + Status: "INACTIVE", + IsCurrent: false, + Features: model.FeatureSet{}, + }, + }, + { + name: "fallback states for validFrom and validUntil", + data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`), + pass: true, + expected: &LicenseV3{ + ID: "does-not-matter", + Key: "does-not-matter-key", + Data: map[string]interface{}{ + "plan": map[string]interface{}{ + "name": "TEAMS", + }, + "valid_from": 1234.456, + "valid_until": 5678.567, + "category": "FREE", + "status": "ACTIVE", + }, + PlanName: PlanNameTeams, + ValidFrom: 1234, + ValidUntil: 5678, + Status: "ACTIVE", + IsCurrent: false, + Features: model.FeatureSet{}, + }, + }, + } + + for _, tc := range testCases { + var licensePayload map[string]interface{} + err := json.Unmarshal(tc.data, &licensePayload) + require.NoError(t, err) + license, err := NewLicenseV3(licensePayload) + if license != nil { + license.Features = make(model.FeatureSet, 0) + delete(license.Data, "features") + } + + if tc.pass { + require.NoError(t, err) + require.NotNil(t, license) + assert.Equal(t, tc.expected, license) + } else { + require.Error(t, err) + assert.EqualError(t, err, tc.error.Error()) + require.Nil(t, license) + } + + } +} diff --git a/ee/query-service/model/plans.go b/ee/query-service/model/plans.go index c5272340a3..1ac9ac28d6 100644 --- a/ee/query-service/model/plans.go +++ b/ee/query-service/model/plans.go @@ -9,6 +9,17 @@ const SSO = "SSO" const Basic = "BASIC_PLAN" const Pro = "PRO_PLAN" const Enterprise = "ENTERPRISE_PLAN" + +var ( + PlanNameEnterprise = "ENTERPRISE" + PlanNameTeams = "TEAMS" + PlanNameBasic = "BASIC" +) + +var ( + LicenseStatusInactive = "INACTIVE" +) + const DisableUpsell = "DISABLE_UPSELL" const Onboarding = 
"ONBOARDING" const ChatSupport = "CHAT_SUPPORT" diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 968aca0030..c488595584 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -111,6 +111,7 @@ type APIHandler struct { Upgrader *websocket.Upgrader UseLogsNewSchema bool + UseLicensesV3 bool hostsRepo *inframetrics.HostsRepo processesRepo *inframetrics.ProcessesRepo @@ -156,6 +157,9 @@ type APIHandlerOpts struct { // Use Logs New schema UseLogsNewSchema bool + + // Use Licenses V3 structure + UseLicensesV3 bool } // NewAPIHandler returns an APIHandler @@ -211,6 +215,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { querier: querier, querierV2: querierv2, UseLogsNewSchema: opts.UseLogsNewSchema, + UseLicensesV3: opts.UseLicensesV3, hostsRepo: hostsRepo, processesRepo: processesRepo, podsRepo: podsRepo, From d1503f1418fb3c8f4440c34656b5a2fc62e64f2b Mon Sep 17 00:00:00 2001 From: Ekansh Gupta Date: Tue, 12 Nov 2024 11:00:36 +0530 Subject: [PATCH 22/30] feat: fixProducerAPI (#6422) chore: bugfix --- pkg/query-service/app/http_handler.go | 28 +++++++++---------- .../messagingQueues/kafka/translator.go | 4 +-- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index c488595584..bbb42effbf 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -3222,16 +3222,16 @@ func (aH *APIHandler) getProducerThroughputOverview( } for _, res := range result { - for _, series := range res.Series { - serviceName, serviceNameOk := series.Labels["service_name"] - topicName, topicNameOk := series.Labels["topic"] - params := []string{serviceName, topicName} + for _, list := range res.List { + serviceName, serviceNameOk := list.Data["service_name"].(*string) + topicName, topicNameOk := list.Data["topic"].(*string) + params := []string{*serviceName, *topicName} hashKey := uniqueIdentifier(params, "#") _, ok := attributeCache.Hash[hashKey] if topicNameOk && serviceNameOk && !ok { attributeCache.Hash[hashKey] = struct{}{} - attributeCache.TopicName = append(attributeCache.TopicName, topicName) - attributeCache.ServiceName = append(attributeCache.ServiceName, serviceName) + attributeCache.TopicName = append(attributeCache.TopicName, *topicName) + attributeCache.ServiceName = append(attributeCache.ServiceName, *serviceName) } } } @@ -3256,25 +3256,23 @@ func (aH *APIHandler) getProducerThroughputOverview( } latencyColumn := &v3.Result{QueryName: "latency"} - var latencySeries []*v3.Series + var latencySeries []*v3.Row for _, res := range resultFetchLatency { - for _, series := range res.Series { - topic, topicOk := series.Labels["topic"] - serviceName, serviceNameOk := series.Labels["service_name"] - params := []string{topic, serviceName} + for _, list := range res.List { + topic, topicOk := list.Data["topic"].(*string) + serviceName, serviceNameOk := list.Data["service_name"].(*string) + params := []string{*serviceName, *topic} hashKey := uniqueIdentifier(params, "#") _, ok := attributeCache.Hash[hashKey] if topicOk && serviceNameOk && ok { - latencySeries = append(latencySeries, series) + latencySeries = append(latencySeries, list) } } } - latencyColumn.Series = latencySeries + latencyColumn.List = latencySeries result = append(result, latencyColumn) - resultFetchLatency = postprocess.TransformToTableForBuilderQueries(result, queryRangeParams) - resp := v3.QueryRangeResponse{ Result: 
resultFetchLatency, } diff --git a/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go b/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go index 2731bd5b95..d7dc96d470 100644 --- a/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go +++ b/pkg/query-service/app/integrations/messagingQueues/kafka/translator.go @@ -284,7 +284,7 @@ func BuildQRParamsWithCache(messagingQueue *MessagingQueue, queryContext string, cq = &v3.CompositeQuery{ QueryType: v3.QueryTypeBuilder, BuilderQueries: bhq, - PanelType: v3.PanelTypeTable, + PanelType: v3.PanelTypeList, } } @@ -364,7 +364,7 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer func buildCompositeQuery(chq *v3.ClickHouseQuery, queryContext string) (*v3.CompositeQuery, error) { - if queryContext == "producer-consumer-eval" { + if queryContext == "producer-consumer-eval" || queryContext == "producer-throughput-overview" { return &v3.CompositeQuery{ QueryType: v3.QueryTypeClickHouseSQL, ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq}, From 2ec641b99e89b34ab4dfdb2cc5838ae50cc39b7f Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Tue, 12 Nov 2024 11:24:22 +0530 Subject: [PATCH 23/30] fix: add severity_text legend (#6415) --- frontend/src/container/LogsExplorerViews/index.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/src/container/LogsExplorerViews/index.tsx b/frontend/src/container/LogsExplorerViews/index.tsx index 8dc46c5a5a..5ce5dbe2be 100644 --- a/frontend/src/container/LogsExplorerViews/index.tsx +++ b/frontend/src/container/LogsExplorerViews/index.tsx @@ -202,6 +202,7 @@ function LogsExplorerViews({ id: 'severity_text--string----true', }, ], + legend: '{{severity_text}}', }; const modifiedQuery: Query = { From d5523fc09286e91fa34534c6d35a783a568253e9 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Tue, 12 Nov 2024 13:34:45 +0530 Subject: [PATCH 24/30] fix: ignore ts for panel type table (#6419) --- pkg/query-service/app/queryBuilder/query_builder.go | 4 +++- pkg/query-service/app/queryBuilder/query_builder_test.go | 8 ++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go index de8db2057a..e824c15ac3 100644 --- a/pkg/query-service/app/queryBuilder/query_builder.go +++ b/pkg/query-service/app/queryBuilder/query_builder.go @@ -116,7 +116,9 @@ func expressionToQuery( for _, tag := range qp.CompositeQuery.BuilderQueries[variable].GroupBy { groupTags = append(groupTags, tag.Key) } - groupTags = append(groupTags, "ts") + if qp.CompositeQuery.PanelType != v3.PanelTypeTable { + groupTags = append(groupTags, "ts") + } if joinUsing == "" { for _, tag := range groupTags { joinUsing += fmt.Sprintf("%s.`%s` as `%s`, ", variable, tag, tag) diff --git a/pkg/query-service/app/queryBuilder/query_builder_test.go b/pkg/query-service/app/queryBuilder/query_builder_test.go index f7538f0efb..eba5a56868 100644 --- a/pkg/query-service/app/queryBuilder/query_builder_test.go +++ b/pkg/query-service/app/queryBuilder/query_builder_test.go @@ -498,11 +498,11 @@ var testLogsWithFormula = []struct { }, }, }, - ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " + + ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.value + B.value as value FROM (SELECT now() as ts, 
attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " + "toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] = true AND " + "has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT now() as ts, attributes_bool_value[indexOf(attributes_bool_key, 'key1.1')] as `key1.1`, " + "toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND attributes_bool_value[indexOf(attributes_bool_key, 'key1.2')] = true AND " + - "has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`", + "has(attributes_bool_key, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1`", }, { Name: "test formula with dot in filter and group by materialized attribute", @@ -707,12 +707,12 @@ var testLogsWithFormulaV2 = []struct { }, }, }, - ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " + + ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " + "toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) " + "AND attributes_bool['key1.1'] = true AND mapContains(attributes_bool, 'key1.1') AND mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT " + "attributes_bool['key1.1'] as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) " + "AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) AND attributes_bool['key1.2'] = true AND mapContains(attributes_bool, 'key1.2') AND " + - "mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`", + "mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1`", }, { Name: "test formula with dot in filter and group by materialized attribute", From fd9e9f0fb33eaacdf62397ddea61f24ab58bb8a3 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 12 Nov 2024 20:53:40 +0530 Subject: [PATCH 25/30] chore: add k8s {deployment, daemonset, statefulset, job} resources (#6401) --- pkg/query-service/app/http_handler.go | 33 ++ pkg/query-service/app/infra.go | 210 ++++++++ pkg/query-service/app/inframetrics/common.go | 16 + .../app/inframetrics/daemonsets.go | 444 ++++++++++++++++ .../app/inframetrics/deployments.go | 444 ++++++++++++++++ pkg/query-service/app/inframetrics/jobs.go | 498 ++++++++++++++++++ .../app/inframetrics/statefulsets.go | 444 ++++++++++++++++ .../app/inframetrics/workload_query.go | 166 ++++++ pkg/query-service/model/infra.go | 122 +++++ 9 files changed, 2377 insertions(+) create mode 100644 pkg/query-service/app/inframetrics/daemonsets.go create mode 100644 pkg/query-service/app/inframetrics/deployments.go create mode 100644 pkg/query-service/app/inframetrics/jobs.go create mode 100644 pkg/query-service/app/inframetrics/statefulsets.go create mode 100644 
pkg/query-service/app/inframetrics/workload_query.go diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index bbb42effbf..0f6d351af7 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -119,6 +119,11 @@ type APIHandler struct { nodesRepo *inframetrics.NodesRepo namespacesRepo *inframetrics.NamespacesRepo clustersRepo *inframetrics.ClustersRepo + // workloads + deploymentsRepo *inframetrics.DeploymentsRepo + daemonsetsRepo *inframetrics.DaemonSetsRepo + statefulsetsRepo *inframetrics.StatefulSetsRepo + jobsRepo *inframetrics.JobsRepo } type APIHandlerOpts struct { @@ -197,6 +202,10 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { nodesRepo := inframetrics.NewNodesRepo(opts.Reader, querierv2) namespacesRepo := inframetrics.NewNamespacesRepo(opts.Reader, querierv2) clustersRepo := inframetrics.NewClustersRepo(opts.Reader, querierv2) + deploymentsRepo := inframetrics.NewDeploymentsRepo(opts.Reader, querierv2) + daemonsetsRepo := inframetrics.NewDaemonSetsRepo(opts.Reader, querierv2) + statefulsetsRepo := inframetrics.NewStatefulSetsRepo(opts.Reader, querierv2) + jobsRepo := inframetrics.NewJobsRepo(opts.Reader, querierv2) aH := &APIHandler{ reader: opts.Reader, @@ -222,6 +231,10 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { nodesRepo: nodesRepo, namespacesRepo: namespacesRepo, clustersRepo: clustersRepo, + deploymentsRepo: deploymentsRepo, + daemonsetsRepo: daemonsetsRepo, + statefulsetsRepo: statefulsetsRepo, + jobsRepo: jobsRepo, } logsQueryBuilder := logsv3.PrepareLogsQuery @@ -400,6 +413,26 @@ func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMid clustersSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getClusterAttributeKeys)).Methods(http.MethodGet) clustersSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getClusterAttributeValues)).Methods(http.MethodGet) clustersSubRouter.HandleFunc("/list", am.ViewAccess(aH.getClusterList)).Methods(http.MethodPost) + + deploymentsSubRouter := router.PathPrefix("/api/v1/deployments").Subrouter() + deploymentsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getDeploymentAttributeKeys)).Methods(http.MethodGet) + deploymentsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getDeploymentAttributeValues)).Methods(http.MethodGet) + deploymentsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getDeploymentList)).Methods(http.MethodPost) + + daemonsetsSubRouter := router.PathPrefix("/api/v1/daemonsets").Subrouter() + daemonsetsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getDaemonSetAttributeKeys)).Methods(http.MethodGet) + daemonsetsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getDaemonSetAttributeValues)).Methods(http.MethodGet) + daemonsetsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getDaemonSetList)).Methods(http.MethodPost) + + statefulsetsSubRouter := router.PathPrefix("/api/v1/statefulsets").Subrouter() + statefulsetsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getStatefulSetAttributeKeys)).Methods(http.MethodGet) + statefulsetsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getStatefulSetAttributeValues)).Methods(http.MethodGet) + statefulsetsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getStatefulSetList)).Methods(http.MethodPost) + + jobsSubRouter := router.PathPrefix("/api/v1/jobs").Subrouter() + jobsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getJobAttributeKeys)).Methods(http.MethodGet) + 
jobsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getJobAttributeValues)).Methods(http.MethodGet) + jobsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getJobList)).Methods(http.MethodPost) } func (aH *APIHandler) RegisterWebSocketPaths(router *mux.Router, am *AuthMiddleware) { diff --git a/pkg/query-service/app/infra.go b/pkg/query-service/app/infra.go index 73d10bdddb..b1f741e244 100644 --- a/pkg/query-service/app/infra.go +++ b/pkg/query-service/app/infra.go @@ -334,3 +334,213 @@ func (aH *APIHandler) getClusterList(w http.ResponseWriter, r *http.Request) { aH.Respond(w, clusterList) } + +func (aH *APIHandler) getDeploymentAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.deploymentsRepo.GetDeploymentAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getDeploymentAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.deploymentsRepo.GetDeploymentAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getDeploymentList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.DeploymentListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + deploymentList, err := aH.deploymentsRepo.GetDeploymentList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, deploymentList) +} + +func (aH *APIHandler) getDaemonSetAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.daemonsetsRepo.GetDaemonSetAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getDaemonSetAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.daemonsetsRepo.GetDaemonSetAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getDaemonSetList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.DaemonSetListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + daemonSetList, err := aH.daemonsetsRepo.GetDaemonSetList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, 
daemonSetList) +} + +func (aH *APIHandler) getStatefulSetAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.statefulsetsRepo.GetStatefulSetAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, keys) +} + +func (aH *APIHandler) getStatefulSetAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.statefulsetsRepo.GetStatefulSetAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, values) +} + +func (aH *APIHandler) getStatefulSetList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.StatefulSetListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + statefulSetList, err := aH.statefulsetsRepo.GetStatefulSetList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, statefulSetList) +} + +func (aH *APIHandler) getJobAttributeKeys(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeKeyRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + keys, err := aH.jobsRepo.GetJobAttributeKeys(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + aH.Respond(w, keys) +} + +func (aH *APIHandler) getJobAttributeValues(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req, err := parseFilterAttributeValueRequest(r) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + values, err := aH.jobsRepo.GetJobAttributeValues(ctx, *req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + aH.Respond(w, values) +} + +func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := model.JobListRequest{} + + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + jobList, err := aH.jobsRepo.GetJobList(ctx, req) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + + aH.Respond(w, jobList) +} diff --git a/pkg/query-service/app/inframetrics/common.go b/pkg/query-service/app/inframetrics/common.go index 7cde41185e..c4c280cb98 100644 --- a/pkg/query-service/app/inframetrics/common.go +++ b/pkg/query-service/app/inframetrics/common.go @@ -73,6 +73,22 @@ func getParamsForTopClusters(req model.ClusterListRequest) (int64, string, strin return getParamsForTopItems(req.Start, req.End) } +func getParamsForTopDeployments(req model.DeploymentListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopDaemonSets(req model.DaemonSetListRequest) (int64, string, string) { + return 
getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopStatefulSets(req model.StatefulSetListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + +func getParamsForTopJobs(req model.JobListRequest) (int64, string, string) { + return getParamsForTopItems(req.Start, req.End) +} + // TODO(srikanthccv): remove this // What is happening here? // The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint diff --git a/pkg/query-service/app/inframetrics/daemonsets.go b/pkg/query-service/app/inframetrics/daemonsets.go new file mode 100644 index 0000000000..735d52d2a0 --- /dev/null +++ b/pkg/query-service/app/inframetrics/daemonsets.go @@ -0,0 +1,444 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForDaemonSets = "k8s_pod_cpu_utilization" + k8sDaemonSetNameAttrKey = "k8s_daemonset_name" + + metricNamesForDaemonSets = map[string]string{ + "desired_nodes": "k8s_daemonset_desired_scheduled_nodes", + "available_nodes": "k8s_daemonset_current_scheduled_nodes", + } + + daemonSetAttrsToEnrich = []string{ + "k8s_daemonset_name", + "k8s_namespace_name", + "k8s_cluster_name", + } + + queryNamesForDaemonSets = map[string][]string{ + "cpu": {"A"}, + "cpu_request": {"B", "A"}, + "cpu_limit": {"C", "A"}, + "memory": {"D"}, + "memory_request": {"E", "D"}, + "memory_limit": {"F", "D"}, + "restarts": {"G", "A"}, + "desired_nodes": {"H"}, + "available_nodes": {"I"}, + } + + builderQueriesForDaemonSets = map[string]*v3.BuilderQuery{ + // desired nodes + "H": { + QueryName: "H", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForDaemonSets["desired_nodes"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "H", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // available nodes + "I": { + QueryName: "I", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForDaemonSets["available_nodes"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "I", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + } + + daemonSetQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"} +) + +type DaemonSetsRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewDaemonSetsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *DaemonSetsRepo { + return &DaemonSetsRepo{reader: reader, querierV2: querierV2} +} + +func (d *DaemonSetsRepo) GetDaemonSetAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys 
from any pod metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForDaemonSets + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. + filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (d *DaemonSetsRepo) GetDaemonSetAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForDaemonSets + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (d *DaemonSetsRepo) getMetadataAttributes(ctx context.Context, req model.DaemonSetListRequest) (map[string]map[string]string, error) { + daemonSetAttrs := map[string]map[string]string{} + + for _, key := range daemonSetAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForDaemonSets, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := d.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + daemonSetName := stringData[k8sDaemonSetNameAttrKey] + if _, ok := daemonSetAttrs[daemonSetName]; !ok { + daemonSetAttrs[daemonSetName] = map[string]string{} + } + + for _, key := range req.GroupBy { + daemonSetAttrs[daemonSetName][key.Key] = stringData[key.Key] + } + } + + return daemonSetAttrs, nil +} + +func (d *DaemonSetsRepo) getTopDaemonSetGroups(ctx context.Context, req model.DaemonSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopDaemonSets(req) + + queryNames := queryNamesForDaemonSets[req.OrderBy.ColumnName] + topDaemonSetGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: 
samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + topDaemonSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, topDaemonSetGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topDaemonSetGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopDaemonSetGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)] + + topDaemonSetGroups := []map[string]string{} + for _, series := range paginatedTopDaemonSetGroupsSeries { + topDaemonSetGroups = append(topDaemonSetGroups, series.Labels) + } + allDaemonSetGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allDaemonSetGroups = append(allDaemonSetGroups, series.Labels) + } + + return topDaemonSetGroups, allDaemonSetGroups, nil +} + +func (d *DaemonSetsRepo) GetDaemonSetList(ctx context.Context, req model.DaemonSetListRequest) (model.DaemonSetListResponse, error) { + resp := model.DaemonSetListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sDaemonSetNameAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := WorkloadTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + // add additional queries for daemon sets + for _, daemonSetQuery := range builderQueriesForDaemonSets { + query.CompositeQuery.BuilderQueries[daemonSetQuery.QueryName] = daemonSetQuery + } + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
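+			// note: the request filters are merged into every sub-query (A..I)
+			// so that user-supplied filters constrain each metric uniformly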
+ } + query.GroupBy = req.GroupBy + // make sure we only get records for daemon sets + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: k8sDaemonSetNameAttrKey}, + Operator: v3.FilterOperatorExists, + }) + } + + daemonSetAttrs, err := d.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topDaemonSetGroups, allDaemonSetGroups, err := d.getTopDaemonSetGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topDaemonSetGroup := range topDaemonSetGroups { + for k, v := range topDaemonSetGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.DaemonSetListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.DaemonSetListRecord{ + DaemonSetName: "", + CPUUsage: -1, + CPURequest: -1, + CPULimit: -1, + MemoryUsage: -1, + MemoryRequest: -1, + MemoryLimit: -1, + DesiredNodes: -1, + AvailableNodes: -1, + } + + if daemonSetName, ok := row.Data[k8sDaemonSetNameAttrKey].(string); ok { + record.DaemonSetName = daemonSetName + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + if cpuRequest, ok := row.Data["B"].(float64); ok { + record.CPURequest = cpuRequest + } + + if cpuLimit, ok := row.Data["C"].(float64); ok { + record.CPULimit = cpuLimit + } + + if memory, ok := row.Data["D"].(float64); ok { + record.MemoryUsage = memory + } + + if memoryRequest, ok := row.Data["E"].(float64); ok { + record.MemoryRequest = memoryRequest + } + + if memoryLimit, ok := row.Data["F"].(float64); ok { + record.MemoryLimit = memoryLimit + } + + if restarts, ok := row.Data["G"].(float64); ok { + record.Restarts = int(restarts) + } + + if desiredNodes, ok := row.Data["H"].(float64); ok { + record.DesiredNodes = int(desiredNodes) + } + + if availableNodes, ok := row.Data["I"].(float64); ok { + record.AvailableNodes = int(availableNodes) + } + + record.Meta = map[string]string{} + if _, ok := daemonSetAttrs[record.DaemonSetName]; ok { + record.Meta = daemonSetAttrs[record.DaemonSetName] + } + + for k, v := range row.Data { + if slices.Contains(daemonSetQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allDaemonSetGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/deployments.go b/pkg/query-service/app/inframetrics/deployments.go new file mode 100644 index 0000000000..aed8de1929 --- /dev/null +++ b/pkg/query-service/app/inframetrics/deployments.go @@ -0,0 +1,444 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + 
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForDeployments = "k8s_pod_cpu_utilization" + k8sDeploymentNameAttrKey = "k8s_deployment_name" + + metricNamesForDeployments = map[string]string{ + "desired_pods": "k8s_deployment_desired", + "available_pods": "k8s_deployment_available", + } + + deploymentAttrsToEnrich = []string{ + "k8s_deployment_name", + "k8s_namespace_name", + "k8s_cluster_name", + } + + queryNamesForDeployments = map[string][]string{ + "cpu": {"A"}, + "cpu_request": {"B", "A"}, + "cpu_limit": {"C", "A"}, + "memory": {"D"}, + "memory_request": {"E", "D"}, + "memory_limit": {"F", "D"}, + "restarts": {"G", "A"}, + "desired_pods": {"H"}, + "available_pods": {"I"}, + } + + builderQueriesForDeployments = map[string]*v3.BuilderQuery{ + // desired pods + "H": { + QueryName: "H", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForDeployments["desired_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "H", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // available pods + "I": { + QueryName: "I", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForDeployments["available_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "I", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + } + + deploymentQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"} +) + +type DeploymentsRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewDeploymentsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *DeploymentsRepo { + return &DeploymentsRepo{reader: reader, querierV2: querierV2} +} + +func (d *DeploymentsRepo) GetDeploymentAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForDeployments + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. 
+ filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (d *DeploymentsRepo) GetDeploymentAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForDeployments + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (d *DeploymentsRepo) getMetadataAttributes(ctx context.Context, req model.DeploymentListRequest) (map[string]map[string]string, error) { + deploymentAttrs := map[string]map[string]string{} + + for _, key := range deploymentAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForDeployments, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := d.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + deploymentName := stringData[k8sDeploymentNameAttrKey] + if _, ok := deploymentAttrs[deploymentName]; !ok { + deploymentAttrs[deploymentName] = map[string]string{} + } + + for _, key := range req.GroupBy { + deploymentAttrs[deploymentName][key.Key] = stringData[key.Key] + } + } + + return deploymentAttrs, nil +} + +func (d *DeploymentsRepo) getTopDeploymentGroups(ctx context.Context, req model.DeploymentListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopDeployments(req) + + queryNames := queryNamesForDeployments[req.OrderBy.ColumnName] + topDeploymentGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
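+			// the ranking query reuses the caller's filters so that the top-N
+			// selection is computed over the same set of series as the final
+			// list query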
+ } + topDeploymentGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, topDeploymentGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topDeploymentGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopDeploymentGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)] + + topDeploymentGroups := []map[string]string{} + for _, series := range paginatedTopDeploymentGroupsSeries { + topDeploymentGroups = append(topDeploymentGroups, series.Labels) + } + allDeploymentGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allDeploymentGroups = append(allDeploymentGroups, series.Labels) + } + + return topDeploymentGroups, allDeploymentGroups, nil +} + +func (d *DeploymentsRepo) GetDeploymentList(ctx context.Context, req model.DeploymentListRequest) (model.DeploymentListResponse, error) { + resp := model.DeploymentListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sDeploymentNameAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := WorkloadTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + // add additional queries for deployments + for _, deploymentQuery := range builderQueriesForDeployments { + query.CompositeQuery.BuilderQueries[deploymentQuery.QueryName] = deploymentQuery + } + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + query.GroupBy = req.GroupBy + // make sure we only get records for deployments + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: k8sDeploymentNameAttrKey}, + Operator: v3.FilterOperatorExists, + }) + } + + deploymentAttrs, err := d.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topDeploymentGroups, allDeploymentGroups, err := d.getTopDeploymentGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topDeploymentGroup := range topDeploymentGroups { + for k, v := range topDeploymentGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.DeploymentListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.DeploymentListRecord{ + DeploymentName: "", + CPUUsage: -1, + CPURequest: -1, + CPULimit: -1, + MemoryUsage: -1, + MemoryRequest: -1, + MemoryLimit: -1, + DesiredPods: -1, + AvailablePods: -1, + } + + if deploymentName, ok := row.Data[k8sDeploymentNameAttrKey].(string); ok { + record.DeploymentName = deploymentName + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + if cpuRequest, ok := row.Data["B"].(float64); ok { + record.CPURequest = cpuRequest + } + + if cpuLimit, ok := row.Data["C"].(float64); ok { + record.CPULimit = cpuLimit + } + + if memory, ok := row.Data["D"].(float64); ok { + record.MemoryUsage = memory + } + + if memoryRequest, ok := row.Data["E"].(float64); ok { + record.MemoryRequest = memoryRequest + } + + if memoryLimit, ok := row.Data["F"].(float64); ok { + record.MemoryLimit = memoryLimit + } + + if restarts, ok := row.Data["G"].(float64); ok { + record.Restarts = int(restarts) + } + + if desiredPods, ok := row.Data["H"].(float64); ok { + record.DesiredPods = int(desiredPods) + } + + if availablePods, ok := row.Data["I"].(float64); ok { + record.AvailablePods = int(availablePods) + } + + record.Meta = map[string]string{} + if _, ok := deploymentAttrs[record.DeploymentName]; ok { + record.Meta = deploymentAttrs[record.DeploymentName] + } + + for k, v := range row.Data { + if slices.Contains(deploymentQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allDeploymentGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/jobs.go b/pkg/query-service/app/inframetrics/jobs.go new file mode 100644 index 0000000000..42300f0b87 --- /dev/null +++ b/pkg/query-service/app/inframetrics/jobs.go @@ -0,0 +1,498 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + 
"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForJobs = "k8s_pod_cpu_utilization" + k8sJobNameAttrKey = "k8s_job_name" + + metricNamesForJobs = map[string]string{ + "desired_successful_pods": "k8s_job_desired_successful_pods", + "active_pods": "k8s_job_active_pods", + "failed_pods": "k8s_job_failed_pods", + "successful_pods": "k8s_job_successful_pods", + } + + jobAttrsToEnrich = []string{ + "k8s_job_name", + "k8s_namespace_name", + "k8s_cluster_name", + } + + queryNamesForJobs = map[string][]string{ + "cpu": {"A"}, + "cpu_request": {"B", "A"}, + "cpu_limit": {"C", "A"}, + "memory": {"D"}, + "memory_request": {"E", "D"}, + "memory_limit": {"F", "D"}, + "restarts": {"G", "A"}, + "desired_pods": {"H"}, + "active_pods": {"I"}, + "failed_pods": {"J"}, + "successful_pods": {"K"}, + } + + builderQueriesForJobs = map[string]*v3.BuilderQuery{ + // desired nodes + "H": { + QueryName: "H", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForJobs["desired_successful_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "H", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // available nodes + "I": { + QueryName: "I", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForJobs["active_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "I", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // failed pods + "J": { + QueryName: "J", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForJobs["failed_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "J", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // successful pods + "K": { + QueryName: "K", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForJobs["successful_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "K", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + } + + jobQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"} +) + +type JobsRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewJobsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *JobsRepo { + 
return &JobsRepo{reader: reader, querierV2: querierV2} +} + +func (d *JobsRepo) GetJobAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForJobs + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. + filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (d *JobsRepo) GetJobAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForJobs + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (d *JobsRepo) getMetadataAttributes(ctx context.Context, req model.JobListRequest) (map[string]map[string]string, error) { + jobAttrs := map[string]map[string]string{} + + for _, key := range jobAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForJobs, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := d.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + jobName := stringData[k8sJobNameAttrKey] + if _, ok := jobAttrs[jobName]; !ok { + jobAttrs[jobName] = map[string]string{} + } + + for _, key := range req.GroupBy { + jobAttrs[jobName][key.Key] = stringData[key.Key] + } + } + + return jobAttrs, nil +} + +func (d *JobsRepo) getTopJobGroups(ctx context.Context, req model.JobListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopJobs(req) + + queryNames := queryNamesForJobs[req.OrderBy.ColumnName] + topJobGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + 
query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) + } + topJobGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, topJobGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topJobGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopJobGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)] + + topJobGroups := []map[string]string{} + for _, series := range paginatedTopJobGroupsSeries { + topJobGroups = append(topJobGroups, series.Labels) + } + allJobGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allJobGroups = append(allJobGroups, series.Labels) + } + + return topJobGroups, allJobGroups, nil +} + +func (d *JobsRepo) GetJobList(ctx context.Context, req model.JobListRequest) (model.JobListResponse, error) { + resp := model.JobListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "desired_pods", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sJobNameAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := WorkloadTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + // add additional queries for jobs + for _, jobQuery := range builderQueriesForJobs { + query.CompositeQuery.BuilderQueries[jobQuery.QueryName] = jobQuery + } + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
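+			// the static builder queries above always initialize Filters, so
+			// the job-name Exists filter appended below never sees a nil set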
+ } + query.GroupBy = req.GroupBy + // make sure we only get records for jobs + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: k8sJobNameAttrKey}, + Operator: v3.FilterOperatorExists, + }) + } + + jobAttrs, err := d.getMetadataAttributes(ctx, req) + if err != nil { + return resp, err + } + + topJobGroups, allJobGroups, err := d.getTopJobGroups(ctx, req, query) + if err != nil { + return resp, err + } + + groupFilters := map[string][]string{} + for _, topJobGroup := range topJobGroups { + for k, v := range topJobGroup { + groupFilters[k] = append(groupFilters[k], v) + } + } + + for groupKey, groupValues := range groupFilters { + hasGroupFilter := false + if req.Filters != nil && len(req.Filters.Items) > 0 { + for _, filter := range req.Filters.Items { + if filter.Key.Key == groupKey { + hasGroupFilter = true + break + } + } + } + + if !hasGroupFilter { + for _, query := range query.CompositeQuery.BuilderQueries { + query.Filters.Items = append(query.Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{Key: groupKey}, + Value: groupValues, + Operator: v3.FilterOperatorIn, + }) + } + } + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, query) + if err != nil { + return resp, err + } + + formattedResponse, err := postprocess.PostProcessResult(queryResponse, query) + if err != nil { + return resp, err + } + + records := []model.JobListRecord{} + + for _, result := range formattedResponse { + for _, row := range result.Table.Rows { + + record := model.JobListRecord{ + JobName: "", + CPUUsage: -1, + CPURequest: -1, + CPULimit: -1, + MemoryUsage: -1, + MemoryRequest: -1, + MemoryLimit: -1, + DesiredSuccessfulPods: -1, + ActivePods: -1, + FailedPods: -1, + SuccessfulPods: -1, + } + + if jobName, ok := row.Data[k8sJobNameAttrKey].(string); ok { + record.JobName = jobName + } + + if cpu, ok := row.Data["A"].(float64); ok { + record.CPUUsage = cpu + } + if cpuRequest, ok := row.Data["B"].(float64); ok { + record.CPURequest = cpuRequest + } + + if cpuLimit, ok := row.Data["C"].(float64); ok { + record.CPULimit = cpuLimit + } + + if memory, ok := row.Data["D"].(float64); ok { + record.MemoryUsage = memory + } + + if memoryRequest, ok := row.Data["E"].(float64); ok { + record.MemoryRequest = memoryRequest + } + + if memoryLimit, ok := row.Data["F"].(float64); ok { + record.MemoryLimit = memoryLimit + } + + if restarts, ok := row.Data["G"].(float64); ok { + record.Restarts = int(restarts) + } + + if desiredSuccessfulPods, ok := row.Data["H"].(float64); ok { + record.DesiredSuccessfulPods = int(desiredSuccessfulPods) + } + + if activePods, ok := row.Data["I"].(float64); ok { + record.ActivePods = int(activePods) + } + + if failedPods, ok := row.Data["J"].(float64); ok { + record.FailedPods = int(failedPods) + } + + if successfulPods, ok := row.Data["K"].(float64); ok { + record.SuccessfulPods = int(successfulPods) + } + + record.Meta = map[string]string{} + if _, ok := jobAttrs[record.JobName]; ok { + record.Meta = jobAttrs[record.JobName] + } + + for k, v := range row.Data { + if slices.Contains(jobQueryNames, k) { + continue + } + if labelValue, ok := v.(string); ok { + record.Meta[k] = labelValue + } + } + + records = append(records, record) + } + } + resp.Total = len(allJobGroups) + resp.Records = records + + return resp, nil +} diff --git a/pkg/query-service/app/inframetrics/statefulsets.go b/pkg/query-service/app/inframetrics/statefulsets.go new file mode 100644 index 0000000000..2d5d6d8313 --- /dev/null +++ 
b/pkg/query-service/app/inframetrics/statefulsets.go @@ -0,0 +1,444 @@ +package inframetrics + +import ( + "context" + "math" + "sort" + + "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/interfaces" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "golang.org/x/exp/slices" +) + +var ( + metricToUseForStatefulSets = "k8s_pod_cpu_utilization" + k8sStatefulSetNameAttrKey = "k8s_statefulset_name" + + metricNamesForStatefulSets = map[string]string{ + "desired_pods": "k8s_statefulset_desired_pods", + "available_pods": "k8s_statefulset_current_pods", + } + + statefulSetAttrsToEnrich = []string{ + "k8s_statefulset_name", + "k8s_namespace_name", + "k8s_cluster_name", + } + + queryNamesForStatefulSets = map[string][]string{ + "cpu": {"A"}, + "cpu_request": {"B", "A"}, + "cpu_limit": {"C", "A"}, + "memory": {"D"}, + "memory_request": {"E", "D"}, + "memory_limit": {"F", "D"}, + "restarts": {"G", "A"}, + "desired_pods": {"H"}, + "available_pods": {"I"}, + } + + builderQueriesForStatefulSets = map[string]*v3.BuilderQuery{ + // desired pods + "H": { + QueryName: "H", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForStatefulSets["desired_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "H", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + // available pods + "I": { + QueryName: "I", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForStatefulSets["available_pods"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "I", + ReduceTo: v3.ReduceToOperatorLast, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + } + + statefulSetQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I"} +) + +type StatefulSetsRepo struct { + reader interfaces.Reader + querierV2 interfaces.Querier +} + +func NewStatefulSetsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *StatefulSetsRepo { + return &StatefulSetsRepo{reader: reader, querierV2: querierV2} +} + +func (d *StatefulSetsRepo) GetStatefulSetAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) { + // TODO(srikanthccv): remove hardcoded metric name and support keys from any pod metric + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForStatefulSets + if req.Limit == 0 { + req.Limit = 50 + } + + attributeKeysResponse, err := d.reader.GetMetricAttributeKeys(ctx, &req) + if err != nil { + return nil, err + } + + // TODO(srikanthccv): only return resource attributes when we have a way to + // distinguish between resource attributes and other attributes. 
+ filteredKeys := []v3.AttributeKey{} + for _, key := range attributeKeysResponse.AttributeKeys { + if slices.Contains(pointAttrsToIgnore, key.Key) { + continue + } + filteredKeys = append(filteredKeys, key) + } + + return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil +} + +func (d *StatefulSetsRepo) GetStatefulSetAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) { + req.DataSource = v3.DataSourceMetrics + req.AggregateAttribute = metricToUseForStatefulSets + if req.Limit == 0 { + req.Limit = 50 + } + + attributeValuesResponse, err := d.reader.GetMetricAttributeValues(ctx, &req) + if err != nil { + return nil, err + } + + return attributeValuesResponse, nil +} + +func (d *StatefulSetsRepo) getMetadataAttributes(ctx context.Context, req model.StatefulSetListRequest) (map[string]map[string]string, error) { + statefulSetAttrs := map[string]map[string]string{} + + for _, key := range statefulSetAttrsToEnrich { + hasKey := false + for _, groupByKey := range req.GroupBy { + if groupByKey.Key == key { + hasKey = true + break + } + } + if !hasKey { + req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key}) + } + } + + mq := v3.BuilderQuery{ + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricToUseForStatefulSets, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + GroupBy: req.GroupBy, + } + + query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq) + if err != nil { + return nil, err + } + + query = localQueryToDistributedQuery(query) + + attrsListResponse, err := d.reader.GetListResultV3(ctx, query) + if err != nil { + return nil, err + } + + for _, row := range attrsListResponse { + stringData := map[string]string{} + for key, value := range row.Data { + if str, ok := value.(string); ok { + stringData[key] = str + } else if strPtr, ok := value.(*string); ok { + stringData[key] = *strPtr + } + } + + statefulSetName := stringData[k8sStatefulSetNameAttrKey] + if _, ok := statefulSetAttrs[statefulSetName]; !ok { + statefulSetAttrs[statefulSetName] = map[string]string{} + } + + for _, key := range req.GroupBy { + statefulSetAttrs[statefulSetName][key.Key] = stringData[key.Key] + } + } + + return statefulSetAttrs, nil +} + +func (d *StatefulSetsRepo) getTopStatefulSetGroups(ctx context.Context, req model.StatefulSetListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) { + step, timeSeriesTableName, samplesTableName := getParamsForTopStatefulSets(req) + + queryNames := queryNamesForStatefulSets[req.OrderBy.ColumnName] + topStatefulSetGroupsQueryRangeParams := &v3.QueryRangeParamsV3{ + Start: req.Start, + End: req.End, + Step: step, + CompositeQuery: &v3.CompositeQuery{ + BuilderQueries: map[string]*v3.BuilderQuery{}, + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + }, + } + + for _, queryName := range queryNames { + query := q.CompositeQuery.BuilderQueries[queryName].Clone() + query.StepInterval = step + query.MetricTableHints = &v3.MetricTableHints{ + TimeSeriesTableName: timeSeriesTableName, + SamplesTableName: samplesTableName, + } + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+ } + topStatefulSetGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query + } + + queryResponse, _, err := d.querierV2.QueryRange(ctx, topStatefulSetGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + formattedResponse, err := postprocess.PostProcessResult(queryResponse, topStatefulSetGroupsQueryRangeParams) + if err != nil { + return nil, nil, err + } + + if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 { + return nil, nil, nil + } + + if req.OrderBy.Order == v3.DirectionDesc { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value + }) + } else { + sort.Slice(formattedResponse[0].Series, func(i, j int) bool { + return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value + }) + } + + limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series))) + + paginatedTopStatefulSetGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)] + + topStatefulSetGroups := []map[string]string{} + for _, series := range paginatedTopStatefulSetGroupsSeries { + topStatefulSetGroups = append(topStatefulSetGroups, series.Labels) + } + allStatefulSetGroups := []map[string]string{} + for _, series := range formattedResponse[0].Series { + allStatefulSetGroups = append(allStatefulSetGroups, series.Labels) + } + + return topStatefulSetGroups, allStatefulSetGroups, nil +} + +func (d *StatefulSetsRepo) GetStatefulSetList(ctx context.Context, req model.StatefulSetListRequest) (model.StatefulSetListResponse, error) { + resp := model.StatefulSetListResponse{} + + if req.Limit == 0 { + req.Limit = 10 + } + + if req.OrderBy == nil { + req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc} + } + + if req.GroupBy == nil { + req.GroupBy = []v3.AttributeKey{{Key: k8sStatefulSetNameAttrKey}} + resp.Type = model.ResponseTypeList + } else { + resp.Type = model.ResponseTypeGroupedList + } + + step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60)) + + query := WorkloadTableListQuery.Clone() + + query.Start = req.Start + query.End = req.End + query.Step = step + + // add additional queries for stateful sets + for _, statefulSetQuery := range builderQueriesForStatefulSets { + query.CompositeQuery.BuilderQueries[statefulSetQuery.QueryName] = statefulSetQuery + } + + for _, query := range query.CompositeQuery.BuilderQueries { + query.StepInterval = step + if req.Filters != nil && len(req.Filters.Items) > 0 { + if query.Filters == nil { + query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}} + } + query.Filters.Items = append(query.Filters.Items, req.Filters.Items...) 
+		}
+		query.GroupBy = req.GroupBy
+		// make sure we only get records for stateful sets
+		query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
+			Key:      v3.AttributeKey{Key: k8sStatefulSetNameAttrKey},
+			Operator: v3.FilterOperatorExists,
+		})
+	}
+
+	statefulSetAttrs, err := d.getMetadataAttributes(ctx, req)
+	if err != nil {
+		return resp, err
+	}
+
+	topStatefulSetGroups, allStatefulSetGroups, err := d.getTopStatefulSetGroups(ctx, req, query)
+	if err != nil {
+		return resp, err
+	}
+
+	groupFilters := map[string][]string{}
+	for _, topStatefulSetGroup := range topStatefulSetGroups {
+		for k, v := range topStatefulSetGroup {
+			groupFilters[k] = append(groupFilters[k], v)
+		}
+	}
+
+	for groupKey, groupValues := range groupFilters {
+		hasGroupFilter := false
+		if req.Filters != nil && len(req.Filters.Items) > 0 {
+			for _, filter := range req.Filters.Items {
+				if filter.Key.Key == groupKey {
+					hasGroupFilter = true
+					break
+				}
+			}
+		}
+
+		if !hasGroupFilter {
+			for _, query := range query.CompositeQuery.BuilderQueries {
+				query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
+					Key:      v3.AttributeKey{Key: groupKey},
+					Value:    groupValues,
+					Operator: v3.FilterOperatorIn,
+				})
+			}
+		}
+	}
+
+	queryResponse, _, err := d.querierV2.QueryRange(ctx, query)
+	if err != nil {
+		return resp, err
+	}
+
+	formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
+	if err != nil {
+		return resp, err
+	}
+
+	records := []model.StatefulSetListRecord{}
+
+	for _, result := range formattedResponse {
+		for _, row := range result.Table.Rows {
+
+			record := model.StatefulSetListRecord{
+				StatefulSetName: "",
+				CPUUsage:        -1,
+				CPURequest:      -1,
+				CPULimit:        -1,
+				MemoryUsage:     -1,
+				MemoryRequest:   -1,
+				MemoryLimit:     -1,
+				DesiredPods:     -1,
+				AvailablePods:   -1,
+			}
+
+			if statefulSetName, ok := row.Data[k8sStatefulSetNameAttrKey].(string); ok {
+				record.StatefulSetName = statefulSetName
+			}
+
+			if cpu, ok := row.Data["A"].(float64); ok {
+				record.CPUUsage = cpu
+			}
+			if cpuRequest, ok := row.Data["B"].(float64); ok {
+				record.CPURequest = cpuRequest
+			}
+
+			if cpuLimit, ok := row.Data["C"].(float64); ok {
+				record.CPULimit = cpuLimit
+			}
+
+			if memory, ok := row.Data["D"].(float64); ok {
+				record.MemoryUsage = memory
+			}
+
+			if memoryRequest, ok := row.Data["E"].(float64); ok {
+				record.MemoryRequest = memoryRequest
+			}
+
+			if memoryLimit, ok := row.Data["F"].(float64); ok {
+				record.MemoryLimit = memoryLimit
+			}
+
+			if restarts, ok := row.Data["G"].(float64); ok {
+				record.Restarts = int(restarts)
+			}
+
+			if desiredPods, ok := row.Data["H"].(float64); ok {
+				record.DesiredPods = int(desiredPods)
+			}
+
+			if availablePods, ok := row.Data["I"].(float64); ok {
+				record.AvailablePods = int(availablePods)
+			}
+
+			record.Meta = map[string]string{}
+			if _, ok := statefulSetAttrs[record.StatefulSetName]; ok {
+				record.Meta = statefulSetAttrs[record.StatefulSetName]
+			}
+
+			for k, v := range row.Data {
+				if slices.Contains(statefulSetQueryNames, k) {
+					continue
+				}
+				if labelValue, ok := v.(string); ok {
+					record.Meta[k] = labelValue
+				}
+			}
+
+			records = append(records, record)
+		}
+	}
+	resp.Total = len(allStatefulSetGroups)
+	resp.Records = records
+
+	return resp, nil
+}
diff --git a/pkg/query-service/app/inframetrics/workload_query.go b/pkg/query-service/app/inframetrics/workload_query.go
new file mode 100644
index 0000000000..6050dba50d
--- /dev/null
+++ b/pkg/query-service/app/inframetrics/workload_query.go
@@ -0,0 +1,166 @@
+package inframetrics
+
+import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+
+var (
+	metricNamesForWorkloads = map[string]string{
+		"cpu":            "k8s_pod_cpu_utilization",
+		"cpu_request":    "k8s_pod_cpu_request_utilization",
+		"cpu_limit":      "k8s_pod_cpu_limit_utilization",
+		"memory":         "k8s_pod_memory_usage",
+		"memory_request": "k8s_pod_memory_request_utilization",
+		"memory_limit":   "k8s_pod_memory_limit_utilization",
+		"restarts":       "k8s_container_restarts",
+	}
+)
+
+var WorkloadTableListQuery = v3.QueryRangeParamsV3{
+	CompositeQuery: &v3.CompositeQuery{
+		BuilderQueries: map[string]*v3.BuilderQuery{
+			// pod cpu utilization
+			"A": {
+				QueryName:  "A",
+				DataSource: v3.DataSourceMetrics,
+				AggregateAttribute: v3.AttributeKey{
+					Key:      metricNamesForWorkloads["cpu"],
+					DataType: v3.AttributeKeyDataTypeFloat64,
+				},
+				Temporality: v3.Unspecified,
+				Filters: &v3.FilterSet{
+					Operator: "AND",
+					Items:    []v3.FilterItem{},
+				},
+				GroupBy:          []v3.AttributeKey{},
+				Expression:       "A",
+				ReduceTo:         v3.ReduceToOperatorAvg,
+				TimeAggregation:  v3.TimeAggregationAvg,
+				SpaceAggregation: v3.SpaceAggregationSum,
+				Disabled:         false,
+			},
+			// pod cpu request utilization
+			"B": {
+				QueryName:  "B",
+				DataSource: v3.DataSourceMetrics,
+				AggregateAttribute: v3.AttributeKey{
+					Key:      metricNamesForWorkloads["cpu_request"],
+					DataType: v3.AttributeKeyDataTypeFloat64,
+				},
+				Temporality: v3.Unspecified,
+				Filters: &v3.FilterSet{
+					Operator: "AND",
+					Items:    []v3.FilterItem{},
+				},
+				GroupBy:          []v3.AttributeKey{},
+				Expression:       "B",
+				ReduceTo:         v3.ReduceToOperatorAvg,
+				TimeAggregation:  v3.TimeAggregationAvg,
+				SpaceAggregation: v3.SpaceAggregationSum,
+				Disabled:         false,
+			},
+			// pod cpu limit utilization
+			"C": {
+				QueryName:  "C",
+				DataSource: v3.DataSourceMetrics,
+				AggregateAttribute: v3.AttributeKey{
+					Key:      metricNamesForWorkloads["cpu_limit"],
+					DataType: v3.AttributeKeyDataTypeFloat64,
+				},
+				Temporality: v3.Unspecified,
+				Filters: &v3.FilterSet{
+					Operator: "AND",
+					Items:    []v3.FilterItem{},
+				},
+				GroupBy:          []v3.AttributeKey{},
+				Expression:       "C",
+				ReduceTo:         v3.ReduceToOperatorAvg,
+				TimeAggregation:  v3.TimeAggregationAvg,
+				SpaceAggregation: v3.SpaceAggregationSum,
+				Disabled:         false,
+			},
+			// pod memory utilization
+			"D": {
+				QueryName:  "D",
+				DataSource: v3.DataSourceMetrics,
+				AggregateAttribute: v3.AttributeKey{
+					Key:      metricNamesForWorkloads["memory"],
+					DataType: v3.AttributeKeyDataTypeFloat64,
+				},
+				Temporality: v3.Unspecified,
+				Filters: &v3.FilterSet{
+					Operator: "AND",
+					Items:    []v3.FilterItem{},
+				},
+				GroupBy:          []v3.AttributeKey{},
+				Expression:       "D",
+				ReduceTo:         v3.ReduceToOperatorAvg,
+				TimeAggregation:  v3.TimeAggregationAvg,
+				SpaceAggregation: v3.SpaceAggregationSum,
+				Disabled:         false,
+			},
+			// pod memory request utilization
+			"E": {
+				QueryName:  "E",
+				DataSource: v3.DataSourceMetrics,
+				AggregateAttribute: v3.AttributeKey{
+					Key:      metricNamesForWorkloads["memory_request"],
+					DataType: v3.AttributeKeyDataTypeFloat64,
+				},
+				Temporality: v3.Unspecified,
+				Filters: &v3.FilterSet{
+					Operator: "AND",
+					Items:    []v3.FilterItem{},
+				},
+				GroupBy:          []v3.AttributeKey{},
+				Expression:       "E",
+				ReduceTo:         v3.ReduceToOperatorAvg,
+				TimeAggregation:  v3.TimeAggregationAvg,
+				SpaceAggregation: v3.SpaceAggregationSum,
+				Disabled:         false,
+			},
+			// pod memory limit utilization
+			"F": {
+				QueryName:  "F",
+				DataSource: v3.DataSourceMetrics,
+				AggregateAttribute: v3.AttributeKey{
+					Key:      metricNamesForWorkloads["memory_limit"],
+					DataType: v3.AttributeKeyDataTypeFloat64,
+				},
+				Temporality: v3.Unspecified,
+				Filters: &v3.FilterSet{
+					Operator: "AND",
+					Items: 
[]v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "F", + ReduceTo: v3.ReduceToOperatorAvg, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Disabled: false, + }, + "G": { + QueryName: "G", + DataSource: v3.DataSourceMetrics, + AggregateAttribute: v3.AttributeKey{ + Key: metricNamesForWorkloads["restarts"], + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Temporality: v3.Unspecified, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + GroupBy: []v3.AttributeKey{}, + Expression: "G", + ReduceTo: v3.ReduceToOperatorSum, + TimeAggregation: v3.TimeAggregationAnyLast, + SpaceAggregation: v3.SpaceAggregationMax, + Functions: []v3.Function{{Name: v3.FunctionNameRunningDiff}}, + Disabled: false, + }, + }, + PanelType: v3.PanelTypeTable, + QueryType: v3.QueryTypeBuilder, + }, + Version: "v4", + FormatForWeb: true, +} diff --git a/pkg/query-service/model/infra.go b/pkg/query-service/model/infra.go index 00cb48ee77..0c23a0642b 100644 --- a/pkg/query-service/model/infra.go +++ b/pkg/query-service/model/infra.go @@ -173,3 +173,125 @@ type ClusterListRecord struct { MemoryAllocatable float64 `json:"memoryAllocatable"` Meta map[string]string `json:"meta"` } + +type DeploymentListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type DeploymentListResponse struct { + Type ResponseType `json:"type"` + Records []DeploymentListRecord `json:"records"` + Total int `json:"total"` +} + +type DeploymentListRecord struct { + DeploymentName string `json:"deploymentName"` + CPUUsage float64 `json:"cpuUsage"` + MemoryUsage float64 `json:"memoryUsage"` + DesiredPods int `json:"desiredPods"` + AvailablePods int `json:"availablePods"` + CPURequest float64 `json:"cpuRequest"` + MemoryRequest float64 `json:"memoryRequest"` + CPULimit float64 `json:"cpuLimit"` + MemoryLimit float64 `json:"memoryLimit"` + Restarts int `json:"restarts"` + Meta map[string]string `json:"meta"` +} + +type DaemonSetListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type DaemonSetListResponse struct { + Type ResponseType `json:"type"` + Records []DaemonSetListRecord `json:"records"` + Total int `json:"total"` +} + +type DaemonSetListRecord struct { + DaemonSetName string `json:"daemonSetName"` + CPUUsage float64 `json:"cpuUsage"` + MemoryUsage float64 `json:"memoryUsage"` + CPURequest float64 `json:"cpuRequest"` + MemoryRequest float64 `json:"memoryRequest"` + CPULimit float64 `json:"cpuLimit"` + MemoryLimit float64 `json:"memoryLimit"` + Restarts int `json:"restarts"` + DesiredNodes int `json:"desiredNodes"` + AvailableNodes int `json:"availableNodes"` + Meta map[string]string `json:"meta"` +} + +type StatefulSetListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type StatefulSetListResponse struct { + Type ResponseType 
`json:"type"` + Records []StatefulSetListRecord `json:"records"` + Total int `json:"total"` +} + +type StatefulSetListRecord struct { + StatefulSetName string `json:"statefulSetName"` + CPUUsage float64 `json:"cpuUsage"` + MemoryUsage float64 `json:"memoryUsage"` + CPURequest float64 `json:"cpuRequest"` + MemoryRequest float64 `json:"memoryRequest"` + CPULimit float64 `json:"cpuLimit"` + MemoryLimit float64 `json:"memoryLimit"` + Restarts int `json:"restarts"` + DesiredPods int `json:"desiredPods"` + AvailablePods int `json:"availablePods"` + Meta map[string]string `json:"meta"` +} + +type JobListRequest struct { + Start int64 `json:"start"` // epoch time in ms + End int64 `json:"end"` // epoch time in ms + Filters *v3.FilterSet `json:"filters"` + GroupBy []v3.AttributeKey `json:"groupBy"` + OrderBy *v3.OrderBy `json:"orderBy"` + Offset int `json:"offset"` + Limit int `json:"limit"` +} + +type JobListResponse struct { + Type ResponseType `json:"type"` + Records []JobListRecord `json:"records"` + Total int `json:"total"` +} + +type JobListRecord struct { + JobName string `json:"jobName"` + CPUUsage float64 `json:"cpuUsage"` + MemoryUsage float64 `json:"memoryUsage"` + CPURequest float64 `json:"cpuRequest"` + MemoryRequest float64 `json:"memoryRequest"` + CPULimit float64 `json:"cpuLimit"` + MemoryLimit float64 `json:"memoryLimit"` + Restarts int `json:"restarts"` + DesiredSuccessfulPods int `json:"desiredSuccessfulPods"` + ActivePods int `json:"activePods"` + FailedPods int `json:"failedPods"` + SuccessfulPods int `json:"successfulPods"` + Meta map[string]string `json:"meta"` +} From 85ac21f2533a2fe566044307b720554db5620013 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 12 Nov 2024 22:52:42 +0530 Subject: [PATCH 26/30] fix: update request payload for span metrics queries (#6323) --- .../MetricsPageQueries/DBCallQueries.ts | 5 +++ .../MetricsPageQueries/ExternalQueries.ts | 5 +++ .../MetricsPageQueriesFactory.ts | 4 +++ .../MetricsPageQueries/OverviewQueries.ts | 18 +++++++++++ .../MetricsApplication/Tabs/types.ts | 2 ++ .../container/MetricsApplication/constant.ts | 32 ++++++++++++++++--- 6 files changed, 61 insertions(+), 5 deletions(-) diff --git a/frontend/src/container/MetricsApplication/MetricsPageQueries/DBCallQueries.ts b/frontend/src/container/MetricsApplication/MetricsPageQueries/DBCallQueries.ts index 91343b7b06..f3124f0ad1 100644 --- a/frontend/src/container/MetricsApplication/MetricsPageQueries/DBCallQueries.ts +++ b/frontend/src/container/MetricsApplication/MetricsPageQueries/DBCallQueries.ts @@ -58,12 +58,17 @@ export const databaseCallsRPS = ({ const legends = [legend]; const dataSource = DataSource.METRICS; + const timeAggregateOperators = [MetricAggregateOperator.RATE]; + const spaceAggregateOperators = [MetricAggregateOperator.SUM]; + return getQueryBuilderQueries({ autocompleteData, groupBy, legends, filterItems, dataSource, + timeAggregateOperators, + spaceAggregateOperators, }); }; diff --git a/frontend/src/container/MetricsApplication/MetricsPageQueries/ExternalQueries.ts b/frontend/src/container/MetricsApplication/MetricsPageQueries/ExternalQueries.ts index a2c87f0874..6a7ab65906 100644 --- a/frontend/src/container/MetricsApplication/MetricsPageQueries/ExternalQueries.ts +++ b/frontend/src/container/MetricsApplication/MetricsPageQueries/ExternalQueries.ts @@ -213,12 +213,17 @@ export const externalCallRpsByAddress = ({ const legends = [legend]; const dataSource = DataSource.METRICS; + const timeAggregateOperators = [MetricAggregateOperator.RATE]; + const 
spaceAggregateOperators = [MetricAggregateOperator.SUM]; + return getQueryBuilderQueries({ autocompleteData, groupBy, legends, filterItems, dataSource, + timeAggregateOperators, + spaceAggregateOperators, }); }; diff --git a/frontend/src/container/MetricsApplication/MetricsPageQueries/MetricsPageQueriesFactory.ts b/frontend/src/container/MetricsApplication/MetricsPageQueries/MetricsPageQueriesFactory.ts index 71a16fcc07..e8b0fcc807 100644 --- a/frontend/src/container/MetricsApplication/MetricsPageQueries/MetricsPageQueriesFactory.ts +++ b/frontend/src/container/MetricsApplication/MetricsPageQueries/MetricsPageQueriesFactory.ts @@ -25,6 +25,8 @@ export const getQueryBuilderQueries = ({ aggregateOperator, dataSource, queryNameAndExpression, + timeAggregateOperators, + spaceAggregateOperators, }: BuilderQueriesProps): QueryBuilderData => ({ queryFormulas: [], queryData: autocompleteData.map((item, index) => { @@ -50,6 +52,8 @@ export const getQueryBuilderQueries = ({ op: 'AND', }, reduceTo: 'avg', + spaceAggregation: spaceAggregateOperators[index], + timeAggregation: timeAggregateOperators[index], dataSource, }; diff --git a/frontend/src/container/MetricsApplication/MetricsPageQueries/OverviewQueries.ts b/frontend/src/container/MetricsApplication/MetricsPageQueries/OverviewQueries.ts index d27bfc01be..0d2c05a349 100644 --- a/frontend/src/container/MetricsApplication/MetricsPageQueries/OverviewQueries.ts +++ b/frontend/src/container/MetricsApplication/MetricsPageQueries/OverviewQueries.ts @@ -83,6 +83,17 @@ export const latency = ({ const dataSource = isSpanMetricEnable ? DataSource.METRICS : DataSource.TRACES; const queryNameAndExpression = QUERYNAME_AND_EXPRESSION; + const timeAggregateOperators = [ + MetricAggregateOperator.EMPTY, + MetricAggregateOperator.EMPTY, + MetricAggregateOperator.EMPTY, + ]; + const spaceAggregateOperators = [ + MetricAggregateOperator.P50, + MetricAggregateOperator.P90, + MetricAggregateOperator.P99, + ]; + return getQueryBuilderQueries({ autocompleteData, legends, @@ -90,6 +101,8 @@ export const latency = ({ aggregateOperator, dataSource, queryNameAndExpression, + timeAggregateOperators, + spaceAggregateOperators, }); }; @@ -510,11 +523,16 @@ export const operationPerSec = ({ const legends = OPERATION_LEGENDS; const dataSource = DataSource.METRICS; + const timeAggregateOperators = [MetricAggregateOperator.RATE]; + const spaceAggregateOperators = [MetricAggregateOperator.SUM]; + return getQueryBuilderQueries({ autocompleteData, legends, filterItems, dataSource, + timeAggregateOperators, + spaceAggregateOperators, }); }; diff --git a/frontend/src/container/MetricsApplication/Tabs/types.ts b/frontend/src/container/MetricsApplication/Tabs/types.ts index 9b45bd5492..4dcb3bc01e 100644 --- a/frontend/src/container/MetricsApplication/Tabs/types.ts +++ b/frontend/src/container/MetricsApplication/Tabs/types.ts @@ -29,6 +29,8 @@ export interface BuilderQueriesProps { aggregateOperator?: string[]; dataSource: DataSource; queryNameAndExpression?: string[]; + timeAggregateOperators: MetricAggregateOperator[]; + spaceAggregateOperators: MetricAggregateOperator[]; } export interface BuilderQuerieswithFormulaProps { diff --git a/frontend/src/container/MetricsApplication/constant.ts b/frontend/src/container/MetricsApplication/constant.ts index decd31534b..75853cc8ea 100644 --- a/frontend/src/container/MetricsApplication/constant.ts +++ b/frontend/src/container/MetricsApplication/constant.ts @@ -2,18 +2,27 @@ import { DownloadOptions } from 'container/Download/Download.types'; 
 import { MenuItemKeys } from 'container/GridCardLayout/WidgetHeader/contants';
+import {
+	MetricAggregateOperator,
+	TracesAggregatorOperator,
+} from 'types/common/queryBuilder';
 
 export const legend = {
 	address: '{{address}}',
 };
 
 export const QUERYNAME_AND_EXPRESSION = ['A', 'B', 'C'];
-export const LATENCY_AGGREGATEOPERATOR = ['p50', 'p90', 'p99'];
+export const LATENCY_AGGREGATEOPERATOR = [
+	TracesAggregatorOperator.P50,
+	TracesAggregatorOperator.P90,
+	TracesAggregatorOperator.P99,
+];
 export const LATENCY_AGGREGATEOPERATOR_SPAN_METRICS = [
-	'hist_quantile_50',
-	'hist_quantile_90',
-	'hist_quantile_99',
+	MetricAggregateOperator.P50,
+	MetricAggregateOperator.P90,
+	MetricAggregateOperator.P99,
 ];
+
 export const OPERATION_LEGENDS = ['Operations'];
 
 export const MENU_ITEMS = [MenuItemKeys.View, MenuItemKeys.CreateAlerts];
@@ -21,8 +30,21 @@ export const MENU_ITEMS = [MenuItemKeys.View, MenuItemKeys.CreateAlerts];
 export enum FORMULA {
 	ERROR_PERCENTAGE = 'A*100/B',
 	DATABASE_CALLS_AVG_DURATION = 'A/B',
+	// The apdex formula is (satisfied_count + 0.5 * tolerating_count + 0 * frustrating_count) / total_count
+	// Here satisfied_count is B, tolerating_count is C, and total_count is A
+	// But why do we have (B+C)/2 instead of B + C/2?
+	// The way we issue the query is latency <= threshold, which means we over count, i.e.
+	// query B => durationNano <= 500ms
+	// query C => durationNano <= 2000ms
+	// Since <= 2000ms includes <= 500ms, we over count; to correct this we subtract B/2,
+	// so the full expression is (B + C/2) - B/2 = (B+C)/2
 	APDEX_TRACES = '((B + C)/2)/A',
-	APDEX_DELTA_SPAN_METRICS = '((B + C)/2)/A',
+	// Does the same not apply to delta span metrics?
+	// No, because delta metrics store the counts just for the current bucket,
+	// so we don't need to subtract anything
+	APDEX_DELTA_SPAN_METRICS = '(B + C)/A',
+	// Cumulative span metrics store the counts for all buckets,
+	// so we need to subtract B/2 to correct the over counting
 	APDEX_CUMULATIVE_SPAN_METRICS = '((B + C)/2)/A',
 }

From 01fda5195936ad3387c6b348a91b3dec006699df Mon Sep 17 00:00:00 2001
From: Vikrant Gupta
Date: Wed, 13 Nov 2024 00:25:00 +0530
Subject: [PATCH 27/30] chore: return proper http codes on unique constraint error (#6428)

---
 ee/query-service/license/db.go        | 13 ++++++++++---
 ee/query-service/license/manager.go   |  2 +-
 pkg/query-service/app/http_handler.go |  2 ++
 3 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/ee/query-service/license/db.go b/ee/query-service/license/db.go
index 12df69233d..eae48e266d 100644
--- a/ee/query-service/license/db.go
+++ b/ee/query-service/license/db.go
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/jmoiron/sqlx"
+	"github.com/mattn/go-sqlite3"
 
 	"go.signoz.io/signoz/ee/query-service/license/sqlite"
 	"go.signoz.io/signoz/ee/query-service/model"
@@ -274,14 +275,14 @@ func (r *Repo) InitFeatures(req basemodel.FeatureSet) error {
 }
 
 // InsertLicenseV3 inserts a new license v3 in db
-func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) error {
+func (r *Repo) InsertLicenseV3(ctx context.Context, l *model.LicenseV3) *model.ApiError {
 	query := `INSERT INTO licenses_v3 (id, key, data) VALUES ($1, $2, $3)`
 
 	// license is the entity of zeus, so we store the entire license here without defining a schema
 	licenseData, err := json.Marshal(l.Data)
 	if err != nil {
-		return fmt.Errorf("insert license failed: license marshal error")
+		return &model.ApiError{Typ: basemodel.ErrorBadData, Err: err}
 	}
 
 	_, err = r.db.ExecContext(ctx,
@@ -292,8 +293,14 @@ func (r *Repo) 
InsertLicenseV3(ctx context.Context, l *model.LicenseV3) error { ) if err != nil { + if sqliteErr, ok := err.(sqlite3.Error); ok { + if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique { + zap.L().Error("error in inserting license data: ", zap.Error(sqliteErr)) + return &model.ApiError{Typ: model.ErrorConflict, Err: sqliteErr} + } + } zap.L().Error("error in inserting license data: ", zap.Error(err)) - return fmt.Errorf("failed to insert license in db: %v", err) + return &model.ApiError{Typ: basemodel.ErrorExec, Err: err} } return nil diff --git a/ee/query-service/license/manager.go b/ee/query-service/license/manager.go index 13b869da8c..6dcc704e3a 100644 --- a/ee/query-service/license/manager.go +++ b/ee/query-service/license/manager.go @@ -463,7 +463,7 @@ func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseRe err := lm.repo.InsertLicenseV3(ctx, license) if err != nil { zap.L().Error("failed to activate license", zap.Error(err)) - return nil, model.InternalError(err) + return nil, err } // license is valid, activate it diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 0f6d351af7..6586e21d98 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -332,6 +332,8 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa code = http.StatusUnauthorized case model.ErrorForbidden: code = http.StatusForbidden + case model.ErrorConflict: + code = http.StatusConflict default: code = http.StatusInternalServerError } From 323da3494bfd6abb0606674ff78c5c7289dd06de Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Wed, 13 Nov 2024 11:47:56 +0530 Subject: [PATCH 28/30] chore: add experimental rate/increase calc (#6432) --- .../app/metrics/v4/cumulative/timeseries.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/query-service/app/metrics/v4/cumulative/timeseries.go b/pkg/query-service/app/metrics/v4/cumulative/timeseries.go index cf5020b51d..4089cb64a0 100644 --- a/pkg/query-service/app/metrics/v4/cumulative/timeseries.go +++ b/pkg/query-service/app/metrics/v4/cumulative/timeseries.go @@ -2,6 +2,7 @@ package cumulative import ( "fmt" + "os" "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers" "go.signoz.io/signoz/pkg/query-service/constants" @@ -40,6 +41,9 @@ import ( const ( rateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window)))` increaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window)))` + + experimentalRateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))` + experimentalIncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window))` ) // 
prepareTimeAggregationSubQueryTimeSeries prepares the sub-query to be used for temporal aggregation @@ -151,14 +155,22 @@ func prepareTimeAggregationSubQuery(start, end, step int64, mq *v3.BuilderQuery) subQuery = fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) case v3.TimeAggregationRate: innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) + rateExp := rateWithoutNegative + if _, ok := os.LookupEnv("EXPERIMENTAL_RATE_WITHOUT_NEGATIVE"); ok { + rateExp = fmt.Sprintf(experimentalRateWithoutNegative, start) + } rateQueryTmpl := - "SELECT %s ts, " + rateWithoutNegative + + "SELECT %s ts, " + rateExp + " as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)" subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery) case v3.TimeAggregationIncrease: innerSubQuery := fmt.Sprintf(queryTmpl, selectLabelsAny, step, op, timeSeriesSubQuery) + increaseExp := increaseWithoutNegative + if _, ok := os.LookupEnv("EXPERIMENTAL_INCREASE_WITHOUT_NEGATIVE"); ok { + increaseExp = experimentalIncreaseWithoutNegative + } rateQueryTmpl := - "SELECT %s ts, " + increaseWithoutNegative + + "SELECT %s ts, " + increaseExp + " as per_series_value FROM (%s) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)" subQuery = fmt.Sprintf(rateQueryTmpl, selectLabels, innerSubQuery) } From 969ac5028e71ea2445d8beef11d39278acb48df5 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Wed, 13 Nov 2024 16:11:28 +0530 Subject: [PATCH 29/30] chore: add v2 metric writer to pipelines (#6345) --- .../otel-collector-config.yaml | 34 ++++-------------- .../otel-collector-config.yaml | 36 ++++--------------- 2 files changed, 14 insertions(+), 56 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml index f10d0bb848..8c0b30df61 100644 --- a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml @@ -66,28 +66,6 @@ processors: # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure. 
timeout: 2s - signozspanmetrics/cumulative: - metrics_exporter: clickhousemetricswrite - latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] - dimensions_cache_size: 100000 - dimensions: - - name: service.namespace - default: default - - name: deployment.environment - default: default - # This is added to ensure the uniqueness of the timeseries - # Otherwise, identical timeseries produced by multiple replicas of - # collectors result in incorrect APM metrics - - name: signoz.collector.id - - name: service.version - - name: browser.platform - - name: browser.mobile - - name: k8s.cluster.name - - name: k8s.node.name - - name: k8s.namespace.name - - name: host.name - - name: host.type - - name: container.name # memory_limiter: # # 80% of maximum memory up to 2G # limit_mib: 1500 @@ -138,6 +116,8 @@ exporters: enabled: true clickhousemetricswrite/prometheus: endpoint: tcp://clickhouse:9000/signoz_metrics + clickhousemetricswritev2: + dsn: tcp://clickhouse:9000/signoz_metrics # logging: {} clickhouselogsexporter: dsn: tcp://clickhouse:9000/signoz_logs @@ -161,20 +141,20 @@ service: pipelines: traces: receivers: [jaeger, otlp] - processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch] + processors: [signozspanmetrics/delta, batch] exporters: [clickhousetraces] metrics: receivers: [otlp] processors: [batch] - exporters: [clickhousemetricswrite] - metrics/generic: + exporters: [clickhousemetricswrite, clickhousemetricswritev2] + metrics/hostmetrics: receivers: [hostmetrics] processors: [resourcedetection, batch] - exporters: [clickhousemetricswrite] + exporters: [clickhousemetricswrite, clickhousemetricswritev2] metrics/prometheus: receivers: [prometheus] processors: [batch] - exporters: [clickhousemetricswrite/prometheus] + exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2] logs: receivers: [otlp, tcplog/docker] processors: [batch] diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml index 8fef0af791..cba7756d8e 100644 --- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml +++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml @@ -57,35 +57,11 @@ receivers: labels: job_name: otel-collector - processors: batch: send_batch_size: 10000 send_batch_max_size: 11000 timeout: 10s - signozspanmetrics/cumulative: - metrics_exporter: clickhousemetricswrite - metrics_flush_interval: 60s - latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] - dimensions_cache_size: 100000 - dimensions: - - name: service.namespace - default: default - - name: deployment.environment - default: default - # This is added to ensure the uniqueness of the timeseries - # Otherwise, identical timeseries produced by multiple replicas of - # collectors result in incorrect APM metrics - - name: signoz.collector.id - - name: service.version - - name: browser.platform - - name: browser.mobile - - name: k8s.cluster.name - - name: k8s.node.name - - name: k8s.namespace.name - - name: host.name - - name: host.type - - name: container.name # memory_limiter: # # 80% of maximum memory up to 2G # limit_mib: 1500 @@ -149,6 +125,8 @@ exporters: enabled: true clickhousemetricswrite/prometheus: endpoint: tcp://clickhouse:9000/signoz_metrics + clickhousemetricswritev2: + dsn: tcp://clickhouse:9000/signoz_metrics clickhouselogsexporter: dsn: 
tcp://clickhouse:9000/signoz_logs timeout: 10s @@ -168,20 +146,20 @@ service: pipelines: traces: receivers: [jaeger, otlp] - processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch] + processors: [signozspanmetrics/delta, batch] exporters: [clickhousetraces] metrics: receivers: [otlp] processors: [batch] - exporters: [clickhousemetricswrite] - metrics/generic: + exporters: [clickhousemetricswrite, clickhousemetricswritev2] + metrics/hostmetrics: receivers: [hostmetrics] processors: [resourcedetection, batch] - exporters: [clickhousemetricswrite] + exporters: [clickhousemetricswrite, clickhousemetricswritev2] metrics/prometheus: receivers: [prometheus] processors: [batch] - exporters: [clickhousemetricswrite/prometheus] + exporters: [clickhousemetricswrite/prometheus, clickhousemetricswritev2] logs: receivers: [otlp, tcplog/docker] processors: [batch] From 2faa0c6d4f38ca35b800eed6061be720bfc74009 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Wed, 13 Nov 2024 20:30:01 +0530 Subject: [PATCH 30/30] feat: trace V4 QB (#6407) * feat: trace V4 QB * fix: update get column name and remove id * fix: handle contains and update tests * fix: remove unwanted step interval calculation * fix: add test cases * fix: add tests for static columns in QB * fix: add more order by tests * fix: update order by logic --- .../app/logs/v4/query_builder.go | 2 - .../app/traces/v3/query_builder.go | 48 +- .../app/traces/v4/query_builder.go | 414 ++++++++++ .../app/traces/v4/query_builder_test.go | 708 ++++++++++++++++++ pkg/query-service/constants/constants.go | 146 ++++ 5 files changed, 1292 insertions(+), 26 deletions(-) create mode 100644 pkg/query-service/app/traces/v4/query_builder.go create mode 100644 pkg/query-service/app/traces/v4/query_builder_test.go diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go index 42cb19befc..a808a3b57d 100644 --- a/pkg/query-service/app/logs/v4/query_builder.go +++ b/pkg/query-service/app/logs/v4/query_builder.go @@ -436,8 +436,6 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build } else if panelType == v3.PanelTypeTable { queryTmplPrefix = "SELECT" - // step or aggregate interval is whole time period in case of table panel - step = (utils.GetEpochNanoSecs(end) - utils.GetEpochNanoSecs(start)) / NANOSECOND } else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue { // Select the aggregate value for interval queryTmplPrefix = diff --git a/pkg/query-service/app/traces/v3/query_builder.go b/pkg/query-service/app/traces/v3/query_builder.go index ad5b69229b..f31088e509 100644 --- a/pkg/query-service/app/traces/v3/query_builder.go +++ b/pkg/query-service/app/traces/v3/query_builder.go @@ -10,7 +10,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/utils" ) -var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{ +var AggregateOperatorToPercentile = map[v3.AggregateOperator]float64{ v3.AggregateOperatorP05: 0.05, v3.AggregateOperatorP10: 0.10, v3.AggregateOperatorP20: 0.20, @@ -22,7 +22,7 @@ var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{ v3.AggregateOperatorP99: 0.99, } -var aggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{ +var AggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{ v3.AggregateOperatorAvg: "avg", v3.AggregateOperatorMax: "max", v3.AggregateOperatorMin: "min", @@ -109,7 +109,7 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri return selectLabels } 
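Exporting these v3 helpers (AggregateOperatorToPercentile and AggregateOperatorToSQLFunc above, and GetSelectKeys, Having, ReduceToQuery, AddLimitToQuery, and AddOffsetToQuery below) is what lets the new traces/v4 package build its queries on top of the v3 primitives. For reference, a compact sketch of how the percentile map becomes a ClickHouse aggregation expression; the package main wrapper and local variable names are editorial, but the Sprintf shape mirrors the builder code in this patch:

    package main

    import "fmt"

    func main() {
    	// mirrors AggregateOperatorToPercentile: operator -> quantile level
    	percentiles := map[string]float64{"p50": 0.50, "p90": 0.90, "p99": 0.99}
    	aggregationKey := "durationNano"
    	// the builder emits quantile(<level>)(<column>) for percentile operators
    	expr := fmt.Sprintf("quantile(%v)(%s)", percentiles["p99"], aggregationKey)
    	fmt.Println(expr) // quantile(0.99)(durationNano)
    }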
-func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string { +func GetSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string { var selectLabels []string if aggregatorOperator == v3.AggregateOperatorNoOp { return "" @@ -173,7 +173,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) { conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal)) case v3.FilterOperatorExists, v3.FilterOperatorNotExists: if item.Key.IsColumn { - subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator) + subQuery, err := ExistsSubQueryForFixedColumn(item.Key, item.Operator) if err != nil { return "", err } @@ -199,7 +199,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) { return queryString, nil } -func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) { +func ExistsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) { if key.DataType == v3.AttributeKeyDataTypeString { if op == v3.FilterOperatorExists { return fmt.Sprintf("%s %s ''", key.Key, tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil @@ -244,7 +244,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy) - having := having(mq.Having) + having := Having(mq.Having) if having != "" { having = " having " + having } @@ -272,7 +272,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan // we don't need value for first query if options.GraphLimitQtype == constants.FirstQueryGraphLimit { - queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")" + queryTmpl = "SELECT " + GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")" } emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy) @@ -281,7 +281,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan } filterSubQuery += emptyValuesInGroupByFilter - groupBy := groupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...) + groupBy := GroupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...) 
if groupBy != "" { groupBy = " group by " + groupBy } @@ -291,7 +291,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan } if options.GraphLimitQtype == constants.SecondQueryGraphLimit { - filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)" + filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)" } aggregationKey := "" @@ -311,7 +311,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan rate = rate / 60.0 } - op := fmt.Sprintf("%s(%s)/%f", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate) + op := fmt.Sprintf("%s(%s)/%f", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate) query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) return query, nil case @@ -324,17 +324,17 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan v3.AggregateOperatorP90, v3.AggregateOperatorP95, v3.AggregateOperatorP99: - op := fmt.Sprintf("quantile(%v)(%s)", aggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey) + op := fmt.Sprintf("quantile(%v)(%s)", AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey) query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) return query, nil case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax: - op := fmt.Sprintf("%s(%s)", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey) + op := fmt.Sprintf("%s(%s)", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey) query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) return query, nil case v3.AggregateOperatorCount: if mq.AggregateAttribute.Key != "" { if mq.AggregateAttribute.IsColumn { - subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists) + subQuery, err := ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists) if err == nil { filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery) } @@ -354,9 +354,9 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, pan var query string if panelType == v3.PanelTypeTrace { withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME, spanIndexTableTimeFilter, filterSubQuery) - withSubQuery = addLimitToQuery(withSubQuery, mq.Limit) + withSubQuery = AddLimitToQuery(withSubQuery, mq.Limit) if mq.Offset != 0 { - withSubQuery = addOffsetToQuery(withSubQuery, mq.Offset) + withSubQuery = AddOffsetToQuery(withSubQuery, mq.Offset) } // query = withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_TABLENAME, spanIndexTableTimeFilter) @@ -403,7 +403,7 @@ func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) str return strings.Join(tags, ",") } -func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype 
string, tags ...v3.AttributeKey) string { +func GroupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string { groupTags := []string{} for _, tag := range tags { groupTags = append(groupTags, fmt.Sprintf("`%s`", tag.Key)) @@ -456,7 +456,7 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags [] return str } -func having(items []v3.Having) string { +func Having(items []v3.Having) string { // aggregate something and filter on that aggregate var having []string for _, item := range items { @@ -465,7 +465,7 @@ func having(items []v3.Having) string { return strings.Join(having, " AND ") } -func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) { +func ReduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) { var groupBy string switch reduceTo { @@ -485,14 +485,14 @@ func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOpe return query, nil } -func addLimitToQuery(query string, limit uint64) string { +func AddLimitToQuery(query string, limit uint64) string { if limit == 0 { limit = 100 } return fmt.Sprintf("%s LIMIT %d", query, limit) } -func addOffsetToQuery(query string, offset uint64) string { +func AddOffsetToQuery(query string, offset uint64) string { return fmt.Sprintf("%s OFFSET %d", query, offset) } @@ -513,7 +513,7 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder if err != nil { return "", err } - query = addLimitToQuery(query, mq.Limit) + query = AddLimitToQuery(query, mq.Limit) return query, nil } else if options.GraphLimitQtype == constants.SecondQueryGraphLimit { @@ -529,13 +529,13 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder return "", err } if panelType == v3.PanelTypeValue { - query, err = reduceToQuery(query, mq.ReduceTo, mq.AggregateOperator) + query, err = ReduceToQuery(query, mq.ReduceTo, mq.AggregateOperator) } if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable { - query = addLimitToQuery(query, mq.Limit) + query = AddLimitToQuery(query, mq.Limit) if mq.Offset != 0 { - query = addOffsetToQuery(query, mq.Offset) + query = AddOffsetToQuery(query, mq.Offset) } } return query, err diff --git a/pkg/query-service/app/traces/v4/query_builder.go b/pkg/query-service/app/traces/v4/query_builder.go new file mode 100644 index 0000000000..4a0f8f3ef9 --- /dev/null +++ b/pkg/query-service/app/traces/v4/query_builder.go @@ -0,0 +1,414 @@ +package v4 + +import ( + "fmt" + "strings" + + "go.signoz.io/signoz/pkg/query-service/app/resource" + tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" + "go.signoz.io/signoz/pkg/query-service/constants" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/utils" +) + +const NANOSECOND = 1000000000 + +var tracesOperatorMappingV3 = map[v3.FilterOperator]string{ + v3.FilterOperatorIn: "IN", + v3.FilterOperatorNotIn: "NOT IN", + v3.FilterOperatorEqual: "=", + v3.FilterOperatorNotEqual: "!=", + v3.FilterOperatorLessThan: "<", + v3.FilterOperatorLessThanOrEq: "<=", + v3.FilterOperatorGreaterThan: ">", + v3.FilterOperatorGreaterThanOrEq: ">=", + v3.FilterOperatorLike: "ILIKE", + v3.FilterOperatorNotLike: "NOT ILIKE", + v3.FilterOperatorRegex: "match(%s, %s)", + v3.FilterOperatorNotRegex: "NOT match(%s, %s)", + v3.FilterOperatorContains: "ILIKE", + v3.FilterOperatorNotContains: "NOT ILIKE", + v3.FilterOperatorExists: "mapContains(%s, 
'%s')", + v3.FilterOperatorNotExists: "NOT mapContains(%s, '%s')", +} + +func getClickHouseTracesColumnType(columnType v3.AttributeKeyType) string { + if columnType == v3.AttributeKeyTypeResource { + return "resources" + } + return "attributes" +} + +func getClickHouseTracesColumnDataType(columnDataType v3.AttributeKeyDataType) string { + if columnDataType == v3.AttributeKeyDataTypeFloat64 || columnDataType == v3.AttributeKeyDataTypeInt64 { + return "number" + } + if columnDataType == v3.AttributeKeyDataTypeBool { + return "bool" + } + return "string" +} + +func getColumnName(key v3.AttributeKey) string { + // if key present in static return as it is + if _, ok := constants.StaticFieldsTraces[key.Key]; ok { + return key.Key + } + + if !key.IsColumn { + keyType := getClickHouseTracesColumnType(key.Type) + keyDType := getClickHouseTracesColumnDataType(key.DataType) + return fmt.Sprintf("%s_%s['%s']", keyType, keyDType, key.Key) + } + + return "`" + utils.GetClickhouseColumnNameV2(string(key.Type), string(key.DataType), key.Key) + "`" +} + +// getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator +func getSelectLabels(groupBy []v3.AttributeKey) string { + var labels []string + for _, tag := range groupBy { + name := getColumnName(tag) + labels = append(labels, fmt.Sprintf(" %s as `%s`", name, tag.Key)) + } + return strings.Join(labels, ",") +} + +func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) { + var conditions []string + + if fs != nil && len(fs.Items) != 0 { + for _, item := range fs.Items { + + // skip if it's a resource attribute + if item.Key.Type == v3.AttributeKeyTypeResource { + continue + } + + val := item.Value + // generate the key + columnName := getColumnName(item.Key) + var fmtVal string + item.Operator = v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator)))) + if item.Operator != v3.FilterOperatorExists && item.Operator != v3.FilterOperatorNotExists { + var err error + val, err = utils.ValidateAndCastValue(val, item.Key.DataType) + if err != nil { + return "", fmt.Errorf("invalid value for key %s: %v", item.Key.Key, err) + } + } + if val != nil { + fmtVal = utils.ClickHouseFormattedValue(val) + } + if operator, ok := tracesOperatorMappingV3[item.Operator]; ok { + switch item.Operator { + case v3.FilterOperatorContains, v3.FilterOperatorNotContains: + // we also want to treat %, _ as literals for contains + val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value), false) + conditions = append(conditions, fmt.Sprintf("%s %s '%%%s%%'", columnName, operator, val)) + case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex: + conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal)) + case v3.FilterOperatorExists, v3.FilterOperatorNotExists: + if item.Key.IsColumn { + subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator) + if err != nil { + return "", err + } + conditions = append(conditions, subQuery) + } else { + cType := getClickHouseTracesColumnType(item.Key.Type) + cDataType := getClickHouseTracesColumnDataType(item.Key.DataType) + col := fmt.Sprintf("%s_%s", cType, cDataType) + conditions = append(conditions, fmt.Sprintf(operator, col, item.Key.Key)) + } + + default: + conditions = append(conditions, fmt.Sprintf("%s %s %s", columnName, operator, fmtVal)) + } + } else { + return "", fmt.Errorf("unsupported operator %s", item.Operator) + } + } + } + queryString := strings.Join(conditions, " AND ") + + return queryString, nil +} + +func 
handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
+	// TODO(nitya): handle user-created materialized columns once we support creating them;
+	// skipping them for now
+	filterItems := []v3.FilterItem{}
+	if len(groupBy) != 0 {
+		for _, item := range groupBy {
+			if !item.IsColumn {
+				filterItems = append(filterItems, v3.FilterItem{
+					Key:      item,
+					Operator: v3.FilterOperatorExists,
+				})
+			}
+		}
+	}
+	if len(filterItems) != 0 {
+		filterSet := v3.FilterSet{
+			Operator: "AND",
+			Items:    filterItems,
+		}
+		return buildTracesFilterQuery(&filterSet)
+	}
+	return "", nil
+}
+
+// orderBy returns the list of order by clauses built from items;
+// items that are not present in tags are also added (for the list panel type),
+// and if the order is not specified, it defaults to ASC
+func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
+	var orderBy []string
+
+	for _, item := range items {
+		if item.ColumnName == constants.SigNozOrderByValue {
+			orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
+		} else if _, ok := tagLookup[item.ColumnName]; ok {
+			orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
+		} else if panelType == v3.PanelTypeList {
+			attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
+			name := getColumnName(attr)
+			orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
+		}
+	}
+
+	return orderBy
+}
+
+func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
+	tagLookup := map[string]struct{}{}
+	for _, v := range tags {
+		tagLookup[v.Key] = struct{}{}
+	}
+
+	orderByArray := orderBy(panelType, items, tagLookup)
+
+	if len(orderByArray) == 0 {
+		if panelType == v3.PanelTypeList {
+			orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
+		} else {
+			orderByArray = append(orderByArray, "value DESC")
+		}
+	}
+
+	str := strings.Join(orderByArray, ",")
+	return str
+}
+
+func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.PanelType, options v3.QBOptions) (string, error) {
+	tracesStart := utils.GetEpochNanoSecs(start)
+	tracesEnd := utils.GetEpochNanoSecs(end)
+
+	// 1800 seconds are subtracted so that the bucket start considers all the fingerprints. 
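	// (Editorial worked example, not part of the patch: with the window used
	// in the tests later in this series, start = 1680066360726210000 ns,
	// tracesStart/NANOSECOND = 1680066360 s and bucketStart = 1680064560 s,
	// which is exactly the ts_bucket_start >= 1680064560 predicate in the
	// expected queries below; without the 1800 s lookback, data in a bucket
	// that opened before the window start could be missed.)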
+ bucketStart := tracesStart/NANOSECOND - 1800 + bucketEnd := tracesEnd / NANOSECOND + + timeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d') AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", tracesStart, tracesEnd, bucketStart, bucketEnd) + + filterSubQuery, err := buildTracesFilterQuery(mq.Filters) + if err != nil { + return "", err + } + if filterSubQuery != "" { + filterSubQuery = " AND " + filterSubQuery + } + + emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy) + if err != nil { + return "", err + } + if emptyValuesInGroupByFilter != "" { + filterSubQuery = filterSubQuery + " AND " + emptyValuesInGroupByFilter + } + + resourceSubQuery, err := resource.BuildResourceSubQuery("signoz_traces", "distributed_traces_v3_resource", bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false) + if err != nil { + return "", err + } + // join both the filter clauses + if resourceSubQuery != "" { + filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + ")" + } + + // timerange will be sent in epoch millisecond + selectLabels := getSelectLabels(mq.GroupBy) + if selectLabels != "" { + selectLabels = selectLabels + "," + } + + orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy) + if orderBy != "" { + orderBy = " order by " + orderBy + } + + if mq.AggregateOperator == v3.AggregateOperatorNoOp { + var query string + if panelType == v3.PanelTypeTrace { + withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME, timeFilter, filterSubQuery) + withSubQuery = tracesV3.AddLimitToQuery(withSubQuery, mq.Limit) + if mq.Offset != 0 { + withSubQuery = tracesV3.AddOffsetToQuery(withSubQuery, mq.Offset) + } + query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3, timeFilter) + } else if panelType == v3.PanelTypeList { + if len(mq.SelectColumns) == 0 { + return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType) + } + // add it to the select labels + selectLabels = getSelectLabels(mq.SelectColumns) + queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID,%s ", selectLabels) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 + " where %s %s" + "%s" + query = fmt.Sprintf(queryNoOpTmpl, timeFilter, filterSubQuery, orderBy) + } else { + return "", fmt.Errorf("unsupported aggregate operator %s for panelType %s", mq.AggregateOperator, panelType) + } + return query, nil + // ---- NOOP ends here ---- + } + + having := tracesV3.Having(mq.Having) + if having != "" { + having = " having " + having + } + + groupBy := tracesV3.GroupByAttributeKeyTags(panelType, options.GraphLimitQtype, mq.GroupBy...) 
+ if groupBy != "" { + groupBy = " group by " + groupBy + } + + aggregationKey := "" + if mq.AggregateAttribute.Key != "" { + aggregationKey = getColumnName(mq.AggregateAttribute) + } + + var queryTmpl string + if options.GraphLimitQtype == constants.FirstQueryGraphLimit { + queryTmpl = "SELECT" + } else if panelType == v3.PanelTypeTable { + queryTmpl = + "SELECT " + } else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue { + // Select the aggregate value for interval + queryTmpl = + fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d SECOND) AS ts,", step) + } + + queryTmpl = queryTmpl + selectLabels + + " %s as value " + + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_V3 + + " where " + timeFilter + "%s" + + "%s%s" + + "%s" + + // we don't need value for first query + if options.GraphLimitQtype == constants.FirstQueryGraphLimit { + queryTmpl = "SELECT " + tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")" + } + + if options.GraphLimitQtype == constants.SecondQueryGraphLimit { + filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)" + } + + switch mq.AggregateOperator { + case v3.AggregateOperatorRateSum, + v3.AggregateOperatorRateMax, + v3.AggregateOperatorRateAvg, + v3.AggregateOperatorRateMin, + v3.AggregateOperatorRate: + + rate := float64(step) + if options.PreferRPM { + rate = rate / 60.0 + } + + op := fmt.Sprintf("%s(%s)/%f", tracesV3.AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate) + query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) + return query, nil + case + v3.AggregateOperatorP05, + v3.AggregateOperatorP10, + v3.AggregateOperatorP20, + v3.AggregateOperatorP25, + v3.AggregateOperatorP50, + v3.AggregateOperatorP75, + v3.AggregateOperatorP90, + v3.AggregateOperatorP95, + v3.AggregateOperatorP99: + op := fmt.Sprintf("quantile(%v)(%s)", tracesV3.AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey) + query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) + return query, nil + case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax: + op := fmt.Sprintf("%s(%s)", tracesV3.AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey) + query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) + return query, nil + case v3.AggregateOperatorCount: + if mq.AggregateAttribute.Key != "" { + if mq.AggregateAttribute.IsColumn { + subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists) + if err == nil { + filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery) + } + } else { + column := getColumnName(mq.AggregateAttribute) + filterSubQuery = fmt.Sprintf("%s AND has(%s, '%s')", filterSubQuery, column, mq.AggregateAttribute.Key) + } + } + op := "toFloat64(count())" + query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) + return query, nil + case v3.AggregateOperatorCountDistinct: + op := fmt.Sprintf("toFloat64(count(distinct(%s)))", aggregationKey) + query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy) + return query, nil + default: + return "", fmt.Errorf("unsupported aggregate operator %s", mq.AggregateOperator) + } +} + +// PrepareTracesQuery returns the query string for traces +// start and end are in epoch millisecond +// step is in seconds +func 
PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) { + // adjust the start and end time to the step interval + if panelType == v3.PanelTypeGraph { + // adjust the start and end time to the step interval for graph panel types + start = start - (start % (mq.StepInterval * 1000)) + end = end - (end % (mq.StepInterval * 1000)) + } + if options.GraphLimitQtype == constants.FirstQueryGraphLimit { + // give me just the group by names + query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options) + if err != nil { + return "", err + } + query = tracesV3.AddLimitToQuery(query, mq.Limit) + + return query, nil + } else if options.GraphLimitQtype == constants.SecondQueryGraphLimit { + query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options) + if err != nil { + return "", err + } + return query, nil + } + + query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options) + if err != nil { + return "", err + } + if panelType == v3.PanelTypeValue { + query, err = tracesV3.ReduceToQuery(query, mq.ReduceTo, mq.AggregateOperator) + } + if panelType == v3.PanelTypeList || panelType == v3.PanelTypeTable { + query = tracesV3.AddLimitToQuery(query, mq.Limit) + + if mq.Offset != 0 { + query = tracesV3.AddOffsetToQuery(query, mq.Offset) + } + } + return query, err +} diff --git a/pkg/query-service/app/traces/v4/query_builder_test.go b/pkg/query-service/app/traces/v4/query_builder_test.go new file mode 100644 index 0000000000..11b5945557 --- /dev/null +++ b/pkg/query-service/app/traces/v4/query_builder_test.go @@ -0,0 +1,708 @@ +package v4 + +import ( + "testing" + + "go.signoz.io/signoz/pkg/query-service/constants" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +func Test_getClickHouseTracesColumnType(t *testing.T) { + type args struct { + columnType v3.AttributeKeyType + } + tests := []struct { + name string + args args + want string + }{ + { + name: "tag", + args: args{ + columnType: v3.AttributeKeyTypeTag, + }, + want: "attributes", + }, + { + name: "resource", + args: args{ + columnType: v3.AttributeKeyTypeResource, + }, + want: "resources", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getClickHouseTracesColumnType(tt.args.columnType); got != tt.want { + t.Errorf("GetClickhouseTracesColumnType() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getClickHouseTracesColumnDataType(t *testing.T) { + type args struct { + columnDataType v3.AttributeKeyDataType + } + tests := []struct { + name string + args args + want string + }{ + { + name: "string", + args: args{ + columnDataType: v3.AttributeKeyDataTypeString, + }, + want: "string", + }, + { + name: "float64", + args: args{ + columnDataType: v3.AttributeKeyDataTypeFloat64, + }, + want: "number", + }, + { + name: "int64", + args: args{ + columnDataType: v3.AttributeKeyDataTypeInt64, + }, + want: "number", + }, + { + name: "bool", + args: args{ + columnDataType: v3.AttributeKeyDataTypeBool, + }, + want: "bool", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getClickHouseTracesColumnDataType(tt.args.columnDataType); got != tt.want { + t.Errorf("getClickhouseTracesColumnDataType() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getColumnName(t *testing.T) { + type args struct { + key v3.AttributeKey + } + tests := []struct { + name string + args args + want string + }{ + { + name: "tag", + args: args{ + key: 
v3.AttributeKey{Key: "data", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + }, + want: "attributes_string['data']", + }, + { + name: "column", + args: args{ + key: v3.AttributeKey{Key: "data", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + }, + want: "`attribute_string_data`", + }, + { + name: "static column", + args: args{ + key: v3.AttributeKey{Key: "spanKind", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + }, + want: "spanKind", + }, + { + name: "missing meta", + args: args{ + key: v3.AttributeKey{Key: "xyz"}, + }, + want: "attributes_string['xyz']", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getColumnName(tt.args.key); got != tt.want { + t.Errorf("getColumnName() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getSelectLabels(t *testing.T) { + type args struct { + groupBy []v3.AttributeKey + } + tests := []struct { + name string + args args + want string + }{ + { + name: "count", + args: args{ + groupBy: []v3.AttributeKey{{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + want: " attributes_string['user_name'] as `user_name`", + }, + { + name: "multiple group by", + args: args{ + groupBy: []v3.AttributeKey{ + {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, // static col + {Key: "service_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource, IsColumn: true}, + }, + }, + want: " name as `name`, `resource_string_service_name` as `service_name`", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getSelectLabels(tt.args.groupBy); got != tt.want { + t.Errorf("getSelectLabels() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_buildTracesFilterQuery(t *testing.T) { + type args struct { + fs *v3.FilterSet + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "Test ignore resource", + args: args{ + fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: []interface{}{"service"}, Operator: v3.FilterOperatorIn}, + }, + }}, + want: "", + }, + { + name: "Test buildTracesFilterQuery in, nin", + args: args{ + fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"GET", "POST"}, Operator: v3.FilterOperatorIn}, + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{"PUT"}, Operator: v3.FilterOperatorNotIn}, + {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: []interface{}{"server"}, Operator: v3.FilterOperatorNotIn}, + {Key: v3.AttributeKey{Key: "status.code", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{200}, Operator: v3.FilterOperatorNotIn}, + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{100.0}, Operator: v3.FilterOperatorIn}, + {Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: []interface{}{true}, Operator: 
v3.FilterOperatorIn}, + }}, + }, + want: "attributes_string['method'] IN ['GET','POST'] AND attributes_string['method'] NOT IN ['PUT'] AND attributes_number['status.code'] NOT IN [200] AND attributes_number['duration'] IN [100] AND attributes_bool['isDone'] IN [true]", + wantErr: false, + }, + { + name: "Test buildTracesFilterQuery not eq, neq, gt, lt, gte, lte", + args: args{ + fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 102, Operator: v3.FilterOperatorEqual}, + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 100, Operator: v3.FilterOperatorNotEqual}, + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 10, Operator: v3.FilterOperatorGreaterThan}, + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Value: 200, Operator: v3.FilterOperatorLessThan}, + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag}, Value: 10.0, Operator: v3.FilterOperatorGreaterThanOrEq}, + {Key: v3.AttributeKey{Key: "duration_str", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "200", Operator: v3.FilterOperatorLessThanOrEq}, + }}, + }, + want: "attributes_number['duration'] = 102 AND attributes_number['duration'] != 100 AND attributes_number['duration'] > 10 AND attributes_number['duration'] < 200" + + " AND attributes_number['duration'] >= 10.000000 AND attributes_string['duration_str'] <= '200'", + wantErr: false, + }, + { + name: "Test contains, ncontains, like, nlike, regex, nregex", + args: args{ + fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.%", Operator: v3.FilterOperatorContains}, + {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "103_", Operator: v3.FilterOperatorNotContains}, + {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102.", Operator: v3.FilterOperatorLike}, + {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "102", Operator: v3.FilterOperatorNotLike}, + {Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/mypath", Operator: v3.FilterOperatorRegex}, + {Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: "/health.*", Operator: v3.FilterOperatorNotRegex}, + }}, + }, + want: "attributes_string['host'] ILIKE '%102.\\%%' AND attributes_string['host'] NOT ILIKE '%103\\_%' AND attributes_string['host'] ILIKE '102.' 
AND attributes_string['host'] NOT ILIKE '102' AND " + + "match(`attribute_string_path`, '/mypath') AND NOT match(`attribute_string_path`, '/health.*')", + }, + { + name: "Test exists, nexists", + args: args{ + fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorExists}, + {Key: v3.AttributeKey{Key: "duration", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorExists}, + {Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists}, + {Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists}, + {Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists}, + }}, + }, + want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildTracesFilterQuery(tt.args.fs) + if (err != nil) != tt.wantErr { + t.Errorf("buildTracesFilterQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("buildTracesFilterQuery() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_handleEmptyValuesInGroupBy(t *testing.T) { + type args struct { + groupBy []v3.AttributeKey + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "Test handleEmptyValuesInGroupBy", + args: args{ + groupBy: []v3.AttributeKey{{Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + want: "mapContains(attributes_string, 'bytes')", + wantErr: false, + }, + { + name: "Test handleEmptyValuesInGroupBy", + args: args{ + groupBy: []v3.AttributeKey{{Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}}, + }, + want: "", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := handleEmptyValuesInGroupBy(tt.args.groupBy) + if (err != nil) != tt.wantErr { + t.Errorf("handleEmptyValuesInGroupBy() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("handleEmptyValuesInGroupBy() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_orderByAttributeKeyTags(t *testing.T) { + type args struct { + panelType v3.PanelType + items []v3.OrderBy + tags []v3.AttributeKey + } + tests := []struct { + name string + args args + want string + }{ + { + name: "test", + args: args{ + panelType: v3.PanelTypeGraph, + items: []v3.OrderBy{{ColumnName: "name", Order: "ASC"}}, + tags: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + want: "`name` ASC", + }, + { + name: "order by value", + args: args{ + panelType: v3.PanelTypeGraph, + items: []v3.OrderBy{{ColumnName: "name", Order: "ASC"}, {ColumnName: constants.SigNozOrderByValue, Order: "DESC"}}, + tags: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + want: "`name` ASC,value DESC", + }, + { + name: "test", + args: args{ + panelType: v3.PanelTypeList, + 
items: []v3.OrderBy{{ColumnName: "status", Order: "DESC", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + {ColumnName: "route", Order: "DESC", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}}, + }, + want: "attributes_string['status'] DESC,`attribute_string_route` DESC", + }, + { + name: "ignore order by in table panel", + args: args{ + panelType: v3.PanelTypeTable, + items: []v3.OrderBy{{ColumnName: "timestamp", Order: "DESC"}}, + tags: []v3.AttributeKey{}, + }, + want: "value DESC", + }, + { + name: "add default order by ts for list panel", + args: args{ + panelType: v3.PanelTypeList, + items: []v3.OrderBy{}, + tags: []v3.AttributeKey{}, + }, + want: "timestamp DESC", + }, + { + name: "add default order by value for graph panel", + args: args{ + panelType: v3.PanelTypeGraph, + items: []v3.OrderBy{}, + tags: []v3.AttributeKey{}, + }, + want: "value DESC", + }, + { + name: "don't add default order by for table panel", + args: args{ + panelType: v3.PanelTypeTable, + items: []v3.OrderBy{}, + tags: []v3.AttributeKey{}, + }, + want: "value DESC", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := orderByAttributeKeyTags(tt.args.panelType, tt.args.items, tt.args.tags); got != tt.want { + t.Errorf("orderByAttributeKeyTags() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_buildTracesQuery(t *testing.T) { + type args struct { + start int64 + end int64 + step int64 + mq *v3.BuilderQuery + panelType v3.PanelType + options v3.QBOptions + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "Test buildTracesQuery", + args: args{ + panelType: v3.PanelTypeTable, + start: 1680066360726210000, + end: 1680066458000000000, + step: 1000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorCount, + Filters: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "http.method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + Value: 100, + Operator: v3.FilterOperatorEqual, + }, + }, + }, + GroupBy: []v3.AttributeKey{{Key: "http.method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + OrderBy: []v3.OrderBy{ + {ColumnName: "http.method", Order: "ASC"}}, + }, + }, + want: "SELECT attributes_string['http.method'] as `http.method`, toFloat64(count()) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " + + "AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['http.method'] = '100' AND mapContains(attributes_string, 'http.method') " + + "group by `http.method` order by `http.method` ASC", + }, + { + name: "Test buildTracesQuery", + args: args{ + panelType: v3.PanelTypeTable, + start: 1680066360726210000, + end: 1680066458000000000, + step: 1000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorCount, + Filters: &v3.FilterSet{ + Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "bytes", Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeInt64}, Value: 100, Operator: ">"}, + {Key: v3.AttributeKey{Key: "service.name", Type: v3.AttributeKeyTypeResource, DataType: v3.AttributeKeyDataTypeString}, Value: "myService", Operator: "="}, + }, + }, + GroupBy: []v3.AttributeKey{{Key: "host", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeResource}}, + OrderBy: []v3.OrderBy{ + {ColumnName: "host", Order: "ASC"}}, + }, + }, + 
want: "SELECT resources_number['host'] as `host`, toFloat64(count()) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " + + "AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['bytes'] > 100 AND " + + "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND " + + "(seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'myService' AND labels like '%service.name%myService%' AND " + + "( (simpleJSONHas(labels, 'host') AND labels like '%host%') ))) " + + "group by `host` order by `host` ASC", + }, + { + name: "test noop list view", + args: args{ + panelType: v3.PanelTypeList, + start: 1680066360726210000, + end: 1680066458000000000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorNoOp, + Filters: &v3.FilterSet{}, + SelectColumns: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}}, + OrderBy: []v3.OrderBy{{ColumnName: "timestamp", Order: "ASC"}}, + }, + }, + want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " + + "AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp ASC", + }, + { + name: "test noop list view-without ts", + args: args{ + panelType: v3.PanelTypeList, + start: 1680066360726210000, + end: 1680066458000000000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorNoOp, + Filters: &v3.FilterSet{}, + SelectColumns: []v3.AttributeKey{{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}}, + }, + }, + want: "SELECT timestamp as timestamp_datetime, spanID, traceID, name as `name` from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') " + + "AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by timestamp DESC", + }, + { + name: "test noop trace view", + args: args{ + panelType: v3.PanelTypeTrace, + start: 1680066360726210000, + end: 1680066458000000000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorNoOp, + Filters: &v3.FilterSet{ + Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="}, + {Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "myService", Operator: "="}, + }, + }, + }, + }, + want: "SELECT subQuery.serviceName, subQuery.name, count() AS span_count, subQuery.durationNano, subQuery.traceID AS traceID FROM signoz_traces.distributed_signoz_index_v3 INNER JOIN " + + "( SELECT * FROM (SELECT traceID, durationNano, serviceName, name FROM signoz_traces.signoz_index_v3 WHERE parentSpanID = '' AND (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND " + + "(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['method'] = 'GET' AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource " + + "WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND 
simpleJSONExtractString(labels, 'service.name') = 'myService' AND labels like '%service.name%myService%')) " + + "ORDER BY durationNano DESC LIMIT 1 BY traceID LIMIT 100) AS inner_subquery ) AS subQuery ON signoz_traces.distributed_signoz_index_v3.traceID = subQuery.traceID WHERE (timestamp >= '1680066360726210000' AND " + + "timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) GROUP BY subQuery.traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY " + + "subQuery.durationNano desc LIMIT 1 BY subQuery.traceID;", + }, + { + name: "Test order by value with having", + args: args{ + panelType: v3.PanelTypeTable, + start: 1680066360726210000, + end: 1680066458000000000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorCountDistinct, + Filters: &v3.FilterSet{}, + AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "ASC"}}, + Having: []v3.Having{ + { + ColumnName: "name", + Operator: ">", + Value: 10, + }, + }, + }, + }, + want: "SELECT toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND " + + "(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) having value > 10 order by value ASC", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildTracesQuery(tt.args.start, tt.args.end, tt.args.step, tt.args.mq, tt.args.panelType, tt.args.options) + if (err != nil) != tt.wantErr { + t.Errorf("buildTracesQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("buildTracesQuery() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPrepareTracesQuery(t *testing.T) { + type args struct { + start int64 + end int64 + panelType v3.PanelType + mq *v3.BuilderQuery + options v3.QBOptions + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "test with limit - first", + args: args{ + start: 1680066360726210000, + end: 1680066458000000000, + panelType: v3.PanelTypeTable, + mq: &v3.BuilderQuery{ + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorCountDistinct, + Filters: &v3.FilterSet{}, + AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + GroupBy: []v3.AttributeKey{{Key: "function", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + Limit: 10, + OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}}, + }, + options: v3.QBOptions{ + GraphLimitQtype: constants.FirstQueryGraphLimit, + }, + }, + want: "SELECT `function` from (SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " + + "where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') group by `function` order by value DESC) LIMIT 10", + }, + { + name: "test with limit - second", + args: args{ + start: 1680066360726210000, + end: 1680066458000000000, + panelType: v3.PanelTypeTable, + mq: &v3.BuilderQuery{ + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorCountDistinct, + Filters: &v3.FilterSet{}, + AggregateAttribute: 
v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + GroupBy: []v3.AttributeKey{{Key: "function", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}}, + Limit: 10, + }, + options: v3.QBOptions{ + GraphLimitQtype: constants.SecondQueryGraphLimit, + }, + }, + want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " + + "(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC", + }, + { + name: "test with limit with resources- first", + args: args{ + start: 1680066360726210000, + end: 1680066458000000000, + panelType: v3.PanelTypeTable, + mq: &v3.BuilderQuery{ + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorCountDistinct, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "line", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, + Value: 100, + Operator: v3.FilterOperatorEqual, + }, + { + Key: v3.AttributeKey{Key: "hostname", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, + Value: "server1", + Operator: v3.FilterOperatorEqual, + }, + }, + }, + AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + GroupBy: []v3.AttributeKey{ + {Key: "function", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + {Key: "service.name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, + }, + Limit: 10, + OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}}, + }, + options: v3.QBOptions{ + GraphLimitQtype: constants.FirstQueryGraphLimit, + }, + }, + want: "SELECT `function`,`service.name` from (SELECT `attribute_string_function` as `function`, `resource_string_service$$name` as `service.name`, toFloat64(count(distinct(name))) as value " + + "from signoz_traces.distributed_signoz_index_v3 where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "AND attributes_number['line'] = 100 AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE " + + "(seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%' AND " + + "( (simpleJSONHas(labels, 'service.name') AND labels like '%service.name%') ))) group by `function`,`service.name` order by value DESC) LIMIT 10", + }, + { + name: "test with limit with resources - second", + args: args{ + start: 1680066360726210000, + end: 1680066458000000000, + panelType: v3.PanelTypeTable, + mq: &v3.BuilderQuery{ + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorCountDistinct, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "line", DataType: v3.AttributeKeyDataTypeInt64, Type: v3.AttributeKeyTypeTag}, + Value: 100, + Operator: v3.FilterOperatorEqual, + }, + { + Key: v3.AttributeKey{Key: "hostname", DataType: 
v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, + Value: "server1", + Operator: v3.FilterOperatorEqual, + }, + }, + }, + AggregateAttribute: v3.AttributeKey{Key: "name", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + GroupBy: []v3.AttributeKey{ + {Key: "function", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + {Key: "serviceName", IsColumn: true, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + }, + OrderBy: []v3.OrderBy{{ColumnName: "#SIGNOZ_VALUE", Order: "DESC"}}, + Limit: 10, + }, + options: v3.QBOptions{ + GraphLimitQtype: constants.SecondQueryGraphLimit, + }, + }, + want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " + + "where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " + + "AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " + + "AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := PrepareTracesQuery(tt.args.start, tt.args.end, tt.args.panelType, tt.args.mq, tt.args.options) + if (err != nil) != tt.wantErr { + t.Errorf("PrepareTracesQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("PrepareTracesQuery() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index b1a12880ed..dc52f6fd88 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -239,6 +239,8 @@ const ( SIGNOZ_TRACE_DBNAME = "signoz_traces" SIGNOZ_SPAN_INDEX_TABLENAME = "distributed_signoz_index_v2" SIGNOZ_SPAN_INDEX_LOCAL_TABLENAME = "signoz_index_v2" + SIGNOZ_SPAN_INDEX_V3 = "distributed_signoz_index_v3" + SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME = "signoz_index_v3" SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME = "time_series_v4" SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME = "time_series_v4_6hrs" SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day" @@ -444,3 +446,147 @@ const MaxFilterSuggestionsExamplesLimit = 10 var SpanRenderLimitStr = GetOrDefaultEnv("SPAN_RENDER_LIMIT", "2500") var MaxSpansInTraceStr = GetOrDefaultEnv("MAX_SPANS_IN_TRACE", "250000") + +var StaticFieldsTraces = map[string]v3.AttributeKey{ + "timestamp": {}, + "traceID": { + Key: "traceID", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "spanID": { + Key: "spanID", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "parentSpanID": { + Key: "parentSpanID", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "name": { + Key: "name", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "serviceName": { + Key: "serviceName", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "kind": { + Key: "kind", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "spanKind": { + Key: "spanKind", + DataType: 
v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "durationNano": { + Key: "durationNano", + DataType: v3.AttributeKeyDataTypeFloat64, + IsColumn: true, + }, + "statusCode": { + Key: "statusCode", + DataType: v3.AttributeKeyDataTypeFloat64, + IsColumn: true, + }, + "hasError": { + Key: "hasError", + DataType: v3.AttributeKeyDataTypeBool, + IsColumn: true, + }, + "statusMessage": { + Key: "statusMessage", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "statusCodeString": { + Key: "statusCodeString", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "externalHttpMethod": { + Key: "externalHttpMethod", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "externalHttpUrl": { + Key: "externalHttpUrl", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "dbSystem": { + Key: "dbSystem", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "dbName": { + Key: "dbName", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "dbOperation": { + Key: "dbOperation", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "peerService": { + Key: "peerService", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "httpMethod": { + Key: "httpMethod", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "httpUrl": { + Key: "httpUrl", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "httpRoute": { + Key: "httpRoute", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "httpHost": { + Key: "httpHost", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "msgSystem": { + Key: "msgSystem", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "msgOperation": { + Key: "msgOperation", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "rpcSystem": { + Key: "rpcSystem", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "rpcService": { + Key: "rpcService", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "rpcMethod": { + Key: "rpcMethod", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + "responseStatusCode": { + Key: "responseStatusCode", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, +}
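+
+// Illustrative sketch (hypothetical helper, not defined here): the intent of
+// StaticFieldsTraces is to let the trace query builder tell intrinsic span
+// columns apart from map-backed attributes, roughly:
+//
+//	func traceColumn(key v3.AttributeKey) string {
+//		if field, ok := StaticFieldsTraces[key.Key]; ok && field.IsColumn {
+//			return field.Key // dedicated column, e.g. httpMethod
+//		}
+//		return "attributes_string['" + key.Key + "']" // map lookup fallback
+//	}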