From c0117bdf356c0fc33f44bc34cf3af44716b389f5 Mon Sep 17 00:00:00 2001 From: iliax Date: Tue, 21 Mar 2023 11:50:57 +0400 Subject: [PATCH 01/29] wip --- .../main/resources/swagger/kafka-ui-api.yaml | 611 ++++-------------- 1 file changed, 123 insertions(+), 488 deletions(-) diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index ae51d31568f..26831c73b33 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -625,25 +625,6 @@ paths: schema: $ref: '#/components/schemas/TopicSerdeSuggestion' - /api/smartfilters/testexecutions: - put: - tags: - - Messages - summary: executeSmartFilterTest - operationId: executeSmartFilterTest - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SmartFilterTestExecution' - responses: - 200: - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/SmartFilterTestExecutionResult' - /api/clusters/{clusterName}/topics/{topicName}/messages: get: @@ -763,12 +744,12 @@ paths: 404: description: Not found - /api/clusters/{clusterName}/topics/{topicName}/activeproducers: - get: + /api/clusters/{clusterName}/topics/{topicName}/smartfilters: + post: tags: - - Topics - summary: get producer states for topic - operationId: getActiveProducerStates + - Messages + summary: registerFilter + operationId: registerFilter parameters: - name: clusterName in: path @@ -780,6 +761,11 @@ paths: required: true schema: type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MessageFilterRegistration' responses: 200: description: OK @@ -788,7 +774,90 @@ paths: schema: type: array items: - $ref: '#/components/schemas/TopicProducerState' + $ref: '#/components/schemas/MessageFilterId' + + + /api/clusters/{clusterName}/topics/{topicName}/messages/v2: + get: + tags: + - Messages + summary: getTopicMessagesV2 + operationId: getTopicMessagesV2 + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + - name: topicName + in: path + required: true + schema: + type: string + - name: m + in: query + description: Messages polling mode + schema: + $ref: "#/components/schemas/PollingMode" + - name: p + in: query + schema: + type: array + description: List of target partitions( all partitions if not provided) + items: + type: integer + - name: lim + in: query + description: Max number of messages can be returned + schema: + type: integer + - name: q + in: query + description: query string to contains string filtration + schema: + type: string + - name: fid + in: query + description: filter id, that was registered beforehand + schema: + type: string + - name: off + in: query + description: offset to read from / to + schema: + type: integer + format: int64 + - name: offs + in: query + description: partition offsets to read from / to. Format is "p1:off1,p2:off2,..." + schema: + type: integer + format: int64 + - name: ts + in: query + description: timestamp (in ms) to read from / to + schema: + type: integer + format: int64 + - name: ks + in: query + description: "Serde that should be used for deserialization. Will be chosen automatically if not set." + schema: + type: string + - name: vs + in: query + description: "Serde that should be used for deserialization. Will be chosen automatically if not set." 
+ schema: + type: string + responses: + 200: + description: OK + content: + text/event-stream: + schema: + type: array + items: + $ref: '#/components/schemas/TopicMessageEvent' /api/clusters/{clusterName}/topics/{topicName}/consumer-groups: get: @@ -1776,188 +1845,6 @@ paths: 404: description: Not found - /api/clusters/{clusterName}/acls: - get: - tags: - - Acls - summary: listKafkaAcls - operationId: listAcls - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - - name: resourceType - in: query - required: false - schema: - $ref: '#/components/schemas/KafkaAclResourceType' - - name: resourceName - in: query - required: false - schema: - type: string - - name: namePatternType - in: query - required: false - schema: - $ref: '#/components/schemas/KafkaAclNamePatternType' - responses: - 200: - description: OK - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/KafkaAcl' - - /api/clusters/{clusterName}/acl/csv: - get: - tags: - - Acls - summary: getAclAsCsv - operationId: getAclAsCsv - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - responses: - 200: - description: OK - content: - text/plain: - schema: - type: string - post: - tags: - - Acls - summary: syncAclsCsv - operationId: syncAclsCsv - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - requestBody: - content: - text/plain: - schema: - type: string - responses: - 200: - description: OK - - /api/clusters/{clusterName}/acl: - post: - tags: - - Acls - summary: createAcl - operationId: createAcl - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/KafkaAcl' - responses: - 200: - description: OK - - delete: - tags: - - Acls - summary: deleteAcl - operationId: deleteAcl - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/KafkaAcl' - responses: - 200: - description: OK - 404: - description: Acl not found - - /api/clusters/{clusterName}/acl/consumer: - post: - tags: - - Acls - summary: createConsumerAcl - operationId: createConsumerAcl - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateConsumerAcl' - responses: - 200: - description: OK - - /api/clusters/{clusterName}/acl/producer: - post: - tags: - - Acls - summary: createProducerAcl - operationId: createProducerAcl - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateProducerAcl' - responses: - 200: - description: OK - - /api/clusters/{clusterName}/acl/streamApp: - post: - tags: - - Acls - summary: createStreamAppAcl - operationId: createStreamAppAcl - parameters: - - name: clusterName - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateStreamAppAcl' - responses: - 200: - description: OK - /api/authorization: get: tags: @@ -2047,7 +1934,7 @@ paths: properties: file: type: string - format: binary + format: filepart responses: 200: description: OK @@ -2145,26 +2032,6 @@ components: type: string enum: - DYNAMIC_CONFIG - build: - type: object - properties: - 
commitId: - type: string - version: - type: string - buildTime: - type: string - isLatestRelease: - type: boolean - latestRelease: - type: object - properties: - versionTag: - type: string - publishedAt: - type: string - htmlUrl: - type: string Cluster: type: object @@ -2200,8 +2067,6 @@ components: - KAFKA_CONNECT - KSQL_DB - TOPIC_DELETION - - KAFKA_ACL_VIEW # get ACLs listing - - KAFKA_ACL_EDIT # create & delete ACLs required: - id - name @@ -2605,16 +2470,6 @@ components: type: number bytesOutPerSec: type: number - partitionsLeader: - type: integer - partitions: - type: integer - inSyncPartitions: - type: integer - partitionsSkew: - type: number - leadersSkew: - type: number required: - id @@ -2646,36 +2501,7 @@ components: - PROTOBUF - UNKNOWN - TopicProducerState: - type: object - properties: - partition: - type: integer - format: int32 - producerId: - type: integer - format: int64 - producerEpoch: - type: integer - format: int32 - lastSequence: - type: integer - format: int32 - lastTimestampMs: - type: integer - format: int64 - coordinatorEpoch: - type: integer - format: int32 - currentTransactionStartOffset: - type: integer - format: int64 - ConsumerGroup: - discriminator: - propertyName: inherit - mapping: - details: "#/components/schemas/ConsumerGroupDetails" type: object properties: groupId: @@ -2692,7 +2518,7 @@ components: $ref: "#/components/schemas/ConsumerGroupState" coordinator: $ref: "#/components/schemas/Broker" - consumerLag: + messagesBehind: type: integer format: int64 description: null if consumer group has no offsets committed @@ -2705,8 +2531,6 @@ components: - NAME - MEMBERS - STATE - - MESSAGES_BEHIND - - TOPIC_NUM ConsumerGroupsPageResponse: type: object @@ -2718,37 +2542,6 @@ components: items: $ref: '#/components/schemas/ConsumerGroup' - SmartFilterTestExecution: - type: object - required: [filterCode] - properties: - filterCode: - type: string - key: - type: string - value: - type: string - headers: - type: object - additionalProperties: - type: string - partition: - type: integer - offset: - type: integer - format: int64 - timestampMs: - type: integer - format: int64 - - SmartFilterTestExecutionResult: - type: object - properties: - result: - type: boolean - error: - type: string - CreateTopicMessage: type: object properties: @@ -2891,6 +2684,29 @@ components: - TIMESTAMP - LATEST + MessageFilterRegistration: + type: object + properties: + filterCode: + type: string + + MessageFilterId: + type: object + properties: + id: + type: string + + PollingMode: + type: string + enum: + - FROM_OFFSET + - TO_OFFSET + - FROM_TIMESTAMP + - TO_TIMESTAMP + - LATEST + - FIRST + - TAILING + MessageFilterType: type: string enum: @@ -2941,7 +2757,7 @@ components: endOffset: type: integer format: int64 - consumerLag: + messagesBehind: type: integer format: int64 description: null if consumer group has no offsets committed @@ -3031,10 +2847,6 @@ components: type: string schemaType: $ref: '#/components/schemas/SchemaType' - references: - type: array - items: - $ref: '#/components/schemas/SchemaReference' required: - id - subject @@ -3052,30 +2864,13 @@ components: schema: type: string schemaType: - $ref: '#/components/schemas/SchemaType' # upon updating a schema, the type of existing schema can't be changed - references: - type: array - items: - $ref: '#/components/schemas/SchemaReference' + $ref: '#/components/schemas/SchemaType' + # upon updating a schema, the type of existing schema can't be changed required: - subject - schema - schemaType - SchemaReference: - type: object - 
properties: - name: - type: string - subject: - type: string - version: - type: integer - required: - - name - - subject - - version - CompatibilityLevel: type: object properties: @@ -3638,7 +3433,6 @@ components: - MESSAGES_READ - MESSAGES_PRODUCE - MESSAGES_DELETE - - RESTART ResourceType: type: string @@ -3650,126 +3444,6 @@ components: - SCHEMA - CONNECT - KSQL - - ACL - - AUDIT - - KafkaAcl: - type: object - required: [resourceType, resourceName, namePatternType, principal, host, operation, permission] - properties: - resourceType: - $ref: '#/components/schemas/KafkaAclResourceType' - resourceName: - type: string # "*" if acl can be applied to any resource of given type - namePatternType: - $ref: '#/components/schemas/KafkaAclNamePatternType' - principal: - type: string - host: - type: string - operation: - type: string - enum: - - UNKNOWN # Unknown operation, need to update mapping code on BE - - ALL # Cluster, Topic, Group - - READ # Topic, Group - - WRITE # Topic, TransactionalId - - CREATE # Cluster, Topic - - DELETE # Topic, Group - - ALTER # Cluster, Topic, - - DESCRIBE # Cluster, Topic, Group, TransactionalId, DelegationToken - - CLUSTER_ACTION # Cluster - - DESCRIBE_CONFIGS # Cluster, Topic - - ALTER_CONFIGS # Cluster, Topic - - IDEMPOTENT_WRITE # Cluster - - CREATE_TOKENS - - DESCRIBE_TOKENS - permission: - type: string - enum: - - ALLOW - - DENY - - CreateConsumerAcl: - type: object - required: [principal, host] - properties: - principal: - type: string - host: - type: string - topics: - type: array - items: - type: string - topicsPrefix: - type: string - consumerGroups: - type: array - items: - type: string - consumerGroupsPrefix: - type: string - - CreateProducerAcl: - type: object - required: [principal, host] - properties: - principal: - type: string - host: - type: string - topics: - type: array - items: - type: string - topicsPrefix: - type: string - transactionalId: - type: string - transactionsIdPrefix: - type: string - idempotent: - type: boolean - default: false - - CreateStreamAppAcl: - type: object - required: [principal, host, applicationId, inputTopics, outputTopics] - properties: - principal: - type: string - host: - type: string - inputTopics: - type: array - items: - type: string - outputTopics: - type: array - items: - type: string - applicationId: - nullable: false - type: string - - KafkaAclResourceType: - type: string - enum: - - UNKNOWN # Unknown operation, need to update mapping code on BE - - TOPIC - - GROUP - - CLUSTER - - TRANSACTIONAL_ID - - DELEGATION_TOKEN - - USER - - KafkaAclNamePatternType: - type: string - enum: - - MATCH - - LITERAL - - PREFIXED RestartRequest: type: object @@ -3906,28 +3580,9 @@ components: type: array items: $ref: '#/components/schemas/Action' - webclient: - type: object - properties: - maxInMemoryBufferSize: - type: string - description: "examples: 20, 12KB, 5MB" kafka: type: object properties: - polling: - type: object - properties: - pollTimeoutMs: - type: integer - maxPageSize: - type: integer - defaultPageSize: - type: integer - adminClientTimeout: - type: integer - internalTopicPrefix: - type: string clusters: type: array items: @@ -4056,9 +3711,7 @@ components: type: array items: type: string - fieldsNamePattern: - type: string - maskingCharsReplacement: + pattern: type: array items: type: string @@ -4071,21 +3724,3 @@ components: pollingThrottleRate: type: integer format: int64 - audit: - type: object - properties: - level: - type: string - enum: [ "ALL", "ALTER_ONLY" ] - topic: - type: string - 
auditTopicsPartitions: - type: integer - topicAuditEnabled: - type: boolean - consoleAuditEnabled: - type: boolean - auditTopicProperties: - type: object - additionalProperties: - type: string From e4d16b16c6dd5a37f5813e745f80ae6f35dc5479 Mon Sep 17 00:00:00 2001 From: iliax Date: Tue, 21 Mar 2023 19:13:01 +0400 Subject: [PATCH 02/29] wip --- documentation/compose/kafka-ui.yaml | 13 +- .../ui/controller/MessagesController.java | 104 +++++--- .../kafka/ui/service/MessagesService.java | 242 +++++++++++------- .../main/resources/swagger/kafka-ui-api.yaml | 20 +- 4 files changed, 228 insertions(+), 151 deletions(-) diff --git a/documentation/compose/kafka-ui.yaml b/documentation/compose/kafka-ui.yaml index 14a269ca7cb..c5843b4bd5c 100644 --- a/documentation/compose/kafka-ui.yaml +++ b/documentation/compose/kafka-ui.yaml @@ -20,11 +20,6 @@ services: KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085 KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083 - KAFKA_CLUSTERS_1_NAME: secondLocal - KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092 - KAFKA_CLUSTERS_1_METRICS_PORT: 9998 - KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085 - DYNAMIC_CONFIG_ENABLED: 'true' kafka0: image: confluentinc/cp-kafka:7.2.1 @@ -45,7 +40,7 @@ services: KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_NODE_ID: 1 - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093,2@kafka1:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' @@ -62,7 +57,7 @@ services: - "9093:9092" - "9998:9998" environment: - KAFKA_BROKER_ID: 1 + KAFKA_BROKER_ID: 2 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092' KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 @@ -72,8 +67,8 @@ services: KAFKA_JMX_PORT: 9998 KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998 KAFKA_PROCESS_ROLES: 'broker,controller' - KAFKA_NODE_ID: 1 - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093' + KAFKA_NODE_ID: 2 + KAFKA_CONTROLLER_QUORUM_VOTERS: '2@kafka1:29093,1@kafka0:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index 32d341e6134..9fa53beff1f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -11,19 +11,20 @@ import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; +import 
com.provectus.kafka.ui.model.MessageFilterIdDTO; +import com.provectus.kafka.ui.model.MessageFilterRegistrationDTO; import com.provectus.kafka.ui.model.MessageFilterTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.SeekDirectionDTO; import com.provectus.kafka.ui.model.SeekTypeDTO; import com.provectus.kafka.ui.model.SerdeUsageDTO; -import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; -import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO; import com.provectus.kafka.ui.model.rbac.AccessContext; -import com.provectus.kafka.ui.model.rbac.permission.AuditAction; import com.provectus.kafka.ui.model.rbac.permission.TopicAction; import com.provectus.kafka.ui.service.DeserializationService; import com.provectus.kafka.ui.service.MessagesService; +import com.provectus.kafka.ui.service.rbac.AccessControlService; import java.util.List; import java.util.Map; import java.util.Optional; @@ -45,37 +46,34 @@ @Slf4j public class MessagesController extends AbstractController implements MessagesApi { + private static final int MAX_LOAD_RECORD_LIMIT = 100; + private static final int DEFAULT_LOAD_RECORD_LIMIT = 20; + private final MessagesService messagesService; private final DeserializationService deserializationService; + private final AccessControlService accessControlService; @Override public Mono> deleteTopicMessages( String clusterName, String topicName, @Valid List partitions, ServerWebExchange exchange) { - var context = AccessContext.builder() + Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_DELETE) - .build(); + .build()); - return validateAccess(context).>then( + return validateAccess.then( messagesService.deleteTopicMessages( getCluster(clusterName), topicName, Optional.ofNullable(partitions).orElse(List.of()) ).thenReturn(ResponseEntity.ok().build()) - ).doOnEach(sig -> audit(context, sig)); - } - - @Override - public Mono> executeSmartFilterTest( - Mono smartFilterTestExecutionDto, ServerWebExchange exchange) { - return smartFilterTestExecutionDto - .map(MessagesService::execSmartFilterTest) - .map(ResponseEntity::ok); + ); } + @Deprecated @Override public Mono>> getTopicMessages(String clusterName, String topicName, @@ -88,19 +86,17 @@ public Mono>> getTopicMessages(String String keySerde, String valueSerde, ServerWebExchange exchange) { - var contextBuilder = AccessContext.builder() + final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_READ) - .operationName("getTopicMessages"); - - if (auditService.isAuditTopic(getCluster(clusterName), topicName)) { - contextBuilder.auditActions(AuditAction.VIEW); - } + .build()); seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING; seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD; filterQueryType = filterQueryType != null ? 
filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS; + int recordsLimit = + Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); var positions = new ConsumerPosition( seekType, @@ -111,14 +107,11 @@ public Mono>> getTopicMessages(String ResponseEntity.ok( messagesService.loadMessages( getCluster(clusterName), topicName, positions, q, filterQueryType, - limit, seekDirection, keySerde, valueSerde) + recordsLimit, seekDirection, keySerde, valueSerde) ) ); - var context = contextBuilder.build(); - return validateAccess(context) - .then(job) - .doOnEach(sig -> audit(context, sig)); + return validateAccess.then(job); } @Override @@ -126,18 +119,17 @@ public Mono> sendTopicMessages( String clusterName, String topicName, @Valid Mono createTopicMessage, ServerWebExchange exchange) { - var context = AccessContext.builder() + Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_PRODUCE) - .operationName("sendTopicMessages") - .build(); + .build()); - return validateAccess(context).then( + return validateAccess.then( createTopicMessage.flatMap(msg -> messagesService.sendMessage(getCluster(clusterName), topicName, msg).then() ).map(ResponseEntity::ok) - ).doOnEach(sig -> audit(context, sig)); + ); } /** @@ -173,12 +165,12 @@ public Mono> getSerdes(String clusterNam String topicName, SerdeUsageDTO use, ServerWebExchange exchange) { - var context = AccessContext.builder() + + Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(TopicAction.VIEW) - .operationName("getSerdes") - .build(); + .build()); TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO() .key(use == SerdeUsageDTO.SERIALIZE @@ -188,7 +180,7 @@ public Mono> getSerdes(String clusterNam ? 
deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE) : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE)); - return validateAccess(context).then( + return validateAccess.then( Mono.just(dto) .subscribeOn(Schedulers.boundedElastic()) .map(ResponseEntity::ok) @@ -196,6 +188,48 @@ public Mono> getSerdes(String clusterNam } + @Override + public Mono>> getTopicMessagesV2(String clusterName, String topicName, + PollingModeDTO mode, + @Nullable List partitions, + @Nullable Integer limit, + @Nullable String query, + @Nullable String filterId, + @Nullable String offsetString, + @Nullable Long ts, + @Nullable String ks, + @Nullable String vs, + ServerWebExchange exchange) { + final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + .cluster(clusterName) + .topic(topicName) + .topicActions(MESSAGES_READ) + .build()); + + int recordsLimit = + Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); + + return validateAccess.then( + Mono.just( + ResponseEntity.ok( + messagesService.loadMessagesV2( + getCluster(clusterName), topicName, positions, q, filterQueryType, + recordsLimit, seekDirection, keySerde, valueSerde) + ) + ) + ); + } + interface PollingMode { + static PollingMode create(PollingModeDTO mode, @Nullable String offsetString, @Nullable Long timestamp) { + return null; + } + } + @Override + public Mono>> registerFilter(String clusterName, String topicName, + Mono messageFilterRegistrationDTO, + ServerWebExchange exchange) { + return null; + } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 620bd840861..b92f0b6f2a9 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -1,9 +1,9 @@ package com.provectus.kafka.ui.service; import com.google.common.util.concurrent.RateLimiter; -import com.provectus.kafka.ui.config.ClustersProperties; -import com.provectus.kafka.ui.emitter.BackwardEmitter; -import com.provectus.kafka.ui.emitter.ForwardEmitter; +import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; +import com.provectus.kafka.ui.emitter.MessageFilterStats; import com.provectus.kafka.ui.emitter.MessageFilters; import com.provectus.kafka.ui.emitter.TailingEmitter; import com.provectus.kafka.ui.exception.TopicNotFoundException; @@ -12,25 +12,23 @@ import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; import com.provectus.kafka.ui.model.MessageFilterTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; -import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO; -import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serde.api.Serde; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.ProducerRecordCreator; +import com.provectus.kafka.ui.util.ResultSizeLimiter; import com.provectus.kafka.ui.util.SslPropertiesUtil; -import java.time.Instant; -import java.time.OffsetDateTime; -import 
java.time.ZoneOffset; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Properties; import java.util.concurrent.CompletableFuture; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.apache.kafka.clients.admin.OffsetSpec; @@ -43,39 +41,21 @@ import org.apache.kafka.common.serialization.ByteArraySerializer; import org.springframework.stereotype.Service; import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; import reactor.core.publisher.Mono; import reactor.core.scheduler.Schedulers; @Service +@RequiredArgsConstructor @Slf4j public class MessagesService { - private static final int DEFAULT_MAX_PAGE_SIZE = 500; - private static final int DEFAULT_PAGE_SIZE = 100; // limiting UI messages rate to 20/sec in tailing mode - private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20; + public static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20; private final AdminClientService adminClientService; private final DeserializationService deserializationService; private final ConsumerGroupService consumerGroupService; - private final int maxPageSize; - private final int defaultPageSize; - - public MessagesService(AdminClientService adminClientService, - DeserializationService deserializationService, - ConsumerGroupService consumerGroupService, - ClustersProperties properties) { - this.adminClientService = adminClientService; - this.deserializationService = deserializationService; - this.consumerGroupService = consumerGroupService; - - var pollingProps = Optional.ofNullable(properties.getPolling()) - .orElseGet(ClustersProperties.PollingProperties::new); - this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize()) - .orElse(DEFAULT_MAX_PAGE_SIZE); - this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize()) - .orElse(DEFAULT_PAGE_SIZE); - } private Mono withExistingTopic(KafkaCluster cluster, String topicName) { return adminClientService.get(cluster) @@ -83,40 +63,6 @@ private Mono withExistingTopic(KafkaCluster cluster, String to .switchIfEmpty(Mono.error(new TopicNotFoundException())); } - public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) { - Predicate predicate; - try { - predicate = MessageFilters.createMsgFilter( - execData.getFilterCode(), - MessageFilterTypeDTO.GROOVY_SCRIPT - ); - } catch (Exception e) { - log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e); - return new SmartFilterTestExecutionResultDTO() - .error("Compilation error : " + e.getMessage()); - } - try { - var result = predicate.test( - new TopicMessageDTO() - .key(execData.getKey()) - .content(execData.getValue()) - .headers(execData.getHeaders()) - .offset(execData.getOffset()) - .partition(execData.getPartition()) - .timestamp( - Optional.ofNullable(execData.getTimestampMs()) - .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC)) - .orElse(null)) - ); - return new SmartFilterTestExecutionResultDTO() - .result(result); - } catch (Exception e) { - log.info("Smart filter {} execution error", execData, e); - return new SmartFilterTestExecutionResultDTO() - .error("Execution error : " + e.getMessage()); - } - } - public Mono deleteTopicMessages(KafkaCluster cluster, String topicName, List partitionsToInclude) { 
return withExistingTopic(cluster, topicName) @@ -163,7 +109,13 @@ private Mono sendMessageImpl(KafkaCluster cluster, msg.getValueSerde().get() ); - try (KafkaProducer producer = createProducer(cluster, Map.of())) { + Properties properties = new Properties(); + SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties); + properties.putAll(cluster.getProperties()); + properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); + properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + try (KafkaProducer producer = new KafkaProducer<>(properties)) { ProducerRecord producerRecord = producerRecordCreator.create( topicDescription.name(), msg.getPartition(), @@ -185,23 +137,11 @@ private Mono sendMessageImpl(KafkaCluster cluster, } } - public static KafkaProducer createProducer(KafkaCluster cluster, - Map additionalProps) { - Properties properties = new Properties(); - SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties); - properties.putAll(cluster.getProperties()); - properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); - properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); - properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); - properties.putAll(additionalProps); - return new KafkaProducer<>(properties); - } - public Flux loadMessages(KafkaCluster cluster, String topic, ConsumerPosition consumerPosition, @Nullable String query, MessageFilterTypeDTO filterQueryType, - @Nullable Integer pageSize, + int limit, SeekDirectionDTO seekDirection, @Nullable String keySerde, @Nullable String valueSerde) { @@ -209,13 +149,7 @@ public Flux loadMessages(KafkaCluster cluster, String topi .flux() .publishOn(Schedulers.boundedElastic()) .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query, - filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde)); - } - - private int fixPageSize(@Nullable Integer pageSize) { - return Optional.ofNullable(pageSize) - .filter(ps -> ps > 0 && ps <= maxPageSize) - .orElse(defaultPageSize); + filterQueryType, limit, seekDirection, keySerde, valueSerde)); } private Flux loadMessagesImpl(KafkaCluster cluster, @@ -228,32 +162,142 @@ private Flux loadMessagesImpl(KafkaCluster cluster, @Nullable String keySerde, @Nullable String valueSerde) { - var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); - var filter = getMsgFilter(query, filterQueryType); - var emitter = switch (seekDirection) { - case FORWARD -> new ForwardEmitter( + java.util.function.Consumer> emitter; + ConsumerRecordDeserializer recordDeserializer = + deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); + if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { + emitter = new ForwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), - consumerPosition, limit, deserializer, filter, cluster.getPollingSettings() + consumerPosition, + recordDeserializer, + cluster.getThrottler().get() ); - case BACKWARD -> new BackwardEmitter( + } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { + emitter = new BackwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), - consumerPosition, limit, deserializer, filter, cluster.getPollingSettings() + consumerPosition, + 
limit, + recordDeserializer, + cluster.getThrottler().get() ); - case TAILING -> new TailingEmitter( + } else { + emitter = new TailingEmitter( () -> consumerGroupService.createConsumer(cluster), - consumerPosition, deserializer, filter, cluster.getPollingSettings() + consumerPosition, + recordDeserializer, + cluster.getThrottler().get() ); - }; + } + MessageFilterStats filterStats = new MessageFilterStats(); return Flux.create(emitter) + .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) + .filter(getMsgFilter(query, filterQueryType, filterStats)) + .map(getDataMasker(cluster, topic)) + .takeWhile(createTakeWhilePredicate(seekDirection, limit)) .map(throttleUiPublish(seekDirection)); } - private Predicate getMsgFilter(String query, - MessageFilterTypeDTO filterQueryType) { + public Flux loadMessagesV2(KafkaCluster cluster, + String topic, + PollingModeDTO pollingMode, + @Nullable String query, + @Nullable String filterId, + int limit, + @Nullable String keySerde, + @Nullable String valueSerde) { + return withExistingTopic(cluster, topic) + .flux() + .publishOn(Schedulers.boundedElastic()) + .flatMap(td -> loadMessagesImplV2(cluster, topic, consumerPosition, query, + filterQueryType, limit, seekDirection, keySerde, valueSerde)); + } + + private Flux loadMessagesImplV2(KafkaCluster cluster, + String topic, + ConsumerPosition consumerPosition, + @Nullable String query, + MessageFilterTypeDTO filterQueryType, + int limit, + SeekDirectionDTO seekDirection, + @Nullable String keySerde, + @Nullable String valueSerde) { + + java.util.function.Consumer> emitter; + ConsumerRecordDeserializer recordDeserializer = + deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); + if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { + emitter = new ForwardRecordEmitter( + () -> consumerGroupService.createConsumer(cluster), + consumerPosition, + recordDeserializer, + cluster.getThrottler().get() + ); + } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { + emitter = new BackwardRecordEmitter( + () -> consumerGroupService.createConsumer(cluster), + consumerPosition, + limit, + recordDeserializer, + cluster.getThrottler().get() + ); + } else { + emitter = new TailingEmitter( + () -> consumerGroupService.createConsumer(cluster), + consumerPosition, + recordDeserializer, + cluster.getThrottler().get() + ); + } + MessageFilterStats filterStats = new MessageFilterStats(); + return Flux.create(emitter) + .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) + .filter(getMsgFilter(query, filterQueryType, filterStats)) + .map(getDataMasker(cluster, topic)) + .takeWhile(createTakeWhilePredicate(seekDirection, limit)) + .map(throttleUiPublish(seekDirection)); + } + + private Predicate createTakeWhilePredicate( + SeekDirectionDTO seekDirection, int limit) { + return seekDirection == SeekDirectionDTO.TAILING + ? 
evt -> true // no limit for tailing + : new ResultSizeLimiter(limit); + } + + private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { + var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); + var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); + return evt -> { + if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { + return evt; + } + return evt.message( + evt.getMessage() + .key(keyMasker.apply(evt.getMessage().getKey())) + .content(valMasker.apply(evt.getMessage().getContent()))); + }; + } + + private Predicate getMsgFilter(String query, + MessageFilterTypeDTO filterQueryType, + MessageFilterStats filterStats) { if (StringUtils.isEmpty(query)) { return evt -> true; } - return MessageFilters.createMsgFilter(query, filterQueryType); + var messageFilter = MessageFilters.createMsgFilter(query, filterQueryType); + return evt -> { + // we only apply filter for message events + if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) { + try { + return messageFilter.test(evt.getMessage()); + } catch (Exception e) { + filterStats.incrementApplyErrors(); + log.trace("Error applying filter '{}' for message {}", query, evt.getMessage()); + return false; + } + } + return true; + }; } private UnaryOperator throttleUiPublish(SeekDirectionDTO seekDirection) { diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index 26831c73b33..ca85d018979 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -797,6 +797,7 @@ paths: - name: m in: query description: Messages polling mode + required: true schema: $ref: "#/components/schemas/PollingMode" - name: p @@ -821,18 +822,11 @@ paths: description: filter id, that was registered beforehand schema: type: string - - name: off - in: query - description: offset to read from / to - schema: - type: integer - format: int64 - name: offs in: query description: partition offsets to read from / to. Format is "p1:off1,p2:off2,..." 
schema: - type: integer - format: int64 + type: string - name: ts in: query description: timestamp (in ms) to read from / to @@ -2576,6 +2570,7 @@ components: - MESSAGE - CONSUMING - DONE + - CURSOR - EMIT_THROTTLING message: $ref: "#/components/schemas/TopicMessage" @@ -2583,6 +2578,8 @@ components: $ref: "#/components/schemas/TopicMessagePhase" consuming: $ref: "#/components/schemas/TopicMessageConsuming" + cursor: + $ref: "#/components/schemas/TopicMessageNextPageCursor" TopicMessagePhase: type: object @@ -2612,6 +2609,13 @@ components: filterApplyErrors: type: integer + TopicMessageNextPageCursor: + type: object + properties: + offsetsString: + type: string + pollingMode: + $ref: "#/components/schemas/PollingMode" TopicMessage: type: object From 01f5648ab23925d6abf5fe0888d5a9aa998cf7a9 Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 22 Mar 2023 23:18:05 +0400 Subject: [PATCH 03/29] wip --- .../ui/controller/MessagesController.java | 96 +++--------- .../kafka/ui/emitter/AbstractEmitter.java | 68 ++++++-- .../ui/emitter/BackwardRecordEmitter.java | 128 ++++++++++++++++ .../ui/emitter/ForwardRecordEmitter.java | 69 +++++++++ .../kafka/ui/emitter/MessageFilters.java | 35 ++--- .../kafka/ui/emitter/SeekOperations.java | 101 ++++++------ .../kafka/ui/model/ConsumerPosition.java | 90 ++++++++++- .../kafka/ui/service/MessagesService.java | 145 ++++++------------ .../kafka/ui/KafkaConsumerTests.java | 2 +- .../kafka/ui/emitter/SeekOperationsTest.java | 36 +++-- .../kafka/ui/emitter/TailingEmitterTest.java | 16 +- .../kafka/ui/service/MessagesServiceTest.java | 53 +------ .../kafka/ui/service/RecordEmitterTest.java | 141 ++++++++--------- .../kafka/ui/service/SendAndReadTests.java | 13 +- .../main/resources/swagger/kafka-ui-api.yaml | 11 +- 15 files changed, 571 insertions(+), 433 deletions(-) create mode 100644 kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java create mode 100644 kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index 9fa53beff1f..88cf0746bb6 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -5,10 +5,8 @@ import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ; import static com.provectus.kafka.ui.serde.api.Serde.Target.KEY; import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE; -import static java.util.stream.Collectors.toMap; import com.provectus.kafka.ui.api.MessagesApi; -import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.MessageFilterIdDTO; @@ -26,14 +24,11 @@ import com.provectus.kafka.ui.service.MessagesService; import com.provectus.kafka.ui.service.rbac.AccessControlService; import java.util.List; -import java.util.Map; import java.util.Optional; import javax.annotation.Nullable; import javax.validation.Valid; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.kafka.common.TopicPartition; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.RestController; 
import org.springframework.web.server.ServerWebExchange; @@ -86,32 +81,7 @@ public Mono>> getTopicMessages(String String keySerde, String valueSerde, ServerWebExchange exchange) { - final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() - .cluster(clusterName) - .topic(topicName) - .topicActions(MESSAGES_READ) - .build()); - - seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING; - seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD; - filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS; - int recordsLimit = - Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); - - var positions = new ConsumerPosition( - seekType, - topicName, - parseSeekTo(topicName, seekType, seekTo) - ); - Mono>> job = Mono.just( - ResponseEntity.ok( - messagesService.loadMessages( - getCluster(clusterName), topicName, positions, q, filterQueryType, - recordsLimit, seekDirection, keySerde, valueSerde) - ) - ); - - return validateAccess.then(job); + throw new IllegalStateException(); } @Override @@ -132,34 +102,6 @@ public Mono> sendTopicMessages( ); } - /** - * The format is [partition]::[offset] for specifying offsets - * or [partition]::[timestamp in millis] for specifying timestamps. - */ - @Nullable - private Map parseSeekTo(String topic, SeekTypeDTO seekType, List seekTo) { - if (seekTo == null || seekTo.isEmpty()) { - if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) { - return null; - } - throw new ValidationException("seekTo should be set if seekType is " + seekType); - } - return seekTo.stream() - .map(p -> { - String[] split = p.split("::"); - if (split.length != 2) { - throw new IllegalArgumentException( - "Wrong seekTo argument format. 
See API docs for details"); - } - - return Pair.of( - new TopicPartition(topic, Integer.parseInt(split[0])), - Long.parseLong(split[1]) - ); - }) - .collect(toMap(Pair::getKey, Pair::getValue)); - } - @Override public Mono> getSerdes(String clusterName, String topicName, @@ -197,8 +139,8 @@ public Mono>> getTopicMessagesV2(Strin @Nullable String filterId, @Nullable String offsetString, @Nullable Long ts, - @Nullable String ks, - @Nullable String vs, + @Nullable String keySerde, + @Nullable String valueSerde, ServerWebExchange exchange) { final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() .cluster(clusterName) @@ -206,6 +148,8 @@ public Mono>> getTopicMessagesV2(Strin .topicActions(MESSAGES_READ) .build()); + ConsumerPosition consumerPosition = ConsumerPosition.create(mode, topicName, partitions, ts, offsetString); + int recordsLimit = Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); @@ -213,23 +157,25 @@ public Mono>> getTopicMessagesV2(Strin Mono.just( ResponseEntity.ok( messagesService.loadMessagesV2( - getCluster(clusterName), topicName, positions, q, filterQueryType, - recordsLimit, seekDirection, keySerde, valueSerde) - ) - ) - ); + getCluster(clusterName), topicName, consumerPosition, + query, filterId, recordsLimit, keySerde, valueSerde)))); } - interface PollingMode { - static PollingMode create(PollingModeDTO mode, @Nullable String offsetString, @Nullable Long timestamp) { - return null; - } - } @Override - public Mono>> registerFilter(String clusterName, String topicName, - Mono messageFilterRegistrationDTO, - ServerWebExchange exchange) { - return null; + public Mono> registerFilter(String clusterName, + String topicName, + Mono registration, + ServerWebExchange exchange) { + + final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + .cluster(clusterName) + .topic(topicName) + .topicActions(MESSAGES_READ) + .build()); + + return validateAccess.then(registration) + .map(reg -> messagesService.registerMessageFilter(reg.getFilterCode())) + .map(id -> ResponseEntity.ok(new MessageFilterIdDTO().id(id))); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index ec576a1d1a6..5dfe1b85a1b 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -1,44 +1,78 @@ package com.provectus.kafka.ui.emitter; +import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.model.TopicMessagePhaseDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import java.time.Duration; +import java.time.Instant; +import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; -abstract class AbstractEmitter implements java.util.function.Consumer> { +public abstract class AbstractEmitter implements java.util.function.Consumer> { - private final MessagesProcessing messagesProcessing; - private final PollingSettings pollingSettings; + private final ConsumerRecordDeserializer recordDeserializer; + private final ConsumingStats consumingStats = new ConsumingStats(); + 
private final PollingThrottler throttler; + protected final PollingSettings pollingSettings; - protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) { - this.messagesProcessing = messagesProcessing; + protected AbstractEmitter(ConsumerRecordDeserializer recordDeserializer, PollingSettings pollingSettings) { + this.recordDeserializer = recordDeserializer; this.pollingSettings = pollingSettings; + this.throttler = pollingSettings.getPollingThrottler(); } - protected PolledRecords poll(FluxSink sink, EnhancedConsumer consumer) { - var records = consumer.pollEnhanced(pollingSettings.getPollTimeout()); - sendConsuming(sink, records); - return records; + protected ConsumerRecords poll( + FluxSink sink, Consumer consumer) { + return poll(sink, consumer, pollingSettings.getPollTimeout()); } - protected boolean sendLimitReached() { - return messagesProcessing.limitReached(); + protected ConsumerRecords poll( + FluxSink sink, Consumer consumer, Duration timeout) { + Instant start = Instant.now(); + ConsumerRecords records = consumer.poll(timeout); + Instant finish = Instant.now(); + int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis()); + throttler.throttleAfterPoll(polledBytes); + return records; } - protected void send(FluxSink sink, Iterable> records) { - messagesProcessing.send(sink, records); + protected void sendMessage(FluxSink sink, + ConsumerRecord msg) { + final TopicMessageDTO topicMessage = recordDeserializer.deserialize(msg); + sink.next( + new TopicMessageEventDTO() + .type(TopicMessageEventDTO.TypeEnum.MESSAGE) + .message(topicMessage) + ); } protected void sendPhase(FluxSink sink, String name) { - messagesProcessing.sendPhase(sink, name); + sink.next( + new TopicMessageEventDTO() + .type(TopicMessageEventDTO.TypeEnum.PHASE) + .phase(new TopicMessagePhaseDTO().name(name)) + ); } - protected void sendConsuming(FluxSink sink, PolledRecords records) { - messagesProcessing.sentConsumingInfo(sink, records); + protected int sendConsuming(FluxSink sink, + ConsumerRecords records, + long elapsed) { + return consumingStats.sendConsumingEvt(sink, records, elapsed, getFilterApplyErrors(sink)); } protected void sendFinishStatsAndCompleteSink(FluxSink sink) { - messagesProcessing.sendFinishEvent(sink); + consumingStats.sendFinishEvent(sink, getFilterApplyErrors(sink)); sink.complete(); } + + protected Number getFilterApplyErrors(FluxSink sink) { + return sink.contextView() + .getOrEmpty(MessageFilterStats.class) + .map(MessageFilterStats::getFilterApplyErrors) + .orElse(0); + } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java new file mode 100644 index 00000000000..d2ab8773e77 --- /dev/null +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java @@ -0,0 +1,128 @@ +package com.provectus.kafka.ui.emitter; + +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.TreeMap; +import java.util.function.Supplier; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import 
org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.utils.Bytes; +import reactor.core.publisher.FluxSink; + +@Slf4j +public class BackwardRecordEmitter extends AbstractEmitter { + + private final Supplier> consumerSupplier; + private final ConsumerPosition consumerPosition; + private final int messagesPerPage; + + public BackwardRecordEmitter( + Supplier> consumerSupplier, + ConsumerPosition consumerPosition, + int messagesPerPage, + ConsumerRecordDeserializer recordDeserializer, + PollingSettings pollingSettings) { + super(recordDeserializer, pollingSettings); + this.consumerPosition = consumerPosition; + this.messagesPerPage = messagesPerPage; + this.consumerSupplier = consumerSupplier; + } + + @Override + public void accept(FluxSink sink) { + log.debug("Starting backward polling for {}", consumerPosition); + try (KafkaConsumer consumer = consumerSupplier.get()) { + sendPhase(sink, "Created consumer"); + + var seekOperations = SeekOperations.create(consumer, consumerPosition); + var readUntilOffsets = new TreeMap(Comparator.comparingInt(TopicPartition::partition)); + readUntilOffsets.putAll(seekOperations.getOffsetsForSeek()); + + int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size()); + log.debug("'Until' offsets for polling: {}", readUntilOffsets); + + while (!sink.isCancelled() && !readUntilOffsets.isEmpty()) { + new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> { + if (sink.isCancelled()) { + return; //fast return in case of sink cancellation + } + long beginOffset = seekOperations.getBeginOffsets().get(tp); + long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition); + + partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink) + .stream() + .filter(r -> !sink.isCancelled()) + .forEach(r -> sendMessage(sink, r)); + + if (beginOffset == readFromOffset) { + // we fully read this partition -> removing it from polling iterations + readUntilOffsets.remove(tp); + } else { + // updating 'to' offset for next polling iteration + readUntilOffsets.put(tp, readFromOffset); + } + }); + if (readUntilOffsets.isEmpty()) { + log.debug("begin reached after partitions poll iteration"); + } else if (sink.isCancelled()) { + log.debug("sink is cancelled after partitions poll iteration"); + } + } + sendFinishStatsAndCompleteSink(sink); + log.debug("Polling finished"); + } catch (InterruptException kafkaInterruptException) { + log.debug("Polling finished due to thread interruption"); + sink.complete(); + } catch (Exception e) { + log.error("Error occurred while consuming records", e); + sink.error(e); + } + } + + private List> partitionPollIteration( + TopicPartition tp, + long fromOffset, + long toOffset, + Consumer consumer, + FluxSink sink + ) { + consumer.assign(Collections.singleton(tp)); + consumer.seek(tp, fromOffset); + sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset)); + int desiredMsgsToPoll = (int) (toOffset - fromOffset); + + var recordsToSend = new ArrayList>(); + + EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter(); + while (!sink.isCancelled() + && recordsToSend.size() < desiredMsgsToPoll + && !emptyPolls.noDataEmptyPollsReached()) { + var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout()); + emptyPolls.count(polledRecords); + + log.debug("{} records polled from {}", 
polledRecords.count(), tp); + + var filteredRecords = polledRecords.records(tp).stream() + .filter(r -> r.offset() < toOffset) + .toList(); + + if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) { + // we already read all messages in target offsets interval + break; + } + recordsToSend.addAll(filteredRecords); + } + log.debug("{} records to send", recordsToSend.size()); + Collections.reverse(recordsToSend); + return recordsToSend; + } +} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java new file mode 100644 index 00000000000..d60d99d76cf --- /dev/null +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -0,0 +1,69 @@ +package com.provectus.kafka.ui.emitter; + +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import java.util.function.Supplier; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.utils.Bytes; +import reactor.core.publisher.FluxSink; + +@Slf4j +public class ForwardRecordEmitter + extends AbstractEmitter { + + private final Supplier> consumerSupplier; + private final ConsumerPosition position; + + public ForwardRecordEmitter( + Supplier> consumerSupplier, + ConsumerPosition position, + ConsumerRecordDeserializer recordDeserializer, + PollingSettings pollingSettings) { + super(recordDeserializer, pollingSettings); + this.position = position; + this.consumerSupplier = consumerSupplier; + } + + @Override + public void accept(FluxSink sink) { + log.debug("Starting forward polling for {}", position); + try (KafkaConsumer consumer = consumerSupplier.get()) { + sendPhase(sink, "Assigning partitions"); + var seekOperations = SeekOperations.create(consumer, position); + seekOperations.assignAndSeekNonEmptyPartitions(); + + EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter(); + while (!sink.isCancelled() + && !seekOperations.assignedPartitionsFullyPolled() + && !emptyPolls.noDataEmptyPollsReached()) { + + sendPhase(sink, "Polling"); + ConsumerRecords records = poll(sink, consumer); + emptyPolls.count(records); + + log.debug("{} records polled", records.count()); + + for (ConsumerRecord msg : records) { + if (!sink.isCancelled()) { + sendMessage(sink, msg); + } else { + break; + } + } + } + sendFinishStatsAndCompleteSink(sink); + log.debug("Polling finished"); + } catch (InterruptException kafkaInterruptException) { + log.debug("Polling finished due to thread interruption"); + sink.complete(); + } catch (Exception e) { + log.error("Error occurred while consuming records", e); + sink.error(e); + } + } +} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java index 6e9f8a8bbe3..28a1b20b178 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java @@ -1,7 +1,6 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.exception.ValidationException; -import 
com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import groovy.json.JsonSlurper; import java.util.function.Predicate; @@ -22,59 +21,47 @@ public class MessageFilters { private MessageFilters() { } - public static Predicate createMsgFilter(String query, MessageFilterTypeDTO type) { - switch (type) { - case STRING_CONTAINS: - return containsStringFilter(query); - case GROOVY_SCRIPT: - return groovyScriptFilter(query); - default: - throw new IllegalStateException("Unknown query type: " + type); - } - } - - static Predicate containsStringFilter(String string) { + public static Predicate containsStringFilter(String string) { return msg -> StringUtils.contains(msg.getKey(), string) || StringUtils.contains(msg.getContent(), string); } - static Predicate groovyScriptFilter(String script) { - var engine = getGroovyEngine(); - var compiledScript = compileScript(engine, script); + public static Predicate groovyScriptFilter(String script) { + var compiledScript = compileScript(script); var jsonSlurper = new JsonSlurper(); return new Predicate() { @SneakyThrows @Override public boolean test(TopicMessageDTO msg) { - var bindings = engine.createBindings(); + var bindings = getGroovyEngine().createBindings(); bindings.put("partition", msg.getPartition()); bindings.put("offset", msg.getOffset()); bindings.put("timestampMs", msg.getTimestamp().toInstant().toEpochMilli()); bindings.put("keyAsText", msg.getKey()); bindings.put("valueAsText", msg.getContent()); bindings.put("headers", msg.getHeaders()); - bindings.put("key", parseToJsonOrReturnAsIs(jsonSlurper, msg.getKey())); - bindings.put("value", parseToJsonOrReturnAsIs(jsonSlurper, msg.getContent())); + bindings.put("key", parseToJsonOrReturnNull(jsonSlurper, msg.getKey())); + bindings.put("value", parseToJsonOrReturnNull(jsonSlurper, msg.getContent())); var result = compiledScript.eval(bindings); if (result instanceof Boolean) { return (Boolean) result; } else { throw new ValidationException( - "Unexpected script result: %s, Boolean should be returned instead".formatted(result)); + String.format("Unexpected script result: %s, Boolean should be returned instead", result)); } } }; } @Nullable - private static Object parseToJsonOrReturnAsIs(JsonSlurper parser, @Nullable String str) { + private static Object parseToJsonOrReturnNull(JsonSlurper parser, @Nullable String str) { if (str == null) { return null; } try { return parser.parseText(str); } catch (Exception e) { - return str; + return null; } } @@ -87,9 +74,9 @@ private static synchronized GroovyScriptEngineImpl getGroovyEngine() { return GROOVY_ENGINE; } - private static CompiledScript compileScript(GroovyScriptEngineImpl engine, String script) { + private static CompiledScript compileScript(String script) { try { - return engine.compile(script); + return getGroovyEngine().compile(script); } catch (ScriptException e) { throw new ValidationException("Script syntax error: " + e.getMessage()); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index 4de027bdb23..3d1d02345a7 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -1,71 +1,58 @@ package com.provectus.kafka.ui.emitter; +import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP; + import com.google.common.annotations.VisibleForTesting; 
import com.google.common.base.Preconditions; import com.provectus.kafka.ui.model.ConsumerPosition; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import java.util.HashMap; import java.util.Map; -import java.util.stream.Collectors; -import javax.annotation.Nullable; import lombok.AccessLevel; import lombok.RequiredArgsConstructor; -import org.apache.commons.lang3.mutable.MutableLong; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; @RequiredArgsConstructor(access = AccessLevel.PACKAGE) -public class SeekOperations { +class SeekOperations { private final Consumer consumer; private final OffsetsInfo offsetsInfo; private final Map offsetsForSeek; //only contains non-empty partitions! - public static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { + static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { OffsetsInfo offsetsInfo; - if (consumerPosition.getSeekTo() == null) { - offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic()); + if (consumerPosition.partitions().isEmpty()) { + offsetsInfo = new OffsetsInfo(consumer, consumerPosition.topic()); } else { - offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet()); + offsetsInfo = new OffsetsInfo(consumer, consumerPosition.partitions()); } return new SeekOperations( consumer, offsetsInfo, - getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo()) + getOffsetsForSeek(consumer, offsetsInfo, consumerPosition) ); } - public void assignAndSeekNonEmptyPartitions() { + void assignAndSeekNonEmptyPartitions() { consumer.assign(offsetsForSeek.keySet()); offsetsForSeek.forEach(consumer::seek); } - public Map getBeginOffsets() { + Map getBeginOffsets() { return offsetsInfo.getBeginOffsets(); } - public Map getEndOffsets() { + Map getEndOffsets() { return offsetsInfo.getEndOffsets(); } - public boolean assignedPartitionsFullyPolled() { + boolean assignedPartitionsFullyPolled() { return offsetsInfo.assignedPartitionsFullyPolled(); } - // sum of (end - start) offsets for all partitions - public long summaryOffsetsRange() { - return offsetsInfo.summaryOffsetsRange(); - } - - // sum of differences between initial consumer seek and current consumer position (across all partitions) - public long offsetsProcessedFromSeek() { - MutableLong count = new MutableLong(); - offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset)); - return count.getValue(); - } - // Get offsets to seek to. 
NOTE: offsets do not contain empty partitions offsets - public Map getOffsetsForSeek() { + Map getOffsetsForSeek() { return offsetsForSeek; } @@ -75,27 +62,33 @@ public Map getOffsetsForSeek() { @VisibleForTesting static Map getOffsetsForSeek(Consumer consumer, OffsetsInfo offsetsInfo, - SeekTypeDTO seekType, - @Nullable Map seekTo) { - switch (seekType) { - case LATEST: + ConsumerPosition position) { + switch (position.pollingMode()) { + case LATEST, TAILING: return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); - case BEGINNING: + case EARLIEST: return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); - case OFFSET: - Preconditions.checkNotNull(seekTo); - return fixOffsets(offsetsInfo, seekTo); - case TIMESTAMP: - Preconditions.checkNotNull(seekTo); - return offsetsForTimestamp(consumer, offsetsInfo, seekTo); + case FROM_OFFSET, TO_OFFSET: + Preconditions.checkNotNull(position.offsets()); + return fixOffsets(offsetsInfo, position.offsets()); + case FROM_TIMESTAMP, TO_TIMESTAMP: + Preconditions.checkNotNull(position.timestamp()); + return offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, position.timestamp()); default: throw new IllegalStateException(); } } - private static Map fixOffsets(OffsetsInfo offsetsInfo, Map offsets) { - offsets = new HashMap<>(offsets); - offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); + private static Map fixOffsets(OffsetsInfo offsetsInfo, + ConsumerPosition.Offsets positionOffset) { + var offsets = new HashMap(); + if (positionOffset.offset() != null) { + offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset())); + } else { + Preconditions.checkNotNull(positionOffset.tpOffsets()); + offsets.putAll(positionOffset.tpOffsets()); + offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); + } Map result = new HashMap<>(); offsets.forEach((tp, targetOffset) -> { @@ -112,13 +105,25 @@ private static Map fixOffsets(OffsetsInfo offsetsInfo, Map return result; } - private static Map offsetsForTimestamp(Consumer consumer, OffsetsInfo offsetsInfo, - Map timestamps) { - timestamps = new HashMap<>(timestamps); - timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); + private static Map offsetsForTimestamp(Consumer consumer, + PollingModeDTO pollingMode, + OffsetsInfo offsetsInfo, + Long timestamp) { + Map timestamps = new HashMap<>(); + offsetsInfo.getNonEmptyPartitions().forEach(tp -> timestamps.put(tp, timestamp)); - return consumer.offsetsForTimes(timestamps).entrySet().stream() - .filter(e -> e.getValue() != null) - .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset())); + Map result = new HashMap<>(); + consumer.offsetsForTimes(timestamps).forEach((tp, offsetAndTimestamp) -> { + if (offsetAndTimestamp == null) { + if (pollingMode == TO_TIMESTAMP && offsetsInfo.getNonEmptyPartitions().contains(tp)) { + // if no offset was returned this means that *all* timestamps are lower + // than target timestamp. 
Is case of TO_OFFSET mode we need to read from the ending of tp + result.put(tp, offsetsInfo.getEndOffsets().get(tp)); + } + } else { + result.put(tp, offsetAndTimestamp.offset()); + } + }); + return result; } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java index 9d77923fbc6..6bc71edb5f2 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java @@ -1,14 +1,90 @@ package com.provectus.kafka.ui.model; +import static java.util.stream.Collectors.toMap; + +import com.provectus.kafka.ui.exception.ValidationException; +import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.annotation.Nullable; -import lombok.Value; +import org.apache.commons.lang3.tuple.Pair; import org.apache.kafka.common.TopicPartition; +import org.springframework.util.StringUtils; + + +public record ConsumerPosition(PollingModeDTO pollingMode, + String topic, + List partitions, //all partitions if list is empty + @Nullable Long timestamp, + @Nullable Offsets offsets) { + + public record Offsets(@Nullable Long offset, + @Nullable Map tpOffsets) { + } + + public static ConsumerPosition create(PollingModeDTO pollingMode, + String topic, + @Nullable List partitions, + @Nullable Long timestamp, + @Nullable String offsetsStr) { + Offsets offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr); + + var topicPartitions = Optional.ofNullable(partitions).orElse(List.of()) + .stream() + .map(p -> new TopicPartition(topic, p)) + .collect(Collectors.toList()); + + // if offsets are specified -inferring partitions list from there + topicPartitions = offsets.tpOffsets == null ? topicPartitions : List.copyOf(offsets.tpOffsets.keySet()); + + return new ConsumerPosition( + pollingMode, + topic, + Optional.ofNullable(topicPartitions).orElse(List.of()), + validateTimestamp(pollingMode, timestamp), + offsets + ); + } + + private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long ts) { + if (pollingMode == PollingModeDTO.FROM_TIMESTAMP || pollingMode == PollingModeDTO.TO_TIMESTAMP) { + if (ts == null) { + throw new ValidationException("timestamp not provided for " + pollingMode); + } + } + return ts; + } + + private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode, + String topic, + @Nullable String offsetsStr) { + Offsets offsets = null; + if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) { + if (!StringUtils.hasText(offsetsStr)) { + throw new ValidationException("offsets not provided for " + pollingMode); + } + if (offsetsStr.contains(":")) { + offsets = new Offsets(Long.parseLong(offsetsStr), null); + } else { + Map tpOffsets = Stream.of(offsetsStr.split(",")) + .map(p -> { + String[] split = p.split(":"); + if (split.length != 2) { + throw new IllegalArgumentException( + "Wrong seekTo argument format. 
See API docs for details"); + } + return Pair.of( + new TopicPartition(topic, Integer.parseInt(split[0])), + Long.parseLong(split[1]) + ); + }) + .collect(toMap(Pair::getKey, Pair::getValue)); + offsets = new Offsets(null, tpOffsets); + } + } + return offsets; + } -@Value -public class ConsumerPosition { - SeekTypeDTO seekType; - String topic; - @Nullable - Map seekTo; // null if positioning should apply to all tps } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index b92f0b6f2a9..722f0997217 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -5,32 +5,33 @@ import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; import com.provectus.kafka.ui.emitter.MessageFilterStats; import com.provectus.kafka.ui.emitter.MessageFilters; +import com.provectus.kafka.ui.emitter.ResultSizeLimiter; import com.provectus.kafka.ui.emitter.TailingEmitter; import com.provectus.kafka.ui.exception.TopicNotFoundException; import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.PollingModeDTO; -import com.provectus.kafka.ui.model.SeekDirectionDTO; +import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serde.api.Serde; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.ProducerRecordCreator; -import com.provectus.kafka.ui.util.ResultSizeLimiter; import com.provectus.kafka.ui.util.SslPropertiesUtil; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Random; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import javax.annotation.Nullable; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.kafka.clients.admin.OffsetSpec; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.producer.KafkaProducer; @@ -41,7 +42,6 @@ import org.apache.kafka.common.serialization.ByteArraySerializer; import org.springframework.stereotype.Service; import reactor.core.publisher.Flux; -import reactor.core.publisher.FluxSink; import reactor.core.publisher.Mono; import reactor.core.scheduler.Schedulers; @@ -57,6 +57,8 @@ public class MessagesService { private final DeserializationService deserializationService; private final ConsumerGroupService consumerGroupService; + private final Map> registeredFilters = new ConcurrentHashMap<>(); + private Mono withExistingTopic(KafkaCluster cluster, String topicName) { return adminClientService.get(cluster) .flatMap(client -> client.describeTopic(topicName)) @@ -137,69 +139,9 @@ private Mono sendMessageImpl(KafkaCluster cluster, } } - public Flux loadMessages(KafkaCluster cluster, String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO 
filterQueryType, - int limit, - SeekDirectionDTO seekDirection, - @Nullable String keySerde, - @Nullable String valueSerde) { - return withExistingTopic(cluster, topic) - .flux() - .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query, - filterQueryType, limit, seekDirection, keySerde, valueSerde)); - } - - private Flux loadMessagesImpl(KafkaCluster cluster, - String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - MessageFilterTypeDTO filterQueryType, - int limit, - SeekDirectionDTO seekDirection, - @Nullable String keySerde, - @Nullable String valueSerde) { - - java.util.function.Consumer> emitter; - ConsumerRecordDeserializer recordDeserializer = - deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); - if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { - emitter = new ForwardRecordEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - recordDeserializer, - cluster.getThrottler().get() - ); - } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { - emitter = new BackwardRecordEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - limit, - recordDeserializer, - cluster.getThrottler().get() - ); - } else { - emitter = new TailingEmitter( - () -> consumerGroupService.createConsumer(cluster), - consumerPosition, - recordDeserializer, - cluster.getThrottler().get() - ); - } - MessageFilterStats filterStats = new MessageFilterStats(); - return Flux.create(emitter) - .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) - .filter(getMsgFilter(query, filterQueryType, filterStats)) - .map(getDataMasker(cluster, topic)) - .takeWhile(createTakeWhilePredicate(seekDirection, limit)) - .map(throttleUiPublish(seekDirection)); - } - public Flux loadMessagesV2(KafkaCluster cluster, String topic, - PollingModeDTO pollingMode, + ConsumerPosition position, @Nullable String query, @Nullable String filterId, int limit, @@ -208,58 +150,55 @@ public Flux loadMessagesV2(KafkaCluster cluster, return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImplV2(cluster, topic, consumerPosition, query, - filterQueryType, limit, seekDirection, keySerde, valueSerde)); + .flatMap(td -> loadMessagesImplV2(cluster, topic, position, query, filterId, limit, keySerde, valueSerde)); } private Flux loadMessagesImplV2(KafkaCluster cluster, String topic, ConsumerPosition consumerPosition, @Nullable String query, - MessageFilterTypeDTO filterQueryType, + @Nullable String filterId, int limit, - SeekDirectionDTO seekDirection, @Nullable String keySerde, @Nullable String valueSerde) { - java.util.function.Consumer> emitter; ConsumerRecordDeserializer recordDeserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); - if (seekDirection.equals(SeekDirectionDTO.FORWARD)) { - emitter = new ForwardRecordEmitter( + + var emitter = switch (consumerPosition.pollingMode()) { + case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, + limit, recordDeserializer, - cluster.getThrottler().get() + cluster.getPollingSettings() ); - } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) { - emitter = new BackwardRecordEmitter( + case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, - limit, 
recordDeserializer, - cluster.getThrottler().get() + cluster.getPollingSettings() ); - } else { - emitter = new TailingEmitter( + case TAILING -> new TailingEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, recordDeserializer, - cluster.getThrottler().get() + cluster.getPollingSettings() ); - } + }; + MessageFilterStats filterStats = new MessageFilterStats(); return Flux.create(emitter) .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) - .filter(getMsgFilter(query, filterQueryType, filterStats)) + .filter(getMsgFilter(query, filterId, filterStats)) .map(getDataMasker(cluster, topic)) - .takeWhile(createTakeWhilePredicate(seekDirection, limit)) - .map(throttleUiPublish(seekDirection)); + .takeWhile(createTakeWhilePredicate(consumerPosition.pollingMode(), limit)) + .map(throttleUiPublish(consumerPosition.pollingMode())); } private Predicate createTakeWhilePredicate( - SeekDirectionDTO seekDirection, int limit) { - return seekDirection == SeekDirectionDTO.TAILING + PollingModeDTO pollingMode, int limit) { + return pollingMode == PollingModeDTO.TAILING ? evt -> true // no limit for tailing : new ResultSizeLimiter(limit); } @@ -278,21 +217,35 @@ private UnaryOperator getDataMasker(KafkaCluster cluster, }; } - private Predicate getMsgFilter(String query, - MessageFilterTypeDTO filterQueryType, + public String registerMessageFilter(String groovyCode) { + var filter = MessageFilters.groovyScriptFilter(groovyCode); + var id = RandomStringUtils.random(10, true, true); + registeredFilters.put(id, filter); + return id; + } + + private Predicate getMsgFilter(@Nullable String containsStrFilter, + @Nullable String filterId, MessageFilterStats filterStats) { - if (StringUtils.isEmpty(query)) { - return evt -> true; + Predicate messageFilter = e -> true; + if (containsStrFilter != null) { + messageFilter = MessageFilters.containsStringFilter(containsStrFilter); + } + if (filterId != null) { + messageFilter = registeredFilters.get(filterId); + if (messageFilter == null) { + throw new ValidationException("No filter was registered with id " + filterId); + } } - var messageFilter = MessageFilters.createMsgFilter(query, filterQueryType); + Predicate finalMessageFilter = messageFilter; return evt -> { // we only apply filter for message events if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) { try { - return messageFilter.test(evt.getMessage()); + return finalMessageFilter.test(evt.getMessage()); } catch (Exception e) { filterStats.incrementApplyErrors(); - log.trace("Error applying filter '{}' for message {}", query, evt.getMessage()); + log.trace("Error applying filter for message {}", evt.getMessage()); return false; } } @@ -300,8 +253,8 @@ private Predicate getMsgFilter(String query, }; } - private UnaryOperator throttleUiPublish(SeekDirectionDTO seekDirection) { - if (seekDirection == SeekDirectionDTO.TAILING) { + private UnaryOperator throttleUiPublish(PollingModeDTO pollingMode) { + if (pollingMode == PollingModeDTO.TAILING) { RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE); return m -> { rateLimiter.acquire(1); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java index ff11aa6656a..b925ea607f2 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java @@ -56,7 +56,7 @@ public void shouldDeleteRecords() 
{ } long count = webTestClient.get() - .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName) + .uri("/api/clusters/{clusterName}/topics/{topicName}/messages/v2?m=EARLIEST", LOCAL, topicName) .accept(TEXT_EVENT_STREAM) .exchange() .expectStatus() diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java index affa423123c..79bda501740 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java @@ -2,7 +2,9 @@ import static org.assertj.core.api.Assertions.assertThat; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.PollingModeDTO; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -14,6 +16,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; class SeekOperationsTest { @@ -45,8 +49,7 @@ void latest() { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.LATEST, - null + new ConsumerPosition(PollingModeDTO.LATEST, topic, null, null, null) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L)); } @@ -56,33 +59,38 @@ void beginning() { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.BEGINNING, - null + new ConsumerPosition(PollingModeDTO.EARLIEST, topic, null, null, null) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L)); } - @Test - void offsets() { + @ParameterizedTest + @CsvSource({"TO_OFFSET", "FROM_OFFSET"}) + void offsets(PollingModeDTO mode) { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.OFFSET, - Map.of(tp1, 10L, tp2, 10L, tp3, 26L) + new ConsumerPosition( + mode, topic, List.of(tp1, tp2, tp3), null, + new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 10L, tp3, 26L)) + ) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L)); } - @Test - void offsetsWithBoundsFixing() { + @ParameterizedTest + @CsvSource({"TO_OFFSET", "FROM_OFFSET"}) + void offsetsWithBoundsFixing(PollingModeDTO mode) { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - SeekTypeDTO.OFFSET, - Map.of(tp1, 10L, tp2, 21L, tp3, 24L) + new ConsumerPosition( + mode, topic, List.of(tp1, tp2, tp3), null, + new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 21L, tp3, 24L)) + ) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L)); } } -} \ No newline at end of file +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java index 2798bd213fe..6585ba840ea 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java @@ -4,10 +4,9 @@ import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.model.ConsumerPosition; -import 
com.provectus.kafka.ui.model.MessageFilterTypeDTO; -import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serdes.builtin.StringSerde; import com.provectus.kafka.ui.service.ClustersStorage; import com.provectus.kafka.ui.service.MessagesService; import java.time.Duration; @@ -110,14 +109,13 @@ private Flux createTailingFlux( .get(); return applicationContext.getBean(MessagesService.class) - .loadMessages(cluster, topicName, - new ConsumerPosition(SeekTypeDTO.LATEST, topic, null), + .loadMessagesV2(cluster, topicName, + new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null), query, - MessageFilterTypeDTO.STRING_CONTAINS, + null, 0, - SeekDirectionDTO.TAILING, - "String", - "String"); + StringSerde.name(), + StringSerde.name()); } private List startTailing(String filterQuery) { diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index cb50c0eb818..fa2ad19aace 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -1,22 +1,16 @@ package com.provectus.kafka.ui.service; -import static com.provectus.kafka.ui.service.MessagesService.execSmartFilterTest; -import static org.assertj.core.api.Assertions.assertThat; - import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.exception.TopicNotFoundException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SeekTypeDTO; -import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serdes.builtin.StringSerde; import java.util.List; -import java.util.Map; import java.util.UUID; import org.apache.kafka.clients.admin.NewTopic; import org.junit.jupiter.api.BeforeEach; @@ -60,7 +54,9 @@ void sendMessageReturnsExceptionWhenTopicNotFound() { @Test void loadMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService - .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String")) + .loadMessagesV2(cluster, NON_EXISTING_TOPIC, + new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null), + null, null, 1, "String", "String")) .expectError(TopicNotFoundException.class) .verify(); } @@ -73,14 +69,13 @@ void maskingAppliedOnConfiguredClusters() throws Exception { producer.send(testTopic, "message1"); producer.send(testTopic, "message2").get(); - Flux msgsFlux = messagesService.loadMessages( + Flux msgsFlux = messagesService.loadMessagesV2( cluster, testTopic, - new ConsumerPosition(SeekTypeDTO.BEGINNING, testTopic, null), + new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), null, null, 100, - SeekDirectionDTO.FORWARD, StringSerde.name(), StringSerde.name() ).filter(evt -> evt.getType() == 
TopicMessageEventDTO.TypeEnum.MESSAGE) @@ -96,40 +91,4 @@ void maskingAppliedOnConfiguredClusters() throws Exception { } } - @Test - void execSmartFilterTestReturnsExecutionResult() { - var params = new SmartFilterTestExecutionDTO() - .filterCode("key != null && value != null && headers != null && timestampMs != null && offset != null") - .key("1234") - .value("{ \"some\" : \"value\" } ") - .headers(Map.of("h1", "hv1")) - .offset(12345L) - .timestampMs(System.currentTimeMillis()) - .partition(1); - assertThat(execSmartFilterTest(params).getResult()).isTrue(); - - params.setFilterCode("return false"); - assertThat(execSmartFilterTest(params).getResult()).isFalse(); - } - - @Test - void execSmartFilterTestReturnsErrorOnFilterApplyError() { - var result = execSmartFilterTest( - new SmartFilterTestExecutionDTO() - .filterCode("return 1/0") - ); - assertThat(result.getResult()).isNull(); - assertThat(result.getError()).containsIgnoringCase("execution error"); - } - - @Test - void execSmartFilterTestReturnsErrorOnFilterCompilationError() { - var result = execSmartFilterTest( - new SmartFilterTestExecutionDTO() - .filterCode("this is invalid groovy syntax = 1") - ); - assertThat(result.getResult()).isNull(); - assertThat(result.getError()).containsIgnoringCase("Compilation error"); - } - } diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java index 2a9fa76f136..1474a034a84 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java @@ -1,26 +1,25 @@ package com.provectus.kafka.ui.service; -import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING; -import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST; -import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET; -import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP; +import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST; +import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_OFFSET; +import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_TIMESTAMP; +import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST; +import static com.provectus.kafka.ui.model.PollingModeDTO.TO_OFFSET; +import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP; import static org.assertj.core.api.Assertions.assertThat; import com.provectus.kafka.ui.AbstractIntegrationTest; -import com.provectus.kafka.ui.emitter.BackwardEmitter; -import com.provectus.kafka.ui.emitter.EnhancedConsumer; -import com.provectus.kafka.ui.emitter.ForwardEmitter; +import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; import com.provectus.kafka.ui.emitter.PollingSettings; -import com.provectus.kafka.ui.emitter.PollingThrottler; import com.provectus.kafka.ui.model.ConsumerPosition; -import com.provectus.kafka.ui.model.TopicMessageDTO; +import com.provectus.kafka.ui.model.ConsumerPosition.Offsets; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serde.api.Serde; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.PropertyResolverImpl; import com.provectus.kafka.ui.serdes.builtin.StringSerde; -import com.provectus.kafka.ui.util.ApplicationMetrics; import 
java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; @@ -31,15 +30,17 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Predicate; import java.util.stream.Collectors; import lombok.Value; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.header.internals.RecordHeader; +import org.apache.kafka.common.serialization.BytesDeserializer; +import org.apache.kafka.common.utils.Bytes; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -57,16 +58,16 @@ class RecordEmitterTest extends AbstractIntegrationTest { static final String EMPTY_TOPIC = TOPIC + "_empty"; static final List SENT_RECORDS = new ArrayList<>(); static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer(); - static final Predicate NOOP_FILTER = m -> true; @BeforeAll static void generateMsgs() throws Exception { createTopic(new NewTopic(TOPIC, PARTITIONS, (short) 1)); createTopic(new NewTopic(EMPTY_TOPIC, PARTITIONS, (short) 1)); + long startTs = System.currentTimeMillis(); try (var producer = KafkaTestProducer.forKafka(kafka)) { for (int partition = 0; partition < PARTITIONS; partition++) { for (int i = 0; i < MSGS_PER_PARTITION; i++) { - long ts = System.currentTimeMillis() + i; + long ts = (startTs += 100); var value = "msg_" + partition + "_" + i; var metadata = producer.send( new ProducerRecord<>( @@ -93,7 +94,6 @@ static void generateMsgs() throws Exception { static void cleanup() { deleteTopic(TOPIC); deleteTopic(EMPTY_TOPIC); - SENT_RECORDS.clear(); } private static ConsumerRecordDeserializer createRecordsDeserializer() { @@ -106,28 +106,24 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() { s.deserializer(null, Serde.Target.VALUE), StringSerde.name(), s.deserializer(null, Serde.Target.KEY), - s.deserializer(null, Serde.Target.VALUE), - msg -> msg + s.deserializer(null, Serde.Target.VALUE) ); } @Test void pollNothingOnEmptyTopic() { - var forwardEmitter = new ForwardEmitter( + var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null), - 100, + new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); - var backwardEmitter = new BackwardEmitter( + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null), + new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), 100, RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); @@ -146,21 +142,18 @@ void pollNothingOnEmptyTopic() { @Test void pollFullTopicFromBeginning() { - var forwardEmitter = new ForwardEmitter( + var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(BEGINNING, TOPIC, null), - PARTITIONS * MSGS_PER_PARTITION, + new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null), RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); - var backwardEmitter = new BackwardEmitter( + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new 
ConsumerPosition(LATEST, TOPIC, null), + new ConsumerPosition(LATEST, TOPIC, List.of(), null, null), PARTITIONS * MSGS_PER_PARTITION, RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); @@ -178,21 +171,20 @@ void pollWithOffsets() { targetOffsets.put(new TopicPartition(TOPIC, i), offset); } - var forwardEmitter = new ForwardEmitter( + var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, targetOffsets), - PARTITIONS * MSGS_PER_PARTITION, + new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, + new Offsets(null, targetOffsets)), RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); - var backwardEmitter = new BackwardEmitter( + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, targetOffsets), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, + new Offsets(null, targetOffsets)), PARTITIONS * MSGS_PER_PARTITION, RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); @@ -213,50 +205,40 @@ void pollWithOffsets() { @Test void pollWithTimestamps() { - Map targetTimestamps = new HashMap<>(); - final Map> perPartition = - SENT_RECORDS.stream().collect(Collectors.groupingBy((r) -> r.tp)); - for (int i = 0; i < PARTITIONS; i++) { - final List records = perPartition.get(new TopicPartition(TOPIC, i)); - int randRecordIdx = ThreadLocalRandom.current().nextInt(records.size()); - log.info("partition: {} position: {}", i, randRecordIdx); - targetTimestamps.put( - new TopicPartition(TOPIC, i), - records.get(randRecordIdx).getTimestamp() - ); - } + var tsStats = SENT_RECORDS.stream().mapToLong(Record::getTimestamp).summaryStatistics(); + //choosing ts in the middle + long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2); - var forwardEmitter = new ForwardEmitter( + var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, - new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps), - PARTITIONS * MSGS_PER_PARTITION, + new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); - var backwardEmitter = new BackwardEmitter( + expectEmitter( + forwardEmitter, + SENT_RECORDS.stream() + .filter(r -> r.getTimestamp() >= targetTimestamp) + .map(Record::getValue) + .collect(Collectors.toList()) + ); + + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps), + new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), PARTITIONS * MSGS_PER_PARTITION, RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); - var expectedValues = SENT_RECORDS.stream() - .filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp())) - .map(Record::getValue) - .collect(Collectors.toList()); - - expectEmitter(forwardEmitter, expectedValues); - - expectedValues = SENT_RECORDS.stream() - .filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp())) - .map(Record::getValue) - .collect(Collectors.toList()); - - expectEmitter(backwardEmitter, expectedValues); + expectEmitter( + backwardEmitter, + SENT_RECORDS.stream() + .filter(r -> r.getTimestamp() < targetTimestamp) + .map(Record::getValue) + .collect(Collectors.toList()) + ); } @Test @@ -267,12 +249,12 @@ void backwardEmitterSeekToEnd() { targetOffsets.put(new TopicPartition(TOPIC, i), (long) MSGS_PER_PARTITION); } - var 
backwardEmitter = new BackwardEmitter( + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, targetOffsets), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, + new Offsets(null, targetOffsets)), numMessages, RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); @@ -294,12 +276,11 @@ void backwardEmitterSeekToBegin() { offsets.put(new TopicPartition(TOPIC, i), 0L); } - var backwardEmitter = new BackwardEmitter( + var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, - new ConsumerPosition(OFFSET, TOPIC, offsets), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)), 100, RECORD_DESERIALIZER, - NOOP_FILTER, PollingSettings.createDefault() ); @@ -339,20 +320,22 @@ private void expectEmitter( assertionsConsumer.accept(step.expectComplete().verifyThenAssertThat()); } - private EnhancedConsumer createConsumer() { + private KafkaConsumer createConsumer() { return createConsumer(Map.of()); } - private EnhancedConsumer createConsumer(Map properties) { + private KafkaConsumer createConsumer(Map properties) { final Map map = Map.of( ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(), ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(), - ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19 // to check multiple polls + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19, // to check multiple polls + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class ); Properties props = new Properties(); props.putAll(map); props.putAll(properties); - return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop()); + return new KafkaConsumer<>(props); } @Value diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java index 78c111cdd19..c8a8adef2f7 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java @@ -7,8 +7,7 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.SeekDirectionDTO; -import com.provectus.kafka.ui.model.SeekTypeDTO; +import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serdes.builtin.Int32Serde; @@ -20,6 +19,7 @@ import io.confluent.kafka.schemaregistry.json.JsonSchema; import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema; import java.time.Duration; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.UUID; @@ -497,18 +497,13 @@ public void doAssert(Consumer msgAssert) { String topic = createTopicAndCreateSchemas(); try { messagesService.sendMessage(targetCluster, topic, msgToSend).block(); - TopicMessageDTO polled = messagesService.loadMessages( + TopicMessageDTO polled = messagesService.loadMessagesV2( targetCluster, topic, - new ConsumerPosition( - SeekTypeDTO.BEGINNING, - topic, - Map.of(new TopicPartition(topic, 0), 0L) - ), + new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null), null, null, 1, - 
SeekDirectionDTO.FORWARD, msgToSend.getKeySerde().get(), msgToSend.getValueSerde().get() ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index ca85d018979..2fa79b8b7d0 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -772,9 +772,7 @@ paths: content: application/json: schema: - type: array - items: - $ref: '#/components/schemas/MessageFilterId' + $ref: '#/components/schemas/MessageFilterId' /api/clusters/{clusterName}/topics/{topicName}/messages/v2: @@ -804,7 +802,7 @@ paths: in: query schema: type: array - description: List of target partitions( all partitions if not provided) + description: List of target partitions (all partitions if not provided) items: type: integer - name: lim @@ -824,7 +822,7 @@ paths: type: string - name: offs in: query - description: partition offsets to read from / to. Format is "p1:off1,p2:off2,..." + description: partition offsets to read from / to. Format is "p1:offset1,p2:offset2,...". schema: type: string - name: ts @@ -2571,7 +2569,6 @@ components: - CONSUMING - DONE - CURSOR - - EMIT_THROTTLING message: $ref: "#/components/schemas/TopicMessage" phase: @@ -2708,7 +2705,7 @@ components: - FROM_TIMESTAMP - TO_TIMESTAMP - LATEST - - FIRST + - EARLIEST - TAILING MessageFilterType: From e43e62bcb7012225c977938e7ce4eff57cb25959 Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 22 Mar 2023 23:21:37 +0400 Subject: [PATCH 04/29] wip --- documentation/compose/kafka-ui.yaml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/documentation/compose/kafka-ui.yaml b/documentation/compose/kafka-ui.yaml index c5843b4bd5c..14a269ca7cb 100644 --- a/documentation/compose/kafka-ui.yaml +++ b/documentation/compose/kafka-ui.yaml @@ -20,6 +20,11 @@ services: KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085 KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083 + KAFKA_CLUSTERS_1_NAME: secondLocal + KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092 + KAFKA_CLUSTERS_1_METRICS_PORT: 9998 + KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085 + DYNAMIC_CONFIG_ENABLED: 'true' kafka0: image: confluentinc/cp-kafka:7.2.1 @@ -40,7 +45,7 @@ services: KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_NODE_ID: 1 - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093,2@kafka1:29093' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' @@ -57,7 +62,7 @@ services: - "9093:9092" - "9998:9998" environment: - KAFKA_BROKER_ID: 2 + KAFKA_BROKER_ID: 1 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092' KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 @@ -67,8 +72,8 @@ services: KAFKA_JMX_PORT: 9998 KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998 KAFKA_PROCESS_ROLES: 'broker,controller' - KAFKA_NODE_ID: 2 - KAFKA_CONTROLLER_QUORUM_VOTERS: '2@kafka1:29093,1@kafka0:29093' + KAFKA_NODE_ID: 1 + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093' KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092' KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' From 1037e41889691ab129ed1cd2fbaa9080c878d3bb Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 22 Mar 2023 23:29:48 +0400 Subject: [PATCH 05/29] wip --- .../kafka/ui/model/ConsumerPosition.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java index 6bc71edb5f2..666278d26fe 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java @@ -20,7 +20,8 @@ public record ConsumerPosition(PollingModeDTO pollingMode, @Nullable Long timestamp, @Nullable Offsets offsets) { - public record Offsets(@Nullable Long offset, + // one of properties will be null + public record Offsets(@Nullable Long offset, //should be applied to all partitions @Nullable Map tpOffsets) { } @@ -29,20 +30,22 @@ public static ConsumerPosition create(PollingModeDTO pollingMode, @Nullable List partitions, @Nullable Long timestamp, @Nullable String offsetsStr) { - Offsets offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr); + @Nullable var offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr); var topicPartitions = Optional.ofNullable(partitions).orElse(List.of()) .stream() .map(p -> new TopicPartition(topic, p)) .collect(Collectors.toList()); - // if offsets are specified -inferring partitions list from there - topicPartitions = offsets.tpOffsets == null ? topicPartitions : List.copyOf(offsets.tpOffsets.keySet()); + // if offsets are specified - inferring partitions list from there + topicPartitions = (offsets != null && offsets.tpOffsets() != null) + ? 
List.copyOf(offsets.tpOffsets().keySet()) + : topicPartitions; return new ConsumerPosition( pollingMode, topic, - Optional.ofNullable(topicPartitions).orElse(List.of()), + topicPartitions, validateTimestamp(pollingMode, timestamp), offsets ); @@ -65,7 +68,7 @@ private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode, if (!StringUtils.hasText(offsetsStr)) { throw new ValidationException("offsets not provided for " + pollingMode); } - if (offsetsStr.contains(":")) { + if (!offsetsStr.contains(":")) { offsets = new Offsets(Long.parseLong(offsetsStr), null); } else { Map tpOffsets = Stream.of(offsetsStr.split(",")) From 65f60b9edb3d158edb4a09c33bd34b34fb3f9cba Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 22 Mar 2023 23:52:50 +0400 Subject: [PATCH 06/29] wip --- .../ui/emitter/ForwardRecordEmitter.java | 2 +- .../kafka/ui/emitter/OffsetsInfo.java | 22 +++++++------- .../kafka/ui/emitter/SeekOperations.java | 26 ++++++----------- .../kafka/ui/emitter/TailingEmitter.java | 29 +++++++------------ .../kafka/ui/emitter/SeekOperationsTest.java | 17 +++++++++-- 5 files changed, 46 insertions(+), 50 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java index d60d99d76cf..94b6ce236be 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -35,7 +35,7 @@ public void accept(FluxSink sink) { try (KafkaConsumer consumer = consumerSupplier.get()) { sendPhase(sink, "Assigning partitions"); var seekOperations = SeekOperations.create(consumer, position); - seekOperations.assignAndSeekNonEmptyPartitions(); + seekOperations.assignAndSeek(); EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter(); while (!sink.isCancelled() diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java index 85802724178..b8c31c0af56 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java @@ -1,19 +1,20 @@ package com.provectus.kafka.ui.emitter; import com.google.common.base.Preconditions; +import com.google.common.collect.Sets; import java.util.Collection; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import lombok.Getter; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.mutable.MutableLong; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; @Slf4j @Getter -class OffsetsInfo { +public class OffsetsInfo { private final Consumer consumer; @@ -23,15 +24,16 @@ class OffsetsInfo { private final Set nonEmptyPartitions = new HashSet<>(); private final Set emptyPartitions = new HashSet<>(); - OffsetsInfo(Consumer consumer, String topic) { + public OffsetsInfo(Consumer consumer, String topic) { this(consumer, consumer.partitionsFor(topic).stream() .map(pi -> new TopicPartition(topic, pi.partition())) - .toList() + .collect(Collectors.toList()) ); } - OffsetsInfo(Consumer consumer, Collection targetPartitions) { + public OffsetsInfo(Consumer consumer, + Collection targetPartitions) { this.consumer = consumer; this.beginOffsets = consumer.beginningOffsets(targetPartitions); this.endOffsets = 
consumer.endOffsets(targetPartitions); @@ -45,8 +47,8 @@ class OffsetsInfo { }); } - boolean assignedPartitionsFullyPolled() { - for (var tp : consumer.assignment()) { + public boolean assignedPartitionsFullyPolled() { + for (var tp: consumer.assignment()) { Preconditions.checkArgument(endOffsets.containsKey(tp)); if (endOffsets.get(tp) > consumer.position(tp)) { return false; @@ -55,10 +57,8 @@ boolean assignedPartitionsFullyPolled() { return true; } - long summaryOffsetsRange() { - MutableLong cnt = new MutableLong(); - nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp))); - return cnt.getValue(); + public Set allTargetPartitions() { + return Sets.union(nonEmptyPartitions, emptyPartitions); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index 3d1d02345a7..44f727b20e9 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -21,20 +21,14 @@ class SeekOperations { private final Map offsetsForSeek; //only contains non-empty partitions! static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { - OffsetsInfo offsetsInfo; - if (consumerPosition.partitions().isEmpty()) { - offsetsInfo = new OffsetsInfo(consumer, consumerPosition.topic()); - } else { - offsetsInfo = new OffsetsInfo(consumer, consumerPosition.partitions()); - } - return new SeekOperations( - consumer, - offsetsInfo, - getOffsetsForSeek(consumer, offsetsInfo, consumerPosition) - ); + OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty() + ? new OffsetsInfo(consumer, consumerPosition.topic()) + : new OffsetsInfo(consumer, consumerPosition.partitions()); + var offsetsToSeek = getOffsetsForSeek(consumer, offsetsInfo, consumerPosition); + return new SeekOperations(consumer, offsetsInfo, offsetsToSeek); } - void assignAndSeekNonEmptyPartitions() { + void assignAndSeek() { consumer.assign(offsetsForSeek.keySet()); offsetsForSeek.forEach(consumer::seek); } @@ -43,10 +37,6 @@ Map getBeginOffsets() { return offsetsInfo.getBeginOffsets(); } - Map getEndOffsets() { - return offsetsInfo.getEndOffsets(); - } - boolean assignedPartitionsFullyPolled() { return offsetsInfo.assignedPartitionsFullyPolled(); } @@ -64,7 +54,9 @@ static Map getOffsetsForSeek(Consumer consumer, OffsetsInfo offsetsInfo, ConsumerPosition position) { switch (position.pollingMode()) { - case LATEST, TAILING: + case TAILING: + return consumer.endOffsets(offsetsInfo.allTargetPartitions()); + case LATEST: return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); case EARLIEST: return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java index c3f04fe8cc2..dee522b01ea 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java @@ -1,28 +1,26 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.model.ConsumerPosition; -import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; -import java.util.HashMap; -import 
java.util.function.Predicate; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; @Slf4j public class TailingEmitter extends AbstractEmitter { - private final Supplier consumerSupplier; + private final Supplier> consumerSupplier; private final ConsumerPosition consumerPosition; - public TailingEmitter(Supplier consumerSupplier, + public TailingEmitter(Supplier> consumerSupplier, ConsumerPosition consumerPosition, - ConsumerRecordDeserializer deserializer, - Predicate filter, + ConsumerRecordDeserializer recordDeserializer, PollingSettings pollingSettings) { - super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings); + super(recordDeserializer, pollingSettings); this.consumerSupplier = consumerSupplier; this.consumerPosition = consumerPosition; } @@ -30,12 +28,13 @@ public TailingEmitter(Supplier consumerSupplier, @Override public void accept(FluxSink sink) { log.debug("Starting tailing polling for {}", consumerPosition); - try (EnhancedConsumer consumer = consumerSupplier.get()) { - assignAndSeek(consumer); + try (KafkaConsumer consumer = consumerSupplier.get()) { + SeekOperations.create(consumer, consumerPosition) + .assignAndSeek(); while (!sink.isCancelled()) { sendPhase(sink, "Polling"); var polled = poll(sink, consumer); - send(sink, polled); + polled.forEach(r -> sendMessage(sink, r)); } sink.complete(); log.debug("Tailing finished"); @@ -48,12 +47,4 @@ public void accept(FluxSink sink) { } } - private void assignAndSeek(EnhancedConsumer consumer) { - var seekOperations = SeekOperations.create(consumer, consumerPosition); - var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end - seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions - consumer.assign(seekOffsets.keySet()); - seekOffsets.forEach(consumer::seek); - } - } diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java index 79bda501740..e288e77a113 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java @@ -1,5 +1,8 @@ package com.provectus.kafka.ui.emitter; +import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST; +import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST; +import static com.provectus.kafka.ui.model.PollingModeDTO.TAILING; import static org.assertj.core.api.Assertions.assertThat; import com.provectus.kafka.ui.model.ConsumerPosition; @@ -44,12 +47,22 @@ void initMockConsumer() { @Nested class GetOffsetsForSeek { + @Test + void tailing() { + var offsets = SeekOperations.getOffsetsForSeek( + consumer, + new OffsetsInfo(consumer, topic), + new ConsumerPosition(TAILING, topic, List.of(), null, null) + ); + assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L)); + } + @Test void latest() { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - new ConsumerPosition(PollingModeDTO.LATEST, topic, null, null, null) + new ConsumerPosition(LATEST, topic, List.of(), null, null) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 
20L, tp3, 30L)); } @@ -59,7 +72,7 @@ void beginning() { var offsets = SeekOperations.getOffsetsForSeek( consumer, new OffsetsInfo(consumer, topic), - new ConsumerPosition(PollingModeDTO.EARLIEST, topic, null, null, null) + new ConsumerPosition(EARLIEST, topic, List.of(), null, null) ); assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L)); } From e6f94c916878f0a15b90f304d16f20acac002596 Mon Sep 17 00:00:00 2001 From: iliax Date: Thu, 23 Mar 2023 00:01:07 +0400 Subject: [PATCH 07/29] wip --- .../java/com/provectus/kafka/ui/emitter/MessageFilters.java | 4 ++++ .../java/com/provectus/kafka/ui/service/MessagesService.java | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java index 28a1b20b178..f109289fe49 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java @@ -21,6 +21,10 @@ public class MessageFilters { private MessageFilters() { } + public static Predicate noop() { + return e -> true; + } + public static Predicate containsStringFilter(String string) { return msg -> StringUtils.contains(msg.getKey(), string) || StringUtils.contains(msg.getContent(), string); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 722f0997217..11845bd0c05 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.Random; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; @@ -227,7 +226,7 @@ public String registerMessageFilter(String groovyCode) { private Predicate getMsgFilter(@Nullable String containsStrFilter, @Nullable String filterId, MessageFilterStats filterStats) { - Predicate messageFilter = e -> true; + Predicate messageFilter = MessageFilters.noop(); if (containsStrFilter != null) { messageFilter = MessageFilters.containsStringFilter(containsStrFilter); } From 05321fd335afb4fd48ef263306684086a9c27470 Mon Sep 17 00:00:00 2001 From: iliax Date: Thu, 23 Mar 2023 14:43:19 +0400 Subject: [PATCH 08/29] wip --- kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index 2fa79b8b7d0..f43c4f622d9 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -2568,7 +2568,6 @@ components: - MESSAGE - CONSUMING - DONE - - CURSOR message: $ref: "#/components/schemas/TopicMessage" phase: From ebae7730439b96842d63b665771fcae96b9e5cc6 Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 24 Apr 2023 20:00:45 +0400 Subject: [PATCH 09/29] api updates --- .../ui/controller/MessagesController.java | 78 ++++--- .../kafka/ui/emitter/AbstractEmitter.java | 48 ++-- .../ui/emitter/BackwardRecordEmitter.java | 24 +- .../kafka/ui/emitter/ConsumingStats.java | 48 ++-- .../provectus/kafka/ui/emitter/Cursor.java | 69 ++++++ 
.../ui/emitter/ForwardRecordEmitter.java | 30 ++- .../kafka/ui/emitter/MessagesProcessing.java | 100 +++----- .../kafka/ui/emitter/SeekOperations.java | 28 +-- .../kafka/ui/model/ConsumerPosition.java | 37 +-- .../ui/serdes/ConsumerRecordDeserializer.java | 18 +- .../kafka/ui/service/MessagesService.java | 216 +++++++++++------- .../ui/service/PollingCursorsStorage.java | 25 ++ .../kafka/ui/emitter/TailingEmitterTest.java | 2 +- .../kafka/ui/service/MessagesServiceTest.java | 4 +- .../kafka/ui/service/RecordEmitterTest.java | 58 +++-- .../kafka/ui/service/SendAndReadTests.java | 2 +- .../main/resources/swagger/kafka-ui-api.yaml | 32 +-- 17 files changed, 465 insertions(+), 354 deletions(-) create mode 100644 kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java create mode 100644 kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index 88cf0746bb6..fed0544fff6 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -25,8 +25,8 @@ import com.provectus.kafka.ui.service.rbac.AccessControlService; import java.util.List; import java.util.Optional; -import javax.annotation.Nullable; import javax.validation.Valid; +import javax.validation.ValidationException; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.http.ResponseEntity; @@ -41,9 +41,6 @@ @Slf4j public class MessagesController extends AbstractController implements MessagesApi { - private static final int MAX_LOAD_RECORD_LIMIT = 100; - private static final int DEFAULT_LOAD_RECORD_LIMIT = 20; - private final MessagesService messagesService; private final DeserializationService deserializationService; private final AccessControlService accessControlService; @@ -81,7 +78,45 @@ public Mono>> getTopicMessages(String String keySerde, String valueSerde, ServerWebExchange exchange) { - throw new IllegalStateException(); + throw new ValidationException("Not supported"); + } + + + @Override + public Mono>> getTopicMessagesV2(String clusterName, String topicName, + PollingModeDTO mode, + List partitions, + Integer limit, + String stringFilter, + String smartFilterId, + Long offset, + Long timestamp, + String keySerde, + String valueSerde, + String cursor, + ServerWebExchange exchange) { + final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + .cluster(clusterName) + .topic(topicName) + .topicActions(MESSAGES_READ) + .build()); + + Flux messagesFlux; + if (cursor != null) { + messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor); + } else { + messagesFlux = messagesService.loadMessages( + getCluster(clusterName), + topicName, + ConsumerPosition.create(mode, topicName, partitions, timestamp, offset), + stringFilter, + smartFilterId, + limit, + keySerde, + valueSerde + ); + } + return validateAccess.then(Mono.just(ResponseEntity.ok(messagesFlux))); } @Override @@ -129,39 +164,6 @@ public Mono> getSerdes(String clusterNam ); } - - @Override - public Mono>> getTopicMessagesV2(String clusterName, String topicName, - PollingModeDTO mode, - @Nullable List partitions, - @Nullable Integer limit, - @Nullable String query, - @Nullable String filterId, - @Nullable String 
offsetString, - @Nullable Long ts, - @Nullable String keySerde, - @Nullable String valueSerde, - ServerWebExchange exchange) { - final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() - .cluster(clusterName) - .topic(topicName) - .topicActions(MESSAGES_READ) - .build()); - - ConsumerPosition consumerPosition = ConsumerPosition.create(mode, topicName, partitions, ts, offsetString); - - int recordsLimit = - Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT); - - return validateAccess.then( - Mono.just( - ResponseEntity.ok( - messagesService.loadMessagesV2( - getCluster(clusterName), topicName, consumerPosition, - query, filterId, recordsLimit, keySerde, valueSerde)))); - } - - @Override public Mono> registerFilter(String clusterName, String topicName, diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index 5dfe1b85a1b..1b9a0efa064 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -1,11 +1,9 @@ package com.provectus.kafka.ui.emitter; -import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; -import com.provectus.kafka.ui.model.TopicMessagePhaseDTO; -import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import java.time.Duration; import java.time.Instant; +import javax.annotation.Nullable; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; @@ -14,13 +12,12 @@ public abstract class AbstractEmitter implements java.util.function.Consumer> { - private final ConsumerRecordDeserializer recordDeserializer; - private final ConsumingStats consumingStats = new ConsumingStats(); + private final MessagesProcessing messagesProcessing; private final PollingThrottler throttler; protected final PollingSettings pollingSettings; - protected AbstractEmitter(ConsumerRecordDeserializer recordDeserializer, PollingSettings pollingSettings) { - this.recordDeserializer = recordDeserializer; + protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) { + this.messagesProcessing = messagesProcessing; this.pollingSettings = pollingSettings; this.throttler = pollingSettings.getPollingThrottler(); } @@ -40,39 +37,28 @@ protected ConsumerRecords poll( return records; } + protected boolean isSendLimitReached() { + return messagesProcessing.limitReached(); + } + protected void sendMessage(FluxSink sink, - ConsumerRecord msg) { - final TopicMessageDTO topicMessage = recordDeserializer.deserialize(msg); - sink.next( - new TopicMessageEventDTO() - .type(TopicMessageEventDTO.TypeEnum.MESSAGE) - .message(topicMessage) - ); + ConsumerRecord msg) { + messagesProcessing.sendMsg(sink, msg); } protected void sendPhase(FluxSink sink, String name) { - sink.next( - new TopicMessageEventDTO() - .type(TopicMessageEventDTO.TypeEnum.PHASE) - .phase(new TopicMessagePhaseDTO().name(name)) - ); + messagesProcessing.sendPhase(sink, name); } protected int sendConsuming(FluxSink sink, - ConsumerRecords records, - long elapsed) { - return consumingStats.sendConsumingEvt(sink, records, elapsed, getFilterApplyErrors(sink)); + ConsumerRecords records, + long elapsed) { + return 
messagesProcessing.sentConsumingInfo(sink, records, elapsed); } - protected void sendFinishStatsAndCompleteSink(FluxSink sink) { - consumingStats.sendFinishEvent(sink, getFilterApplyErrors(sink)); + // cursor is null if target partitions were fully polled (no need to do paging) + protected void sendFinishStatsAndCompleteSink(FluxSink sink, @Nullable Cursor.Tracking cursor) { + messagesProcessing.sendFinishEvents(sink, cursor); sink.complete(); } - - protected Number getFilterApplyErrors(FluxSink sink) { - return sink.contextView() - .getOrEmpty(MessageFilterStats.class) - .map(MessageFilterStats::getFilterApplyErrors) - .orElse(0); - } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java index d2ab8773e77..ceb77d3b54e 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java @@ -2,7 +2,6 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.TopicMessageEventDTO; -import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -24,17 +23,20 @@ public class BackwardRecordEmitter extends AbstractEmitter { private final Supplier> consumerSupplier; private final ConsumerPosition consumerPosition; private final int messagesPerPage; + private final Cursor.Tracking cursor; public BackwardRecordEmitter( Supplier> consumerSupplier, ConsumerPosition consumerPosition, int messagesPerPage, - ConsumerRecordDeserializer recordDeserializer, - PollingSettings pollingSettings) { - super(recordDeserializer, pollingSettings); + MessagesProcessing messagesProcessing, + PollingSettings pollingSettings, + Cursor.Tracking cursor) { + super(messagesProcessing, pollingSettings); this.consumerPosition = consumerPosition; this.messagesPerPage = messagesPerPage; this.consumerSupplier = consumerSupplier; + this.cursor = cursor; } @Override @@ -46,11 +48,12 @@ public void accept(FluxSink sink) { var seekOperations = SeekOperations.create(consumer, consumerPosition); var readUntilOffsets = new TreeMap(Comparator.comparingInt(TopicPartition::partition)); readUntilOffsets.putAll(seekOperations.getOffsetsForSeek()); + cursor.trackOffsets(readUntilOffsets); int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size()); log.debug("'Until' offsets for polling: {}", readUntilOffsets); - while (!sink.isCancelled() && !readUntilOffsets.isEmpty()) { + while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !isSendLimitReached()) { new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> { if (sink.isCancelled()) { return; //fast return in case of sink cancellation @@ -59,8 +62,6 @@ public void accept(FluxSink sink) { long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition); partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink) - .stream() - .filter(r -> !sink.isCancelled()) .forEach(r -> sendMessage(sink, r)); if (beginOffset == readFromOffset) { @@ -77,7 +78,12 @@ public void accept(FluxSink sink) { log.debug("sink is cancelled after partitions poll iteration"); } } - sendFinishStatsAndCompleteSink(sink); + sendFinishStatsAndCompleteSink( + sink, + readUntilOffsets.isEmpty() + ? 
null + : cursor + ); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); @@ -97,6 +103,7 @@ private List> partitionPollIteration( ) { consumer.assign(Collections.singleton(tp)); consumer.seek(tp, fromOffset); + cursor.trackOffset(tp, fromOffset); sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset)); int desiredMsgsToPoll = (int) (toOffset - fromOffset); @@ -104,6 +111,7 @@ private List> partitionPollIteration( EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter(); while (!sink.isCancelled() + && !isSendLimitReached() && recordsToSend.size() < desiredMsgsToPoll && !emptyPolls.noDataEmptyPollsReached()) { var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout()); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java index b0737e1cb9c..b4ed63dafa4 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java @@ -2,6 +2,11 @@ import com.provectus.kafka.ui.model.TopicMessageConsumingDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO; +import com.provectus.kafka.ui.util.ConsumerRecordsUtil; +import javax.annotation.Nullable; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; class ConsumingStats { @@ -9,37 +14,46 @@ class ConsumingStats { private long bytes = 0; private int records = 0; private long elapsed = 0; - private int filterApplyErrors = 0; - void sendConsumingEvt(FluxSink sink, PolledRecords polledRecords) { - bytes += polledRecords.bytes(); - records += polledRecords.count(); - elapsed += polledRecords.elapsed().toMillis(); + /** + * returns bytes polled. + */ + int sendConsumingEvt(FluxSink sink, + ConsumerRecords polledRecords, + long elapsed, + int filterApplyErrors) { + int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords); + bytes += polledBytes; + this.records += polledRecords.count(); + this.elapsed += elapsed; sink.next( new TopicMessageEventDTO() .type(TopicMessageEventDTO.TypeEnum.CONSUMING) - .consuming(createConsumingStats()) + .consuming(createConsumingStats(sink, filterApplyErrors)) ); + return polledBytes; } - void incFilterApplyError() { - filterApplyErrors++; - } - - void sendFinishEvent(FluxSink sink) { + void sendFinishEvent(FluxSink sink, int filterApplyErrors, @Nullable Cursor.Tracking cursor) { sink.next( new TopicMessageEventDTO() .type(TopicMessageEventDTO.TypeEnum.DONE) - .consuming(createConsumingStats()) + .cursor( + cursor != null + ? 
new TopicMessageNextPageCursorDTO().id(cursor.registerCursor()) + : null + ) + .consuming(createConsumingStats(sink, filterApplyErrors)) ); } - private TopicMessageConsumingDTO createConsumingStats() { + private TopicMessageConsumingDTO createConsumingStats(FluxSink sink, + int filterApplyErrors) { return new TopicMessageConsumingDTO() - .bytesConsumed(bytes) - .elapsedMs(elapsed) - .isCancelled(false) + .bytesConsumed(this.bytes) + .elapsedMs(this.elapsed) + .isCancelled(sink.isCancelled()) .filterApplyErrors(filterApplyErrors) - .messagesConsumed(records); + .messagesConsumed(this.records); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java new file mode 100644 index 00000000000..d78a583e829 --- /dev/null +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java @@ -0,0 +1,69 @@ +package com.provectus.kafka.ui.emitter; + +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.TopicMessageDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; +import org.apache.kafka.common.TopicPartition; + +public record Cursor(ConsumerRecordDeserializer deserializer, + ConsumerPosition consumerPosition, + Predicate filter, + int limit) { + + public static class Tracking { + private final ConsumerRecordDeserializer deserializer; + private final ConsumerPosition originalPosition; + private final Predicate filter; + private final int limit; + private final Function cursorRegistry; + + private final Map trackingOffsets = new HashMap<>(); + + public Tracking(ConsumerRecordDeserializer deserializer, + ConsumerPosition originalPosition, + Predicate filter, + int limit, + Function cursorRegistry) { + this.deserializer = deserializer; + this.originalPosition = originalPosition; + this.filter = filter; + this.limit = limit; + this.cursorRegistry = cursorRegistry; + } + + void trackOffset(TopicPartition tp, long offset) { + trackingOffsets.put(tp, offset); + } + + void trackOffsets(Map offsets) { + this.trackingOffsets.putAll(offsets); + } + + String registerCursor() { + return cursorRegistry.apply( + new Cursor( + deserializer, + new ConsumerPosition( + switch (originalPosition.pollingMode()) { + case TO_OFFSET, TO_TIMESTAMP, LATEST -> PollingModeDTO.TO_OFFSET; + case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> PollingModeDTO.FROM_OFFSET; + case TAILING -> throw new IllegalStateException(); + }, + originalPosition.topic(), + originalPosition.partitions(), + null, + new ConsumerPosition.Offsets(null, trackingOffsets) + ), + filter, + limit + ) + ); + } + } + +} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java index 94b6ce236be..3aedb9f6d5b 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -2,9 +2,9 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.TopicMessageEventDTO; -import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.clients.consumer.Consumer; 
import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; @@ -13,20 +13,22 @@ import reactor.core.publisher.FluxSink; @Slf4j -public class ForwardRecordEmitter - extends AbstractEmitter { +public class ForwardRecordEmitter extends AbstractEmitter { private final Supplier> consumerSupplier; private final ConsumerPosition position; + private final Cursor.Tracking cursor; public ForwardRecordEmitter( Supplier> consumerSupplier, ConsumerPosition position, - ConsumerRecordDeserializer recordDeserializer, - PollingSettings pollingSettings) { - super(recordDeserializer, pollingSettings); + MessagesProcessing messagesProcessing, + PollingSettings pollingSettings, + Cursor.Tracking cursor) { + super(messagesProcessing, pollingSettings); this.position = position; this.consumerSupplier = consumerSupplier; + this.cursor = cursor; } @Override @@ -36,27 +38,26 @@ public void accept(FluxSink sink) { sendPhase(sink, "Assigning partitions"); var seekOperations = SeekOperations.create(consumer, position); seekOperations.assignAndSeek(); + cursor.trackOffsets(seekOperations.getOffsetsForSeek()); EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter(); while (!sink.isCancelled() + && !isSendLimitReached() && !seekOperations.assignedPartitionsFullyPolled() && !emptyPolls.noDataEmptyPollsReached()) { sendPhase(sink, "Polling"); ConsumerRecords records = poll(sink, consumer); emptyPolls.count(records); + trackOffsetsAfterPoll(consumer); log.debug("{} records polled", records.count()); for (ConsumerRecord msg : records) { - if (!sink.isCancelled()) { - sendMessage(sink, msg); - } else { - break; - } + sendMessage(sink, msg); } } - sendFinishStatsAndCompleteSink(sink); + sendFinishStatsAndCompleteSink(sink, seekOperations.assignedPartitionsFullyPolled() ? 
null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); @@ -66,4 +67,9 @@ public void accept(FluxSink sink) { sink.error(e); } } + + private void trackOffsetsAfterPoll(Consumer consumer) { + consumer.assignment().forEach(tp -> cursor.trackOffset(tp, consumer.position(tp))); + } + } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java index df8505a2e9a..59848fac042 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java @@ -1,75 +1,71 @@ package com.provectus.kafka.ui.emitter; -import static java.util.stream.Collectors.collectingAndThen; -import static java.util.stream.Collectors.groupingBy; -import static java.util.stream.Collectors.toList; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Iterables; -import com.google.common.collect.Streams; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.model.TopicMessagePhaseDTO; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; import java.util.function.Predicate; import javax.annotation.Nullable; -import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; @Slf4j -@RequiredArgsConstructor -class MessagesProcessing { +public class MessagesProcessing { private final ConsumingStats consumingStats = new ConsumingStats(); private long sentMessages = 0; + private int filterApplyErrors = 0; private final ConsumerRecordDeserializer deserializer; private final Predicate filter; - private final boolean ascendingSortBeforeSend; private final @Nullable Integer limit; + public MessagesProcessing(ConsumerRecordDeserializer deserializer, + Predicate filter, + @Nullable Integer limit) { + this.deserializer = deserializer; + this.filter = filter; + this.limit = limit; + } + boolean limitReached() { return limit != null && sentMessages >= limit; } - void send(FluxSink sink, Iterable> polled) { - sortForSending(polled, ascendingSortBeforeSend) - .forEach(rec -> { - if (!limitReached() && !sink.isCancelled()) { - TopicMessageDTO topicMessage = deserializer.deserialize(rec); - try { - if (filter.test(topicMessage)) { - sink.next( - new TopicMessageEventDTO() - .type(TopicMessageEventDTO.TypeEnum.MESSAGE) - .message(topicMessage) - ); - sentMessages++; - } - } catch (Exception e) { - consumingStats.incFilterApplyError(); - log.trace("Error applying filter for message {}", topicMessage); - } - } - }); + void sendMsg(FluxSink sink, ConsumerRecord rec) { + if (!sink.isCancelled() && !limitReached()) { + TopicMessageDTO topicMessage = deserializer.deserialize(rec); + try { + if (filter.test(topicMessage)) { + sink.next( + new TopicMessageEventDTO() + .type(TopicMessageEventDTO.TypeEnum.MESSAGE) + .message(topicMessage) + ); + sentMessages++; + } + } catch (Exception e) { + filterApplyErrors++; + log.trace("Error applying filter for message {}", topicMessage); + } + } 
} - void sentConsumingInfo(FluxSink sink, PolledRecords polledRecords) { + int sentConsumingInfo(FluxSink sink, + ConsumerRecords polledRecords, + long elapsed) { if (!sink.isCancelled()) { - consumingStats.sendConsumingEvt(sink, polledRecords); + return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors); } + return 0; } - void sendFinishEvent(FluxSink sink) { + void sendFinishEvents(FluxSink sink, @Nullable Cursor.Tracking cursor) { if (!sink.isCancelled()) { - consumingStats.sendFinishEvent(sink); + consumingStats.sendFinishEvent(sink, filterApplyErrors, cursor); } } @@ -83,30 +79,4 @@ void sendPhase(FluxSink sink, String name) { } } - /* - * Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets. - */ - @VisibleForTesting - static Iterable> sortForSending(Iterable> records, - boolean asc) { - Comparator offsetComparator = asc - ? Comparator.comparingLong(ConsumerRecord::offset) - : Comparator.comparingLong(ConsumerRecord::offset).reversed(); - - // partition -> sorted by offsets records - Map>> perPartition = Streams.stream(records) - .collect( - groupingBy( - ConsumerRecord::partition, - TreeMap::new, - collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList()))); - - Comparator tsComparator = asc - ? Comparator.comparing(ConsumerRecord::timestamp) - : Comparator.comparingLong(ConsumerRecord::timestamp).reversed(); - - // merge-sorting records from partitions one by one using timestamp comparator - return Iterables.mergeSorted(perPartition.values(), tsComparator); - } - } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index 44f727b20e9..f10be11c2d3 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -1,9 +1,9 @@ package com.provectus.kafka.ui.emitter; import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP; +import static java.util.Objects.requireNonNull; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.PollingModeDTO; import java.util.HashMap; @@ -53,22 +53,14 @@ Map getOffsetsForSeek() { static Map getOffsetsForSeek(Consumer consumer, OffsetsInfo offsetsInfo, ConsumerPosition position) { - switch (position.pollingMode()) { - case TAILING: - return consumer.endOffsets(offsetsInfo.allTargetPartitions()); - case LATEST: - return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); - case EARLIEST: - return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); - case FROM_OFFSET, TO_OFFSET: - Preconditions.checkNotNull(position.offsets()); - return fixOffsets(offsetsInfo, position.offsets()); - case FROM_TIMESTAMP, TO_TIMESTAMP: - Preconditions.checkNotNull(position.timestamp()); - return offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, position.timestamp()); - default: - throw new IllegalStateException(); - } + return switch (position.pollingMode()) { + case TAILING -> consumer.endOffsets(offsetsInfo.allTargetPartitions()); + case LATEST -> consumer.endOffsets(offsetsInfo.getNonEmptyPartitions()); + case EARLIEST -> consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions()); + case FROM_OFFSET, TO_OFFSET -> fixOffsets(offsetsInfo, 
requireNonNull(position.offsets())); + case FROM_TIMESTAMP, TO_TIMESTAMP -> + offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, requireNonNull(position.timestamp())); + }; } private static Map fixOffsets(OffsetsInfo offsetsInfo, @@ -77,7 +69,7 @@ private static Map fixOffsets(OffsetsInfo offsetsInfo, if (positionOffset.offset() != null) { offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset())); } else { - Preconditions.checkNotNull(positionOffset.tpOffsets()); + requireNonNull(positionOffset.tpOffsets()); offsets.putAll(positionOffset.tpOffsets()); offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java index 666278d26fe..6d09b20b3a5 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java @@ -1,18 +1,12 @@ package com.provectus.kafka.ui.model; -import static java.util.stream.Collectors.toMap; - import com.provectus.kafka.ui.exception.ValidationException; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; -import java.util.stream.Stream; import javax.annotation.Nullable; -import org.apache.commons.lang3.tuple.Pair; import org.apache.kafka.common.TopicPartition; -import org.springframework.util.StringUtils; - public record ConsumerPosition(PollingModeDTO pollingMode, String topic, @@ -29,8 +23,8 @@ public static ConsumerPosition create(PollingModeDTO pollingMode, String topic, @Nullable List partitions, @Nullable Long timestamp, - @Nullable String offsetsStr) { - @Nullable var offsets = parseAndValidateOffsets(pollingMode, topic, offsetsStr); + @Nullable Long offset) { + @Nullable var offsets = parseAndValidateOffsets(pollingMode, offset); var topicPartitions = Optional.ofNullable(partitions).orElse(List.of()) .stream() @@ -61,33 +55,14 @@ private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long } private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode, - String topic, - @Nullable String offsetsStr) { - Offsets offsets = null; + @Nullable Long offset) { if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) { - if (!StringUtils.hasText(offsetsStr)) { + if (offset == null) { throw new ValidationException("offsets not provided for " + pollingMode); } - if (!offsetsStr.contains(":")) { - offsets = new Offsets(Long.parseLong(offsetsStr), null); - } else { - Map tpOffsets = Stream.of(offsetsStr.split(",")) - .map(p -> { - String[] split = p.split(":"); - if (split.length != 2) { - throw new IllegalArgumentException( - "Wrong seekTo argument format. 
See API docs for details"); - } - return Pair.of( - new TopicPartition(topic, Integer.parseInt(split[0])), - Long.parseLong(split[1]) - ); - }) - .collect(toMap(Pair::getKey, Pair::getValue)); - offsets = new Offsets(null, tpOffsets); - } + return new Offsets(offset, null); } - return offsets; + return null; } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java index 4b507b86fcd..9fdbb5839f1 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java @@ -1,7 +1,6 @@ package com.provectus.kafka.ui.serdes; import com.provectus.kafka.ui.model.TopicMessageDTO; -import com.provectus.kafka.ui.model.TopicMessageDTO.TimestampTypeEnum; import com.provectus.kafka.ui.serde.api.Serde; import java.time.Instant; import java.time.OffsetDateTime; @@ -9,7 +8,6 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Map; -import java.util.function.UnaryOperator; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -34,8 +32,6 @@ public class ConsumerRecordDeserializer { private final Serde.Deserializer fallbackKeyDeserializer; private final Serde.Deserializer fallbackValueDeserializer; - private final UnaryOperator masker; - public TopicMessageDTO deserialize(ConsumerRecord rec) { var message = new TopicMessageDTO(); fillKey(message, rec); @@ -51,14 +47,14 @@ public TopicMessageDTO deserialize(ConsumerRecord rec) { message.setValueSize(getValueSize(rec)); message.setHeadersSize(getHeadersSize(rec)); - return masker.apply(message); + return message; } - private static TimestampTypeEnum mapToTimestampType(TimestampType timestampType) { + private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) { return switch (timestampType) { - case CREATE_TIME -> TimestampTypeEnum.CREATE_TIME; - case LOG_APPEND_TIME -> TimestampTypeEnum.LOG_APPEND_TIME; - case NO_TIMESTAMP_TYPE -> TimestampTypeEnum.NO_TIMESTAMP_TYPE; + case CREATE_TIME -> TopicMessageDTO.TimestampTypeEnum.CREATE_TIME; + case LOG_APPEND_TIME -> TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME; + case NO_TIMESTAMP_TYPE -> TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE; }; } @@ -122,11 +118,11 @@ private static Long getHeadersSize(ConsumerRecord consumerRecord) } private static Long getKeySize(ConsumerRecord consumerRecord) { - return consumerRecord.key() != null ? (long) consumerRecord.serializedKeySize() : null; + return consumerRecord.key() != null ? (long) consumerRecord.key().get().length : null; } private static Long getValueSize(ConsumerRecord consumerRecord) { - return consumerRecord.value() != null ? (long) consumerRecord.serializedValueSize() : null; + return consumerRecord.value() != null ? 
(long) consumerRecord.value().get().length : null; } private static int headerSize(Header header) { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 11845bd0c05..43ee4f231ac 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -1,11 +1,16 @@ package com.provectus.kafka.ui.service; +import com.google.common.base.Charsets; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.hash.Hashing; import com.google.common.util.concurrent.RateLimiter; +import com.provectus.kafka.ui.config.ClustersProperties; import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.Cursor; import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; -import com.provectus.kafka.ui.emitter.MessageFilterStats; import com.provectus.kafka.ui.emitter.MessageFilters; -import com.provectus.kafka.ui.emitter.ResultSizeLimiter; +import com.provectus.kafka.ui.emitter.MessagesProcessing; import com.provectus.kafka.ui.emitter.TailingEmitter; import com.provectus.kafka.ui.exception.TopicNotFoundException; import com.provectus.kafka.ui.exception.ValidationException; @@ -21,16 +26,15 @@ import com.provectus.kafka.ui.util.SslPropertiesUtil; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Properties; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ThreadLocalRandom; import java.util.function.Predicate; import java.util.function.UnaryOperator; import java.util.stream.Collectors; import javax.annotation.Nullable; -import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.kafka.clients.admin.OffsetSpec; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.clients.producer.KafkaProducer; @@ -45,18 +49,43 @@ import reactor.core.scheduler.Schedulers; @Service -@RequiredArgsConstructor @Slf4j public class MessagesService { + private static final long SALT_FOR_HASHING = ThreadLocalRandom.current().nextLong(); + + private static final int DEFAULT_MAX_PAGE_SIZE = 500; + private static final int DEFAULT_PAGE_SIZE = 100; // limiting UI messages rate to 20/sec in tailing mode - public static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20; + private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20; private final AdminClientService adminClientService; private final DeserializationService deserializationService; private final ConsumerGroupService consumerGroupService; + private final int maxPageSize; + private final int defaultPageSize; + + private final Cache> registeredFilters = CacheBuilder.newBuilder() + .maximumSize(5_000) + .build(); + + private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage(); - private final Map> registeredFilters = new ConcurrentHashMap<>(); + public MessagesService(AdminClientService adminClientService, + DeserializationService deserializationService, + ConsumerGroupService consumerGroupService, + ClustersProperties properties) { + this.adminClientService = adminClientService; + this.deserializationService = deserializationService; + this.consumerGroupService = consumerGroupService; + + var pollingProps = 
Optional.ofNullable(properties.getPolling()) + .orElseGet(ClustersProperties.PollingProperties::new); + this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize()) + .orElse(DEFAULT_MAX_PAGE_SIZE); + this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize()) + .orElse(DEFAULT_PAGE_SIZE); + } private Mono withExistingTopic(KafkaCluster cluster, String topicName) { return adminClientService.get(cluster) @@ -138,118 +167,135 @@ private Mono sendMessageImpl(KafkaCluster cluster, } } - public Flux loadMessagesV2(KafkaCluster cluster, - String topic, - ConsumerPosition position, - @Nullable String query, - @Nullable String filterId, - int limit, - @Nullable String keySerde, - @Nullable String valueSerde) { + private int fixPageSize(@Nullable Integer pageSize) { + return Optional.ofNullable(pageSize) + .filter(ps -> ps > 0 && ps <= maxPageSize) + .orElse(defaultPageSize); + } + + private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { + var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); + var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); + return evt -> { + if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { + return evt; + } + return evt.message( + evt.getMessage() + .key(keyMasker.apply(evt.getMessage().getKey())) + .content(valMasker.apply(evt.getMessage().getContent()))); + }; + } + + public Flux loadMessages(KafkaCluster cluster, + String topic, + ConsumerPosition consumerPosition, + @Nullable String containsStringFilter, + @Nullable String filterId, + @Nullable Integer limit, + @Nullable String keySerde, + @Nullable String valueSerde) { + return loadMessages( + cluster, + topic, + deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde), + consumerPosition, + getMsgFilter(containsStringFilter, filterId), + fixPageSize(limit) + ); + } + + public Flux loadMessages(KafkaCluster cluster, String topic, String cursorId) { + Cursor cursor = cursorsStorage.getCursor(cursorId) + .orElseThrow(() -> new ValidationException("Next page cursor not found. Maybe it was evicted from cache.")); + return loadMessages( + cluster, + topic, + cursor.deserializer(), + cursor.consumerPosition(), + cursor.filter(), + cursor.limit() + ); + } + + private Flux loadMessages(KafkaCluster cluster, + String topic, + ConsumerRecordDeserializer deserializer, + ConsumerPosition consumerPosition, + Predicate filter, + int limit) { return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImplV2(cluster, topic, position, query, filterId, limit, keySerde, valueSerde)); + .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, fixPageSize(limit))); } - private Flux loadMessagesImplV2(KafkaCluster cluster, - String topic, - ConsumerPosition consumerPosition, - @Nullable String query, - @Nullable String filterId, - int limit, - @Nullable String keySerde, - @Nullable String valueSerde) { - - ConsumerRecordDeserializer recordDeserializer = - deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde); + private Flux loadMessagesImpl(KafkaCluster cluster, + String topic, + ConsumerRecordDeserializer deserializer, + ConsumerPosition consumerPosition, + Predicate filter, + int limit) { + var processing = new MessagesProcessing( + deserializer, + filter, + consumerPosition.pollingMode() == PollingModeDTO.TAILING ? 
null : limit + ); var emitter = switch (consumerPosition.pollingMode()) { case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, limit, - recordDeserializer, - cluster.getPollingSettings() + processing, + cluster.getPollingSettings(), + new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register) ); case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, - recordDeserializer, - cluster.getPollingSettings() + processing, + cluster.getPollingSettings(), + new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register) ); case TAILING -> new TailingEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, - recordDeserializer, + processing, cluster.getPollingSettings() ); }; - - MessageFilterStats filterStats = new MessageFilterStats(); return Flux.create(emitter) - .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats)) - .filter(getMsgFilter(query, filterId, filterStats)) .map(getDataMasker(cluster, topic)) - .takeWhile(createTakeWhilePredicate(consumerPosition.pollingMode(), limit)) .map(throttleUiPublish(consumerPosition.pollingMode())); } - private Predicate createTakeWhilePredicate( - PollingModeDTO pollingMode, int limit) { - return pollingMode == PollingModeDTO.TAILING - ? evt -> true // no limit for tailing - : new ResultSizeLimiter(limit); - } - - private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { - var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); - var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); - return evt -> { - if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { - return evt; - } - return evt.message( - evt.getMessage() - .key(keyMasker.apply(evt.getMessage().getKey())) - .content(valMasker.apply(evt.getMessage().getContent()))); - }; - } - public String registerMessageFilter(String groovyCode) { - var filter = MessageFilters.groovyScriptFilter(groovyCode); - var id = RandomStringUtils.random(10, true, true); - registeredFilters.put(id, filter); - return id; + String saltedCode = groovyCode + SALT_FOR_HASHING; + String filterId = Hashing.sha256() + .hashString(saltedCode, Charsets.UTF_8) + .toString() + .substring(0, 8); + if (registeredFilters.getIfPresent(filterId) == null) { + registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode)); + } + return filterId; } - private Predicate getMsgFilter(@Nullable String containsStrFilter, - @Nullable String filterId, - MessageFilterStats filterStats) { + private Predicate getMsgFilter(@Nullable String containsStrFilter, + @Nullable String smartFilterId) { Predicate messageFilter = MessageFilters.noop(); if (containsStrFilter != null) { - messageFilter = MessageFilters.containsStringFilter(containsStrFilter); + messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter)); } - if (filterId != null) { - messageFilter = registeredFilters.get(filterId); - if (messageFilter == null) { - throw new ValidationException("No filter was registered with id " + filterId); + if (smartFilterId != null) { + var registered = registeredFilters.getIfPresent(smartFilterId); + if (registered == null) { + throw new ValidationException("No filter was registered with id " + smartFilterId); } + messageFilter = messageFilter.and(registered); } 
- Predicate finalMessageFilter = messageFilter; - return evt -> { - // we only apply filter for message events - if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) { - try { - return finalMessageFilter.test(evt.getMessage()); - } catch (Exception e) { - filterStats.incrementApplyErrors(); - log.trace("Error applying filter for message {}", evt.getMessage()); - return false; - } - } - return true; - }; + return messageFilter; } private UnaryOperator throttleUiPublish(PollingModeDTO pollingMode) { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java new file mode 100644 index 00000000000..a789c2afbe2 --- /dev/null +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java @@ -0,0 +1,25 @@ +package com.provectus.kafka.ui.service; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.provectus.kafka.ui.emitter.Cursor; +import java.util.Optional; +import org.apache.commons.lang3.RandomStringUtils; + +public class PollingCursorsStorage { + + private final Cache cursorsCache = CacheBuilder.newBuilder() + .maximumSize(10_000) + .build(); + + public Optional getCursor(String id) { + return Optional.ofNullable(cursorsCache.getIfPresent(id)); + } + + public String register(Cursor cursor) { + var id = RandomStringUtils.random(8, true, true); + cursorsCache.put(id, cursor); + return id; + } + +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java index 6585ba840ea..972a573bab9 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java @@ -109,7 +109,7 @@ private Flux createTailingFlux( .get(); return applicationContext.getBean(MessagesService.class) - .loadMessagesV2(cluster, topicName, + .loadMessages(cluster, topicName, new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null), query, null, diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index fa2ad19aace..fbaa0748bdb 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -54,7 +54,7 @@ void sendMessageReturnsExceptionWhenTopicNotFound() { @Test void loadMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService - .loadMessagesV2(cluster, NON_EXISTING_TOPIC, + .loadMessages(cluster, NON_EXISTING_TOPIC, new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null), null, null, 1, "String", "String")) .expectError(TopicNotFoundException.class) @@ -69,7 +69,7 @@ void maskingAppliedOnConfiguredClusters() throws Exception { producer.send(testTopic, "message1"); producer.send(testTopic, "message2").get(); - Flux msgsFlux = messagesService.loadMessagesV2( + Flux msgsFlux = messagesService.loadMessages( cluster, testTopic, new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java 
b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java index 1474a034a84..92d896a2967 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java @@ -10,7 +10,9 @@ import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.Cursor; import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; +import com.provectus.kafka.ui.emitter.MessagesProcessing; import com.provectus.kafka.ui.emitter.PollingSettings; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.ConsumerPosition.Offsets; @@ -44,6 +46,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.mockito.Mockito; import reactor.core.publisher.Flux; import reactor.core.publisher.FluxSink; import reactor.test.StepVerifier; @@ -58,6 +61,7 @@ class RecordEmitterTest extends AbstractIntegrationTest { static final String EMPTY_TOPIC = TOPIC + "_empty"; static final List SENT_RECORDS = new ArrayList<>(); static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer(); + static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class); @BeforeAll static void generateMsgs() throws Exception { @@ -110,21 +114,27 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() { ); } + private MessagesProcessing createMessagesProcessing() { + return new MessagesProcessing(RECORD_DESERIALIZER, msg -> true, null); + } + @Test void pollNothingOnEmptyTopic() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), 100, - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); StepVerifier.create(Flux.create(forwardEmitter)) @@ -145,16 +155,18 @@ void pollFullTopicFromBeginning() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null), - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); var backwardEmitter = new BackwardRecordEmitter( this::createConsumer, new ConsumerPosition(LATEST, TOPIC, List.of(), null, null), PARTITIONS * MSGS_PER_PARTITION, - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); List expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()); @@ -175,8 +187,9 @@ void pollWithOffsets() { this::createConsumer, new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, new Offsets(null, targetOffsets)), - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); var backwardEmitter = new BackwardRecordEmitter( @@ -184,8 +197,9 @@ void pollWithOffsets() { new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), 
null, new Offsets(null, targetOffsets)), PARTITIONS * MSGS_PER_PARTITION, - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); var expectedValues = SENT_RECORDS.stream() @@ -212,8 +226,9 @@ void pollWithTimestamps() { var forwardEmitter = new ForwardRecordEmitter( this::createConsumer, new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); expectEmitter( @@ -228,8 +243,9 @@ void pollWithTimestamps() { this::createConsumer, new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), PARTITIONS * MSGS_PER_PARTITION, - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); expectEmitter( @@ -254,8 +270,9 @@ void backwardEmitterSeekToEnd() { new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, new Offsets(null, targetOffsets)), numMessages, - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); var expectedValues = SENT_RECORDS.stream() @@ -280,8 +297,9 @@ void backwardEmitterSeekToBegin() { this::createConsumer, new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)), 100, - RECORD_DESERIALIZER, - PollingSettings.createDefault() + createMessagesProcessing(), + PollingSettings.createDefault(), + CURSOR_MOCK ); expectEmitter(backwardEmitter, diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java index c8a8adef2f7..a9639ca3893 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java @@ -497,7 +497,7 @@ public void doAssert(Consumer msgAssert) { String topic = createTopicAndCreateSchemas(); try { messagesService.sendMessage(targetCluster, topic, msgToSend).block(); - TopicMessageDTO polled = messagesService.loadMessagesV2( + TopicMessageDTO polled = messagesService.loadMessages( targetCluster, topic, new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null), diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index f43c4f622d9..dcf26d58e68 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -792,55 +792,61 @@ paths: required: true schema: type: string - - name: m + - name: mode in: query description: Messages polling mode required: true schema: $ref: "#/components/schemas/PollingMode" - - name: p + - name: partitions in: query schema: type: array description: List of target partitions (all partitions if not provided) items: type: integer - - name: lim + - name: limit in: query description: Max number of messages can be returned schema: type: integer - - name: q + - name: stringFilter in: query description: query string to contains string filtration schema: type: string - - name: fid + - name: smartFilterId in: query description: filter id, that was registered beforehand schema: type: string - - name: offs + - name: offset in: query - description: partition 
offsets to read from / to. Format is "p1:offset1,p2:offset2,...". + description: message offset to read from / to schema: - type: string - - name: ts + type: integer + format: int64 + - name: timestamp in: query description: timestamp (in ms) to read from / to schema: type: integer format: int64 - - name: ks + - name: keySerde in: query description: "Serde that should be used for deserialization. Will be chosen automatically if not set." schema: type: string - - name: vs + - name: valueSerde in: query description: "Serde that should be used for deserialization. Will be chosen automatically if not set." schema: type: string + - name: cursor + in: query + description: "id of the cursor for pagination" + schema: + type: string responses: 200: description: OK @@ -2608,10 +2614,8 @@ components: TopicMessageNextPageCursor: type: object properties: - offsetsString: + id: type: string - pollingMode: - $ref: "#/components/schemas/PollingMode" TopicMessage: type: object From a17afc0422ea165b708598dc550178ea0906353d Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 24 Apr 2023 23:21:32 +0400 Subject: [PATCH 10/29] new tests --- .../ui/emitter/ForwardRecordEmitter.java | 22 ++- .../kafka/ui/service/MessagesService.java | 1 + .../ui/service/PollingCursorsStorage.java | 6 + .../kafka/ui/emitter/CursorTest.java | 181 ++++++++++++++++++ .../kafka/ui/service/MessagesServiceTest.java | 108 +++++++++-- 5 files changed, 288 insertions(+), 30 deletions(-) create mode 100644 kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java index 3aedb9f6d5b..cd73aa08450 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -4,10 +4,10 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; @@ -49,15 +49,21 @@ public void accept(FluxSink sink) { sendPhase(sink, "Polling"); ConsumerRecords records = poll(sink, consumer); emptyPolls.count(records); - trackOffsetsAfterPoll(consumer); - log.debug("{} records polled", records.count()); - for (ConsumerRecord msg : records) { - sendMessage(sink, msg); + for (TopicPartition tp : records.partitions()) { + for (ConsumerRecord record : records.records(tp)) { + // checking if send limit reached - if so, we will skip some + // of already polled records (and we don't need to track their offsets) - they + // should be present on next page, polled by cursor + if (!isSendLimitReached()) { + sendMessage(sink, record); + cursor.trackOffset(tp, record.offset() + 1); + } + } } } - sendFinishStatsAndCompleteSink(sink, seekOperations.assignedPartitionsFullyPolled() ? null : cursor); + sendFinishStatsAndCompleteSink(sink, !isSendLimitReached() ? 
null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); @@ -68,8 +74,4 @@ public void accept(FluxSink sink) { } } - private void trackOffsetsAfterPoll(Consumer consumer) { - consumer.assignment().forEach(tp -> cursor.trackOffset(tp, consumer.position(tp))); - } - } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 43ee4f231ac..b83da33bd74 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -56,6 +56,7 @@ public class MessagesService { private static final int DEFAULT_MAX_PAGE_SIZE = 500; private static final int DEFAULT_PAGE_SIZE = 100; + // limiting UI messages rate to 20/sec in tailing mode private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java index a789c2afbe2..30f6ef92e03 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java @@ -1,8 +1,10 @@ package com.provectus.kafka.ui.service; +import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.provectus.kafka.ui.emitter.Cursor; +import java.util.Map; import java.util.Optional; import org.apache.commons.lang3.RandomStringUtils; @@ -22,4 +24,8 @@ public String register(Cursor cursor) { return id; } + @VisibleForTesting + public Map asMap() { + return cursorsCache.asMap(); + } } diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java new file mode 100644 index 00000000000..e36be49320d --- /dev/null +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java @@ -0,0 +1,181 @@ +package com.provectus.kafka.ui.emitter; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.provectus.kafka.ui.AbstractIntegrationTest; +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.producer.KafkaTestProducer; +import com.provectus.kafka.ui.serde.api.Serde; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import com.provectus.kafka.ui.serdes.PropertyResolverImpl; +import com.provectus.kafka.ui.serdes.builtin.StringSerde; +import com.provectus.kafka.ui.service.PollingCursorsStorage; +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.BytesDeserializer; +import org.apache.kafka.common.utils.Bytes; +import org.junit.jupiter.api.AfterAll; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + + +public class CursorTest extends AbstractIntegrationTest { + + static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID(); + + static final int MSGS_IN_PARTITION = 20; + static final int PAGE_SIZE = 11; + + @BeforeAll + static void setup() { + createTopic(new NewTopic(TOPIC, 1, (short) 1)); + try (var producer = KafkaTestProducer.forKafka(kafka)) { + for (int i = 0; i < MSGS_IN_PARTITION; i++) { + producer.send(new ProducerRecord<>(TOPIC, "msg_" + i)); + } + } + } + + @AfterAll + static void cleanup() { + deleteTopic(TOPIC); + } + + @Test + void backwardEmitter() { + var cursorsStorage = new PollingCursorsStorage(); + var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null); + + var cursor = new Cursor.Tracking( + createRecordsDeserializer(), + consumerPosition, + m -> true, + PAGE_SIZE, + cursorsStorage::register + ); + + var emitter = createBackwardEmitter(consumerPosition, cursor); + verifyMessagesEmitted(emitter); + assertCursor( + cursorsStorage, + PollingModeDTO.TO_OFFSET, + offsets -> assertThat(offsets) + .hasSize(1) + .containsEntry(new TopicPartition(TOPIC, 0), 9L) + ); + } + + @Test + void forwardEmitter() { + var cursorsStorage = new PollingCursorsStorage(); + var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null); + + var cursor = new Cursor.Tracking( + createRecordsDeserializer(), + consumerPosition, + m -> true, + PAGE_SIZE, + cursorsStorage::register + ); + + var emitter = createForwardEmitter(consumerPosition, cursor); + verifyMessagesEmitted(emitter); + assertCursor( + cursorsStorage, + PollingModeDTO.FROM_OFFSET, + offsets -> assertThat(offsets) + .hasSize(1) + .containsEntry(new TopicPartition(TOPIC, 0), 11L) + ); + } + + private void assertCursor(PollingCursorsStorage storage, + PollingModeDTO expectedMode, + Consumer> offsetsAssert) { + Cursor registeredCursor = storage.asMap().values().stream().findFirst().orElse(null); + assertThat(registeredCursor).isNotNull(); + assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE); + assertThat(registeredCursor.deserializer()).isNotNull(); + assertThat(registeredCursor.filter()).isNotNull(); + + var cursorPosition = registeredCursor.consumerPosition(); + assertThat(cursorPosition).isNotNull(); + assertThat(cursorPosition.topic()).isEqualTo(TOPIC); + assertThat(cursorPosition.partitions()).isEqualTo(List.of()); + assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode); + + offsetsAssert.accept(cursorPosition.offsets().tpOffsets()); + } + + private void verifyMessagesEmitted(AbstractEmitter emitter) { + StepVerifier.create( + Flux.create(emitter) + .filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(e -> e.getMessage().getContent()) + ) + .expectNextCount(PAGE_SIZE) + .verifyComplete(); + } + + private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + return new BackwardRecordEmitter( + this::createConsumer, + position, + PAGE_SIZE, + new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), + PollingSettings.createDefault(), + cursor + ); + } + + private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + return new ForwardRecordEmitter( + this::createConsumer, + position, + new MessagesProcessing(createRecordsDeserializer(), m -> 
true, PAGE_SIZE), + PollingSettings.createDefault(), + cursor + ); + } + + private KafkaConsumer createConsumer() { + final Map map = Map.of( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(), + ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(), + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1, // to check multiple polls + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class + ); + Properties props = new Properties(); + props.putAll(map); + return new KafkaConsumer<>(props); + } + + private static ConsumerRecordDeserializer createRecordsDeserializer() { + Serde s = new StringSerde(); + s.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty()); + return new ConsumerRecordDeserializer( + StringSerde.name(), + s.deserializer(null, Serde.Target.KEY), + StringSerde.name(), + s.deserializer(null, Serde.Target.VALUE), + StringSerde.name(), + s.deserializer(null, Serde.Target.KEY), + s.deserializer(null, Serde.Target.VALUE) + ); + } + +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index fbaa0748bdb..fcc94fd5df3 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -1,5 +1,7 @@ package com.provectus.kafka.ui.service; +import static org.assertj.core.api.Assertions.assertThat; + import com.provectus.kafka.ui.AbstractIntegrationTest; import com.provectus.kafka.ui.exception.TopicNotFoundException; import com.provectus.kafka.ui.model.ConsumerPosition; @@ -10,11 +12,17 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serdes.builtin.StringSerde; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; import org.apache.kafka.clients.admin.NewTopic; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import org.springframework.beans.factory.annotation.Autowired; import reactor.core.publisher.Flux; import reactor.test.StepVerifier; @@ -29,6 +37,8 @@ class MessagesServiceTest extends AbstractIntegrationTest { KafkaCluster cluster; + Set createdTopics = new HashSet<>(); + @BeforeEach void init() { cluster = applicationContext @@ -37,6 +47,11 @@ void init() { .get(); } + @AfterEach + void deleteCreatedTopics() { + createdTopics.forEach(MessagesServiceTest::deleteTopic); + } + @Test void deleteTopicMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService.deleteTopicMessages(cluster, NON_EXISTING_TOPIC, List.of())) @@ -64,31 +79,84 @@ void loadMessagesReturnsExceptionWhenTopicNotFound() { @Test void maskingAppliedOnConfiguredClusters() throws Exception { String testTopic = MASKED_TOPICS_PREFIX + UUID.randomUUID(); + createTopicWithCleanup(new NewTopic(testTopic, 1, (short) 1)); + try (var producer = KafkaTestProducer.forKafka(kafka)) { - createTopic(new NewTopic(testTopic, 1, (short) 1)); producer.send(testTopic, "message1"); producer.send(testTopic, "message2").get(); + } + + Flux msgsFlux = 
messagesService.loadMessages( + cluster, + testTopic, + new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), + null, + null, + 100, + StringSerde.name(), + StringSerde.name() + ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(TopicMessageEventDTO::getMessage); + + // both messages should be masked + StepVerifier.create(msgsFlux) + .expectNextMatches(msg -> msg.getContent().equals("***")) + .expectNextMatches(msg -> msg.getContent().equals("***")) + .verifyComplete(); + } + + @ParameterizedTest + @CsvSource({"EARLIEST", "LATEST"}) + void cursorIsRegisteredAfterPollingIsDoneAndCanBeUsedForNextPagePolling(PollingModeDTO mode) { + String testTopic = MessagesServiceTest.class.getSimpleName() + UUID.randomUUID(); + createTopicWithCleanup(new NewTopic(testTopic, 5, (short) 1)); - Flux msgsFlux = messagesService.loadMessages( - cluster, - testTopic, - new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null), - null, - null, - 100, - StringSerde.name(), - StringSerde.name() - ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) - .map(TopicMessageEventDTO::getMessage); - - // both messages should be masked - StepVerifier.create(msgsFlux) - .expectNextMatches(msg -> msg.getContent().equals("***")) - .expectNextMatches(msg -> msg.getContent().equals("***")) - .verifyComplete(); - } finally { - deleteTopic(testTopic); + int msgsToGenerate = 100; + int pageSize = (msgsToGenerate / 2) + 1; + + try (var producer = KafkaTestProducer.forKafka(kafka)) { + for (int i = 0; i < msgsToGenerate; i++) { + producer.send(testTopic, "message_" + i); + } } + + var cursorIdCatcher = new AtomicReference(); + Flux msgsFlux = messagesService.loadMessages( + cluster, testTopic, + new ConsumerPosition(mode, testTopic, List.of(), null, null), + null, null, pageSize, StringSerde.name(), StringSerde.name()) + .doOnNext(evt -> { + if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) { + assertThat(evt.getCursor()).isNotNull(); + cursorIdCatcher.set(evt.getCursor().getId()); + } + }) + .filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(evt -> evt.getMessage().getContent()); + + StepVerifier.create(msgsFlux) + .expectNextCount(pageSize) + .verifyComplete(); + + assertThat(cursorIdCatcher.get()).isNotNull(); + + Flux remainingMsgs = messagesService.loadMessages(cluster, testTopic, cursorIdCatcher.get()) + .doOnNext(evt -> { + if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) { + assertThat(evt.getCursor()).isNull(); + } + }) + .filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) + .map(evt -> evt.getMessage().getContent()); + + StepVerifier.create(remainingMsgs) + .expectNextCount(msgsToGenerate - pageSize) + .verifyComplete(); + } + + private void createTopicWithCleanup(NewTopic newTopic) { + createTopic(newTopic); + createdTopics.add(newTopic.name()); } } From f0f4bf682391a21c668e98a3f961988cbe265d6f Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 24 Apr 2023 23:40:49 +0400 Subject: [PATCH 11/29] new tests --- .../kafka/ui/emitter/CursorTest.java | 105 +++++++++++------- 1 file changed, 63 insertions(+), 42 deletions(-) diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java index e36be49320d..cd849bcfd92 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java 
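Note (not part of the patch): the CursorTest rework in the hunk below exercises the same two-page flow as the MessagesServiceTest above, so a compact sketch of that flow at the service level may help while reviewing. It assumes the test-class context (injected messagesService, cluster) and uses only the loadMessages overloads visible in this patch series; topic and pageSize are placeholders.

    // first page: explicit position + serdes; capture the cursor id from the DONE event
    AtomicReference<String> cursorId = new AtomicReference<>();
    messagesService.loadMessages(
            cluster, topic,
            new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null),
            null,                 // containsStringFilter
            null,                 // registered smart filter id
            pageSize,
            StringSerde.name(),
            StringSerde.name())
        .doOnNext(evt -> {
          if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE && evt.getCursor() != null) {
            cursorId.set(evt.getCursor().getId());  // a null cursor means the page covered everything
          }
        })
        .blockLast();

    // next page: only the registered cursor id is needed; position, filter and serdes are restored from it
    Flux<TopicMessageEventDTO> nextPage =
        messagesService.loadMessages(cluster, topic, cursorId.get());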
@@ -31,13 +31,13 @@ import reactor.core.publisher.Flux; import reactor.test.StepVerifier; - -public class CursorTest extends AbstractIntegrationTest { +class CursorTest extends AbstractIntegrationTest { static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID(); - static final int MSGS_IN_PARTITION = 20; - static final int PAGE_SIZE = 11; + static final int PAGE_SIZE = (MSGS_IN_PARTITION / 2) + 1; //to poll fill data set in 2 iterations + + final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage(); @BeforeAll static void setup() { @@ -56,56 +56,45 @@ static void cleanup() { @Test void backwardEmitter() { - var cursorsStorage = new PollingCursorsStorage(); var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null); - - var cursor = new Cursor.Tracking( - createRecordsDeserializer(), - consumerPosition, - m -> true, - PAGE_SIZE, - cursorsStorage::register - ); - - var emitter = createBackwardEmitter(consumerPosition, cursor); - verifyMessagesEmitted(emitter); - assertCursor( - cursorsStorage, + var emitter = createBackwardEmitter(consumerPosition); + emitMessages(emitter, PAGE_SIZE); + var cursor = assertCursor( PollingModeDTO.TO_OFFSET, offsets -> assertThat(offsets) .hasSize(1) .containsEntry(new TopicPartition(TOPIC, 0), 9L) ); + + // polling remaining records using registered cursor + emitter = createBackwardEmitterWithCursor(cursor); + emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE); + //checking no new cursors registered + assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor); } @Test void forwardEmitter() { - var cursorsStorage = new PollingCursorsStorage(); var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null); - - var cursor = new Cursor.Tracking( - createRecordsDeserializer(), - consumerPosition, - m -> true, - PAGE_SIZE, - cursorsStorage::register - ); - - var emitter = createForwardEmitter(consumerPosition, cursor); - verifyMessagesEmitted(emitter); - assertCursor( - cursorsStorage, + var emitter = createForwardEmitter(consumerPosition); + emitMessages(emitter, PAGE_SIZE); + var cursor = assertCursor( PollingModeDTO.FROM_OFFSET, offsets -> assertThat(offsets) .hasSize(1) .containsEntry(new TopicPartition(TOPIC, 0), 11L) ); + + //polling remaining records using registered cursor + emitter = createForwardEmitterWithCursor(cursor); + emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE); + //checking no new cursors registered + assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor); } - private void assertCursor(PollingCursorsStorage storage, - PollingModeDTO expectedMode, - Consumer> offsetsAssert) { - Cursor registeredCursor = storage.asMap().values().stream().findFirst().orElse(null); + private Cursor assertCursor(PollingModeDTO expectedMode, + Consumer> offsetsAssert) { + Cursor registeredCursor = cursorsStorage.asMap().values().stream().findFirst().orElse(null); assertThat(registeredCursor).isNotNull(); assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE); assertThat(registeredCursor.deserializer()).isNotNull(); @@ -118,36 +107,68 @@ private void assertCursor(PollingCursorsStorage storage, assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode); offsetsAssert.accept(cursorPosition.offsets().tpOffsets()); + return registeredCursor; } - private void verifyMessagesEmitted(AbstractEmitter emitter) { + private void emitMessages(AbstractEmitter emitter, int expectedCnt) { StepVerifier.create( Flux.create(emitter) 
.filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) .map(e -> e.getMessage().getContent()) ) - .expectNextCount(PAGE_SIZE) + .expectNextCount(expectedCnt) .verifyComplete(); } - private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position) { return new BackwardRecordEmitter( this::createConsumer, position, PAGE_SIZE, new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), PollingSettings.createDefault(), - cursor + createCursor(position) ); } - private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position, Cursor.Tracking cursor) { + private BackwardRecordEmitter createBackwardEmitterWithCursor(Cursor cursor) { + return new BackwardRecordEmitter( + this::createConsumer, + cursor.consumerPosition(), + cursor.limit(), + new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE), + PollingSettings.createDefault(), + createCursor(cursor.consumerPosition()) + ); + } + + private ForwardRecordEmitter createForwardEmitterWithCursor(Cursor cursor) { + return new ForwardRecordEmitter( + this::createConsumer, + cursor.consumerPosition(), + new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE), + PollingSettings.createDefault(), + createCursor(cursor.consumerPosition()) + ); + } + + private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position) { return new ForwardRecordEmitter( this::createConsumer, position, new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), PollingSettings.createDefault(), - cursor + createCursor(position) + ); + } + + private Cursor.Tracking createCursor(ConsumerPosition position) { + return new Cursor.Tracking( + createRecordsDeserializer(), + position, + m -> true, + PAGE_SIZE, + cursorsStorage::register ); } From fc999212ce74f39c98eaa0c038fbbf5f1f163caa Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 24 Apr 2023 23:54:14 +0400 Subject: [PATCH 12/29] minor improvements --- .../provectus/kafka/ui/emitter/BackwardRecordEmitter.java | 7 +------ .../com/provectus/kafka/ui/service/MessagesService.java | 2 +- .../provectus/kafka/ui/service/PollingCursorsStorage.java | 4 +++- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java index ceb77d3b54e..4b017b41431 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java @@ -78,12 +78,7 @@ public void accept(FluxSink sink) { log.debug("sink is cancelled after partitions poll iteration"); } } - sendFinishStatsAndCompleteSink( - sink, - readUntilOffsets.isEmpty() - ? null - : cursor - ); + sendFinishStatsAndCompleteSink(sink, readUntilOffsets.isEmpty() ? 
null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index b83da33bd74..09007babdeb 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -67,7 +67,7 @@ public class MessagesService { private final int defaultPageSize; private final Cache> registeredFilters = CacheBuilder.newBuilder() - .maximumSize(5_000) + .maximumSize(PollingCursorsStorage.MAX_SIZE) .build(); private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage(); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java index 30f6ef92e03..654a6dd4bee 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java @@ -10,8 +10,10 @@ public class PollingCursorsStorage { + public static final int MAX_SIZE = 10_000; + private final Cache cursorsCache = CacheBuilder.newBuilder() - .maximumSize(10_000) + .maximumSize(MAX_SIZE) .build(); public Optional getCursor(String id) { From 68ee46d7897c9d2e603ad925eefdf65a17b15cdc Mon Sep 17 00:00:00 2001 From: iliax Date: Tue, 11 Jul 2023 12:03:32 +0400 Subject: [PATCH 13/29] PR comments fx --- .../java/com/provectus/kafka/ui/model/ConsumerPosition.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java index 6d09b20b3a5..51f4e51f7c6 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java @@ -1,5 +1,6 @@ package com.provectus.kafka.ui.model; +import com.google.common.base.Preconditions; import com.provectus.kafka.ui.exception.ValidationException; import java.util.List; import java.util.Map; @@ -14,9 +15,12 @@ public record ConsumerPosition(PollingModeDTO pollingMode, @Nullable Long timestamp, @Nullable Offsets offsets) { - // one of properties will be null public record Offsets(@Nullable Long offset, //should be applied to all partitions @Nullable Map tpOffsets) { + public Offsets { + // only one of properties should be set + Preconditions.checkArgument((offset == null && tpOffsets != null) || (offset != null && tpOffsets == null)); + } } public static ConsumerPosition create(PollingModeDTO pollingMode, From 5e5ac5ff8675ca3f8367a495141d265880eaa1a6 Mon Sep 17 00:00:00 2001 From: iliax Date: Tue, 1 Aug 2023 15:51:37 +0400 Subject: [PATCH 14/29] master merge --- .../ui/controller/MessagesController.java | 9 +- .../kafka/ui/service/MessagesService.java | 110 ++++++++++++------ .../kafka/ui/service/MessagesServiceTest.java | 3 + 3 files changed, 86 insertions(+), 36 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index fed0544fff6..d9c4016ee6f 100644 --- 
a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -95,11 +95,12 @@ public Mono>> getTopicMessagesV2(Strin String valueSerde, String cursor, ServerWebExchange exchange) { - final Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + var context = AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_READ) - .build()); + .operationName("getTopicMessages") + .build(); Flux messagesFlux; if (cursor != null) { @@ -116,7 +117,9 @@ public Mono>> getTopicMessagesV2(Strin valueSerde ); } - return validateAccess.then(Mono.just(ResponseEntity.ok(messagesFlux))); + return accessControlService.validateAccess(context) + .then(Mono.just(ResponseEntity.ok(messagesFlux))) + .doOnEach(sig -> auditService.audit(context, sig)); } @Override diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 09007babdeb..194cd81300f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -17,13 +17,20 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; +import com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.SeekDirectionDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.serde.api.Serde; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.ProducerRecordCreator; import com.provectus.kafka.ui.util.SslPropertiesUtil; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import java.util.List; import java.util.Map; import java.util.Optional; @@ -94,6 +101,37 @@ private Mono withExistingTopic(KafkaCluster cluster, String to .switchIfEmpty(Mono.error(new TopicNotFoundException())); } + public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) { + Predicate predicate; + try { + predicate = MessageFilters.groovyScriptFilter(execData.getFilterCode()); + } catch (Exception e) { + log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e); + return new SmartFilterTestExecutionResultDTO() + .error("Compilation error : " + e.getMessage()); + } + try { + var result = predicate.test( + new TopicMessageDTO() + .key(execData.getKey()) + .content(execData.getValue()) + .headers(execData.getHeaders()) + .offset(execData.getOffset()) + .partition(execData.getPartition()) + .timestamp( + Optional.ofNullable(execData.getTimestampMs()) + .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC)) + .orElse(null)) + ); + return new SmartFilterTestExecutionResultDTO() + .result(result); + } catch (Exception e) { + log.info("Smart filter {} execution error", execData, e); + return new SmartFilterTestExecutionResultDTO() + .error("Execution error : " + e.getMessage()); + } + } + public Mono 
deleteTopicMessages(KafkaCluster cluster, String topicName, List partitionsToInclude) { return withExistingTopic(cluster, topicName) @@ -140,13 +178,7 @@ private Mono sendMessageImpl(KafkaCluster cluster, msg.getValueSerde().get() ); - Properties properties = new Properties(); - SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties); - properties.putAll(cluster.getProperties()); - properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); - properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); - properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); - try (KafkaProducer producer = new KafkaProducer<>(properties)) { + try (KafkaProducer producer = createProducer(cluster, Map.of())) { ProducerRecord producerRecord = producerRecordCreator.create( topicDescription.name(), msg.getPartition(), @@ -168,34 +200,26 @@ private Mono sendMessageImpl(KafkaCluster cluster, } } - private int fixPageSize(@Nullable Integer pageSize) { - return Optional.ofNullable(pageSize) - .filter(ps -> ps > 0 && ps <= maxPageSize) - .orElse(defaultPageSize); - } - - private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { - var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); - var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); - return evt -> { - if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { - return evt; - } - return evt.message( - evt.getMessage() - .key(keyMasker.apply(evt.getMessage().getKey())) - .content(valMasker.apply(evt.getMessage().getContent()))); - }; + public static KafkaProducer createProducer(KafkaCluster cluster, + Map additionalProps) { + Properties properties = new Properties(); + SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties); + properties.putAll(cluster.getProperties()); + properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); + properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + properties.putAll(additionalProps); + return new KafkaProducer<>(properties); } public Flux loadMessages(KafkaCluster cluster, - String topic, - ConsumerPosition consumerPosition, - @Nullable String containsStringFilter, - @Nullable String filterId, - @Nullable Integer limit, - @Nullable String keySerde, - @Nullable String valueSerde) { + String topic, + ConsumerPosition consumerPosition, + @Nullable String containsStringFilter, + @Nullable String filterId, + @Nullable Integer limit, + @Nullable String keySerde, + @Nullable String valueSerde) { return loadMessages( cluster, topic, @@ -228,7 +252,7 @@ private Flux loadMessages(KafkaCluster cluster, return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, fixPageSize(limit))); + .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, limit)); } private Flux loadMessagesImpl(KafkaCluster cluster, @@ -271,6 +295,12 @@ private Flux loadMessagesImpl(KafkaCluster cluster, .map(throttleUiPublish(consumerPosition.pollingMode())); } + private int fixPageSize(@Nullable Integer pageSize) { + return Optional.ofNullable(pageSize) + .filter(ps -> ps > 0 && ps <= maxPageSize) + 
.orElse(defaultPageSize); + } + public String registerMessageFilter(String groovyCode) { String saltedCode = groovyCode + SALT_FOR_HASHING; String filterId = Hashing.sha256() @@ -283,6 +313,20 @@ public String registerMessageFilter(String groovyCode) { return filterId; } + private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { + var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); + var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); + return evt -> { + if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { + return evt; + } + return evt.message( + evt.getMessage() + .key(keyMasker.apply(evt.getMessage().getKey())) + .content(valMasker.apply(evt.getMessage().getContent()))); + }; + } + private Predicate getMsgFilter(@Nullable String containsStrFilter, @Nullable String smartFilterId) { Predicate messageFilter = MessageFilters.noop(); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java index fcc94fd5df3..d2f0990cb0f 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java @@ -1,5 +1,6 @@ package com.provectus.kafka.ui.service; +import static com.provectus.kafka.ui.service.MessagesService.execSmartFilterTest; import static org.assertj.core.api.Assertions.assertThat; import com.provectus.kafka.ui.AbstractIntegrationTest; @@ -8,12 +9,14 @@ import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; import com.provectus.kafka.ui.model.PollingModeDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serdes.builtin.StringSerde; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; From 3b8548aa304d161bcfc1bf7f5a07d77d491250a0 Mon Sep 17 00:00:00 2001 From: iliax Date: Wed, 2 Aug 2023 12:47:42 +0400 Subject: [PATCH 15/29] master merge --- .../kafka/ui/emitter/AbstractEmitter.java | 1 + .../ui/emitter/ForwardRecordEmitter.java | 2 -- .../kafka/ui/emitter/PolledRecords.java | 5 +++++ .../kafka/ui/emitter/TailingEmitter.java | 16 +++++++-------- .../kafka/ui/emitter/CursorTest.java | 20 ++++++------------- 5 files changed, 19 insertions(+), 25 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index 1b9a0efa064..7b387138a2c 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -1,6 +1,7 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import jakarta.annotation.Nullable; import java.time.Duration; import java.time.Instant; import javax.annotation.Nullable; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java index cd73aa08450..776140faa60 100644 
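Note (not part of the patch): the emitter hunks in this patch move polling onto the EnhancedConsumer wrapper, which returns a PolledRecords value object instead of raw ConsumerRecords, so per-poll byte/latency stats and throttling are collected in one place. A rough usage sketch based only on the calls visible in this diff; the broker address, topic and processing step are placeholders, and it assumes EnhancedConsumer still exposes the usual KafkaConsumer assignment API and Bytes key/value types.

    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");        // placeholder broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    try (var consumer = new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop())) {
      consumer.assign(List.of(new TopicPartition("some-topic", 0)));             // placeholder topic
      PolledRecords records = consumer.pollEnhanced(Duration.ofSeconds(1));
      // PolledRecords bundles the polled data with the stats that ConsumingStats reports
      System.out.printf("polled %d records (%d bytes) in %d ms from %s%n",
          records.count(), records.bytes(), records.elapsed().toMillis(), records.partitions());
      for (Iterator<ConsumerRecord<Bytes, Bytes>> it = records.iterator(); it.hasNext(); ) {
        ConsumerRecord<Bytes, Bytes> rec = it.next();
        // deserialize / filter / emit, as MessagesProcessing.send(...) does in the emitters
      }
    }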
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java @@ -5,8 +5,6 @@ import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.utils.Bytes; diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java index bc6bd95d5f6..94169f1b634 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java @@ -3,6 +3,7 @@ import java.time.Duration; import java.util.Iterator; import java.util.List; +import java.util.Set; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.TopicPartition; @@ -32,6 +33,10 @@ public Iterator> iterator() { return records.iterator(); } + public Set partitions() { + return records.partitions(); + } + private static int calculatePolledRecSize(Iterable> recs) { int polledBytes = 0; for (ConsumerRecord rec : recs) { diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java index dee522b01ea..024e8ba399f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java @@ -2,25 +2,23 @@ import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.TopicMessageEventDTO; -import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.errors.InterruptException; -import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; @Slf4j -public class TailingEmitter extends AbstractEmitter { +public class TailingEmitter extends AbstractEmitter + implements java.util.function.Consumer> { - private final Supplier> consumerSupplier; + private final Supplier consumerSupplier; private final ConsumerPosition consumerPosition; - public TailingEmitter(Supplier> consumerSupplier, + public TailingEmitter(Supplier consumerSupplier, ConsumerPosition consumerPosition, - ConsumerRecordDeserializer recordDeserializer, + MessagesProcessing messagesProcessing, PollingSettings pollingSettings) { - super(recordDeserializer, pollingSettings); + super(messagesProcessing, pollingSettings); this.consumerSupplier = consumerSupplier; this.consumerPosition = consumerPosition; } @@ -28,7 +26,7 @@ public TailingEmitter(Supplier> consumerSupplier, @Override public void accept(FluxSink sink) { log.debug("Starting tailing polling for {}", consumerPosition); - try (KafkaConsumer consumer = consumerSupplier.get()) { + try (EnhancedConsumer consumer = consumerSupplier.get()) { SeekOperations.create(consumer, consumerPosition) .assignAndSeek(); while (!sink.isCancelled()) { diff --git 
a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java index cd849bcfd92..06dffac83f4 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java @@ -12,7 +12,7 @@ import com.provectus.kafka.ui.serdes.PropertyResolverImpl; import com.provectus.kafka.ui.serdes.builtin.StringSerde; import com.provectus.kafka.ui.service.PollingCursorsStorage; -import java.io.Serializable; +import com.provectus.kafka.ui.util.ApplicationMetrics; import java.util.List; import java.util.Map; import java.util.Properties; @@ -20,11 +20,8 @@ import java.util.function.Consumer; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.serialization.BytesDeserializer; -import org.apache.kafka.common.utils.Bytes; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -172,17 +169,12 @@ private Cursor.Tracking createCursor(ConsumerPosition position) { ); } - private KafkaConsumer createConsumer() { - final Map map = Map.of( - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(), - ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(), - ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1, // to check multiple polls - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class, - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class - ); + private EnhancedConsumer createConsumer() { Properties props = new Properties(); - props.putAll(map); - return new KafkaConsumer<>(props); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers()); + props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1); // to check multiple polls + return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop()); } private static ConsumerRecordDeserializer createRecordsDeserializer() { From 6a62fb87c61be52e84d407abf8852af9d172e992 Mon Sep 17 00:00:00 2001 From: iliax Date: Mon, 21 Aug 2023 17:40:52 +0400 Subject: [PATCH 16/29] merged with master --- .../ui/controller/MessagesController.java | 53 +++++---- .../kafka/ui/emitter/AbstractEmitter.java | 39 ++----- .../kafka/ui/emitter/BackwardEmitter.java | 13 +-- .../kafka/ui/emitter/ConsumingStats.java | 41 +++---- .../provectus/kafka/ui/emitter/Cursor.java | 41 +++++-- .../kafka/ui/emitter/ForwardEmitter.java | 13 +-- .../kafka/ui/emitter/MessagesProcessing.java | 103 ++++++++++++------ .../kafka/ui/emitter/RangePollingEmitter.java | 13 ++- .../kafka/ui/emitter/SeekOperations.java | 7 +- .../kafka/ui/emitter/TailingEmitter.java | 24 ++-- .../kafka/ui/service/MessagesService.java | 86 ++++++--------- .../ui/service/PollingCursorsStorage.java | 12 ++ .../service/analyze/TopicAnalysisService.java | 6 +- .../kafka/ui/emitter/CursorTest.java | 41 +++---- .../kafka/ui/service/RecordEmitterTest.java | 96 +++++++++------- 15 files changed, 324 insertions(+), 264 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java 
b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java index d9c4016ee6f..709efcdc185 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java @@ -16,13 +16,15 @@ import com.provectus.kafka.ui.model.SeekDirectionDTO; import com.provectus.kafka.ui.model.SeekTypeDTO; import com.provectus.kafka.ui.model.SerdeUsageDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; +import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO; import com.provectus.kafka.ui.model.rbac.AccessContext; +import com.provectus.kafka.ui.model.rbac.permission.AuditAction; import com.provectus.kafka.ui.model.rbac.permission.TopicAction; import com.provectus.kafka.ui.service.DeserializationService; import com.provectus.kafka.ui.service.MessagesService; -import com.provectus.kafka.ui.service.rbac.AccessControlService; import java.util.List; import java.util.Optional; import javax.validation.Valid; @@ -43,26 +45,33 @@ public class MessagesController extends AbstractController implements MessagesAp private final MessagesService messagesService; private final DeserializationService deserializationService; - private final AccessControlService accessControlService; @Override public Mono> deleteTopicMessages( String clusterName, String topicName, @Valid List partitions, ServerWebExchange exchange) { - Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + var context = AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_DELETE) - .build()); + .build(); - return validateAccess.then( + return validateAccess(context).>then( messagesService.deleteTopicMessages( getCluster(clusterName), topicName, Optional.ofNullable(partitions).orElse(List.of()) ).thenReturn(ResponseEntity.ok().build()) - ); + ).doOnEach(sig -> audit(context, sig)); + } + + @Override + public Mono> executeSmartFilterTest( + Mono smartFilterTestExecutionDto, ServerWebExchange exchange) { + return smartFilterTestExecutionDto + .map(MessagesService::execSmartFilterTest) + .map(ResponseEntity::ok); } @Deprecated @@ -95,12 +104,17 @@ public Mono>> getTopicMessagesV2(Strin String valueSerde, String cursor, ServerWebExchange exchange) { - var context = AccessContext.builder() + var contextBuilder = AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_READ) - .operationName("getTopicMessages") - .build(); + .operationName("getTopicMessages"); + + if (auditService.isAuditTopic(getCluster(clusterName), topicName)) { + contextBuilder.auditActions(AuditAction.VIEW); + } + + var accessContext = contextBuilder.build(); Flux messagesFlux; if (cursor != null) { @@ -117,9 +131,9 @@ public Mono>> getTopicMessagesV2(Strin valueSerde ); } - return accessControlService.validateAccess(context) + return accessControlService.validateAccess(accessContext) .then(Mono.just(ResponseEntity.ok(messagesFlux))) - .doOnEach(sig -> auditService.audit(context, sig)); + .doOnEach(sig -> auditService.audit(accessContext, sig)); } @Override @@ -127,17 +141,18 @@ public Mono> sendTopicMessages( String clusterName, String topicName, @Valid Mono createTopicMessage, ServerWebExchange exchange) { - Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + var context = 
AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(MESSAGES_PRODUCE) - .build()); + .operationName("sendTopicMessages") + .build(); - return validateAccess.then( + return validateAccess(context).then( createTopicMessage.flatMap(msg -> messagesService.sendMessage(getCluster(clusterName), topicName, msg).then() ).map(ResponseEntity::ok) - ); + ).doOnEach(sig -> audit(context, sig)); } @Override @@ -145,12 +160,12 @@ public Mono> getSerdes(String clusterNam String topicName, SerdeUsageDTO use, ServerWebExchange exchange) { - - Mono validateAccess = accessControlService.validateAccess(AccessContext.builder() + var context = AccessContext.builder() .cluster(clusterName) .topic(topicName) .topicActions(TopicAction.VIEW) - .build()); + .operationName("getSerdes") + .build(); TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO() .key(use == SerdeUsageDTO.SERIALIZE @@ -160,7 +175,7 @@ public Mono> getSerdes(String clusterNam ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE) : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE)); - return validateAccess.then( + return validateAccess(context).then( Mono.just(dto) .subscribeOn(Schedulers.boundedElastic()) .map(ResponseEntity::ok) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java index 7b387138a2c..21ef0b43adb 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java @@ -2,39 +2,23 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO; import jakarta.annotation.Nullable; -import java.time.Duration; -import java.time.Instant; -import javax.annotation.Nullable; -import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; -public abstract class AbstractEmitter implements java.util.function.Consumer> { +abstract class AbstractEmitter implements java.util.function.Consumer> { private final MessagesProcessing messagesProcessing; - private final PollingThrottler throttler; - protected final PollingSettings pollingSettings; + private final PollingSettings pollingSettings; protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) { this.messagesProcessing = messagesProcessing; this.pollingSettings = pollingSettings; - this.throttler = pollingSettings.getPollingThrottler(); } - protected ConsumerRecords poll( - FluxSink sink, Consumer consumer) { - return poll(sink, consumer, pollingSettings.getPollTimeout()); - } - - protected ConsumerRecords poll( - FluxSink sink, Consumer consumer, Duration timeout) { - Instant start = Instant.now(); - ConsumerRecords records = consumer.poll(timeout); - Instant finish = Instant.now(); - int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis()); - throttler.throttleAfterPoll(polledBytes); + protected PolledRecords poll(FluxSink sink, EnhancedConsumer consumer) { + var records = consumer.pollEnhanced(pollingSettings.getPollTimeout()); + sendConsuming(sink, records); return records; } @@ -42,19 +26,18 @@ protected boolean isSendLimitReached() { return messagesProcessing.limitReached(); } - protected void 
sendMessage(FluxSink sink, - ConsumerRecord msg) { - messagesProcessing.sendMsg(sink, msg); + protected void send(FluxSink sink, + Iterable> records, + @Nullable Cursor.Tracking cursor) { + messagesProcessing.send(sink, records, cursor); } protected void sendPhase(FluxSink sink, String name) { messagesProcessing.sendPhase(sink, name); } - protected int sendConsuming(FluxSink sink, - ConsumerRecords records, - long elapsed) { - return messagesProcessing.sentConsumingInfo(sink, records, elapsed); + protected void sendConsuming(FluxSink sink, PolledRecords records) { + messagesProcessing.sentConsumingInfo(sink, records); } // cursor is null if target partitions were fully polled (no, need to do paging) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java index cdc45336e46..75aa21bdf83 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java @@ -18,18 +18,15 @@ public BackwardEmitter(Supplier consumerSupplier, int messagesPerPage, ConsumerRecordDeserializer deserializer, Predicate filter, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super( consumerSupplier, consumerPosition, messagesPerPage, - new MessagesProcessing( - deserializer, - filter, - false, - messagesPerPage - ), - pollingSettings + new MessagesProcessing(deserializer, filter, false, messagesPerPage), + pollingSettings, + cursor ); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java index b4ed63dafa4..17b519434b4 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java @@ -3,10 +3,7 @@ import com.provectus.kafka.ui.model.TopicMessageConsumingDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO; -import com.provectus.kafka.ui.util.ConsumerRecordsUtil; import javax.annotation.Nullable; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; class ConsumingStats { @@ -14,27 +11,24 @@ class ConsumingStats { private long bytes = 0; private int records = 0; private long elapsed = 0; + private int filterApplyErrors = 0; - /** - * returns bytes polled. 
- */ - int sendConsumingEvt(FluxSink sink, - ConsumerRecords polledRecords, - long elapsed, - int filterApplyErrors) { - int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords); - bytes += polledBytes; - this.records += polledRecords.count(); - this.elapsed += elapsed; + void sendConsumingEvt(FluxSink sink, PolledRecords polledRecords) { + bytes += polledRecords.bytes(); + records += polledRecords.count(); + elapsed += polledRecords.elapsed().toMillis(); sink.next( new TopicMessageEventDTO() .type(TopicMessageEventDTO.TypeEnum.CONSUMING) - .consuming(createConsumingStats(sink, filterApplyErrors)) + .consuming(createConsumingStats()) ); - return polledBytes; } - void sendFinishEvent(FluxSink sink, int filterApplyErrors, @Nullable Cursor.Tracking cursor) { + void incFilterApplyError() { + filterApplyErrors++; + } + + void sendFinishEvent(FluxSink sink, @Nullable Cursor.Tracking cursor) { sink.next( new TopicMessageEventDTO() .type(TopicMessageEventDTO.TypeEnum.DONE) @@ -43,17 +37,16 @@ void sendFinishEvent(FluxSink sink, int filterApplyErrors, ? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor()) : null ) - .consuming(createConsumingStats(sink, filterApplyErrors)) + .consuming(createConsumingStats()) ); } - private TopicMessageConsumingDTO createConsumingStats(FluxSink sink, - int filterApplyErrors) { + private TopicMessageConsumingDTO createConsumingStats() { return new TopicMessageConsumingDTO() - .bytesConsumed(this.bytes) - .elapsedMs(this.elapsed) - .isCancelled(sink.isCancelled()) + .bytesConsumed(bytes) + .elapsedMs(elapsed) + .isCancelled(false) .filterApplyErrors(filterApplyErrors) - .messagesConsumed(this.records); + .messagesConsumed(records); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java index d78a583e829..f0fd135bacf 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java @@ -1,5 +1,7 @@ package com.provectus.kafka.ui.emitter; +import com.google.common.collect.HashBasedTable; +import com.google.common.collect.Table; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.PollingModeDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; @@ -20,32 +22,41 @@ public static class Tracking { private final ConsumerPosition originalPosition; private final Predicate filter; private final int limit; - private final Function cursorRegistry; + private final Function registerAction; - private final Map trackingOffsets = new HashMap<>(); + //topic -> partition -> offset + private final Table trackingOffsets = HashBasedTable.create(); public Tracking(ConsumerRecordDeserializer deserializer, ConsumerPosition originalPosition, Predicate filter, int limit, - Function cursorRegistry) { + Function registerAction) { this.deserializer = deserializer; this.originalPosition = originalPosition; this.filter = filter; this.limit = limit; - this.cursorRegistry = cursorRegistry; + this.registerAction = registerAction; } - void trackOffset(TopicPartition tp, long offset) { - trackingOffsets.put(tp, offset); + void trackOffset(String topic, int partition, long offset) { + trackingOffsets.put(topic, partition, offset); } - void trackOffsets(Map offsets) { - this.trackingOffsets.putAll(offsets); + void initOffsets(Map initialSeekOffsets) { + initialSeekOffsets.forEach((tp, off) -> trackOffset(tp.topic(), tp.partition(), off)); + } + + 
private Map getOffsetsMap(int offsetToAdd) { + Map result = new HashMap<>(); + trackingOffsets.rowMap() + .forEach((topic, partsMap) -> + partsMap.forEach((p, off) -> result.put(new TopicPartition(topic, p), off + offsetToAdd))); + return result; } String registerCursor() { - return cursorRegistry.apply( + return registerAction.apply( new Cursor( deserializer, new ConsumerPosition( @@ -57,7 +68,17 @@ String registerCursor() { originalPosition.topic(), originalPosition.partitions(), null, - new ConsumerPosition.Offsets(null, trackingOffsets) + new ConsumerPosition.Offsets( + null, + getOffsetsMap( + switch (originalPosition.pollingMode()) { + case TO_OFFSET, TO_TIMESTAMP, LATEST -> 0; + // when doing forward polling we need to start from latest msg's offset + 1 + case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> 1; + case TAILING -> throw new IllegalStateException(); + } + ) + ) ), filter, limit diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java index 5c915fb2e8c..6627bc45c10 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java @@ -18,18 +18,15 @@ public ForwardEmitter(Supplier consumerSupplier, int messagesPerPage, ConsumerRecordDeserializer deserializer, Predicate filter, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super( consumerSupplier, consumerPosition, messagesPerPage, - new MessagesProcessing( - deserializer, - filter, - true, - messagesPerPage - ), - pollingSettings + new MessagesProcessing(deserializer, filter, true, messagesPerPage), + pollingSettings, + cursor ); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java index 59848fac042..8b8332e0398 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java @@ -1,71 +1,80 @@ package com.provectus.kafka.ui.emitter; +import static java.util.stream.Collectors.collectingAndThen; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toList; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Iterables; +import com.google.common.collect.Streams; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.model.TopicMessagePhaseDTO; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; import java.util.function.Predicate; import javax.annotation.Nullable; +import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.utils.Bytes; import reactor.core.publisher.FluxSink; @Slf4j -public class MessagesProcessing { +@RequiredArgsConstructor +class MessagesProcessing { private final ConsumingStats consumingStats = new ConsumingStats(); private long sentMessages = 0; - private int filterApplyErrors = 0; private final ConsumerRecordDeserializer deserializer; private final Predicate 
filter; + private final boolean ascendingSortBeforeSend; private final @Nullable Integer limit; - public MessagesProcessing(ConsumerRecordDeserializer deserializer, - Predicate filter, - @Nullable Integer limit) { - this.deserializer = deserializer; - this.filter = filter; - this.limit = limit; - } - boolean limitReached() { return limit != null && sentMessages >= limit; } - void sendMsg(FluxSink sink, ConsumerRecord rec) { - if (!sink.isCancelled() && !limitReached()) { - TopicMessageDTO topicMessage = deserializer.deserialize(rec); - try { - if (filter.test(topicMessage)) { - sink.next( - new TopicMessageEventDTO() - .type(TopicMessageEventDTO.TypeEnum.MESSAGE) - .message(topicMessage) - ); - sentMessages++; - } - } catch (Exception e) { - filterApplyErrors++; - log.trace("Error applying filter for message {}", topicMessage); - } - } + void send(FluxSink sink, + Iterable> polled, + @Nullable Cursor.Tracking cursor) { + sortForSending(polled, ascendingSortBeforeSend) + .forEach(rec -> { + if (!limitReached() && !sink.isCancelled()) { + TopicMessageDTO topicMessage = deserializer.deserialize(rec); + try { + if (filter.test(topicMessage)) { + sink.next( + new TopicMessageEventDTO() + .type(TopicMessageEventDTO.TypeEnum.MESSAGE) + .message(topicMessage) + ); + sentMessages++; + } + if (cursor != null) { + cursor.trackOffset(rec.topic(), rec.partition(), rec.offset()); + } + } catch (Exception e) { + consumingStats.incFilterApplyError(); + log.trace("Error applying filter for message {}", topicMessage); + } + } + }); } - int sentConsumingInfo(FluxSink sink, - ConsumerRecords polledRecords, - long elapsed) { + void sentConsumingInfo(FluxSink sink, PolledRecords polledRecords) { if (!sink.isCancelled()) { - return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors); + consumingStats.sendConsumingEvt(sink, polledRecords); } - return 0; } void sendFinishEvents(FluxSink sink, @Nullable Cursor.Tracking cursor) { if (!sink.isCancelled()) { - consumingStats.sendFinishEvent(sink, filterApplyErrors, cursor); + consumingStats.sendFinishEvent(sink, cursor); } } @@ -79,4 +88,30 @@ void sendPhase(FluxSink sink, String name) { } } + /* + * Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets. + */ + @VisibleForTesting + static Iterable> sortForSending(Iterable> records, + boolean asc) { + Comparator offsetComparator = asc + ? Comparator.comparingLong(ConsumerRecord::offset) + : Comparator.comparingLong(ConsumerRecord::offset).reversed(); + + // partition -> sorted by offsets records + Map>> perPartition = Streams.stream(records) + .collect( + groupingBy( + ConsumerRecord::partition, + TreeMap::new, + collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList()))); + + Comparator tsComparator = asc + ? 
Comparator.comparing(ConsumerRecord::timestamp) + : Comparator.comparingLong(ConsumerRecord::timestamp).reversed(); + + // merge-sorting records from partitions one by one using timestamp comparator + return Iterables.mergeSorted(perPartition.values(), tsComparator); + } + } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/RangePollingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/RangePollingEmitter.java index af6dc7d0693..8abcd4772e4 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/RangePollingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/RangePollingEmitter.java @@ -17,6 +17,7 @@ abstract class RangePollingEmitter extends AbstractEmitter { private final Supplier consumerSupplier; + private final Cursor.Tracking cursor; protected final ConsumerPosition consumerPosition; protected final int messagesPerPage; @@ -24,11 +25,13 @@ protected RangePollingEmitter(Supplier consumerSupplier, ConsumerPosition consumerPosition, int messagesPerPage, MessagesProcessing messagesProcessing, - PollingSettings pollingSettings) { + PollingSettings pollingSettings, + Cursor.Tracking cursor) { super(messagesProcessing, pollingSettings); this.consumerPosition = consumerPosition; this.messagesPerPage = messagesPerPage; this.consumerSupplier = consumerSupplier; + this.cursor = cursor; } protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) { @@ -46,18 +49,20 @@ public void accept(FluxSink sink) { try (EnhancedConsumer consumer = consumerSupplier.get()) { sendPhase(sink, "Consumer created"); var seekOperations = SeekOperations.create(consumer, consumerPosition); + cursor.initOffsets(seekOperations.getOffsetsForSeek()); + TreeMap pollRange = nextPollingRange(new TreeMap<>(), seekOperations); log.debug("Starting from offsets {}", pollRange); - while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) { + while (!sink.isCancelled() && !pollRange.isEmpty() && !isSendLimitReached()) { var polled = poll(consumer, sink, pollRange); - send(sink, polled); + send(sink, polled, cursor); pollRange = nextPollingRange(pollRange, seekOperations); } if (sink.isCancelled()) { log.debug("Polling finished due to sink cancellation"); } - sendFinishStatsAndCompleteSink(sink); + sendFinishStatsAndCompleteSink(sink, pollRange.isEmpty() ? null : cursor); log.debug("Polling finished"); } catch (InterruptException kafkaInterruptException) { log.debug("Polling finished due to thread interruption"); diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java index f10be11c2d3..e02889d288e 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java @@ -20,7 +20,7 @@ class SeekOperations { private final OffsetsInfo offsetsInfo; private final Map offsetsForSeek; //only contains non-empty partitions! - static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { + public static SeekOperations create(Consumer consumer, ConsumerPosition consumerPosition) { OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty() ? 
new OffsetsInfo(consumer, consumerPosition.topic()) : new OffsetsInfo(consumer, consumerPosition.partitions()); @@ -28,7 +28,7 @@ static SeekOperations create(Consumer consumer, ConsumerPosition consumerP return new SeekOperations(consumer, offsetsInfo, offsetsToSeek); } - void assignAndSeek() { + public void assignAndSeekNonEmptyPartitions() { consumer.assign(offsetsForSeek.keySet()); offsetsForSeek.forEach(consumer::seek); } @@ -69,8 +69,7 @@ private static Map fixOffsets(OffsetsInfo offsetsInfo, if (positionOffset.offset() != null) { offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset())); } else { - requireNonNull(positionOffset.tpOffsets()); - offsets.putAll(positionOffset.tpOffsets()); + offsets.putAll(requireNonNull(positionOffset.tpOffsets())); offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions()); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java index 024e8ba399f..dd73f743710 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java @@ -1,24 +1,28 @@ package com.provectus.kafka.ui.emitter; import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; +import java.util.HashMap; +import java.util.function.Predicate; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.common.errors.InterruptException; import reactor.core.publisher.FluxSink; @Slf4j -public class TailingEmitter extends AbstractEmitter - implements java.util.function.Consumer> { +public class TailingEmitter extends AbstractEmitter { private final Supplier consumerSupplier; private final ConsumerPosition consumerPosition; public TailingEmitter(Supplier consumerSupplier, ConsumerPosition consumerPosition, - MessagesProcessing messagesProcessing, + ConsumerRecordDeserializer deserializer, + Predicate filter, PollingSettings pollingSettings) { - super(messagesProcessing, pollingSettings); + super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings); this.consumerSupplier = consumerSupplier; this.consumerPosition = consumerPosition; } @@ -27,12 +31,11 @@ public TailingEmitter(Supplier consumerSupplier, public void accept(FluxSink sink) { log.debug("Starting tailing polling for {}", consumerPosition); try (EnhancedConsumer consumer = consumerSupplier.get()) { - SeekOperations.create(consumer, consumerPosition) - .assignAndSeek(); + assignAndSeek(consumer); while (!sink.isCancelled()) { sendPhase(sink, "Polling"); var polled = poll(sink, consumer); - polled.forEach(r -> sendMessage(sink, r)); + send(sink, polled, null); } sink.complete(); log.debug("Tailing finished"); @@ -45,4 +48,11 @@ public void accept(FluxSink sink) { } } + private void assignAndSeek(EnhancedConsumer consumer) { + var seekOperations = SeekOperations.create(consumer, consumerPosition); + var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end + seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions + consumer.assign(seekOffsets.keySet()); + seekOffsets.forEach(consumer::seek); + } } diff --git 
a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java index 194cd81300f..b14b885c56a 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java @@ -6,25 +6,21 @@ import com.google.common.hash.Hashing; import com.google.common.util.concurrent.RateLimiter; import com.provectus.kafka.ui.config.ClustersProperties; -import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.BackwardEmitter; import com.provectus.kafka.ui.emitter.Cursor; -import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; +import com.provectus.kafka.ui.emitter.ForwardEmitter; import com.provectus.kafka.ui.emitter.MessageFilters; -import com.provectus.kafka.ui.emitter.MessagesProcessing; import com.provectus.kafka.ui.emitter.TailingEmitter; import com.provectus.kafka.ui.exception.TopicNotFoundException; import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.MessageFilterTypeDTO; import com.provectus.kafka.ui.model.PollingModeDTO; -import com.provectus.kafka.ui.model.SeekDirectionDTO; import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO; import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO; import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; -import com.provectus.kafka.ui.serde.api.Serde; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.ProducerRecordCreator; import com.provectus.kafka.ui.util.SslPropertiesUtil; @@ -252,81 +248,45 @@ private Flux loadMessages(KafkaCluster cluster, return withExistingTopic(cluster, topic) .flux() .publishOn(Schedulers.boundedElastic()) - .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, limit)); + .flatMap(td -> loadMessagesImpl(cluster, deserializer, consumerPosition, filter, limit)); } private Flux loadMessagesImpl(KafkaCluster cluster, - String topic, ConsumerRecordDeserializer deserializer, ConsumerPosition consumerPosition, Predicate filter, int limit) { - var processing = new MessagesProcessing( - deserializer, - filter, - consumerPosition.pollingMode() == PollingModeDTO.TAILING ? 
null : limit - ); - var emitter = switch (consumerPosition.pollingMode()) { - case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter( + case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, limit, - processing, + deserializer, + filter, cluster.getPollingSettings(), - new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register) + cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit) ); - case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter( + case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, - processing, + limit, + deserializer, + filter, cluster.getPollingSettings(), - new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register) + cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit) ); case TAILING -> new TailingEmitter( () -> consumerGroupService.createConsumer(cluster), consumerPosition, - processing, + deserializer, + filter, cluster.getPollingSettings() ); }; return Flux.create(emitter) - .map(getDataMasker(cluster, topic)) .map(throttleUiPublish(consumerPosition.pollingMode())); } - private int fixPageSize(@Nullable Integer pageSize) { - return Optional.ofNullable(pageSize) - .filter(ps -> ps > 0 && ps <= maxPageSize) - .orElse(defaultPageSize); - } - - public String registerMessageFilter(String groovyCode) { - String saltedCode = groovyCode + SALT_FOR_HASHING; - String filterId = Hashing.sha256() - .hashString(saltedCode, Charsets.UTF_8) - .toString() - .substring(0, 8); - if (registeredFilters.getIfPresent(filterId) == null) { - registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode)); - } - return filterId; - } - - private UnaryOperator getDataMasker(KafkaCluster cluster, String topicName) { - var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY); - var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE); - return evt -> { - if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) { - return evt; - } - return evt.message( - evt.getMessage() - .key(keyMasker.apply(evt.getMessage().getKey())) - .content(valMasker.apply(evt.getMessage().getContent()))); - }; - } - private Predicate getMsgFilter(@Nullable String containsStrFilter, @Nullable String smartFilterId) { Predicate messageFilter = MessageFilters.noop(); @@ -356,4 +316,22 @@ private UnaryOperator throttleUiPublish(PollingModeDTO pollingMode) { return UnaryOperator.identity(); } + private int fixPageSize(@Nullable Integer pageSize) { + return Optional.ofNullable(pageSize) + .filter(ps -> ps > 0 && ps <= maxPageSize) + .orElse(defaultPageSize); + } + + public String registerMessageFilter(String groovyCode) { + String saltedCode = groovyCode + SALT_FOR_HASHING; + String filterId = Hashing.sha256() + .hashString(saltedCode, Charsets.UTF_8) + .toString() + .substring(0, 8); + if (registeredFilters.getIfPresent(filterId) == null) { + registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode)); + } + return filterId; + } + } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java index 654a6dd4bee..98094b5113b 100644 --- 
a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java @@ -4,8 +4,12 @@ import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.provectus.kafka.ui.emitter.Cursor; +import com.provectus.kafka.ui.model.ConsumerPosition; +import com.provectus.kafka.ui.model.TopicMessageDTO; +import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import java.util.Map; import java.util.Optional; +import java.util.function.Predicate; import org.apache.commons.lang3.RandomStringUtils; public class PollingCursorsStorage { @@ -16,6 +20,14 @@ public class PollingCursorsStorage { .maximumSize(MAX_SIZE) .build(); + + public Cursor.Tracking createNewCursor(ConsumerRecordDeserializer deserializer, + ConsumerPosition originalPosition, + Predicate filter, + int limit) { + return new Cursor.Tracking(deserializer, originalPosition, filter, limit, this::register); + } + public Optional getCursor(String id) { return Optional.ofNullable(cursorsCache.getIfPresent(id)); } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java index 2523aae89ec..692c63109fa 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java @@ -1,6 +1,6 @@ package com.provectus.kafka.ui.service.analyze; -import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING; +import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST; import com.provectus.kafka.ui.emitter.EnhancedConsumer; import com.provectus.kafka.ui.emitter.SeekOperations; @@ -14,6 +14,7 @@ import java.time.Duration; import java.time.Instant; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import lombok.RequiredArgsConstructor; @@ -104,7 +105,8 @@ public void run() { consumer.partitionsFor(topicId.topicName) .forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats())); - var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null)); + var seekOperations = + SeekOperations.create(consumer, new ConsumerPosition(EARLIEST, topicId.topicName, List.of(), null, null)); long summaryOffsetsRange = seekOperations.summaryOffsetsRange(); seekOperations.assignAndSeekNonEmptyPartitions(); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java index 06dffac83f4..88be63fe67b 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java @@ -117,56 +117,56 @@ private void emitMessages(AbstractEmitter emitter, int expectedCnt) { .verifyComplete(); } - private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position) { - return new BackwardRecordEmitter( + private BackwardEmitter createBackwardEmitter(ConsumerPosition position) { + return new BackwardEmitter( this::createConsumer, position, PAGE_SIZE, - new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), + createRecordsDeserializer(), + m -> true, PollingSettings.createDefault(), createCursor(position) ); } - private BackwardRecordEmitter 
createBackwardEmitterWithCursor(Cursor cursor) { - return new BackwardRecordEmitter( + private BackwardEmitter createBackwardEmitterWithCursor(Cursor cursor) { + return new BackwardEmitter( this::createConsumer, cursor.consumerPosition(), cursor.limit(), - new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE), + cursor.deserializer(), + cursor.filter(), PollingSettings.createDefault(), createCursor(cursor.consumerPosition()) ); } - private ForwardRecordEmitter createForwardEmitterWithCursor(Cursor cursor) { - return new ForwardRecordEmitter( + private ForwardEmitter createForwardEmitterWithCursor(Cursor cursor) { + return new ForwardEmitter( this::createConsumer, cursor.consumerPosition(), - new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE), + cursor.limit(), + cursor.deserializer(), + cursor.filter(), PollingSettings.createDefault(), createCursor(cursor.consumerPosition()) ); } - private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position) { - return new ForwardRecordEmitter( + private ForwardEmitter createForwardEmitter(ConsumerPosition position) { + return new ForwardEmitter( this::createConsumer, position, - new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE), + PAGE_SIZE, + createRecordsDeserializer(), + m -> true, PollingSettings.createDefault(), createCursor(position) ); } private Cursor.Tracking createCursor(ConsumerPosition position) { - return new Cursor.Tracking( - createRecordsDeserializer(), - position, - m -> true, - PAGE_SIZE, - cursorsStorage::register - ); + return cursorsStorage.createNewCursor(createRecordsDeserializer(), position, m -> true, PAGE_SIZE); } private EnhancedConsumer createConsumer() { @@ -187,7 +187,8 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() { s.deserializer(null, Serde.Target.VALUE), StringSerde.name(), s.deserializer(null, Serde.Target.KEY), - s.deserializer(null, Serde.Target.VALUE) + s.deserializer(null, Serde.Target.VALUE), + msg -> msg ); } diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java index 92d896a2967..9c26e78f2a9 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java @@ -9,19 +9,21 @@ import static org.assertj.core.api.Assertions.assertThat; import com.provectus.kafka.ui.AbstractIntegrationTest; -import com.provectus.kafka.ui.emitter.BackwardRecordEmitter; +import com.provectus.kafka.ui.emitter.BackwardEmitter; import com.provectus.kafka.ui.emitter.Cursor; -import com.provectus.kafka.ui.emitter.ForwardRecordEmitter; -import com.provectus.kafka.ui.emitter.MessagesProcessing; +import com.provectus.kafka.ui.emitter.EnhancedConsumer; +import com.provectus.kafka.ui.emitter.ForwardEmitter; import com.provectus.kafka.ui.emitter.PollingSettings; +import com.provectus.kafka.ui.emitter.PollingThrottler; import com.provectus.kafka.ui.model.ConsumerPosition; -import com.provectus.kafka.ui.model.ConsumerPosition.Offsets; +import com.provectus.kafka.ui.model.TopicMessageDTO; import com.provectus.kafka.ui.model.TopicMessageEventDTO; import com.provectus.kafka.ui.producer.KafkaTestProducer; import com.provectus.kafka.ui.serde.api.Serde; import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer; import com.provectus.kafka.ui.serdes.PropertyResolverImpl; import 
com.provectus.kafka.ui.serdes.builtin.StringSerde; +import com.provectus.kafka.ui.util.ApplicationMetrics; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; @@ -32,17 +34,15 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Predicate; import java.util.stream.Collectors; import lombok.Value; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.header.internals.RecordHeader; -import org.apache.kafka.common.serialization.BytesDeserializer; -import org.apache.kafka.common.utils.Bytes; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -62,6 +62,7 @@ class RecordEmitterTest extends AbstractIntegrationTest { static final List SENT_RECORDS = new ArrayList<>(); static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer(); static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class); + static final Predicate NOOP_FILTER = m -> true; @BeforeAll static void generateMsgs() throws Exception { @@ -98,6 +99,7 @@ static void generateMsgs() throws Exception { static void cleanup() { deleteTopic(TOPIC); deleteTopic(EMPTY_TOPIC); + SENT_RECORDS.clear(); } private static ConsumerRecordDeserializer createRecordsDeserializer() { @@ -110,29 +112,29 @@ private static ConsumerRecordDeserializer createRecordsDeserializer() { s.deserializer(null, Serde.Target.VALUE), StringSerde.name(), s.deserializer(null, Serde.Target.KEY), - s.deserializer(null, Serde.Target.VALUE) + s.deserializer(null, Serde.Target.VALUE), + msg -> msg ); } - private MessagesProcessing createMessagesProcessing() { - return new MessagesProcessing(RECORD_DESERIALIZER, msg -> true, null); - } - @Test void pollNothingOnEmptyTopic() { - var forwardEmitter = new ForwardRecordEmitter( + var forwardEmitter = new ForwardEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), - createMessagesProcessing(), + 100, + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); - var backwardEmitter = new BackwardRecordEmitter( + var backwardEmitter = new BackwardEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null), 100, - createMessagesProcessing(), + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -152,19 +154,22 @@ void pollNothingOnEmptyTopic() { @Test void pollFullTopicFromBeginning() { - var forwardEmitter = new ForwardRecordEmitter( + var forwardEmitter = new ForwardEmitter( this::createConsumer, new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null), - createMessagesProcessing(), + PARTITIONS * MSGS_PER_PARTITION, + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); - var backwardEmitter = new BackwardRecordEmitter( + var backwardEmitter = new BackwardEmitter( this::createConsumer, new ConsumerPosition(LATEST, TOPIC, List.of(), null, null), PARTITIONS * MSGS_PER_PARTITION, - createMessagesProcessing(), + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -183,21 +188,24 @@ void pollWithOffsets() { 
targetOffsets.put(new TopicPartition(TOPIC, i), offset); } - var forwardEmitter = new ForwardRecordEmitter( + var forwardEmitter = new ForwardEmitter( this::createConsumer, new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, - new Offsets(null, targetOffsets)), - createMessagesProcessing(), + new ConsumerPosition.Offsets(null, targetOffsets)), + PARTITIONS * MSGS_PER_PARTITION, + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); - var backwardEmitter = new BackwardRecordEmitter( + var backwardEmitter = new BackwardEmitter( this::createConsumer, new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, - new Offsets(null, targetOffsets)), + new ConsumerPosition.Offsets(null, targetOffsets)), PARTITIONS * MSGS_PER_PARTITION, - createMessagesProcessing(), + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -223,10 +231,12 @@ void pollWithTimestamps() { //choosing ts in the middle long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2); - var forwardEmitter = new ForwardRecordEmitter( + var forwardEmitter = new ForwardEmitter( this::createConsumer, new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), - createMessagesProcessing(), + PARTITIONS * MSGS_PER_PARTITION, + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -239,11 +249,12 @@ void pollWithTimestamps() { .collect(Collectors.toList()) ); - var backwardEmitter = new BackwardRecordEmitter( + var backwardEmitter = new BackwardEmitter( this::createConsumer, new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null), PARTITIONS * MSGS_PER_PARTITION, - createMessagesProcessing(), + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -265,12 +276,13 @@ void backwardEmitterSeekToEnd() { targetOffsets.put(new TopicPartition(TOPIC, i), (long) MSGS_PER_PARTITION); } - var backwardEmitter = new BackwardRecordEmitter( + var backwardEmitter = new BackwardEmitter( this::createConsumer, new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null, - new Offsets(null, targetOffsets)), + new ConsumerPosition.Offsets(null, targetOffsets)), numMessages, - createMessagesProcessing(), + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -293,11 +305,13 @@ void backwardEmitterSeekToBegin() { offsets.put(new TopicPartition(TOPIC, i), 0L); } - var backwardEmitter = new BackwardRecordEmitter( + var backwardEmitter = new BackwardEmitter( this::createConsumer, - new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)), + new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, + new ConsumerPosition.Offsets(null, offsets)), 100, - createMessagesProcessing(), + RECORD_DESERIALIZER, + NOOP_FILTER, PollingSettings.createDefault(), CURSOR_MOCK ); @@ -338,22 +352,20 @@ private void expectEmitter( assertionsConsumer.accept(step.expectComplete().verifyThenAssertThat()); } - private KafkaConsumer createConsumer() { + private EnhancedConsumer createConsumer() { return createConsumer(Map.of()); } - private KafkaConsumer createConsumer(Map properties) { + private EnhancedConsumer createConsumer(Map properties) { final Map map = Map.of( ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(), ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(), - 
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19, // to check multiple polls - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class, - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19 // to check multiple polls ); Properties props = new Properties(); props.putAll(map); props.putAll(properties); - return new KafkaConsumer<>(props); + return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop()); } @Value From c6b3342d8f7656c60fc8e0c673b6ede6ce330e03 Mon Sep 17 00:00:00 2001 From: gokhanimral Date: Fri, 23 Feb 2024 15:02:52 +0400 Subject: [PATCH 17/29] Change the docker iamge repo name --- kafka-ui-api/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml index 1e0f952eb16..b7246f86f6e 100644 --- a/kafka-ui-api/pom.xml +++ b/kafka-ui-api/pom.xml @@ -485,7 +485,7 @@ true - provectuslabs/kafka-ui:${git.revision} + gimral/kafka-ui:${git.revision} ${project.basedir} From d2f8d652c173242603b59ff83d507364d8da4e44 Mon Sep 17 00:00:00 2001 From: gokhanimral Date: Sun, 25 Feb 2024 01:58:14 +0400 Subject: [PATCH 18/29] Case insensitive basic string filters --- .../java/com/provectus/kafka/ui/emitter/MessageFilters.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java index f109289fe49..45dbf27e2b3 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java @@ -26,8 +26,8 @@ public static Predicate noop() { } public static Predicate containsStringFilter(String string) { - return msg -> StringUtils.contains(msg.getKey(), string) - || StringUtils.contains(msg.getContent(), string); + return msg -> StringUtils.containsIgnoreCase(msg.getKey(), string) + || StringUtils.containsIgnoreCase(msg.getContent(), string); } public static Predicate groovyScriptFilter(String script) { From 757c8f468f6d11ed33daa1e7f15e16098caa0b1e Mon Sep 17 00:00:00 2001 From: gokhanimral Date: Sun, 25 Feb 2024 14:04:01 +0400 Subject: [PATCH 19/29] Enable MessageV2 API to fix sorting and paging issues --- .../src/main/resources/application-local.yml | 158 ++++++------- .../src/main/resources/application.yml | 118 ++++++++-- .../kafka/ui/emitter/MessageFiltersTest.java | 2 +- .../main/resources/swagger/kafka-ui-api.yaml | 52 +++++ kafka-ui-react-app/package.json | 4 +- kafka-ui-react-app/pnpm-lock.yaml | 34 +-- .../Topics/Topic/Messages/Filters/Filters.tsx | 215 ++++++++++-------- .../Messages/Filters/FiltersContainer.ts | 12 + .../Topics/Topic/Messages/Messages.tsx | 52 +++-- .../Topics/Topic/Messages/MessagesTable.tsx | 26 +-- .../src/components/common/Search/Search.tsx | 5 +- .../contexts/TopicMessagesContext.ts | 2 + .../src/lib/hooks/api/topics.ts | 28 ++- .../src/redux/interfaces/topic.ts | 5 + .../redux/reducers/topicMessages/selectors.ts | 16 ++ .../topicMessages/topicMessagesSlice.ts | 41 +++- 16 files changed, 526 insertions(+), 244 deletions(-) diff --git a/kafka-ui-api/src/main/resources/application-local.yml b/kafka-ui-api/src/main/resources/application-local.yml index 7848f1fdc49..fea2029ecdb 100644 --- a/kafka-ui-api/src/main/resources/application-local.yml +++ b/kafka-ui-api/src/main/resources/application-local.yml @@ -10,22 +10,22 @@ logging: #server: # port: 8080 
#- Port in which kafka-ui will run. -spring: - jmx: - enabled: true - ldap: - urls: ldap://localhost:10389 - base: "cn={0},ou=people,dc=planetexpress,dc=com" - admin-user: "cn=admin,dc=planetexpress,dc=com" - admin-password: "GoodNewsEveryone" - user-filter-search-base: "dc=planetexpress,dc=com" - user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))" - group-filter-search-base: "ou=people,dc=planetexpress,dc=com" +#spring: +# jmx: +# enabled: true +# ldap: +# urls: ldap://localhost:10389 +# base: "cn={0},ou=people,dc=planetexpress,dc=com" +# admin-user: "cn=admin,dc=planetexpress,dc=com" +# admin-password: "GoodNewsEveryone" +# user-filter-search-base: "dc=planetexpress,dc=com" +# user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))" +# group-filter-search-base: "ou=people,dc=planetexpress,dc=com" kafka: clusters: - name: local - bootstrapServers: localhost:9092 + bootstrapServers: localhost:9096 schemaRegistry: http://localhost:8085 ksqldbServer: http://localhost:8088 kafkaConnect: @@ -80,70 +80,70 @@ auth: custom-params: type: github -rbac: - roles: - - name: "memelords" - clusters: - - local - subjects: - - provider: oauth_google - type: domain - value: "provectus.com" - - provider: oauth_google - type: user - value: "name@provectus.com" - - - provider: oauth_github - type: organization - value: "provectus" - - provider: oauth_github - type: user - value: "memelord" - - - provider: oauth_cognito - type: user - value: "username" - - provider: oauth_cognito - type: group - value: "memelords" - - - provider: ldap - type: group - value: "admin_staff" - - # NOT IMPLEMENTED YET - # - provider: ldap_ad - # type: group - # value: "admin_staff" - - permissions: - - resource: applicationconfig - actions: all - - - resource: clusterconfig - actions: all - - - resource: topic - value: ".*" - actions: all - - - resource: consumer - value: ".*" - actions: all - - - resource: schema - value: ".*" - actions: all - - - resource: connect - value: "*" - actions: all - - - resource: ksql - actions: all - - - resource: acl - actions: all - - - resource: audit - actions: all +#rbac: +# roles: +# - name: "memelords" +# clusters: +# - local +# subjects: +# - provider: oauth_google +# type: domain +# value: "provectus.com" +# - provider: oauth_google +# type: user +# value: "name@provectus.com" +# +# - provider: oauth_github +# type: organization +# value: "provectus" +# - provider: oauth_github +# type: user +# value: "memelord" +# +# - provider: oauth_cognito +# type: user +# value: "username" +# - provider: oauth_cognito +# type: group +# value: "memelords" +# +# - provider: ldap +# type: group +# value: "admin_staff" +# +# # NOT IMPLEMENTED YET +# # - provider: ldap_ad +# # type: group +# # value: "admin_staff" +# +# permissions: +# - resource: applicationconfig +# actions: all +# +# - resource: clusterconfig +# actions: all +# +# - resource: topic +# value: ".*" +# actions: all +# +# - resource: consumer +# value: ".*" +# actions: all +# +# - resource: schema +# value: ".*" +# actions: all +# +# - resource: connect +# value: "*" +# actions: all +# +# - resource: ksql +# actions: all +# +# - resource: acl +# actions: all +# +# - resource: audit +# actions: all diff --git a/kafka-ui-api/src/main/resources/application.yml b/kafka-ui-api/src/main/resources/application.yml index e8799206132..cf23d9ebf9a 100644 --- a/kafka-ui-api/src/main/resources/application.yml +++ b/kafka-ui-api/src/main/resources/application.yml @@ -1,21 +1,109 @@ -auth: - type: DISABLED - -management: - 
endpoint: - info: - enabled: true - health: - enabled: true - endpoints: - web: - exposure: - include: "info,health,prometheus" - logging: level: root: INFO com.provectus: DEBUG + #org.springframework.http.codec.json.Jackson2JsonEncoder: DEBUG + #org.springframework.http.codec.json.Jackson2JsonDecoder: DEBUG reactor.netty.http.server.AccessLog: INFO - org.hibernate.validator: WARN + org.springframework.security: DEBUG + +#server: +# port: 8080 #- Port in which kafka-ui will run. + +#spring: +# jmx: +# enabled: true +# ldap: +# urls: ldap://localhost:10389 +# base: "cn={0},ou=people,dc=planetexpress,dc=com" +# admin-user: "cn=admin,dc=planetexpress,dc=com" +# admin-password: "GoodNewsEveryone" +# user-filter-search-base: "dc=planetexpress,dc=com" +# user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))" +# group-filter-search-base: "ou=people,dc=planetexpress,dc=com" + +kafka: + clusters: + - name: local + bootstrapServers: localhost:9096 +# schemaRegistry: http://localhost:8085 +# ksqldbServer: http://localhost:8088 +# kafkaConnect: +# - name: first +# address: http://localhost:8083 +# metrics: +# port: 9997 +# type: JMX + +auth: + type: DISABLED + +dynamic.config.enabled: true +#rbac: +# roles: +# - name: "memelords" +# clusters: +# - local +# subjects: +# - provider: oauth_google +# type: domain +# value: "provectus.com" +# - provider: oauth_google +# type: user +# value: "name@provectus.com" +# +# - provider: oauth_github +# type: organization +# value: "provectus" +# - provider: oauth_github +# type: user +# value: "memelord" +# +# - provider: oauth_cognito +# type: user +# value: "username" +# - provider: oauth_cognito +# type: group +# value: "memelords" +# +# - provider: ldap +# type: group +# value: "admin_staff" +# +# # NOT IMPLEMENTED YET +# # - provider: ldap_ad +# # type: group +# # value: "admin_staff" +# +# permissions: +# - resource: applicationconfig +# actions: all +# +# - resource: clusterconfig +# actions: all +# +# - resource: topic +# value: ".*" +# actions: all +# +# - resource: consumer +# value: ".*" +# actions: all +# +# - resource: schema +# value: ".*" +# actions: all +# +# - resource: connect +# value: "*" +# actions: all +# +# - resource: ksql +# actions: all +# +# - resource: acl +# actions: all +# +# - resource: audit +# actions: all diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/MessageFiltersTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/MessageFiltersTest.java index 4e9f5034cd2..73264c5d8b4 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/MessageFiltersTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/MessageFiltersTest.java @@ -51,7 +51,7 @@ void returnsFalseOtherwise() { filter.test(msg().key(null).content(null)) ); - assertFalse( + assertTrue( filter.test(msg().key("aBc").content("AbC")) ); } diff --git a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml index dcf26d58e68..0084aafc9c6 100644 --- a/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml +++ b/kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml @@ -857,6 +857,33 @@ paths: items: $ref: '#/components/schemas/TopicMessageEvent' + /api/clusters/{clusterName}/topics/{topicName}/activeproducers: + get: + tags: + - Topics + summary: get producer states for topic + operationId: getActiveProducerStates + parameters: + - name: clusterName + in: path + required: true + schema: + type: string + - 
name: topicName + in: path + required: true + schema: + type: string + responses: + 200: + description: OK + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/TopicProducerState' + /api/clusters/{clusterName}/topics/{topicName}/consumer-groups: get: tags: @@ -2499,6 +2526,31 @@ components: - PROTOBUF - UNKNOWN + TopicProducerState: + type: object + properties: + partition: + type: integer + format: int32 + producerId: + type: integer + format: int64 + producerEpoch: + type: integer + format: int32 + lastSequence: + type: integer + format: int32 + lastTimestampMs: + type: integer + format: int64 + coordinatorEpoch: + type: integer + format: int32 + currentTransactionStartOffset: + type: integer + format: int64 + ConsumerGroup: type: object properties: diff --git a/kafka-ui-react-app/package.json b/kafka-ui-react-app/package.json index 172ec4466ab..491e3abacb5 100644 --- a/kafka-ui-react-app/package.json +++ b/kafka-ui-react-app/package.json @@ -21,7 +21,7 @@ "fetch-mock": "^9.11.0", "jest": "^29.4.3", "jest-watch-typeahead": "^2.2.2", - "json-schema-faker": "^0.5.0-rcv.44", + "json-schema-faker": "^0.5.6", "jsonpath-plus": "^7.2.0", "lodash": "^4.17.21", "lossless-json": "^2.0.8", @@ -109,4 +109,4 @@ "node": "v18.17.1", "pnpm": "^8.6.12" } -} +} \ No newline at end of file diff --git a/kafka-ui-react-app/pnpm-lock.yaml b/kafka-ui-react-app/pnpm-lock.yaml index 01862dd3bbe..eb396184bf9 100644 --- a/kafka-ui-react-app/pnpm-lock.yaml +++ b/kafka-ui-react-app/pnpm-lock.yaml @@ -57,8 +57,8 @@ dependencies: specifier: ^2.2.2 version: 2.2.2(jest@29.6.4) json-schema-faker: - specifier: ^0.5.0-rcv.44 - version: 0.5.3 + specifier: ^0.5.6 + version: 0.5.6 jsonpath-plus: specifier: ^7.2.0 version: 7.2.0 @@ -91,7 +91,7 @@ dependencies: version: 7.43.1(react@18.2.0) react-hot-toast: specifier: ^2.4.0 - version: 2.4.1(csstype@3.1.2)(react-dom@18.1.0)(react@18.2.0) + version: 2.4.1(csstype@3.1.3)(react-dom@18.1.0)(react@18.2.0) react-is: specifier: ^18.2.0 version: 18.2.0 @@ -2606,7 +2606,7 @@ packages: normalize-path: 3.0.0 readdirp: 3.6.0 optionalDependencies: - fsevents: 2.3.2 + fsevents: 2.3.3 /ci-info@3.3.1: resolution: {integrity: sha512-SXgeMX9VwDe7iFFaEWkA5AstuER9YKqy4EhHqr4DVqkwmD9rpVimkMKWHdjn30Ja45txyjhSn63lVX69eVCckg==} @@ -2808,6 +2808,10 @@ packages: /csstype@3.1.2: resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} + /csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + dev: false + /damerau-levenshtein@1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} dev: true @@ -3741,8 +3745,8 @@ packages: /fs.realpath@1.0.0: resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} - /fsevents@2.3.2: - resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + /fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] requiresBuild: true @@ -3903,12 +3907,12 @@ packages: /globrex@0.1.2: resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} - /goober@2.1.10(csstype@3.1.2): + 
/goober@2.1.10(csstype@3.1.3): resolution: {integrity: sha512-7PpuQMH10jaTWm33sQgBQvz45pHR8N4l3Cu3WMGEWmHShAcTuuP7I+5/DwKo39fwti5A80WAjvqgz6SSlgWmGA==} peerDependencies: csstype: ^3.0.10 dependencies: - csstype: 3.1.2 + csstype: 3.1.3 dev: false /gopd@1.0.1: @@ -4544,7 +4548,7 @@ packages: micromatch: 4.0.5 walker: 1.0.8 optionalDependencies: - fsevents: 2.3.2 + fsevents: 2.3.3 dev: false /jest-leak-detector@29.6.3: @@ -4903,8 +4907,8 @@ packages: /json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} - /json-schema-faker@0.5.3: - resolution: {integrity: sha512-BeIrR0+YSrTbAR9dOMnjbFl1MvHyXnq+Wpdw1FpWZDHWKLzK229hZ5huyPcmzFUfVq1ODwf40WdGVoE266UBUg==} + /json-schema-faker@0.5.6: + resolution: {integrity: sha512-u/cFC26/GDxh2vPiAC8B8xVvpXAW+QYtG2mijEbKrimCk8IHtiwQBjCE8TwvowdhALWq9IcdIWZ+/8ocXvdL3Q==} hasBin: true dependencies: json-schema-ref-parser: 6.1.0 @@ -5711,14 +5715,14 @@ packages: react: 18.2.0 dev: false - /react-hot-toast@2.4.1(csstype@3.1.2)(react-dom@18.1.0)(react@18.2.0): + /react-hot-toast@2.4.1(csstype@3.1.3)(react-dom@18.1.0)(react@18.2.0): resolution: {integrity: sha512-j8z+cQbWIM5LY37pR6uZR6D4LfseplqnuAO4co4u8917hBUvXlEqyP1ZzqVLcqoyUesZZv/ImreoCeHVDpE5pQ==} engines: {node: '>=10'} peerDependencies: react: '>=16' react-dom: '>=16' dependencies: - goober: 2.1.10(csstype@3.1.2) + goober: 2.1.10(csstype@3.1.3) react: 18.2.0 react-dom: 18.1.0(react@18.2.0) transitivePeerDependencies: @@ -6022,7 +6026,7 @@ packages: engines: {node: '>=14.18.0', npm: '>=8.0.0'} hasBin: true optionalDependencies: - fsevents: 2.3.2 + fsevents: 2.3.3 /run-async@2.4.1: resolution: {integrity: sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==} @@ -6755,7 +6759,7 @@ packages: rollup: 3.7.3 sass: 1.66.1 optionalDependencies: - fsevents: 2.3.2 + fsevents: 2.3.3 /w3c-hr-time@1.0.2: resolution: {integrity: sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==} diff --git a/kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/Filters.tsx b/kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/Filters.tsx index 347623d2226..8f1399b8cb8 100644 --- a/kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/Filters.tsx +++ b/kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/Filters.tsx @@ -3,6 +3,7 @@ import 'react-datepicker/dist/react-datepicker.css'; import { MessageFilterType, Partition, + PollingMode, SeekDirection, SeekType, SerdeUsage, @@ -10,6 +11,7 @@ import { TopicMessageConsuming, TopicMessageEvent, TopicMessageEventTypeEnum, + TopicMessageNextPageCursor, } from 'generated-sources'; import React, { useContext } from 'react'; import omitBy from 'lodash/omitBy'; @@ -35,7 +37,7 @@ import CloseIcon from 'components/common/Icons/CloseIcon'; import ClockIcon from 'components/common/Icons/ClockIcon'; import ArrowDownIcon from 'components/common/Icons/ArrowDownIcon'; import FileIcon from 'components/common/Icons/FileIcon'; -import { useTopicDetails } from 'lib/hooks/api/topics'; +import { useRegisterFilter, useTopicDetails } from 'lib/hooks/api/topics'; import { InputLabel } from 'components/common/Input/InputLabel.styled'; import { getSerdeOptions } from 'components/Topics/Topic/SendMessage/utils'; import { useSerdes } from 'lib/hooks/api/topicMessages'; @@ -47,6 +49,9 @@ import { getSelectedPartitionsFromSeekToParam, getTimestampFromSeekToParam, } from './utils'; +import { 
getTopicMessgesLastLoadedPage } from 'redux/reducers/topicMessages/selectors'; +import { useAppSelector } from 'lib/hooks/redux'; +import { getDefaultSerdeName } from '../getDefaultSerdeName'; type Query = Record; @@ -55,12 +60,18 @@ export interface FiltersProps { meta: TopicMessageConsuming; isFetching: boolean; messageEventType?: string; + cursor?: TopicMessageNextPageCursor; + currentPage: number; addMessage(content: { message: TopicMessage; prepend: boolean }): void; resetMessages(): void; updatePhase(phase: string): void; updateMeta(meta: TopicMessageConsuming): void; setIsFetching(status: boolean): void; setMessageType(messageType: string): void; + updateCursor(cursor?: TopicMessageNextPageCursor): void; + setCurrentPage(page: number): void; + setLastLoadedPage(page: number): void; + resetAllMessages(): void; } export interface MessageFilters { @@ -85,6 +96,7 @@ const Filters: React.FC = ({ phaseMessage, meta: { elapsedMs, bytesConsumed, messagesConsumed, filterApplyErrors }, isFetching, + currentPage, addMessage, resetMessages, updatePhase, @@ -92,19 +104,25 @@ const Filters: React.FC = ({ setIsFetching, setMessageType, messageEventType, + updateCursor, + setCurrentPage, + setLastLoadedPage, + resetAllMessages, }) => { const { clusterName, topicName } = useAppParams(); const location = useLocation(); const navigate = useNavigate(); const [searchParams] = useSearchParams(); - const page = searchParams.get('page'); - const { data: topic } = useTopicDetails({ clusterName, topicName }); + const registerFilter = useRegisterFilter({ clusterName, topicName }); + + const lastLoadedPage = useAppSelector(getTopicMessgesLastLoadedPage); + const partitions = topic?.partitions || []; - const { seekDirection, isLive, changeSeekDirection } = + const { seekDirection, isLive, changeSeekDirection, page, setPage } = useContext(TopicMessagesContext); const { value: isOpen, toggle } = useBoolean(); @@ -131,11 +149,18 @@ const Filters: React.FC = ({ const [timestamp, setTimestamp] = React.useState( getTimestampFromSeekToParam(searchParams) ); + + const { data: serdes = {} } = useSerdes({ + clusterName, + topicName, + use: SerdeUsage.DESERIALIZE, + }); + const [keySerde, setKeySerde] = React.useState( - searchParams.get('keySerde') || '' + searchParams.get('keySerde') || getDefaultSerdeName(serdes.key || []) ); const [valueSerde, setValueSerde] = React.useState( - searchParams.get('valueSerde') || '' + searchParams.get('valueSerde') || getDefaultSerdeName(serdes.value || []) ); const [savedFilters, setSavedFilters] = React.useState( @@ -155,7 +180,7 @@ const Filters: React.FC = ({ ? 
MessageFilterType.GROOVY_SCRIPT : MessageFilterType.STRING_CONTAINS ); - const [query, setQuery] = React.useState(searchParams.get('q') || ''); + const [stringFilter, setStringFilter] = React.useState(''); const [isTailing, setIsTailing] = React.useState(isLive); const isSeekTypeControlVisible = React.useMemo( @@ -173,23 +198,12 @@ const Filters: React.FC = ({ return false; }, [isSeekTypeControlVisible, currentSeekType, timestamp, isTailing]); - const partitionMap = React.useMemo( - () => - partitions.reduce>( - (acc, partition) => ({ - ...acc, - [partition.partition]: partition, - }), - {} - ), - [partitions] - ); - const handleClearAllFilters = () => { setCurrentSeekType(SeekType.OFFSET); setOffset(''); setTimestamp(null); - setQuery(''); + setStringFilter(''); + setPage(1); changeSeekDirection(SeekDirection.FORWARD); getSelectedPartitionsFromSeekToParam(searchParams, partitions); setSelectedPartitions( @@ -202,65 +216,60 @@ const Filters: React.FC = ({ ); }; - const handleFiltersSubmit = (currentOffset: string) => { - const nextAttempt = Number(searchParams.get('attempt') || 0) + 1; + const getPollingMode = (seekDirection: SeekDirection, seekType: SeekType): PollingMode => { + if (seekDirection == SeekDirection.FORWARD) { + if (offset && currentSeekType === SeekType.OFFSET) + return PollingMode.FROM_OFFSET; + if (timestamp && currentSeekType === SeekType.TIMESTAMP) + return PollingMode.FROM_TIMESTAMP; + return PollingMode.EARLIEST; + } + if (seekDirection == SeekDirection.BACKWARD) { + if (offset && currentSeekType === SeekType.OFFSET) + return PollingMode.TO_OFFSET; + if (timestamp && currentSeekType === SeekType.TIMESTAMP) + return PollingMode.TO_TIMESTAMP; + return PollingMode.LATEST; + } + if (seekDirection == SeekDirection.TAILING) + return PollingMode.TAILING; + return PollingMode.LATEST; + } + + const getSmartFilterId = async (code: string) => { + try { + const filterId = await registerFilter.mutateAsync({ + filterCode: code + }); + return filterId; + } catch (e) { + // do nothing + } + } + + const handleFiltersSubmit = async (cursor?: TopicMessageNextPageCursor) => { + + if (!keySerde || !valueSerde) + return; const props: Query = { - q: - queryType === MessageFilterType.GROOVY_SCRIPT - ? 
          activeFilter.code
-          : query,
-      filterQueryType: queryType,
-      attempt: nextAttempt,
+      mode: getPollingMode(seekDirection, currentSeekType),
       limit: PER_PAGE,
-      page: page || 0,
-      seekDirection,
+      stringFilter: stringFilter,
+      offset: offset,
+      timestamp: timestamp?.getTime() || 0,
       keySerde: keySerde || searchParams.get('keySerde') || '',
       valueSerde: valueSerde || searchParams.get('valueSerde') || '',
     };

-    if (isSeekTypeControlVisible) {
-      switch (seekDirection) {
-        case SeekDirection.FORWARD:
-          props.seekType = SeekType.BEGINNING;
-          break;
-        case SeekDirection.BACKWARD:
-        case SeekDirection.TAILING:
-          props.seekType = SeekType.LATEST;
-          break;
-        default:
-          props.seekType = currentSeekType;
-      }
+    if (cursor?.id)
+      props.cursor = cursor?.id;

-      if (offset && currentSeekType === SeekType.OFFSET) {
-        props.seekType = SeekType.OFFSET;
-      }
-
-      if (timestamp && currentSeekType === SeekType.TIMESTAMP) {
-        props.seekType = SeekType.TIMESTAMP;
-      }
+    if (selectedPartitions.length !== partitions.length) {
+      props.partitions = selectedPartitions.map((p) => p.value);
+    }

-      const isSeekTypeWithSeekTo =
-        props.seekType === SeekType.TIMESTAMP ||
-        props.seekType === SeekType.OFFSET;
-
-      if (
-        selectedPartitions.length !== partitions.length ||
-        isSeekTypeWithSeekTo
-      ) {
-        // not everything in the partition is selected
-        props.seekTo = selectedPartitions.map(({ value }) => {
-          const offsetProperty =
-            seekDirection === SeekDirection.FORWARD ? 'offsetMin' : 'offsetMax';
-          const offsetBasedSeekTo =
-            currentOffset || partitionMap[value][offsetProperty];
-          const seekToOffset =
-            currentSeekType === SeekType.OFFSET
-              ? offsetBasedSeekTo
-              : timestamp?.getTime();
-
-          return `${value}::${seekToOffset || '0'}`;
-        });
-      }
+    if (queryType === MessageFilterType.GROOVY_SCRIPT) {
+      props.smartFilterId = (await getSmartFilterId(activeFilter.code))?.id || '';
     }

     const newProps = omitBy(props, (v) => v === undefined || v === '');
@@ -272,6 +281,12 @@ const Filters: React.FC = ({
     });
   };

+  const handleSubmit = async () => {
+    setPage(1);
+    resetAllMessages();
+    handleFiltersSubmit();
+  }
+
   const handleSSECancel = () => {
     if (!source.current) return;
     setIsFetching(false);
@@ -345,9 +360,16 @@ const Filters: React.FC = ({
   // eslint-disable-next-line consistent-return
   React.useEffect(() => {
     if (location.search?.length !== 0) {
+      if (page === currentPage)
+        return () => { };
+      if (page <= lastLoadedPage) {
+        setCurrentPage(page);
+        return () => { };
+      }
+
       const url = `${BASE_PARAMS.basePath}/api/clusters/${encodeURIComponent(
         clusterName
-      )}/topics/${topicName}/messages${location.search}`;
+      )}/topics/${topicName}/messages/v2${location.search}`;
       const sse = new EventSource(url);

       source.current = sse;
@@ -358,7 +380,7 @@ const Filters: React.FC = ({
        setIsFetching(true);
      };
      sse.onmessage = ({ data }) => {
-        const { type, message, phase, consuming }: TopicMessageEvent =
+        const { type, message, phase, consuming, cursor }: TopicMessageEvent =
          JSON.parse(data);
        switch (type) {
          case TopicMessageEventTypeEnum.MESSAGE:
@@ -381,13 +403,17 @@ const Filters: React.FC = ({
            if (consuming && type) {
              setMessageType(type);
              updateMeta(consuming);
+              updateCursor(cursor);
+              setCurrentPage(page);
+              setLastLoadedPage(page);
+              handleFiltersSubmit(cursor);
            }
            break;
          default:
        }
      };
-      sse.onerror = () => {
+      sse.onerror = (e) => {
        setIsFetching(false);
        sse.close();
      };
@@ -407,10 +433,15 @@ const Filters: React.FC = ({
     setIsFetching,
     updateMeta,
     updatePhase,
+    updateCursor,
+    setLastLoadedPage
   ]);
+
   React.useEffect(() => {
     if (location.search?.length === 0) {
-      handleFiltersSubmit(offset);
+      setPage(1);
+      resetAllMessages();
+      handleFiltersSubmit();
     }
   }, [
     seekDirection,
@@ -418,32 +449,38 @@ const Filters: React.FC = ({
     activeFilter,
     currentSeekType,
     timestamp,
-    query,
+    stringFilter,
     location,
   ]);
+
   React.useEffect(() => {
-    handleFiltersSubmit(offset);
+    setPage(1);
+    resetAllMessages();
+    handleFiltersSubmit();
   }, [
     seekDirection,
     queryType,
-    activeFilter,
     currentSeekType,
-    timestamp,
-    query,
     seekDirection,
-    page,
+    keySerde,
+    valueSerde
+  ]);
+
+  React.useEffect(() => {
+    setPage(1);
+    resetAllMessages();
+  }, [
+    selectedPartitions,
+    offset,
+    timestamp,
+    stringFilter,
+    activeFilter,
   ]);

   React.useEffect(() => {
     setIsTailing(isLive);
   }, [isLive]);

-  const { data: serdes = {} } = useSerdes({
-    clusterName,
-    topicName,
-    use: SerdeUsage.DESERIALIZE,
-  });
-
   return (
@@ -531,7 +568,7 @@ const Filters: React.FC = ({
             buttonSize="M"
             disabled={isSubmitDisabled}
             onClick={() =>
-              isFetching ? handleSSECancel() : handleFiltersSubmit(offset)
+              isFetching ? handleSSECancel() : handleSubmit()
             }
             style={{ fontWeight: 500 }}
           >
@@ -548,7 +585,7 @@ const Filters: React.FC = ({
           />
-
+
+
+
-            onChange && onChange(val)}
+          />
+        );
+    if (typeof DatePicker === typeof inputType)
+      return (
+            onChange && onChange(date as Date)}
+          />
+        );
+    return null;
+};
+
+export default SelectSubForm;
diff --git a/kafka-ui-react-app/src/components/common/table/TableHeaderCell/TableHeaderCell.tsx b/kafka-ui-react-app/src/components/common/table/TableHeaderCell/TableHeaderCell.tsx
index 380c8e3c791..23e27660447 100644
--- a/kafka-ui-react-app/src/components/common/table/TableHeaderCell/TableHeaderCell.tsx
+++ b/kafka-ui-react-app/src/components/common/table/TableHeaderCell/TableHeaderCell.tsx
@@ -67,7 +67,6 @@ const TableHeaderCell: React.FC> = (
         )}
         {children}
-
   );
 };
diff --git a/kafka-ui-react-app/src/components/contexts/TopicMessagesContext.ts b/kafka-ui-react-app/src/components/contexts/TopicMessagesContext.ts
index 9c38e4a0f55..568f2f84da6 100644
--- a/kafka-ui-react-app/src/components/contexts/TopicMessagesContext.ts
+++ b/kafka-ui-react-app/src/components/contexts/TopicMessagesContext.ts
@@ -7,10 +7,10 @@ export interface ContextProps {
   page: number;
   setPage(page: number): void;
   isLive: boolean;
-  keySerde:string;
-  setKeySerde(val:string):void;
-  valueSerde:string;
-  setValueSerde(val:string):void;
+  keySerde: string;
+  setKeySerde(val: string): void;
+  valueSerde: string;
+  setValueSerde(val: string): void;
   serdes: TopicSerdeSuggestion;
 }
diff --git a/kafka-ui-react-app/src/lib/constants.ts b/kafka-ui-react-app/src/lib/constants.ts
index 7ed3df1f176..78a8d863724 100644
--- a/kafka-ui-react-app/src/lib/constants.ts
+++ b/kafka-ui-react-app/src/lib/constants.ts
@@ -1,5 +1,9 @@
 import { SelectOption } from 'components/common/Select/Select';
-import { ConfigurationParameters, ConsumerGroupState, PollingMode } from 'generated-sources';
+import {
+  ConfigurationParameters,
+  ConsumerGroupState,
+  PollingMode,
+} from 'generated-sources';

 declare global {
   interface Window {
@@ -108,8 +112,7 @@ export const CONSUMER_GROUP_STATE_TOOLTIPS: Record =
   UNKNOWN: '',
 } as const;
-
- export const PollingModeOptionsObj = {
+export const PollingModeOptionsObj = {
   [PollingMode.LATEST]: {
     value: PollingMode.LATEST,
     label: 'Newest',
@@ -120,17 +123,17 @@ export const CONSUMER_GROUP_STATE_TOOLTIPS: Record =
     label: 'Oldest',
     isLive: false,
   },
-  [PollingMode.TAILING]: {
+  [PollingMode.TAILING]: {
     value: PollingMode.TAILING,
     label: 'Live Mode',
     isLive: true,
   },
-  [PollingMode.FROM_OFFSET]: {
+  [PollingMode.FROM_OFFSET]: {
     value: PollingMode.FROM_OFFSET,
     label: 'From Offset',
     isLive: false,
   },
-  [PollingMode.TO_OFFSET]: {
+  [PollingMode.TO_OFFSET]: {
     value: PollingMode.TO_OFFSET,
     label: 'To Offset',
     isLive: false,
@@ -140,11 +143,11 @@ export const CONSUMER_GROUP_STATE_TOOLTIPS: Record =
     label: 'From Time',
     isLive: false,
   },
-  [PollingMode.TO_TIMESTAMP]: {
+  [PollingMode.TO_TIMESTAMP]: {
     value: PollingMode.TO_TIMESTAMP,
     label: 'To Time',
     isLive: false,
   },
 };
-export const PollingModeOptions = Object.values(PollingModeOptionsObj)
+export const PollingModeOptions = Object.values(PollingModeOptionsObj);
diff --git a/kafka-ui-react-app/src/lib/fixtures/filter.ts b/kafka-ui-react-app/src/lib/fixtures/filter.ts
index 9c7bff0db97..1facaa0b914 100644
--- a/kafka-ui-react-app/src/lib/fixtures/filter.ts
+++ b/kafka-ui-react-app/src/lib/fixtures/filter.ts
@@ -1,3 +1,3 @@
-import { MessageFilterId } from "generated-sources";
+import { MessageFilterId } from 'generated-sources';

-export const filterRegistrationPayload : MessageFilterId = { id: 'xrysu'}
\ No newline at end of file
+export const filterRegistrationPayload: MessageFilterId = { id: 'xrysu' };
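The reworked submit logic in Filters.tsx above builds its request with getPollingMode(seekDirection, currentSeekType), a helper that is not visible in this excerpt. A minimal sketch of what that mapping could look like, assuming the pre-existing SeekDirection/SeekType enums and the PollingMode values declared in lib/constants.ts, is shown below; the real helper elsewhere in the PR may differ.

import { PollingMode, SeekDirection, SeekType } from 'generated-sources';

// Hypothetical sketch: collapse the old direction + seek-type pair into the
// single PollingMode value that the v2 messages endpoint expects.
export const getPollingMode = (
  seekDirection: SeekDirection,
  seekType: SeekType
): PollingMode => {
  if (seekDirection === SeekDirection.TAILING) return PollingMode.TAILING;
  if (seekDirection === SeekDirection.FORWARD) {
    if (seekType === SeekType.OFFSET) return PollingMode.FROM_OFFSET;
    if (seekType === SeekType.TIMESTAMP) return PollingMode.FROM_TIMESTAMP;
    return PollingMode.EARLIEST;
  }
  // Backward: read towards the given offset/timestamp, newest-first otherwise.
  if (seekType === SeekType.OFFSET) return PollingMode.TO_OFFSET;
  if (seekType === SeekType.TIMESTAMP) return PollingMode.TO_TIMESTAMP;
  return PollingMode.LATEST;
};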
diff --git a/kafka-ui-react-app/src/theme/theme.ts b/kafka-ui-react-app/src/theme/theme.ts
index 79a4cf3376d..6597732282d 100644
--- a/kafka-ui-react-app/src/theme/theme.ts
+++ b/kafka-ui-react-app/src/theme/theme.ts
@@ -751,6 +751,26 @@ export const theme = {
       },
     },
   },
+  datePicker: {
+    color: {
+      normal: {
+        background: Colors.neutral[0],
+        text: Colors.neutral[90],
+      },
+      active: {
+        background: Colors.brand[50],
+        text: Colors.neutral[0],
+      },
+      gray: Colors.neutral[30],
+    },
+    borderColor: {
+      normal: Colors.neutral[30],
+      active: Colors.neutral[70],
+    },
+    navigationIcon: {
+      color: Colors.brand[50],
+    },
+  },
 };

 export type ThemeType = typeof theme;
@@ -1264,4 +1284,24 @@ export const darkTheme: ThemeType = {
       },
     },
   },
+  datePicker: {
+    color: {
+      normal: {
+        background: Colors.neutral[85],
+        text: Colors.neutral[0],
+      },
+      active: {
+        background: Colors.brand[30],
+        text: Colors.neutral[0],
+      },
+      gray: Colors.neutral[30],
+    },
+    borderColor: {
+      normal: Colors.neutral[30],
+      active: Colors.neutral[70],
+    },
+    navigationIcon: {
+      color: Colors.brand[50],
+    },
+  },
 };

From dc56d668bfa234924811db31a400b0faa72195e6 Mon Sep 17 00:00:00 2001
From: gokhanimral
Date: Sat, 2 Mar 2024 16:48:32 +0400
Subject: [PATCH 29/29] Disable SonarCloud and AWS connections

---
 .github/workflows/e2e-automation.yml | 42 ++++++++++++++--------------
 .github/workflows/frontend.yaml | 16 +++++------
 2 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/.github/workflows/e2e-automation.yml b/.github/workflows/e2e-automation.yml
index b3bb2f266fc..5ad9c450214 100644
--- a/.github/workflows/e2e-automation.yml
+++ b/.github/workflows/e2e-automation.yml
@@ -23,12 +23,12 @@ jobs:
       - uses: actions/checkout@v3
         with:
           ref: ${{ github.sha }}
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v3
-        with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          aws-region: eu-central-1
+      # - name: Configure AWS credentials
+      #   uses: aws-actions/configure-aws-credentials@v3
+      #   with:
+      #     aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      #     aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      #     aws-region: eu-central-1
       - name: Set up environment
        id: set_env_values
        run: |
@@ -68,21 +68,21 @@ jobs:
           allure_report: allure-report
           subfolder: allure-results
           report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
-      - uses: jakejarvis/s3-sync-action@master
-        if: always()
-        env:
-          AWS_S3_BUCKET: 'kafkaui-allure-reports'
-          AWS_REGION: 'eu-central-1'
-          SOURCE_DIR: 'allure-history/allure-results'
-      - name: Deploy report to Amazon S3
-        if: always()
-        uses: Sibz/github-status-action@v1.1.6
-        with:
-          authToken: ${{secrets.GITHUB_TOKEN}}
-          context: "Click Details button to open Allure report"
-          state: "success"
-          sha: ${{ github.sha }}
-          target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
+      # - uses: jakejarvis/s3-sync-action@master
+      #   if: always()
+      #   env:
+      #     AWS_S3_BUCKET: 'kafkaui-allure-reports'
+      #     AWS_REGION: 'eu-central-1'
+      #     SOURCE_DIR: 'allure-history/allure-results'
+      # - name: Deploy report to Amazon S3
+      #   if: always()
+      #   uses: Sibz/github-status-action@v1.1.6
+      #   with:
+      #     authToken: ${{secrets.GITHUB_TOKEN}}
+      #     context: "Click Details button to open Allure report"
+      #     state: "success"
+      #     sha: ${{ github.sha }}
+      #     target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
       - name: Dump Docker logs on failure
         if: failure()
         uses: jwalton/gh-docker-logs@v2.2.1
diff --git a/.github/workflows/frontend.yaml b/.github/workflows/frontend.yaml
index 9d7300448c9..daae8d83e8c 100644
--- a/.github/workflows/frontend.yaml
+++ b/.github/workflows/frontend.yaml
@@ -48,11 +48,11 @@ jobs:
         run: |
           cd kafka-ui-react-app/
           pnpm test:CI
-      - name: SonarCloud Scan
-        uses: sonarsource/sonarcloud-github-action@master
-        with:
-          projectBaseDir: ./kafka-ui-react-app
-          args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_FRONTEND }}
+      # - name: SonarCloud Scan
+      #   uses: sonarsource/sonarcloud-github-action@master
+      #   with:
+      #     projectBaseDir: ./kafka-ui-react-app
+      #     args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
+      #   env:
+      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      #     SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_FRONTEND }}
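The Filters.tsx changes in this series switch the message stream to the /messages/v2 endpoint and thread a cursor id through updateCursor and the next handleFiltersSubmit call. A rough, illustrative sketch of that consumption pattern is given below; the function and callback names are not part of the patch, and the cursor shape is assumed to match the destructured field in Filters.tsx.

import { TopicMessageEvent, TopicMessageEventTypeEnum } from 'generated-sources';

// Illustrative only: open the v2 SSE stream and surface both the individual
// messages and the cursor id that the backend reports for the next page.
export const streamMessages = (
  url: string, // e.g. `${basePath}/api/clusters/local/topics/my-topic/messages/v2?...`
  onMessage: (event: TopicMessageEvent) => void,
  onCursor: (cursorId: string | undefined) => void
): EventSource => {
  const sse = new EventSource(url);
  sse.onmessage = ({ data }) => {
    const event: TopicMessageEvent = JSON.parse(data);
    if (event.type === TopicMessageEventTypeEnum.MESSAGE) {
      onMessage(event);
    }
    if (event.consuming && event.type) {
      // Assumes the generated model exposes `cursor.id`, mirroring the
      // updateCursor(cursor) call in Filters.tsx.
      onCursor(event.cursor?.id);
    }
  };
  sse.onerror = () => sse.close();
  return sse;
};

In the component itself the cursor id is kept in state and sent back as props.cursor on the next submit, which is what enables page-by-page loading over SSE.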
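The datePicker block added to both themes defines background, text, border, and navigation-icon colors. A small styled-components sketch of how such tokens are typically consumed follows; the wrapper component name is illustrative and not taken from this PR.

import styled from 'styled-components';

// Illustrative wrapper showing how the new theme.datePicker tokens can be read.
export const DatePickerInput = styled.input`
  background: ${({ theme }) => theme.datePicker.color.normal.background};
  color: ${({ theme }) => theme.datePicker.color.normal.text};
  border: 1px solid ${({ theme }) => theme.datePicker.borderColor.normal};

  &:focus {
    border-color: ${({ theme }) => theme.datePicker.borderColor.active};
    outline: none;
  }
`;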