-
Couldn't load subscription status.
- Fork 14.7k
MINOR: Initialize fetchPartitionStatus as a Map type to reduce collection conversions #20768
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: trunk
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1628,7 +1628,7 @@ class ReplicaManager(val config: KafkaConfig, | |
| params: FetchParams, | ||
| responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit, | ||
| logReadResults: util.LinkedHashMap[TopicIdPartition, LogReadResult], | ||
| fetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)]): Unit = { | ||
| fetchPartitionStatus: util.LinkedHashMap[TopicIdPartition, FetchPartitionStatus]): Unit = { | ||
| val remoteFetchTasks = new util.HashMap[TopicIdPartition, Future[Void]] | ||
| val remoteFetchResults = new util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]] | ||
|
|
||
|
|
@@ -1643,7 +1643,7 @@ class ReplicaManager(val config: KafkaConfig, | |
| remoteFetchResults, | ||
| remoteFetchInfos, | ||
| remoteFetchMaxWaitMs, | ||
| fetchPartitionStatus.toMap.asJava, | ||
| fetchPartitionStatus, | ||
| params, | ||
| logReadResults, | ||
| tp => getPartitionOrException(tp), | ||
|
|
@@ -1710,17 +1710,17 @@ class ReplicaManager(val config: KafkaConfig, | |
| responseCallback(fetchPartitionData) | ||
| } else { | ||
| // construct the fetch results from the read results | ||
| val fetchPartitionStatus = new mutable.ArrayBuffer[(TopicIdPartition, FetchPartitionStatus)] | ||
| val fetchPartitionStatus = new util.LinkedHashMap[TopicIdPartition, FetchPartitionStatus] | ||
| fetchInfos.foreach { case (topicIdPartition, partitionData) => | ||
| val logReadResult = logReadResultMap.get(topicIdPartition) | ||
| if (logReadResult != null) { | ||
| val logOffsetMetadata = logReadResult.info.fetchOffsetMetadata | ||
| fetchPartitionStatus += (topicIdPartition -> new FetchPartitionStatus(logOffsetMetadata, partitionData)) | ||
| fetchPartitionStatus.put(topicIdPartition, new FetchPartitionStatus(logOffsetMetadata, partitionData)) | ||
| } | ||
| } | ||
|
|
||
| if (!remoteFetchInfos.isEmpty) { | ||
| processRemoteFetches(remoteFetchInfos, params, responseCallback, logReadResultMap, fetchPartitionStatus.toSeq) | ||
| processRemoteFetches(remoteFetchInfos, params, responseCallback, logReadResultMap, fetchPartitionStatus) | ||
| } else { | ||
| // If there is not enough data to respond and there is no remote data, we will let the fetch request | ||
| // wait for new data. | ||
|
|
@@ -1733,7 +1733,7 @@ class ReplicaManager(val config: KafkaConfig, | |
| ) | ||
|
|
||
| // create a list of (topic, partition) pairs to use as keys for this delayed fetch operation | ||
| val delayedFetchKeys = fetchPartitionStatus.map { case (tp, _) => new TopicPartitionOperationKey(tp) }.toList | ||
| val delayedFetchKeys = fetchPartitionStatus.asScala.map { case (tp, _) => new TopicPartitionOperationKey(tp) }.toList | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. — Why do we convert to a Scala `List` if we convert to a Java collection right after? We should probably use Java's `Stream` here and avoid the Scala collections altogether. |
||
|
|
||
| // try to complete the request immediately, otherwise put it into the purgatory; | ||
| // this is because while the delayed fetch operation is being created, new requests | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is it possible to have duplicate TPs in this "fetch" path?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Also, we are doing two conversions here — you can avoid one by using a view or iterator after
`asScala`, and by using `toBuffer` instead of `toSeq` (the latter can result in a lazy collection being created, which can cause problems).