Skip to content

Commit

Permalink
Kafka adaptive timeout implementation to handle empty topic cases (apache#29400)
Browse files Browse the repository at this point in the history

* Kafka adaptive timeout implementation to handle empty topic cases

* Format fix with spotless

---------

Co-authored-by: tuyarer <tuyarer@paloaltonetworks.com>
  • Loading branch information
talatuyarer and tuyarer authored Nov 21, 2023
1 parent 0972bc0 commit 444e10d
Showing 1 changed file with 15 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,9 @@ public long getSplitBacklogBytes() {
*/
private static final Duration KAFKA_POLL_TIMEOUT = Duration.millis(1000);

private static final Duration RECORDS_DEQUEUE_POLL_TIMEOUT = Duration.millis(10);
private Duration recordsDequeuePollTimeout;
private static final Duration RECORDS_DEQUEUE_POLL_TIMEOUT_MIN = Duration.millis(1);
private static final Duration RECORDS_DEQUEUE_POLL_TIMEOUT_MAX = Duration.millis(20);
private static final Duration RECORDS_ENQUEUE_POLL_TIMEOUT = Duration.millis(100);

// Use a separate thread to read Kafka messages. Kafka Consumer does all its work including
Expand Down Expand Up @@ -543,6 +545,7 @@ Instant updateAndGetWatermark() {
bytesReadBySplit = SourceMetrics.bytesReadBySplit(splitId);
backlogBytesOfSplit = SourceMetrics.backlogBytesOfSplit(splitId);
backlogElementsOfSplit = SourceMetrics.backlogElementsOfSplit(splitId);
recordsDequeuePollTimeout = Duration.millis(10);
}

private void consumerPollLoop() {
Expand Down Expand Up @@ -614,8 +617,7 @@ private void nextBatch() throws IOException {
try {
// poll available records, wait (if necessary) up to the specified timeout.
records =
availableRecordsQueue.poll(
RECORDS_DEQUEUE_POLL_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS);
availableRecordsQueue.poll(recordsDequeuePollTimeout.getMillis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOG.warn("{}: Unexpected", this, e);
Expand All @@ -627,9 +629,19 @@ private void nextBatch() throws IOException {
if (consumerPollException.get() != null) {
throw new IOException("Exception while reading from Kafka", consumerPollException.get());
}
if (recordsDequeuePollTimeout.isLongerThan(RECORDS_DEQUEUE_POLL_TIMEOUT_MIN)) {
recordsDequeuePollTimeout = recordsDequeuePollTimeout.minus(Duration.millis(1));
LOG.debug("Reducing poll timeout for reader to " + recordsDequeuePollTimeout.getMillis());
}
return;
}

if (recordsDequeuePollTimeout.isShorterThan(RECORDS_DEQUEUE_POLL_TIMEOUT_MAX)) {
recordsDequeuePollTimeout = recordsDequeuePollTimeout.plus(Duration.millis(1));
LOG.debug("Increasing poll timeout for reader to " + recordsDequeuePollTimeout.getMillis());
LOG.debug("Record count: " + records.count());
}

partitionStates.forEach(p -> p.recordIter = records.records(p.topicPartition).iterator());

// cycle through the partitions in order to interleave records from each.
Expand Down

0 comments on commit 444e10d

Please sign in to comment.