Concurrency limiter from Netflix to avoid throttling storage #2169

Closed
9 changes: 9 additions & 0 deletions pom.xml
@@ -64,6 +64,7 @@
<!-- Be careful to set this as a provided dep, so that it doesn't interfere with dependencies
from other projects. For example, cassandra and spring boot set guava versions -->
<guava.version>19.0</guava.version>
<netflix.concurrency.limits.version>0.0.49</netflix.concurrency.limits.version>
Contributor
@adriancole, concurrency-limits v0.2.0 is the latest tag but uses Java 8 features (such as Optional). Do you know of any current plans to get io.zipkin.zipkin2:zipkin off of 1.6 and up to at least 1.8? I saw #777 but it doesn't appear to have actually upped the main.signature.artifact.

It also looks like retrolambda does not support recompiling dependencies or I'd explore that option further.

Contributor

Never mind; I made a new artifact outside of zipkin-core, so it's not subject to this limitation. It also affords me some other luxuries around not shading dependencies :)

<junit.version>4.12</junit.version>
<powermock.version>2.0.0-beta.5</powermock.version>
<mockito.version>2.21.0</mockito.version>
@@ -396,6 +397,14 @@
<artifactId>brave-instrumentation-cassandra-driver</artifactId>
<version>0.8.0</version>
</dependency>

<!-- Collector concurrency limits -->
<dependency>
<groupId>com.netflix.concurrency-limits</groupId>
<artifactId>concurrency-limits-core</artifactId>
<version>${netflix.concurrency.limits.version}</version>
</dependency>

</dependencies>
</dependencyManagement>

@@ -13,6 +13,7 @@
*/
package zipkin2.autoconfigure.collector.kafka;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
@@ -22,6 +23,7 @@
import org.springframework.core.type.AnnotatedTypeMetadata;
import zipkin2.collector.CollectorMetrics;
import zipkin2.collector.CollectorSampler;
import zipkin2.collector.ConcurrencyLimiter;
import zipkin2.collector.kafka.KafkaCollector;
import zipkin2.storage.StorageComponent;

@@ -36,11 +38,12 @@ class ZipkinKafkaCollectorAutoConfiguration {

@Bean(initMethod = "start")
KafkaCollector kafka(
ZipkinKafkaCollectorProperties properties,
CollectorSampler sampler,
CollectorMetrics metrics,
StorageComponent storage) {
return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).build();
ZipkinKafkaCollectorProperties properties,
CollectorSampler sampler,
CollectorMetrics metrics,
StorageComponent storage,
@Autowired(required = false) ConcurrencyLimiter limiter) {
return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).limiter(limiter).build();
}

/**
@@ -28,6 +28,7 @@ class ZipkinKafkaCollectorProperties {
private String topic;
/** Number of Kafka consumer threads to run. */
private Integer streams;

/** Additional Kafka consumer configuration. */
private Map<String, String> overrides = new LinkedHashMap<>();

@@ -78,10 +79,12 @@ public KafkaCollector.Builder toBuilder() {
if (topic != null) result.topic(topic);
if (streams != null) result.streams(streams);
if (overrides != null) result.overrides(overrides);

return result;
}

private static String emptyToNull(String s) {
return "".equals(s) ? null : s;
}

}
@@ -13,12 +13,14 @@
*/
package zipkin2.autoconfigure.collector.kafka08;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import zipkin2.collector.CollectorMetrics;
import zipkin2.collector.CollectorSampler;
import zipkin2.collector.ConcurrencyLimiter;
import zipkin2.collector.kafka08.KafkaCollector;
import zipkin2.storage.StorageComponent;

@@ -40,9 +42,10 @@ KafkaCollector kafka(
ZipkinKafkaCollectorProperties kafka,
CollectorSampler sampler,
CollectorMetrics metrics,
StorageComponent storage) {
StorageComponent storage,
@Autowired(required = false) ConcurrencyLimiter limiter) {
final KafkaCollector result =
kafka.toBuilder().sampler(sampler).metrics(metrics).storage(storage).build();
kafka.toBuilder().sampler(sampler).metrics(metrics).storage(storage).limiter(limiter).build();

// don't use @Bean(initMethod = "start") as it can crash the process if zookeeper is down
Thread start =
@@ -77,11 +77,11 @@ public void setOverrides(Map<String, String> overrides) {

public KafkaCollector.Builder toBuilder() {
return KafkaCollector.builder()
.topic(topic)
.zookeeper(zookeeper)
.groupId(groupId)
.streams(streams)
.maxMessageSize(maxMessageSize)
.overrides(overrides);
.topic(topic)
.zookeeper(zookeeper)
.groupId(groupId)
.streams(streams)
.maxMessageSize(maxMessageSize)
.overrides(overrides);
}
}
@@ -16,6 +16,8 @@
import java.net.URISyntaxException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
@@ -25,6 +27,7 @@
import org.springframework.core.type.AnnotatedTypeMetadata;
import zipkin2.collector.CollectorMetrics;
import zipkin2.collector.CollectorSampler;
import zipkin2.collector.ConcurrencyLimiter;
import zipkin2.collector.rabbitmq.RabbitMQCollector;
import zipkin2.storage.StorageComponent;

@@ -36,12 +39,13 @@ class ZipkinRabbitMQCollectorAutoConfiguration {

@Bean(initMethod = "start")
RabbitMQCollector rabbitMq(
ZipkinRabbitMQCollectorProperties properties,
CollectorSampler sampler,
CollectorMetrics metrics,
StorageComponent storage)
ZipkinRabbitMQCollectorProperties properties,
CollectorSampler sampler,
CollectorMetrics metrics,
StorageComponent storage,
@Autowired(required = false) ConcurrencyLimiter limiter)
throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException {
return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).build();
return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).limiter(limiter).build();
}

/**
7 changes: 7 additions & 0 deletions zipkin-collector/core/pom.xml
@@ -32,6 +32,13 @@
</properties>

<dependencies>

<!-- Concurrency limits -->
<dependency>
<groupId>com.netflix.concurrency-limits</groupId>
<artifactId>concurrency-limits-core</artifactId>
</dependency>

<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>zipkin</artifactId>
@@ -13,6 +13,7 @@
*/
package zipkin2.collector;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
@@ -48,6 +49,7 @@ public static final class Builder {
StorageComponent storage = null;
CollectorSampler sampler = null;
CollectorMetrics metrics = null;
ConcurrencyLimiter limiter = null;

Builder(Logger logger) {
this.logger = logger;
@@ -74,6 +76,13 @@ public Builder sampler(CollectorSampler sampler) {
return this;
}

/** @see {@link CollectorComponent.Builder#limiter(ConcurrencyLimiter)} */
public Builder limiter(ConcurrencyLimiter limiter) {
if (limiter == null) throw new NullPointerException("limiter == null");
this.limiter = limiter;
return this;
}

public Collector build() {
return new Collector(this);
}
@@ -83,6 +92,7 @@ public Collector build() {
final CollectorMetrics metrics;
final CollectorSampler sampler;
final StorageComponent storage;
final ConcurrencyLimiter limiter;

Collector(Builder builder) {
if (builder.logger == null) throw new NullPointerException("logger == null");
@@ -91,6 +101,7 @@ public Collector build() {
if (builder.storage == null) throw new NullPointerException("storage == null");
this.storage = builder.storage;
this.sampler = builder.sampler == null ? CollectorSampler.ALWAYS_SAMPLE : builder.sampler;
this.limiter = builder.limiter;
}

public void accept(List<Span> spans, Callback<Void> callback) {
@@ -147,7 +158,20 @@ List<Span> decodeList(BytesDecoder<Span> decoder, byte[] serialized) {
}

void record(List<Span> sampled, Callback<Void> callback) {
storage.spanConsumer().accept(sampled).enqueue(callback);
if (limiter != null) {
// Run the storage write through the limited executor: the adaptive executor blocks new
// submissions at its limit, and the callback completes from the executor's thread.
limiter.execute(() -> {
try {
storage.spanConsumer().accept(sampled).execute();
callback.onSuccess(null);
} catch (IOException e) {
callback.onError(e);
}
});
} else {
storage.spanConsumer().accept(sampled).enqueue(callback);
}
}

String idString(Span span) {
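For callers of the collector, the asynchronous Callback contract is unchanged; the difference is that, with a limiter configured, the submit is expected to block once the adaptive limit is reached and the storage write runs on the limiter's pool. A hypothetical caller, where collector and spans are placeholders rather than part of this change:

import java.util.List;

import zipkin2.Callback;
import zipkin2.Span;
import zipkin2.collector.Collector;

final class AcceptExample {
  static void store(Collector collector, List<Span> spans) {
    collector.accept(spans, new Callback<Void>() {
      @Override public void onSuccess(Void value) {
        // the batch was written to storage
      }

      @Override public void onError(Throwable t) {
        // storage failed or signalled over capacity (e.g. RejectedExecutionException)
      }
    });
  }
}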
@@ -52,6 +52,15 @@ public abstract static class Builder {
*/
public abstract Builder sampler(CollectorSampler sampler);

/**
* Applies a concurrency limit when recording sampled spans to storage. Storage signals over
* capacity with a RejectedExecutionException.
*
* @param limiter the limiter to apply when writing to storage
* @return fluent builder instance.
*/
public abstract Builder limiter(ConcurrencyLimiter limiter);

public abstract CollectorComponent build();
}
}
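A hypothetical stand-alone wiring of the new hook, mirroring what the Spring auto-configurations above do; the kafka settings are placeholders, and storage and limiter are assumed to be built elsewhere:

import zipkin2.collector.CollectorMetrics;
import zipkin2.collector.CollectorSampler;
import zipkin2.collector.ConcurrencyLimiter;
import zipkin2.collector.kafka.KafkaCollector;
import zipkin2.storage.StorageComponent;

final class LimitedKafkaCollectorExample {
  static KafkaCollector create(StorageComponent storage, ConcurrencyLimiter limiter) {
    return KafkaCollector.builder()
        .bootstrapServers("localhost:9092") // placeholder broker address
        .topic("zipkin")
        .storage(storage)
        .sampler(CollectorSampler.ALWAYS_SAMPLE)
        .metrics(CollectorMetrics.NOOP_METRICS)
        .limiter(limiter) // the new hook; omit it to keep the previous enqueue-only behavior
        .build();
  }
}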
@@ -0,0 +1,57 @@
/*
* Copyright 2015-2018 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin2.collector;

import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.executors.BlockingAdaptiveExecutor;

import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConcurrencyLimiter {

private int threads = 1;
private ExecutorService pool;
private Executor executor;

public void setThreads(int threads) {
this.threads = threads;
}

public void setLimiter(Limiter<Void> limiter) {
if (threads > 1) {
pool = Executors.newFixedThreadPool(threads);
} else {
pool = Executors.newSingleThreadExecutor();
}
this.executor = new BlockingAdaptiveExecutor(limiter, pool);
}

public void execute(Runnable call) {
executor.execute(call);
}

public void close() {
if (pool == null) return;
pool.shutdownNow();
try {
pool.awaitTermination(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // preserve the interrupt status instead of swallowing it
}
}

}
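A sketch of how a deployment might expose this as the optional bean that the @Autowired(required = false) parameters above pick up. The configuration class, the conditional, and the idea that the Netflix Limiter<Void> is defined as another bean are assumptions for illustration, not part of this change; the exact limiter factories in concurrency-limits 0.0.x are not shown here.

import com.netflix.concurrency.limits.Limiter;

import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import zipkin2.collector.ConcurrencyLimiter;

/** Hypothetical user configuration: defining this bean makes the collectors above apply the limit. */
@Configuration
class ConcurrencyLimiterConfiguration {

  @Bean(destroyMethod = "close")
  @ConditionalOnBean(Limiter.class)
  ConcurrencyLimiter concurrencyLimiter(Limiter<Void> adaptive) {
    ConcurrencyLimiter result = new ConcurrencyLimiter();
    result.setThreads(Runtime.getRuntime().availableProcessors()); // sizes the worker pool
    result.setLimiter(adaptive); // call after setThreads: this creates the pool and executor
    return result;
  }
}

When no such bean exists, the limiter parameter resolves to null and Collector.record() keeps the existing asynchronous enqueue path.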
@@ -27,10 +27,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import zipkin2.CheckResult;
import zipkin2.collector.Collector;
import zipkin2.collector.CollectorComponent;
import zipkin2.collector.CollectorMetrics;
import zipkin2.collector.CollectorSampler;
import zipkin2.collector.*;
import zipkin2.storage.SpanConsumer;
import zipkin2.storage.StorageComponent;

@@ -60,6 +57,7 @@ public static final class Builder extends CollectorComponent.Builder {
CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS;
String topic = "zipkin";
int streams = 1;
ConcurrencyLimiter limiter;

@Override
public Builder storage(StorageComponent storage) {
@@ -81,6 +79,12 @@ public Builder metrics(CollectorMetrics metrics) {
return this;
}

@Override
public Builder limiter(ConcurrencyLimiter limiter) {
this.limiter = limiter;
return this;
}

/**
* Topic zipkin spans will be consumed from. Defaults to "zipkin". Multiple topics may be
* specified if comma delimited.
@@ -148,9 +152,11 @@ public KafkaCollector build() {
}

final LazyKafkaWorkers kafkaWorkers;
final ConcurrencyLimiter limiter;

KafkaCollector(Builder builder) {
kafkaWorkers = new LazyKafkaWorkers(builder);
limiter = builder.limiter;
}

@Override
@@ -173,6 +179,9 @@ public CheckResult check() {
@Override
public void close() {
kafkaWorkers.close();
if (limiter != null) {
limiter.close();
}
}

static final class LazyKafkaWorkers {