[WIP] SpanReader sampling/processing interface #877

Closed · wants to merge 3 commits
78 changes: 78 additions & 0 deletions sdk/trace/pipelines.go
@@ -0,0 +1,78 @@
package trace

import (
    "context"

    "go.opentelemetry.io/otel/sdk/instrumentation"
    "go.opentelemetry.io/otel/trace"
)

// pipelines is an immutable set of registered SpanReaders; add and
// remove return modified copies rather than mutating in place.
type pipelines struct {
    provider *TracerProvider
    readers  []SpanReader
}

func (provider *TracerProvider) newPipelines(readers []SpanReader) *pipelines {
    return &pipelines{
        provider: provider,
        readers:  readers,
    }
}

// add returns a new pipelines value with r appended; the receiver is
// left unchanged.
func (p pipelines) add(r SpanReader) *pipelines {
    rs := make([]SpanReader, len(p.readers)+1)
    copy(rs[:len(p.readers)], p.readers)
    rs[len(p.readers)] = r
    return p.provider.newPipelines(rs)
}

// remove returns a new pipelines value without the readers for which
// remFunc reports true; the receiver is left unchanged.
func (p pipelines) remove(remFunc func(SpanReader) bool) *pipelines {
    rs := make([]SpanReader, 0, len(p.readers))
    for _, r := range p.readers {
        if !remFunc(r) {
            rs = append(rs, r)
        }
    }
    return p.provider.newPipelines(rs)
}

// shouldSample receives all the relevant inputs during the start of a
// new span.
func (p pipelines) shouldSample(
    ctx context.Context,
    parent trace.SpanContext,
    scope instrumentation.Scope,
    tid trace.TraceID,
    sid trace.SpanID,
    name string,
    config *trace.SpanConfig,
) (SamplingResult, SamplingParameters2) {
    params1 := SamplingParameters{
        ParentContext: ctx,
        TraceID:       tid,
        Name:          name,
        Kind:          config.SpanKind(),
        Attributes:    config.Attributes(),
        Links:         config.Links(),
    }
    params2 := SamplingParameters2{
        parameters: params1,
        scope:      scope,
        spanID:     sid,
        parent:     parent,
    }

    // @@@ Would like to make this a params2 call, w/ a wrapper.
    // This appears possible, but not straightforward because it
    // can't simply use the ComposableSampler interface w/o
    // somehow also modifying the span attributes and t-value.
    primeResult := p.provider.sampler.ShouldSample(params1)
    if primeResult.Decision == Drop {
        return primeResult, params2
    }

    // @@@ Implement.
    return primeResult, params2
}
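Editor's note: the SpanReader interface itself is not defined anywhere in this diff. The sketch below is only inferred from how readers are used in sdk/trace/provider.go further down (ForceFlush and Shutdown are called on each registered reader); the WIP presumably also carries sampling and on-end processing hooks that this excerpt does not show.

package trace

import "context"

// SpanReader (sketch only, not part of this PR's diff): a single
// registration point combining the roles of V2 sampler, on-end
// processor, and exporter.
type SpanReader interface {
    // ForceFlush is called for each registered reader by TracerProvider.ForceFlush.
    ForceFlush(ctx context.Context) error
    // Shutdown is called for each registered reader by TracerProvider.Shutdown.
    Shutdown(ctx context.Context) error
}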
131 changes: 58 additions & 73 deletions sdk/trace/provider.go
@@ -24,14 +24,11 @@ const (

// tracerProviderConfig.
type tracerProviderConfig struct {
    // processors contains collection of SpanProcessors that are processing pipeline
    // for spans in the trace signal.
    // SpanProcessors registered with a TracerProvider and are called at the start
    // and end of a Span's lifecycle, and are called in the order they are
    // registered.
    processors []SpanProcessor

    // sampler is the default sampler used when creating new spans.
    // readers are {V2 sampler, write-on-end processor, span processor/exporter}
    readers []SpanReader

    // sampler is the TraceProvider-wide Sampler; attributes and
    // tracestate modifications apply to all span pipelines.
    sampler Sampler

    // idGenerator is used to generate all Span and Trace IDs when needed.
@@ -47,13 +44,13 @@ type tracerProviderConfig struct {
// MarshalLog is the marshaling function used by the logging system to represent this Provider.
func (cfg tracerProviderConfig) MarshalLog() interface{} {
    return struct {
        SpanProcessors  []SpanProcessor
        Readers         []SpanReader
        SamplerType     string
        IDGeneratorType string
        SpanLimits      SpanLimits
        Resource        *resource.Resource
    }{
        SpanProcessors:  cfg.processors,
        Readers:         cfg.readers,
        SamplerType:     fmt.Sprintf("%T", cfg.sampler),
        IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
        SpanLimits:      cfg.spanLimits,
@@ -66,9 +63,9 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} {
type TracerProvider struct {
    embedded.TracerProvider

    mu             sync.Mutex
    namedTracer    map[instrumentation.Scope]*tracer
    spanProcessors atomic.Pointer[spanProcessorStates]
    mu          sync.Mutex
    namedTracer map[instrumentation.Scope]*tracer
    pipes       atomic.Pointer[pipelines]

    isShutdown atomic.Bool

@@ -113,11 +110,7 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
    }
    global.Info("TracerProvider created", "config", o)

    spss := make(spanProcessorStates, 0, len(o.processors))
    for _, sp := range o.processors {
        spss = append(spss, newSpanProcessorState(sp))
    }
    tp.spanProcessors.Store(&spss)
    tp.pipes.Store(tp.newPipelines(o.readers))

    return tp
}
@@ -175,6 +168,11 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T

// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
    p.RegisterSpanReader(NewSimpleSpanReader(nil, sp))
}

// RegisterSpanReader adds the given SpanReader to the TracerProvider's list of registered readers.
func (p *TracerProvider) RegisterSpanReader(sr SpanReader) {
    // This check prevents calls during a shutdown.
    if p.isShutdown.Load() {
        return
@@ -186,15 +184,26 @@ func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
        return
    }

    current := p.getSpanProcessors()
    newSPS := make(spanProcessorStates, 0, len(current)+1)
    newSPS = append(newSPS, current...)
    newSPS = append(newSPS, newSpanProcessorState(sp))
    p.spanProcessors.Store(&newSPS)
    p.pipes.Store(p.getPipelines().add(sr))
}

// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
    p.unregisterPipeline(func(r SpanReader) bool {
        if pr, ok := r.(*spanReader); ok {
            return pr.processor == sp
        }
        return false
    })
}

// UnregisterSpanReader removes the given SpanReader from the list of registered readers.
func (p *TracerProvider) UnregisterSpanReader(sr SpanReader) {
    p.unregisterPipeline(func(r SpanReader) bool {
        return sr == r
    })
}

// unregisterPipeline rebuilds the immutable pipelines value without the
// readers for which f reports true.
func (p *TracerProvider) unregisterPipeline(f func(r SpanReader) bool) {
    // This check prevents calls during a shutdown.
    if p.isShutdown.Load() {
        return
@@ -205,54 +214,25 @@ func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
    if p.isShutdown.Load() {
        return
    }
    old := p.getSpanProcessors()
    if len(old) == 0 {
        return
    }
    spss := make(spanProcessorStates, len(old))
    copy(spss, old)

    // stop the span processor if it is started and remove it from the list
    var stopOnce *spanProcessorState
    var idx int
    for i, sps := range spss {
        if sps.sp == sp {
            stopOnce = sps
            idx = i
        }
    }
    if stopOnce != nil {
        stopOnce.state.Do(func() {
            if err := sp.Shutdown(context.Background()); err != nil {
                otel.Handle(err)
            }
        })
    }
    if len(spss) > 1 {
        copy(spss[idx:], spss[idx+1:])
    }
    spss[len(spss)-1] = nil
    spss = spss[:len(spss)-1]

p.spanProcessors.Store(&spss)
    p.pipes.Store(p.getPipelines().remove(f))
}

// ForceFlush immediately exports all spans that have not yet been exported for
// all the registered span processors.
func (p *TracerProvider) ForceFlush(ctx context.Context) error {
    spss := p.getSpanProcessors()
    if len(spss) == 0 {
    ps := p.getPipelines()
    if len(ps.readers) == 0 {
        return nil
    }

    for _, sps := range spss {
    for _, r := range ps.readers {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
        }

        if err := sps.sp.ForceFlush(ctx); err != nil {
        if err := r.ForceFlush(ctx); err != nil {
            return err
        }
    }
@@ -275,18 +255,14 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
    }

    var retErr error
    for _, sps := range p.getSpanProcessors() {
    for _, sps := range p.getPipelines().readers {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
        }

        var err error
        sps.state.Do(func() {
            err = sps.sp.Shutdown(ctx)
        })
        if err != nil {
        if err := sps.Shutdown(ctx); err != nil {
            if retErr == nil {
                retErr = err
            } else {
@@ -295,12 +271,12 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
            }
        }
    }
    p.spanProcessors.Store(&spanProcessorStates{})
    p.pipes.Store(&pipelines{})
    return retErr
}

func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
    return *(p.spanProcessors.Load())
func (p *TracerProvider) getPipelines() pipelines {
    return *(p.pipes.Load())
}

// TracerProviderOption configures a TracerProvider.
@@ -314,6 +290,17 @@ func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProvider
    return fn(cfg)
}

// WithSpanReader registers a SpanReader, which is the most general
// form of registration for a combination Sampler/Processor/Exporter.
func WithSpanReader(r SpanReader) TracerProviderOption {
    return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
        if r != nil {
            cfg.readers = append(cfg.readers, r)
        }
        return cfg
    })
}

// WithSyncer registers the exporter with the TracerProvider using a
// SimpleSpanProcessor.
//
@@ -334,10 +321,7 @@ func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProvide

// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
    return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
        cfg.processors = append(cfg.processors, sp)
        return cfg
    })
    return WithSpanReader(NewSimpleSpanReader(nil, sp))
}

// WithResource returns a TracerProviderOption that will configure the
@@ -480,9 +464,10 @@ func tracerProviderOptionsFromEnv() []TracerProviderOption {

// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid.
func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
    if cfg.sampler == nil {
        cfg.sampler = ParentBased(AlwaysSample())
    }
    // @@@ Not sure what this default should be.
    // if cfg.sampler == nil {
    //     cfg.sampler = ParentBased(AlwaysSample())
    // }
    if cfg.idGenerator == nil {
        cfg.idGenerator = defaultIDGenerator()
    }
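Editor's note: a rough end-to-end sketch of how a user might configure the provider under this proposal. The exporter and WithBatcher setup are illustrative only, and the WithSpanReader call is left commented out because no concrete SpanReader implementation appears in this excerpt.

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    exp, err := stdouttrace.New()
    if err != nil {
        log.Fatal(err)
    }

    // Existing options keep working; per this PR, WithSpanProcessor is now
    // implemented as WithSpanReader(NewSimpleSpanReader(nil, sp)).
    tp := sdktrace.NewTracerProvider(
        sdktrace.WithBatcher(exp),
        // sdktrace.WithSpanReader(myReader), // hypothetical combined
        // sampler/processor/exporter, once a concrete SpanReader exists
    )
    defer func() { _ = tp.Shutdown(context.Background()) }()

    // Spans started from this provider flow through every registered reader.
    _, span := tp.Tracer("example").Start(context.Background(), "operation")
    span.End()
}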