Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
254 changes: 37 additions & 217 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,104 +21,11 @@ go get github.com/braintrustdata/braintrust-sdk-go
export BRAINTRUST_API_KEY="your-api-key" # Get from https://www.braintrust.dev/app/settings
```

## Quick Start

Braintrust uses [OpenTelemetry](https://opentelemetry.io/) for distributed tracing. Set up a TracerProvider and initialize the client:

```go
package main

import (
"context"
"log"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/trace"

"github.com/braintrustdata/braintrust-sdk-go"
)

func main() {
tp := trace.NewTracerProvider()
defer tp.Shutdown(context.Background())
otel.SetTracerProvider(tp)

client, err := braintrust.New(tp, braintrust.WithProject("my-project"))
if err != nil {
log.Fatal(err)
}
_ = client // Your client is ready to use
}
```

## Usage

### Evaluations

Run systematic evaluations with custom test cases and scoring functions:

```go
package main

import (
"context"
"log"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/trace"

"github.com/braintrustdata/braintrust-sdk-go"
"github.com/braintrustdata/braintrust-sdk-go/eval"
)

func main() {
ctx := context.Background()

// Set up OpenTelemetry tracer
tp := trace.NewTracerProvider()
defer tp.Shutdown(ctx)
otel.SetTracerProvider(tp)

// Initialize Braintrust
client, err := braintrust.New(tp)
if err != nil {
log.Fatal(err)
}

// Create an evaluator with your task's input and output types
evaluator := braintrust.NewEvaluator[string, string](client)

// Run an evaluation
_, err = evaluator.Run(ctx, eval.Opts[string, string]{
Experiment: "greeting-experiment",
Dataset: eval.NewDataset([]eval.Case[string, string]{
{Input: "World", Expected: "Hello World"},
{Input: "Alice", Expected: "Hello Alice"},
}),
Task: eval.T(func(ctx context.Context, input string) (string, error) {
return "Hello " + input, nil
}),
Scorers: []eval.Scorer[string, string]{
eval.NewScorer("exact_match", func(ctx context.Context, r eval.TaskResult[string, string]) (eval.Scores, error) {
score := 0.0
if r.Expected == r.Output {
score = 1.0
}
return eval.S(score), nil
}),
},
})
if err != nil {
log.Fatal(err)
}
}
```

### Tracing LLM Calls
## Instrumentation

Trace LLM calls with **automatic** or **manual** instrumentation.

#### Automatic Instrumentation (Recommended)
### Automatic Instrumentation

Use [Orchestrion](https://github.com/DataDog/orchestrion) to automatically inject tracing at compile time—no code changes required.

Expand Down Expand Up @@ -152,113 +59,82 @@ import (

**3. Build with orchestrion:**
```bash
# Build with orchestrion
orchestrion go build ./...

# Or configure GOFLAGS to use orchestrion automatically
export GOFLAGS="-toolexec='orchestrion toolexec'"
go build ./...
```

That's it! Your LLM client calls are now automatically traced — no middleware or wrapper code is needed in your application.

#### Manual Instrumentation
### Manual Instrumentation

Alternatively, add tracing middleware explicitly to your clients:
If you prefer explicit control, you can add tracing middleware manually to your LLM clients. See the [Manual Instrumentation Guide](./trace/contrib/README.md) for detailed examples with OpenAI, Anthropic, Google Gemini, and other providers.

**OpenAI:**
```go
package main
## Evaluations

import (
"context"
"log"
Run [evals](https://www.braintrust.dev/docs/guides/evals) with custom test cases and scoring functions:

"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/trace"

"github.com/braintrustdata/braintrust-sdk-go"
traceopenai "github.com/braintrustdata/braintrust-sdk-go/trace/contrib/openai"
)

func main() {
// Set up OpenTelemetry tracer
tp := trace.NewTracerProvider()
defer tp.Shutdown(context.Background())
otel.SetTracerProvider(tp)

// Initialize Braintrust
_, err := braintrust.New(tp)
if err != nil {
log.Fatal(err)
}

// Create OpenAI client with tracing middleware
client := openai.NewClient(
option.WithMiddleware(traceopenai.NewMiddleware()),
)

// Make API calls - they'll be automatically traced and logged to Braintrust
_, err = client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{
Messages: []openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Hello!"),
},
Model: openai.ChatModelGPT4oMini,
})
if err != nil {
log.Fatal(err)
}
}
```

**Anthropic:**
```go
package main

import (
"context"
"log"

"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/trace"

"github.com/braintrustdata/braintrust-sdk-go"
traceanthropic "github.com/braintrustdata/braintrust-sdk-go/trace/contrib/anthropic"
"github.com/braintrustdata/braintrust-sdk-go/eval"
)

func main() {
ctx := context.Background()

// Set up OpenTelemetry tracer
tp := trace.NewTracerProvider()
defer tp.Shutdown(context.Background())
defer tp.Shutdown(ctx)
otel.SetTracerProvider(tp)

// Initialize Braintrust
_, err := braintrust.New(tp,
braintrust.WithProject("my-project"),
)
client, err := braintrust.New(tp)
if err != nil {
log.Fatal(err)
}

// Create Anthropic client with tracing middleware
client := anthropic.NewClient(
option.WithMiddleware(traceanthropic.NewMiddleware()),
)
// Create an evaluator with your task's input and output types
evaluator := braintrust.NewEvaluator[string, string](client)

// Make API calls - they'll be automatically traced and logged to Braintrust
_, err = client.Messages.New(context.Background(), anthropic.MessageNewParams{
Model: anthropic.ModelClaude3_7SonnetLatest,
Messages: []anthropic.MessageParam{
anthropic.NewUserMessage(anthropic.NewTextBlock("Hello!")),
// Run an evaluation
_, err = evaluator.Run(ctx, eval.Opts[string, string]{
Experiment: "greeting-experiment",
Dataset: eval.NewDataset([]eval.Case[string, string]{
{Input: "World", Expected: "Hello World"},
{Input: "Alice", Expected: "Hello Alice"},
}),
Task: eval.T(func(ctx context.Context, input string) (string, error) {
return "Hello " + input, nil
}),
Scorers: []eval.Scorer[string, string]{
eval.NewScorer("exact_match", func(ctx context.Context, r eval.TaskResult[string, string]) (eval.Scores, error) {
score := 0.0
if r.Expected == r.Output {
score = 1.0
}
return eval.S(score), nil
}),
},
MaxTokens: 1024,
})
if err != nil {
log.Fatal(err)
}
}
```

### API Client
## API Client

Manage Braintrust resources programmatically:

Expand Down Expand Up @@ -327,62 +203,6 @@ func main() {
}
```

**Google Gemini:**
```go
package main

import (
"context"
"log"
"os"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/trace"
"google.golang.org/genai"

"github.com/braintrustdata/braintrust-sdk-go"
tracegenai "github.com/braintrustdata/braintrust-sdk-go/trace/contrib/genai"
)

func main() {
// Set up OpenTelemetry tracer
tp := trace.NewTracerProvider()
defer tp.Shutdown(context.Background())
otel.SetTracerProvider(tp)

// Initialize Braintrust
_, err := braintrust.New(tp,
braintrust.WithProject("my-project"),
)
if err != nil {
log.Fatal(err)
}

// Create Gemini client with tracing
client, err := genai.NewClient(context.Background(), &genai.ClientConfig{
HTTPClient: tracegenai.Client(),
APIKey: os.Getenv("GOOGLE_API_KEY"),
Backend: genai.BackendGeminiAPI,
})
if err != nil {
log.Fatal(err)
}

// Make API calls - they'll be automatically traced and logged to Braintrust
_, err = client.Models.GenerateContent(context.Background(),
"gemini-1.5-flash",
genai.Text("Hello!"),
nil,
)
if err != nil {
log.Fatal(err)
}
}
```

**LangChainGo:**
The SDK provides comprehensive tracing for [LangChainGo](https://github.com/tmc/langchaingo) applications. See [`examples/langchaingo`](./examples/langchaingo/main.go) for a working example.

## Examples

Complete working examples are available in [`examples/`](./examples/):
Expand Down
Loading