@@ -76,6 +76,28 @@ const response = await client.chat.completions.create({
7676
7777</PlatformSection>
7878
79+ <PlatformSection supported = { [" javascript.nextjs" ]} >
80+ For Next.js applications, you need to manually instrument the OpenAI client using the `instrumentOpenAiClient` helper:
81+
82+ ```javascript
83+ import * as Sentry from "@sentry/nextjs";
84+ import OpenAI from "openai";
85+
86+ const openai = new OpenAI();
87+ const client = Sentry.instrumentOpenAiClient(openai, {
88+   recordInputs: true,
89+   recordOutputs: true,
90+ });
91+
92+ // Use the wrapped client instead of the original openai instance
93+ const response = await client.chat.completions.create({
94+   model: "gpt-4o",
95+   messages: [{ role: "user", content: "Hello!" }],
96+ });
97+ ```
98+
99+ </PlatformSection>
100+
79101## Options
80102
81103### `recordInputs`
@@ -115,31 +137,6 @@ By default this integration adds tracing support to OpenAI API method calls incl
115137
116138The integration will automatically detect streaming vs non-streaming requests and handle them appropriately.
117139
118- <PlatformSection supported={['javascript.nextjs']}>
119-
120- ## Node.js and Edge Runtime
121-
122- For Next.js applications, you need to manually instrument the OpenAI client:
123-
124- ```javascript
125- import * as Sentry from "@sentry/nextjs";
126- import OpenAI from "openai";
127-
128- const openai = new OpenAI();
129- const client = Sentry.instrumentOpenAiClient(openai, {
130-   recordInputs: true,
131-   recordOutputs: true,
132- });
133-
134- // Use the wrapped client instead of the original openai instance
135- const response = await client.chat.completions.create({
136-   model: "gpt-4o",
137-   messages: [{ role: "user", content: "Hello!" }],
138- });
139- ```
140-
141- </PlatformSection>
142-
143140## Supported Versions
144141
145142- `openai`: `>=4.0.0 <6`
0 commit comments