@@ -32,7 +32,7 @@ The full API of this library can be found in [api.md file](api.md) along with ma
 ```js
 import OpenAI from 'openai';
 
-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
 });
@@ -53,7 +53,7 @@ We provide support for streaming responses using Server Sent Events (SSE).
 ```ts
 import OpenAI from 'openai';
 
-const openai = new OpenAI();
+const client = new OpenAI();
 
 async function main() {
   const stream = await openai.chat.completions.create({
@@ -80,7 +80,7 @@ This library includes TypeScript definitions for all request params and response
 ```ts
 import OpenAI from 'openai';
 
-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
 });
@@ -301,7 +301,7 @@ import fs from 'fs';
 import fetch from 'node-fetch';
 import OpenAI, { toFile } from 'openai';
 
-const openai = new OpenAI();
+const client = new OpenAI();
 
 // If you have access to Node `fs` we recommend using `fs.createReadStream()`:
 await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
@@ -399,7 +399,7 @@ You can use the `maxRetries` option to configure or disable this:
 <!-- prettier-ignore -->
 ```js
 // Configure the default for all requests:
-const openai = new OpenAI({
+const client = new OpenAI({
   maxRetries: 0, // default is 2
 });
@@ -416,7 +416,7 @@ Requests time out after 10 minutes by default. You can configure this with a `ti
 <!-- prettier-ignore -->
 ```ts
 // Configure the default for all requests:
-const openai = new OpenAI({
+const client = new OpenAI({
   timeout: 20 * 1000, // 20 seconds (default is 10 minutes)
 });
@@ -471,7 +471,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 
 <!-- prettier-ignore -->
 ```ts
-const openai = new OpenAI();
+const client = new OpenAI();
 
 const response = await openai.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
@@ -582,7 +582,7 @@ import http from 'http';
 import { HttpsProxyAgent } from 'https-proxy-agent';
 
 // Configure the default for all requests:
-const openai = new OpenAI({
+const client = new OpenAI({
   httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
 });
0 commit comments