// examples/stream-to-client-next.ts — 38 lines (33 loc), 1.21 KB
import OpenAI from 'openai';
import type { NextApiRequest, NextApiResponse } from 'next';
// This file demonstrates how to stream from a Next.js server as
// a newline-separated JSON-encoded stream. This file cannot be run
// without Next.js scaffolding.

// Next.js route segment config: run this handler on the Edge runtime.
export const runtime = 'edge';

// This endpoint can be called with:
//
//   curl 127.0.0.1:3000 -N -X POST -H 'Content-Type: text/plain' \
//     --data 'Can you explain why dogs are better than cats?'
//
// Or consumed with fetch:
//
//   fetch('http://localhost:3000', {
//     method: 'POST',
//     body: 'Tell me why dogs are better than cats',
//   }).then(async res => {
//     const runner = ChatCompletionStreamingRunner.fromReadableStream(res)
//   })
//
// See examples/stream-to-client-browser.ts for a more complete example.
/**
 * Streams a chat completion back to the caller as a newline-separated,
 * JSON-encoded stream (the wire format produced by
 * `ChatCompletionStream.toReadableStream()` and consumed by
 * `ChatCompletionStreamingRunner.fromReadableStream()`).
 *
 * The request body is the plain-text user prompt; the response body is the
 * raw readable stream. Reads `OPENAI_API_KEY` from the environment via the
 * default `OpenAI()` constructor.
 */
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  const openai = new OpenAI();

  const stream = openai.beta.chat.completions.stream({
    model: 'gpt-3.5-turbo',
    stream: true,
    // NOTE(review): on the edge runtime the incoming request presumably
    // behaves like a web-standard Request (which has `.text()`), but the
    // `NextApiRequest` type doesn't declare it — hence the suppression.
    // @ts-ignore
    messages: [{ role: 'user', content: await req.text() }],
  });

  return res.send(stream.toReadableStream());

  // For the app router, return a Response instead of using `res`:
  //
  //   return new Response(stream.toReadableStream());
}