3
3
import * as Core from 'openai/core' ;
4
4
import { APIPromise } from 'openai/core' ;
5
5
import { APIResource } from 'openai/resource' ;
6
+ import * as Completions_ from 'openai/resources/completions' ;
6
7
import * as API from './index' ;
7
8
import { Stream } from 'openai/streaming' ;
8
9
@@ -25,60 +26,122 @@ export class Completions extends APIResource {
25
26
}
26
27
}
27
28
29
+ /**
30
+ * Represents a chat completion response returned by model, based on the provided
31
+ * input.
32
+ */
28
33
export interface ChatCompletion {
34
+ /**
35
+ * A unique identifier for the chat completion.
36
+ */
29
37
id : string ;
30
38
39
+ /**
40
+ * A list of chat completion choices. Can be more than one if `n` is greater
41
+ * than 1.
42
+ */
31
43
choices : Array < ChatCompletion . Choice > ;
32
44
45
+ /**
46
+ * A unix timestamp of when the chat completion was created.
47
+ */
33
48
created : number ;
34
49
50
+ /**
51
+ * The model used for the chat completion.
52
+ */
35
53
model : string ;
36
54
55
+ /**
56
+ * The object type, which is always `chat.completion`.
57
+ */
37
58
object : string ;
38
59
39
- usage ?: ChatCompletion . Usage ;
60
+ /**
61
+ * Usage statistics for the completion request.
62
+ */
63
+ usage ?: Completions_ . CompletionUsage ;
40
64
}
41
65
42
66
export namespace ChatCompletion {
  export interface Choice {
    /**
     * The reason the model stopped generating tokens. This will be `stop` if the model
     * hit a natural stop point or a provided stop sequence, `length` if the maximum
     * number of tokens specified in the request was reached, or `function_call` if the
     * model called a function.
     */
    finish_reason: 'stop' | 'length' | 'function_call';

    /**
     * The index of the choice in the list of choices.
     */
    index: number;

    /**
     * A chat completion message generated by the model.
     */
    message: ChatCompletionMessage;
  }
}
59
87
88
/**
 * Represents a streamed chunk of a chat completion response returned by model,
 * based on the provided input.
 */
export interface ChatCompletionChunk {
  /**
   * A unique identifier for the chat completion chunk.
   */
  id: string;

  /**
   * A list of chat completion choices. Can be more than one if `n` is greater
   * than 1.
   */
  choices: Array<ChatCompletionChunk.Choice>;

  /**
   * A unix timestamp of when the chat completion chunk was created.
   */
  created: number;

  /**
   * The model to generate the completion.
   */
  model: string;

  /**
   * The object type, which is always `chat.completion.chunk`.
   */
  object: string;
}
71
119
72
120
export namespace ChatCompletionChunk {
73
121
  export interface Choice {
    /**
     * A chat completion delta generated by streamed model responses.
     */
    delta: Choice.Delta;

    /**
     * The reason the model stopped generating tokens. This will be `stop` if the model
     * hit a natural stop point or a provided stop sequence, `length` if the maximum
     * number of tokens specified in the request was reached, or `function_call` if the
     * model called a function.
     */
    finish_reason: 'stop' | 'length' | 'function_call' | null;

    /**
     * The index of the choice in the list of choices.
     */
    index: number;
  }
80
140
81
141
export namespace Choice {
142
+ /**
143
+ * A chat completion delta generated by streamed model responses.
144
+ */
82
145
export interface Delta {
83
146
/**
84
147
* The contents of the chunk message.
@@ -120,6 +183,9 @@ export namespace ChatCompletionChunk {
120
183
}
121
184
}
122
185
186
+ /**
187
+ * A chat completion message generated by the model.
188
+ */
123
189
export interface ChatCompletionMessage {
124
190
/**
125
191
* The contents of the message.
0 commit comments