Commit a380166

Merge pull request #170 from GitPaulo/gitpaulo/update-deprecated-max-tokens
Update deprecated max_tokens to max_completion_tokens
2 parents 95f4a27 + b07a08c commit a380166
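
For context: OpenAI deprecated the max_tokens parameter on the Chat Completions API in favor of max_completion_tokens, which caps all tokens generated for the completion, including reasoning tokens on newer reasoning models (which reject max_tokens outright). A minimal sketch of the renamed parameter in a raw SDK call, assuming a recent openai npm package; the model name and token budget here are illustrative:

import OpenAI from 'openai'

const client = new OpenAI() // reads OPENAI_API_KEY from the environment

async function main(): Promise<void> {
  const completion = await client.chat.completions.create({
    model: 'gpt-4', // illustrative; any chat model works
    messages: [{ role: 'user', content: 'Hello, AI!' }],
    // Replaces the deprecated max_tokens: same cap on output tokens,
    // but also counts reasoning tokens on reasoning models.
    max_completion_tokens: 100,
  })
  console.log(completion.choices[0]?.message?.content)
}

main().catch(console.error)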

File tree

4 files changed: 8 additions & 8 deletions

__tests__/inference.test.ts

Lines changed: 3 additions & 3 deletions
@@ -72,7 +72,7 @@ describe('inference.ts', () => {
          content: 'Hello, AI!',
        },
      ],
-     max_tokens: 100,
+     max_completion_tokens: 100,
      model: 'gpt-4',
    })

@@ -176,7 +176,7 @@ describe('inference.ts', () => {
          content: 'Hello, AI!',
        },
      ],
-     max_tokens: 100,
+     max_completion_tokens: 100,
      model: 'gpt-4',
      response_format: requestWithResponseFormat.responseFormat,
    })
@@ -228,7 +228,7 @@ describe('inference.ts', () => {
      expect(callArgs.tools).toEqual(mockMcpClient.tools)
      expect(callArgs.response_format).toBeUndefined()
      expect(callArgs.model).toBe('gpt-4')
-     expect(callArgs.max_tokens).toBe(100)
+     expect(callArgs.max_completion_tokens).toBe(100)

      // Verify OpenAI client was initialized with empty custom headers
      expect(mockOpenAIClient).toHaveBeenCalledWith({
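
The test-side change is mechanical: the suite captures the arguments passed to the mocked client and now asserts the renamed field. A minimal sketch of that assertion pattern under jest; mockCreate and the response shape are illustrative stand-ins, not this repo's actual fixtures, and in the real suite the call is made by the code under test rather than directly:

import { expect, jest, test } from '@jest/globals'

test('request uses max_completion_tokens, not max_tokens', async () => {
  // Stand-in for the mocked chat.completions.create method.
  const mockCreate = jest.fn(async (_args: Record<string, unknown>) => ({
    choices: [{ message: { content: 'Hi!' } }],
  }))

  // In the real suite this call happens inside simpleInference/mcpInference.
  await mockCreate({
    messages: [{ role: 'user', content: 'Hello, AI!' }],
    max_completion_tokens: 100,
    model: 'gpt-4',
  })

  // Assert the renamed parameter on the captured call arguments.
  const callArgs = mockCreate.mock.calls[0][0]
  expect(callArgs.max_completion_tokens).toBe(100)
  expect(callArgs.max_tokens).toBeUndefined()
})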

dist/index.js

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

dist/index.js.map

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default.

src/inference.ts

Lines changed: 2 additions & 2 deletions
@@ -47,7 +47,7 @@ export async function simpleInference(request: InferenceRequest): Promise<string

  const chatCompletionRequest: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
    messages: request.messages as OpenAI.Chat.Completions.ChatCompletionMessageParam[],
-   max_tokens: request.maxTokens,
+   max_completion_tokens: request.maxTokens,
    model: request.modelName,
    temperature: request.temperature,
    top_p: request.topP,
@@ -95,7 +95,7 @@ export async function mcpInference(

  const chatCompletionRequest: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
    messages: messages as OpenAI.Chat.Completions.ChatCompletionMessageParam[],
-   max_tokens: request.maxTokens,
+   max_completion_tokens: request.maxTokens,
    model: request.modelName,
    temperature: request.temperature,
    top_p: request.topP,
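
On the source side, the fix is a one-field rename in the request object handed to the SDK. A minimal sketch of that construction, assuming a recent openai package whose ChatCompletionCreateParams type includes max_completion_tokens; the InferenceRequest interface and the buildChatCompletionRequest helper below are assumptions reconstructed from the fields visible in the diff, not code copied from the repo:

import OpenAI from 'openai'

// Assumed request shape, reconstructed from the fields visible in the diff.
interface InferenceRequest {
  messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
  maxTokens: number
  modelName: string
  temperature?: number
  topP?: number
}

// Hypothetical helper showing the renamed field; simpleInference and
// mcpInference build this object inline in the actual source.
function buildChatCompletionRequest(
  request: InferenceRequest
): OpenAI.Chat.Completions.ChatCompletionCreateParams {
  return {
    messages: request.messages,
    max_completion_tokens: request.maxTokens, // was: max_tokens
    model: request.modelName,
    temperature: request.temperature,
    top_p: request.topP,
  }
}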

0 commit comments
