Merge pull request #3993 from janhq/fix/openai-preview-models-transform-payload-update
fix: update the payload transform for OpenAI preview models
Commit 4e91c80bcc
```diff
@@ -70,16 +70,17 @@ export default class JanInferenceOpenAIExtension extends RemoteOAIEngine {
    * Tranform the payload before sending it to the inference endpoint.
    * The new preview models such as o1-mini and o1-preview replaced max_tokens by max_completion_tokens parameter.
    * Others do not.
    * @param payload
    * @returns
    */
   transformPayload = (payload: OpenAIPayloadType): OpenAIPayloadType => {
     // Transform the payload for preview models
     if (this.previewModels.includes(payload.model)) {
-      const { max_tokens, ...params } = payload
+      const { max_tokens, temperature, top_p, stop, ...params } = payload
       return {
         ...params,
         max_completion_tokens: max_tokens,
+        stream: false // o1 only support stream = false
       }
     }
     // Pass through for non-preview models
```