diff --git a/.changeset/curly-owls-visit.md b/.changeset/curly-owls-visit.md
new file mode 100644
index 0000000..414b3bc
--- /dev/null
+++ b/.changeset/curly-owls-visit.md
@@ -0,0 +1,5 @@
+---
+"overide": minor
+---
+
+Add support for `response_format` when using the OpenAI API
diff --git a/package.json b/package.json
index 00319ff..1541e07 100644
--- a/package.json
+++ b/package.json
@@ -35,7 +35,7 @@
     "groq-sdk": "^0.7.0",
     "inquirer": "^11.1.0",
     "open": "^10.1.0",
-    "openai": "^4.67.2",
+    "openai": "^4.77.0",
     "three": "^0.170.0",
     "tree-sitter": "^0.22.0",
     "tree-sitter-c": "^0.23.1",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index e546a17..c64e3ba 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -42,8 +42,8 @@ importers:
         specifier: ^10.1.0
         version: 10.1.0
       openai:
-        specifier: ^4.67.2
-        version: 4.69.0
+        specifier: ^4.77.0
+        version: 4.91.1
       three:
         specifier: ^0.170.0
         version: 0.170.0
@@ -1485,12 +1485,15 @@ packages:
     resolution: {integrity: sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==}
     engines: {node: '>=18'}

-  openai@4.69.0:
-    resolution: {integrity: sha512-S3hOHSkk609KqwgH+7dwFrSvO3Gm3Nk0YWGyPHNscoMH/Y2tH1qunMi7gtZnLbUv4/N1elqCp6bDior2401kCQ==}
+  openai@4.91.1:
+    resolution: {integrity: sha512-DbjrR0hIMQFbxz8+3qBsfPJnh3+I/skPgoSlT7f9eiZuhGBUissPQULNgx6gHNkLoZ3uS0uYS6eXPUdtg4nHzw==}
     hasBin: true
     peerDependencies:
+      ws: ^8.18.0
       zod: ^3.23.8
     peerDependenciesMeta:
+      ws:
+        optional: true
       zod:
         optional: true

@@ -3509,7 +3512,7 @@ snapshots:
       is-inside-container: 1.0.0
       is-wsl: 3.1.0

-  openai@4.69.0:
+  openai@4.91.1:
     dependencies:
       '@types/node': 18.19.61
       '@types/node-fetch': 2.6.11
diff --git a/src/models/model.request.ts b/src/models/model.request.ts
index b59f882..3eaeb6e 100644
--- a/src/models/model.request.ts
+++ b/src/models/model.request.ts
@@ -1,3 +1,4 @@
+import { ResponseFormatJSONSchema } from 'openai/resources';
 import { ChatCompletionMessageParam as OpenAIChatCompletionMessageParam } from 'openai/resources/chat/completions';
 import { ChatCompletionMessageParam as GroqChatCompletionMessageParam } from 'groq-sdk/resources/chat/completions';
 import { ActivePlatformDetails } from './model.config';
@@ -16,6 +17,7 @@
   stream?: boolean;
   presence_penalty?: number;
   frequency_penalty?: number;
+  response_format: ResponseFormatJSONSchema;
 }

 export interface DeepSeekRequestObject {
diff --git a/src/services/service.network.ts b/src/services/service.network.ts
index fef4809..1c3827c 100644
--- a/src/services/service.network.ts
+++ b/src/services/service.network.ts
@@ -98,10 +98,7 @@
     // Handle requests based on the selected platform
     switch (platform) {
       case 'openai':
-        return this.handleOpenAIRequest(activeServiceDetails, {
-          ...metadata,
-          messages: metadata.messages as OpenAIChatCompletionMessageParam[]
-        });
+        return this.handleOpenAIRequest(activeServiceDetails, metadata as OpenAiRequestObject);
       case 'deepseek':
         return this.handleDeepSeekRequest(activeServiceDetails, {
           ...metadata,
@@ -141,6 +138,7 @@
         ...metadata,
         stream: false
       });
+      console.log(completions.choices[0]);
       return (completions.choices[0] as ChatCompletion.Choice).message.content || ''; // Return the content string from OpenAI completion
     } catch (error) {
       if (error instanceof Error) {
diff --git a/src/services/service.process/process.request.ts b/src/services/service.process/process.request.ts
index d8aad9e..537a2e9 100644
--- a/src/services/service.process/process.request.ts
+++ b/src/services/service.process/process.request.ts
@@ -81,14 +81,45 @@
     }

     const metadata: OpenAiRequestObject = {
-      model: 'gpt-4o', // Specify the model to use
+      model: 'gpt-4o',
       messages: messages,
-      temperature: 0.5, // Adjust temperature for creativity (lower = more deterministic)
-      max_tokens: 2500, // Max tokens for the response
-      n: 1, // Number of completions to generate
-      stream: false, // Whether to stream results
-      presence_penalty: 0, // Adjusts frequency of introducing new ideas
-      frequency_penalty: 0 // Adjusts repetition
+      temperature: 0.5,
+      max_tokens: 2500,
+      n: 1,
+      stream: false,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      response_format: {
+        type: 'json_schema',
+        json_schema: {
+          name: 'changes',
+          schema: {
+            type: 'object',
+            properties: {
+              changes: {
+                type: 'array',
+                items: {
+                  type: 'object',
+                  properties: {
+                    find: {
+                      type: 'array',
+                      items: { type: 'string' }
+                    },
+                    replace: {
+                      type: 'array',
+                      items: { type: 'string' }
+                    }
+                  },
+                  required: ['find', 'replace'],
+                  additionalProperties: false
+                }
+              }
+            },
+            required: ['changes'],
+            additionalProperties: false
+          }
+        }
+      }
     };

     // Construct the request body for OpenAI API
diff --git a/src/services/service.process/process.response.ts b/src/services/service.process/process.response.ts
index 4a52147..063b273 100644
--- a/src/services/service.process/process.response.ts
+++ b/src/services/service.process/process.response.ts
@@ -20,6 +20,13 @@
     response: string,
     verbose: boolean = false
   ): Promise<ReplacementBlock[]> {
+    // In the case of OpenAI, the response is already valid JSON.
+    try {
+      return JSON.parse(response)['changes'] as ReplacementBlock[];
+    } catch (error) {
+      console.error(`Error in formatting response: ${(error as Error).message}`);
+    }
+
     try {
       const replacementObject: ReplacementBlock[] = serviceDev.extractCodeBlock(response, verbose);
       for (const bloc of replacementObject) {
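
Note (not part of the diff): a minimal TypeScript sketch of what the json_schema response_format above buys the response handler. Because the completion content should already be valid JSON matching the schema, a single JSON.parse suffices instead of extracting a Markdown code block. The ReplacementBlock shape and the sample payload are assumptions inferred from the schema in process.request.ts, not code copied from the repository.

    // Hypothetical sketch: the ReplacementBlock shape is inferred from the
    // 'changes' json_schema in process.request.ts above.
    interface ReplacementBlock {
      find: string[];
      replace: string[];
    }

    // With a json_schema response_format, the completion content is expected to
    // be plain JSON, so it can be parsed directly.
    function parseStructuredResponse(response: string): ReplacementBlock[] {
      const parsed = JSON.parse(response) as { changes: ReplacementBlock[] };
      return parsed.changes;
    }

    // Illustrative payload matching the schema (content is made up).
    const sample = JSON.stringify({
      changes: [{ find: ['const x = 1;'], replace: ['const x = 2;'] }]
    });

    console.log(parseStructuredResponse(sample));

This also explains why process.response.ts keeps the existing extractCodeBlock path after the new JSON.parse attempt: it remains the fallback for providers that do not return structured JSON.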