11import { Request , Response } from 'express' ;
22
3+ import { config } from '../config' ;
34import { generateText } from '../services/llmWrapper' ;
45import { initializedRagService } from '../services/ragService' ;
5- import { LLMChatResponse , LLMStreamChunk } from '../types' ;
6+ import { GeminiQueryRequest , LLMChatResponse , LLMStreamChunk } from '../types' ;
67
78export const handleGeminiBatch = async ( req : Request , res : Response ) => {
89 const model = req . params . model ;
10+ let userQuery = '' ; // Define userQuery here to be accessible in the catch block
911
1012 try {
11- const { query , n_results , rag_type : raw_rag_type } = req . body ;
13+ const { contents } = req . body as GeminiQueryRequest ;
1214
13- if ( ! query || typeof query !== 'string' || query . trim ( ) === '' ) {
14- return res . status ( 400 ) . json ( { error : 'Bad Request: query is required and must be a non-empty string.' } ) ;
15+ // Validate contents structure
16+ if (
17+ ! contents ||
18+ ! Array . isArray ( contents ) ||
19+ contents . length === 0 ||
20+ ! contents [ 0 ] . parts ||
21+ ! Array . isArray ( contents [ 0 ] . parts ) ||
22+ contents [ 0 ] . parts . length === 0 ||
23+ ! contents [ 0 ] . parts [ 0 ] . text ||
24+ typeof contents [ 0 ] . parts [ 0 ] . text !== 'string' ||
25+ contents [ 0 ] . parts [ 0 ] . text . trim ( ) === ''
26+ ) {
27+ return res . status ( 400 ) . json ( {
28+ error :
29+ 'Bad Request: contents is required and must be an array with at least one part containing a non-empty text string.' ,
30+ } ) ;
1531 }
32+ userQuery = contents [ 0 ] . parts [ 0 ] . text ; // Assign userQuery after validation
1633
17- let rag_type = 'basic' ; // Default rag_type to 'basic'
18- if ( raw_rag_type !== undefined ) {
19- if ( raw_rag_type === 'basic' || raw_rag_type === 'advanced' ) {
20- rag_type = raw_rag_type ;
21- } else {
22- return res . status ( 400 ) . json ( { error : "Bad Request: rag_type must be 'basic' or 'advanced'." } ) ;
23- }
24- }
34+ const rag_type = config . geminiRagType ;
35+ const numberOfResults = config . geminiNResults ;
2536
26- const numberOfResults = typeof n_results === 'number' && n_results > 0 ? n_results : 3 ;
37+ console . log (
38+ `INFO: Gemini Batch Request. Model: ${ model } . RAG Type (from config): ${ rag_type } . N Results (from config): ${ numberOfResults } .`
39+ ) ;
2740
2841 const ragService = await initializedRagService ;
29- const chunks = await ragService . queryChunks ( query , numberOfResults ) ;
42+ const chunks = await ragService . queryChunks ( userQuery , numberOfResults ) ;
3043
3144 let augmentedPrompt : string ;
3245 if ( ! chunks || chunks . length === 0 ) {
3346 console . warn (
34- `No relevant chunks found for query: "${ query } " with model ${ model } . Querying LLM directly without RAG context.`
47+ `No relevant chunks found for query: "${ userQuery } " with model ${ model } . Querying LLM directly without RAG context.`
3548 ) ;
36- augmentedPrompt = query ;
49+ augmentedPrompt = userQuery ;
3750 } else {
3851 let contextContent : string [ ] = [ ] ;
3952 if ( rag_type === 'advanced' ) {
@@ -59,18 +72,18 @@ export const handleGeminiBatch = async (req: Request, res: Response) => {
5972
6073 if ( contextContent . length === 0 ) {
6174 console . warn (
62- `Chunks were found for query "${ query } " (rag_type : ${ rag_type } , model: ${ model } ), but no relevant content could be extracted. Querying LLM directly.`
75+ `Chunks were found for query "${ userQuery } " (RAG Type from config : ${ rag_type } , model: ${ model } ), but no relevant content could be extracted. Querying LLM directly.`
6376 ) ;
64- augmentedPrompt = query ;
77+ augmentedPrompt = userQuery ;
6578 } else {
6679 const context = contextContent . join ( '\n---\n' ) ;
6780 const contextDescription =
6881 rag_type === 'advanced' ? 'Relevant Information from Parent Documents' : 'Relevant Text Chunks' ;
69- augmentedPrompt = `User Query: ${ query } \n\n${ contextDescription } :\n---\n${ context } \n---\nBased on the relevant information above, answer the user query.` ;
82+ augmentedPrompt = `User Query: ${ userQuery } \n\n${ contextDescription } :\n---\n${ context } \n---\nBased on the relevant information above, answer the user query.` ;
7083 }
7184 }
7285
73- console . log ( `INFO: Gemini Batch Request. Model: ${ model } . RAG Type: ${ rag_type } .` ) ;
86+ // console.log(`INFO: Gemini Batch Request. Model: ${model}. RAG Type: ${rag_type}.`); // Already logged above with more details
7487
7588 try {
7689 const llmResponse = ( await generateText ( {
@@ -86,7 +99,7 @@ export const handleGeminiBatch = async (req: Request, res: Response) => {
8699 . json ( { details : llmError . message , error : `Failed to get response from LLM provider Gemini.` } ) ;
87100 }
88101 } catch ( error : any ) {
89- console . error ( `Error in handleGeminiBatch for model ${ model } , query "${ req . body . query } ":` , error ) ;
102+ console . error ( `Error in handleGeminiBatch for model ${ model } , query "${ userQuery } ":` , error ) ; // Use userQuery for logging
90103 if ( error . message && error . message . includes ( 'ChromaDB collection is not initialized' ) ) {
91104 return res . status ( 503 ) . json ( { error : 'Service Unavailable: RAG service is not ready.' } ) ;
92105 }
@@ -99,43 +112,65 @@ export const handleGeminiBatch = async (req: Request, res: Response) => {
99112
100113export const handleGeminiStream = async ( req : Request , res : Response ) => {
101114 const model = req . params . model ;
115+ let userQuery = '' ; // Define userQuery here to be accessible in the catch block
102116
103117 try {
104- const { query, n_results, rag_type : raw_rag_type } = req . body ;
105-
106- if ( ! query || typeof query !== 'string' || query . trim ( ) === '' ) {
107- // Cannot send JSON error if headers already sent, but here they are not.
108- return res . status ( 400 ) . json ( { error : 'Bad Request: query is required and must be a non-empty string.' } ) ;
109- }
118+ const { contents } = req . body as GeminiQueryRequest ;
110119
111120 res . setHeader ( 'Content-Type' , 'text/event-stream' ) ;
112121 res . setHeader ( 'Cache-Control' , 'no-cache' ) ;
113122 res . setHeader ( 'Connection' , 'keep-alive' ) ;
114- res . flushHeaders ( ) ; // Send headers immediately
123+ // res.flushHeaders(); // Flush headers after initial validation
115124
116- let rag_type = 'basic' ; // Default rag_type to 'basic'
117- if ( raw_rag_type !== undefined ) {
118- if ( raw_rag_type === 'basic' || raw_rag_type === 'advanced' ) {
119- rag_type = raw_rag_type ;
125+ // Validate contents structure
126+ if (
127+ ! contents ||
128+ ! Array . isArray ( contents ) ||
129+ contents . length === 0 ||
130+ ! contents [ 0 ] . parts ||
131+ ! Array . isArray ( contents [ 0 ] . parts ) ||
132+ contents [ 0 ] . parts . length === 0 ||
133+ ! contents [ 0 ] . parts [ 0 ] . text ||
134+ typeof contents [ 0 ] . parts [ 0 ] . text !== 'string' ||
135+ contents [ 0 ] . parts [ 0 ] . text . trim ( ) === ''
136+ ) {
137+ // If headers not sent, can send 400
138+ if ( ! res . headersSent ) {
139+ return res . status ( 400 ) . json ( {
140+ error :
141+ 'Bad Request: contents is required and must be an array with at least one part containing a non-empty text string.' ,
142+ } ) ;
120143 } else {
121- // Headers sent, so must write error to stream
122- res . write ( `data: ${ JSON . stringify ( { error : "Bad Request: rag_type must be 'basic' or 'advanced'." } ) } \n\n` ) ;
144+ // Headers sent, write error to stream
145+ res . write (
146+ `data: ${ JSON . stringify ( {
147+ error :
148+ 'Bad Request: contents is required and must be an array with at least one part containing a non-empty text string.' ,
149+ } ) } \n\n`
150+ ) ;
123151 res . end ( ) ;
124152 return ;
125153 }
126154 }
155+ userQuery = contents [ 0 ] . parts [ 0 ] . text ; // Assign userQuery after validation
156+ res . flushHeaders ( ) ; // Send headers now that initial validation passed
157+
158+ const rag_type = config . geminiRagType ;
159+ const numberOfResults = config . geminiNResults ;
127160
128- const numberOfResults = typeof n_results === 'number' && n_results > 0 ? n_results : 3 ;
161+ console . log (
162+ `INFO: Gemini Stream Request. Model: ${ model } . RAG Type (from config): ${ rag_type } . N Results (from config): ${ numberOfResults } .`
163+ ) ;
129164
130165 const ragService = await initializedRagService ;
131- const chunks = await ragService . queryChunks ( query , numberOfResults ) ;
166+ const chunks = await ragService . queryChunks ( userQuery , numberOfResults ) ;
132167
133168 let augmentedPrompt : string ;
134169 if ( ! chunks || chunks . length === 0 ) {
135170 console . warn (
136- `No relevant chunks found for query: "${ query } " with model ${ model } (stream). Querying LLM directly without RAG context.`
171+ `No relevant chunks found for query: "${ userQuery } " with model ${ model } (stream). Querying LLM directly without RAG context.`
137172 ) ;
138- augmentedPrompt = query ;
173+ augmentedPrompt = userQuery ;
139174 } else {
140175 let contextContent : string [ ] = [ ] ;
141176 if ( rag_type === 'advanced' ) {
@@ -161,18 +196,18 @@ export const handleGeminiStream = async (req: Request, res: Response) => {
161196
162197 if ( contextContent . length === 0 ) {
163198 console . warn (
164- `Chunks were found for query "${ query } " (rag_type : ${ rag_type } , model: ${ model } , stream), but no relevant content could be extracted. Querying LLM directly.`
199+ `Chunks were found for query "${ userQuery } " (RAG Type from config : ${ rag_type } , model: ${ model } , stream), but no relevant content could be extracted. Querying LLM directly.`
165200 ) ;
166- augmentedPrompt = query ;
201+ augmentedPrompt = userQuery ;
167202 } else {
168203 const context = contextContent . join ( '\n---\n' ) ;
169204 const contextDescription =
170205 rag_type === 'advanced' ? 'Relevant Information from Parent Documents' : 'Relevant Text Chunks' ;
171- augmentedPrompt = `User Query: ${ query } \n\n${ contextDescription } :\n---\n${ context } \n---\nBased on the relevant information above, answer the user query.` ;
206+ augmentedPrompt = `User Query: ${ userQuery } \n\n${ contextDescription } :\n---\n${ context } \n---\nBased on the relevant information above, answer the user query.` ;
172207 }
173208 }
174209
175- console . log ( `INFO: Gemini Stream Request. Model: ${ model } . RAG Type: ${ rag_type } .` ) ;
210+ // console.log(`INFO: Gemini Stream Request. Model: ${model}. RAG Type: ${rag_type}.`); // Already logged above
176211
177212 try {
178213 await generateText ( {
@@ -195,7 +230,7 @@ export const handleGeminiStream = async (req: Request, res: Response) => {
195230 }
196231 }
197232 } catch ( error : any ) {
198- console . error ( `Error in handleGeminiStream for model ${ model } , query "${ req . body . query } ":` , error ) ;
233+ console . error ( `Error in handleGeminiStream for model ${ model } , query "${ userQuery } ":` , error ) ; // Use userQuery for logging
199234 if ( ! res . headersSent ) {
200235 // This case should ideally not be reached if query validation is first.
201236 // However, for other early errors (like RAG service init), this is a fallback.