Skip to content

Commit 29303ba

Browse files
committed
lint: Fix lint errors
1 parent e09c32e commit 29303ba

9 files changed

Lines changed: 19 additions & 29 deletions

File tree

src/controllers/__tests__/geminiQuery.test.ts

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import { Request, Response } from 'express';
44
import * as configModule from '../../config';
55
import { generateText } from '../../services/llmWrapper';
66
import { initializedRagService } from '../../services/ragService';
7-
import { GeminiChatCompletionResponse, GeminiStreamChunk } from '../../types';
7+
import { GeminiChatCompletionResponse } from '../../types';
88
import { handleGeminiBatch, handleGeminiStream } from '../geminiQuery';
99

1010
// Mock src/config
@@ -19,12 +19,12 @@ jest.mock('../../config', () => ({
1919

2020
chromaPort: '8000',
2121

22+
chromaUrl: 'http://localhost',
23+
2224
geminiApiKey: 'test-api-key',
2325

2426
geminiChatModel: 'gemini-pro',
2527

26-
chromaUrl: 'http://localhost',
27-
2828
// Default mock, can be overridden per test
2929
geminiEmbeddingModel: 'text-embedding-004',
3030
// Default for most tests
@@ -291,10 +291,10 @@ describe('Gemini RAG Query Controllers', () => {
291291
const res = mockResponse() as Response;
292292
mockRagService.queryChunks.mockResolvedValue([]); // No RAG context
293293

294-
const streamChunk1: GeminiStreamChunk = {
294+
const streamChunk1 = {
295295
candidates: [{ content: { parts: [{ text: 'Stream chunk 1' }], role: 'model' }, index: 0 }],
296296
};
297-
const streamChunk2: GeminiStreamChunk = {
297+
const streamChunk2 = {
298298
candidates: [{ content: { parts: [{ text: 'Stream chunk 2' }], role: 'model' }, index: 0 }],
299299
};
300300

src/controllers/geminiQuery.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ import { Request, Response } from 'express';
33
import { config } from '../config';
44
import { generateText } from '../services/llmWrapper';
55
import { initializedRagService } from '../services/ragService';
6-
import { GeminiQueryRequest, LLMChatResponse, LLMStreamChunk } from '../types';
6+
import { GeminiQueryRequest, LLMChatResponse } from '../types';
77

88
export const handleGeminiBatch = async (req: Request, res: Response) => {
99
const model = req.params.model;

src/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ const allowedOrigins = process.env.FRONTEND_WEBSITE_URLS?.split(',').map((origin
1818
// handle cors with a dynamic origin function
1919
app.use(
2020
cors({
21+
credentials: true,
2122
origin: (origin, callback) => {
2223
// allow requests with no origin (like mobile apps, curl requests)
2324
if (!origin) return callback(null, true);
@@ -31,7 +32,6 @@ app.use(
3132
return callback(new Error(`Not allowed by CORS: ${origin}`));
3233
}
3334
},
34-
credentials: true,
3535
})
3636
);
3737
app.use(`${API_PREFIX}/rag/manage`, ragManagementRouter);

src/routers/geminiQuery.ts

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -6,20 +6,20 @@ import { queryApiKeyAuth } from '../middleware/auth';
66
const geminiRouter = Router();
77

88
// gemini proxy endpoints
9-
geminiRouter.post('/gemini/models/:model', (req, res) => {
10-
const model = req.params.model;
9+
geminiRouter.post('/gemini/models/:model', queryApiKeyAuth, (req, res) => {
10+
const model = req.params.model;
1111

12-
if (model.endsWith(':generateContent')) {
13-
req.params.model = model.replace(':generateContent', '');
14-
return handleGeminiBatch(req, res);
15-
}
12+
if (model.endsWith(':generateContent')) {
13+
req.params.model = model.replace(':generateContent', '');
14+
return handleGeminiBatch(req, res);
15+
}
1616

17-
if (model.endsWith(':streamGenerateContent')) {
18-
req.params.model = model.replace(':streamGenerateContent', '');
19-
return handleGeminiStream(req, res);
20-
}
17+
if (model.endsWith(':streamGenerateContent')) {
18+
req.params.model = model.replace(':streamGenerateContent', '');
19+
return handleGeminiStream(req, res);
20+
}
2121

22-
return res.status(404).json({ error: 'Unsupported Gemini operation' });
22+
return res.status(404).json({ error: 'Unsupported Gemini operation' });
2323
});
2424

2525
export { geminiRouter };

src/services/__tests__/llmWrapper.test.ts

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,10 @@
11
import { config } from '../../config';
22
import {
3-
// LLMStreamChunk is GeminiStreamChunk, already imported
43
GeminiBatchEmbeddingsRequest,
54
GeminiBatchEmbeddingsResponse,
65
GeminiChatCompletionResponse,
76
GeminiContent,
87
GeminiEmbedding,
9-
GeminiStreamChunk,
108
LLMChatResponse,
119
LLMEmbeddingsResponse,
1210
} from '../../types';
@@ -64,7 +62,7 @@ describe('LLM Wrapper Service (Gemini-only)', () => {
6462

6563
it('should call streamGenerateContent and trigger onChunk for streaming requests', async () => {
6664
const mockOnChunk = jest.fn();
67-
const mockChunk: GeminiStreamChunk = {
65+
const mockChunk = {
6866
candidates: [{ content: { parts: [{ text: 'Hello' }], role: 'model' }, index: 0 }], // Added index
6967
};
7068

src/services/gemini.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ import {
55
GeminiChatCompletionResponse,
66
GeminiChatModel,
77
GeminiContent,
8-
GeminiStreamChunk,
98
} from '../types';
109

1110
const GEMINI_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta';

src/services/llmWrapper.ts

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,10 @@ import {
44
GeminiChatModel as GeminiModelName,
55
GeminiContent,
66
GeminiEmbeddingModel,
7-
GeminiStreamChunk,
87
LLMChatRequestOptions,
98
LLMChatResponse,
109
LLMEmbeddingsRequestOptions,
1110
LLMEmbeddingsResponse,
12-
LLMStreamChunk,
1311
} from '../types';
1412
import {
1513
batchEmbedContents as batchGeminiEmbeddings,

src/types/gemini.ts

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -79,9 +79,6 @@ export type GeminiChatPayload = {
7979
// `model` is part of the URL for Gemini, not typically in the payload body for generateContent.
8080
};
8181

82-
// This will be GeminiChatCompletionResponse itself, as Gemini streams full response objects.
83-
export type GeminiStreamChunk = GeminiChatCompletionResponse;
84-
8582
export type GeminiQueryRequest = {
8683
contents: GeminiContent[];
8784
};

src/types/llmWrapper.ts

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,5 +21,3 @@ export type LLMEmbeddingsRequestOptions = {
2121
export type LLMChatResponse = Gemini.GeminiChatCompletionResponse;
2222

2323
export type LLMEmbeddingsResponse = Gemini.GeminiBatchEmbeddingsResponse;
24-
25-
export type LLMStreamChunk = Gemini.GeminiStreamChunk;

0 commit comments

Comments (0)