Skip to content

Commit d595725

Browse files
committed
Fix lint issues, remove unittest class, add proper type annotations
1 parent f1e3865 commit d595725

2 files changed

Lines changed: 1 addition & 52 deletions

File tree

src/openai/lib/_parsing/_completions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ def type_to_response_format_param(
262262
else:
263263
raise TypeError(f"Unsupported response_format type - {response_format}")
264264

265-
schema_param = {
265+
schema_param: ResponseFormatParam = {
266266
"type": "json_schema",
267267
"json_schema": {
268268
"schema": to_strict_json_schema(json_schema_type),

tests/lib/_parsing/test_memory_leak.py

Lines changed: 0 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -1,56 +1,12 @@
1-
import unittest
21
import gc
3-
import sys
4-
from unittest.mock import AsyncMock, patch, MagicMock
52
from typing import List
63

74
import pytest
85
from pydantic import Field, create_model
96

10-
from openai.resources.beta.chat.completions import AsyncCompletions
117
from openai.lib._parsing import type_to_response_format_param
128
from openai.lib._parsing._completions import _schema_cache
139

14-
class TestMemoryLeak(unittest.TestCase):
15-
def setUp(self):
16-
# Clear the schema cache before each test
17-
_schema_cache.clear()
18-
19-
def test_schema_cache_with_models(self):
20-
"""Test if schema cache properly handles dynamic models and prevents memory leak"""
21-
22-
StepModel = create_model(
23-
"Step",
24-
explanation=(str, Field()),
25-
output=(str, Field()),
26-
)
27-
28-
# Create several models and ensure they're cached properly
29-
models = []
30-
for i in range(5):
31-
model = create_model(
32-
f"MathResponse{i}",
33-
steps=(List[StepModel], Field()),
34-
final_answer=(str, Field()),
35-
)
36-
models.append(model)
37-
38-
# Convert model to response format param
39-
param = type_to_response_format_param(model)
40-
41-
# Check if the model is in the cache
42-
self.assertIn(model, _schema_cache)
43-
44-
# Test that all models are in the cache
45-
self.assertEqual(len(_schema_cache), 5)
46-
47-
# Let the models go out of scope and trigger garbage collection
48-
models = None
49-
gc.collect()
50-
51-
# After garbage collection, the cache should be empty or reduced
52-
# since we're using weakref.WeakKeyDictionary
53-
self.assertLess(len(_schema_cache), 5)
5410

5511
@pytest.mark.asyncio
5612
async def test_async_completions_parse_memory():
@@ -65,13 +21,6 @@ async def test_async_completions_parse_memory():
6521
_schema_cache.clear()
6622
initial_cache_size = len(_schema_cache)
6723

68-
# Create a mock client
69-
mock_client = MagicMock()
70-
mock_client.chat.completions.create = AsyncMock()
71-
72-
# Create the AsyncCompletions instance with our mock client
73-
completions = AsyncCompletions(mock_client)
74-
7524
# Simulate the issue by creating multiple models and making calls
7625
models = []
7726
for i in range(10):

0 commit comments

Comments (0)