Skip to content

Commit bcb75a8

Browse files
committed
Fix lint errors and add type annotations
- Remove unused `initial_cache_size` variable
- Add type annotations for `_schema_cache` and the test function return type
- Use `del` instead of assignment to `None` for better garbage collection
- Add a `type: ignore` comment for Pydantic dynamic model typing
- All ruff checks pass
1 parent 3e19420 commit bcb75a8

2 files changed

Lines changed: 8 additions & 8 deletions

File tree

src/openai/lib/_parsing/_completions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232
from ...types.chat.chat_completion_message_function_tool_call import Function
3333

3434
# Cache to store weak references to schema objects
35-
_schema_cache = weakref.WeakKeyDictionary()
35+
_schema_cache: weakref.WeakKeyDictionary[type, ResponseFormatParam] = weakref.WeakKeyDictionary()
3636

3737
ResponseFormatT = TypeVar(
3838
"ResponseFormatT",

tests/lib/_parsing/test_memory_leak.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,25 +9,25 @@
99

1010

1111
@pytest.mark.asyncio
12-
async def test_async_completions_parse_memory():
12+
async def test_async_completions_parse_memory() -> None:
1313
"""Test if AsyncCompletions.parse() doesn't leak memory with dynamic models"""
14+
# Create a base step model
1415
StepModel = create_model(
1516
"Step",
1617
explanation=(str, Field()),
1718
output=(str, Field()),
1819
)
1920

20-
# Clear the cache and record initial state
21+
# Clear the cache before testing
2122
_schema_cache.clear()
22-
initial_cache_size = len(_schema_cache)
23-
23+
2424
# Simulate the issue by creating multiple models and making calls
25-
models = []
25+
models: list[type] = []
2626
for i in range(10):
2727
# Create a new dynamic model each time
2828
new_model = create_model(
2929
f"MathResponse{i}",
30-
steps=(List[StepModel], Field()),
30+
steps=(List[StepModel], Field()), # type: ignore[valid-type]
3131
final_answer=(str, Field()),
3232
)
3333
models.append(new_model)
@@ -40,7 +40,7 @@ async def test_async_completions_parse_memory():
4040
cache_size_with_references = len(_schema_cache)
4141

4242
# Let the models go out of scope and trigger garbage collection
43-
models = None
43+
del models
4444
gc.collect()
4545

4646
# After garbage collection, the cache should be significantly reduced

0 commit comments

Comments (0)