Skip to content

Commit 65b87e9

Browse files
fix: deal with case sensitivity on file extension derivation
1 parent 41ff768 commit 65b87e9

5 files changed

Lines changed: 6 additions & 6 deletions

File tree

src/dve/core_engine/backends/base/contract.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,7 @@ def read_raw_entities(
339339
reader_metadata = contract_metadata.reader_metadata[entity_name]
340340
extension = "." + (
341341
get_file_suffix(resource) or ""
342-
) # Already checked that extension supported.
342+
).lower() # Already checked that extension supported.
343343

344344
reader_config = reader_metadata[extension]
345345
reader_type = get_reader(reader_config.reader)

src/dve/pipeline/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def load_config(
4747

4848
def load_reader(dataset: Dataset, model_name: str, file_extension: str):
4949
"""Loads the readers for the given feed, model name and file extension"""
50-
reader_config = dataset[model_name].reader_config[f".{file_extension}"]
50+
reader_config = dataset[model_name].reader_config[f".{file_extension.lower()}"]
5151
reader = _READER_REGISTRY[reader_config.reader](**reader_config.kwargs_)
5252
return reader
5353

tests/features/books.feature

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ Feature: Pipeline tests using the books dataset
55
introduces more complex transformations that require aggregation.
66

77
Scenario: Validate complex nested XML data (spark)
8-
Given I submit the books file nested_books.xml for processing
8+
Given I submit the books file nested_books.XML for processing
99
And A spark pipeline is configured with schema file 'nested_books.dischema.json'
1010
And I add initial audit entries for the submission
1111
Then the latest audit record for the submission is marked with processing status file_transformation
@@ -32,7 +32,7 @@ Feature: Pipeline tests using the books dataset
3232
| number_warnings | 0 |
3333

3434
Scenario: Validate complex nested XML data (duckdb)
35-
Given I submit the books file nested_books.xml for processing
35+
Given I submit the books file nested_books.XML for processing
3636
And A duckdb pipeline is configured with schema file 'nested_books_ddb.dischema.json'
3737
And I add initial audit entries for the submission
3838
Then the latest audit record for the submission is marked with processing status file_transformation

tests/test_core_engine/test_engine.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,8 +99,8 @@ def test_dummy_books_run(self, spark, temp_dir: str):
9999
with test_instance:
100100
_, errors_uri = test_instance.run_pipeline(
101101
entity_locations={
102-
"header": get_test_file_path("books/nested_books.xml").as_posix(),
103-
"nested_books": get_test_file_path("books/nested_books.xml").as_posix(),
102+
"header": get_test_file_path("books/nested_books.XML").as_posix(),
103+
"nested_books": get_test_file_path("books/nested_books.XML").as_posix(),
104104
}
105105
)
106106

0 commit comments

Comments
 (0)