From ce09a752c85220391d3e02ca2d7feecabea4b13c Mon Sep 17 00:00:00 2001
From: georgeRobertson <50412379+georgeRobertson@users.noreply.github.com>
Date: Tue, 17 Feb 2026 15:47:10 +0000
Subject: [PATCH 1/6] fix: issue with SubmissionInfo filename handling when
 the filename contains special chars

---
 src/dve/core_engine/models.py          | 11 ++-----
 tests/features/steps/steps_pipeline.py |  2 +-
 tests/test_core_engine/test_models.py  | 44 +++++++++++++++++++++++---
 3 files changed, 42 insertions(+), 15 deletions(-)

diff --git a/src/dve/core_engine/models.py b/src/dve/core_engine/models.py
index 75a14ed..09fcbb3 100644
--- a/src/dve/core_engine/models.py
+++ b/src/dve/core_engine/models.py
@@ -8,7 +8,7 @@
 import os
 import uuid
 from collections.abc import MutableMapping
-from pathlib import Path, PurePath
+from pathlib import Path
 from typing import Any, Optional
 
 from pydantic import UUID4, BaseModel, Field, FilePath, root_validator, validator
@@ -64,16 +64,9 @@ class SubmissionInfo(AuditRecord):
     datetime_received: Optional[dt.datetime] = None  # type: ignore
     """The datetime the file was received."""
 
-    @validator("file_name")
-    def _ensure_metadata_extension_removed(cls, filename):  # pylint: disable=no-self-argument
-        path = PurePath(filename)
-        return path.stem
-
     @validator("file_extension")
     def _ensure_just_file_stem(cls, extension: str):  # pylint: disable=no-self-argument
-        if "." in extension:
-            return extension.split(".")[-1]
-        return extension
+        return extension.rsplit(".", 1)[-1]
 
     @property
     def file_name_with_ext(self):
diff --git a/tests/features/steps/steps_pipeline.py b/tests/features/steps/steps_pipeline.py
index b047905..fa1e848 100644
--- a/tests/features/steps/steps_pipeline.py
+++ b/tests/features/steps/steps_pipeline.py
@@ -219,7 +219,7 @@ def submit_file_for_processing(context: Context, dataset: str, file_name: str):
     sub_info = {
         "submission_id": uuid4().hex,
         "dataset_id": dataset,
-        "file_name": file_name,
+        "file_name": file_name.rsplit(".", 1)[0],
         "file_extension": Path(file_name).suffix,
         "reporting_period_start": "2025-11-01 00:00:00",
         "reporting_period_end": "2025-11-30 23:59:59"
diff --git a/tests/test_core_engine/test_models.py b/tests/test_core_engine/test_models.py
index 87c8f9d..e187de1 100644
--- a/tests/test_core_engine/test_models.py
+++ b/tests/test_core_engine/test_models.py
@@ -18,7 +18,7 @@
             "submitted": {
                 "submission_id": CONSTANT_SUBMISSION_ID,
                 "dataset_id": "test0",
-                "file_name": "my_file.csv",
+                "file_name": "my_file",
                 "file_extension": "csv",
             },
             "expected": {
@@ -35,13 +35,13 @@
             "submitted": {
                 "submission_id": CONSTANT_SUBMISSION_ID,
                 "dataset_id": "test1",
-                "file_name": "my_file.csv.csv",
-                "file_extension": "csv",
+                "file_name": "my_file",
+                "file_extension": ".csv.csv",
             },
             "expected": {
                 "submission_id": CONSTANT_SUBMISSION_ID,
                 "dataset_id": "test1",
-                "file_name": "my_file.csv",
+                "file_name": "my_file",
                 "file_extension": "csv",
             },
         },
     ),
     # submission with multiple file extensions
     (
         {
@@ -52,7 +52,7 @@
             "submitted": {
                 "submission_id": CONSTANT_SUBMISSION_ID,
                 "dataset_id": "test2",
-                "file_name": "my_file.xml",
+                "file_name": "my_file",
                 "file_extension": "csv.csv.xml",
             },
             "expected": {
@@ -63,6 +63,40 @@
             },
         },
     ),
+    # submission with dots in the file name
+    (
+        {
+            "submitted": {
+                "submission_id": CONSTANT_SUBMISSION_ID,
+                "dataset_id": "test2",
+                "file_name": "my.file.perfect",
+                "file_extension": "csv",
+            },
+            "expected": {
+                "submission_id": CONSTANT_SUBMISSION_ID,
+                "dataset_id": "test2",
+                "file_name": "my.file.perfect",
+                "file_extension": "csv",
+            },
+        },
+    ),
+    # submission with special characters in the file name
+    (
+        {
+            
"submitted": { + "submission_id": CONSTANT_SUBMISSION_ID, + "dataset_id": "test2", + "file_name": "m_y%fil\ne.perfect", + "file_extension": "csv", + }, + "expected": { + "submission_id": CONSTANT_SUBMISSION_ID, + "dataset_id": "test2", + "file_name": "m_y%fil\ne.perfect", + "file_extension": "csv", + }, + }, + ), ], ) def test_submission_info( # pylint: disable=missing-function-docstring From 751766cf7f6dfd9ebc0bbd858e2ef3c0bb83b11d Mon Sep 17 00:00:00 2001 From: georgeRobertson <50412379+georgeRobertson@users.noreply.github.com> Date: Tue, 17 Feb 2026 16:31:23 +0000 Subject: [PATCH 2/6] build: upgrade werkzeug to 3.1.5 to mitigate vuln & lock poetry to 2.2.1 --- .mise.toml | 2 +- .tool-versions | 2 +- poetry.lock | 21 +++++++-------------- pyproject.toml | 2 +- 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/.mise.toml b/.mise.toml index b31659e..f27f070 100644 --- a/.mise.toml +++ b/.mise.toml @@ -1,4 +1,4 @@ [tools] python="3.11" -poetry="2.2" +poetry="2.2.1" java="liberica-1.8.0" diff --git a/.tool-versions b/.tool-versions index b23db8d..3a495f2 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,3 +1,3 @@ python 3.11.14 -poetry 2.2.0 +poetry 2.2.1 java liberica-1.8.0 diff --git a/poetry.lock b/poetry.lock index 68fc5b2..7b1987a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "argcomplete" @@ -2533,13 +2533,6 @@ optional = false python-versions = ">=3.8" groups = ["dev", "test"] files = [ - {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"}, - {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"}, - {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"}, - {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"}, - {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"}, - {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"}, - {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"}, {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"}, {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"}, {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"}, @@ -2970,18 +2963,18 @@ files = [ [[package]] name = "werkzeug" -version = "3.0.6" +version = "3.1.5" description = "The comprehensive WSGI web application library." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev", "test"] files = [ - {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, - {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, + {file = "werkzeug-3.1.5-py3-none-any.whl", hash = "sha256:5111e36e91086ece91f93268bb39b4a35c1e6f1feac762c9c822ded0a4e322dc"}, + {file = "werkzeug-3.1.5.tar.gz", hash = "sha256:6a548b0e88955dd07ccb25539d7d0cc97417ee9e179677d22c7041c8f078ce67"}, ] [package.dependencies] -MarkupSafe = ">=2.1.1" +markupsafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] @@ -3127,4 +3120,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.12" -content-hash = "93fd40fa4a8924c95ee4aa46f75d693294c32670721b31f25576e3fe60236013" +content-hash = "08ea1eedf25a896fdc21f03d04f4403d47d655fc90eb5eb310ff7cde7e3b7a6d" diff --git a/pyproject.toml b/pyproject.toml index 3b9a736..4497211 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ faker = "18.11.1" behave = "1.3.3" coverage = "7.11.0" moto = {extras = ["s3"], version = "4.0.13"} -Werkzeug = "3.0.6" # Dependency of moto which needs 3.0.6 for security vuln mitigation +Werkzeug = "3.1.5" pytest = "8.4.2" pytest-lazy-fixtures = "1.4.0" # switched from https://github.com/TvoroG/pytest-lazy-fixture as it's no longer supported xlsx2csv = "0.8.2" From 9674700666644e05c0773eb2a4afa3c8f0d0ece6 Mon Sep 17 00:00:00 2001 From: stevenhsd <56357022+stevenhsd@users.noreply.github.com> Date: Tue, 17 Feb 2026 21:28:50 +0000 Subject: [PATCH 3/6] fix: included submission status (with additional processing failure check) in error report population to reduce chance of incorrect status --- src/dve/pipeline/pipeline.py | 9 ++ src/dve/reporting/excel_report.py | 154 ++-------------------- tests/test_reporting/test_excel_report.py | 56 +++----- 3 files changed, 38 insertions(+), 181 deletions(-) diff --git a/src/dve/pipeline/pipeline.py b/src/dve/pipeline/pipeline.py index 6aa4f41..46a89c2 100644 --- a/src/dve/pipeline/pipeline.py +++ b/src/dve/pipeline/pipeline.py @@ -20,6 +20,7 @@ dump_feedback_errors, dump_processing_errors, get_feedback_errors_uri, + get_processing_errors_uri, load_feedback_messages, ) from dve.core_engine.backends.base.auditing import BaseAuditingManager @@ -769,6 +770,13 @@ def error_report( "error_report", submission_info.submission_id ) + if not submission_status.processing_failed: + submission_status.processing_failed = fh.get_resource_exists( + get_processing_errors_uri( + fh.joinuri(self.processed_files_path, submission_info.submission_id) + ) + ) + if not self.processed_files_path: raise AttributeError("processed files path not provided") @@ -797,6 +805,7 @@ def error_report( if value is not None and not key.endswith("_updated") } summary_items = er.SummaryItems( + submission_status=submission_status, summary_dict=summary_dict, row_headings=["Submission Failure", "Warning"], ) diff --git a/src/dve/reporting/excel_report.py b/src/dve/reporting/excel_report.py index 4dc3fee..05e8d7c 100644 --- a/src/dve/reporting/excel_report.py +++ b/src/dve/reporting/excel_report.py @@ -16,11 +16,15 @@ from polars import DataFrame from polars.exceptions import ColumnNotFoundError +from dve.pipeline.utils import SubmissionStatus + @dataclass class SummaryItems: """Items to go into the Summary sheet""" + submission_status: SubmissionStatus = 
field(default_factory=SubmissionStatus) + """The status of the submission""" summary_dict: dict[str, Any] = field(default_factory=dict) """Dictionary of items to show in the front sheet key is put into Column B and value in column C""" @@ -84,9 +88,12 @@ def create_summary_sheet( return summary - @staticmethod - def get_submission_status(aggregates: DataFrame) -> str: + def get_submission_status(self, aggregates: DataFrame) -> str: """Returns the status of the submission based on the error data""" + if self.submission_status.processing_failed: + return "There was an issue processing the submission. This will be investigated." + if self.submission_status.validation_failed: + return "File has been rejected" if aggregates.is_empty(): return "File has been accepted, no issues to report" failures = aggregates["Type"].unique() @@ -134,149 +141,6 @@ def _add_submission_info(self, status: str, summary: Worksheet): summary.append(["", ""]) -@dataclass -class CombinedSummary(SummaryItems): - """Writes the combined report summary tables - - These get split out of multiple lines based on the partition key of the dataset. - - Each of these sub tables has rows, with the row being defined by row_field - and columns, with the each one being filtered by column field. - - An example would look like this... - - {Current partition} Table heading - partition_key column_field_n column_field_m additional_column_1 addition_column_2 etc. - first_partition 0 2 10 14 - 2nd_partition 3 4 11 15 - - {next partition} Table heading - partition_key column_field_n column_field_m additional_column_1 addition_column_2 etc. - first_partition 0 5 10 14 - 2nd_partition 3 4 12 15 - - ...by default the value in the first_partition x column_field_n cell will be the "Count" field - so it's the number of times that a partiticular column has occured within a partition. - - or more concretly, in a dataset where the columns are `Submission_error` and `warning`, and the - partition key is `file name` - the result would be the number of times a submission error or - warning has occured within a file. - - In the parent class there is an aggregations property, which allows custom aggregations to - be added. If an aggregation is added to a field not in the column field - (e.g. an additional column) then an aggregation and column mapping needs to be added for it. 
- - """ - - column_field: str = "Type" - """Field to display across the top of the table""" - row_field: str = "file_name" - """Field to display along the side of the table""" - partition_key: str = "FeedType" - """Key to split the data into multiple tables""" - table_heading: str = "Files processed" - """Heading for each partitioned table""" - table_mapping: dict = field(default_factory=dict) - """Mapping of a given column to a column in the dataframe, defaults to using Count""" - - def create_summary_sheet( - self, - summary: Worksheet, - aggregates: DataFrame, - status: str, - ): - """Creates a summary sheet for a combined error report""" - self._add_submission_info(status, summary) - - try: - agg_tables = aggregates[self.column_field].unique().to_list() - except ColumnNotFoundError: - agg_tables = [] - tables = self.table_columns or agg_tables - tables = tables.copy() # make sure not to mutate the original - difference = set(agg_tables).difference(tables) - if difference: - tables.extend(difference) - - if self.additional_columns: - tables.extend(self.additional_columns) - - if aggregates.is_empty(): - error_summary = aggregates - else: - groups = [self.column_field, self.row_field, self.partition_key] - - error_summary = ( - # chaining methods on dataframes seems to confuse mypy - aggregates.group_by(groups).agg(*self.aggregations) # type: ignore - ) - tables = [table for table in tables if table is not None] - column = self.partition_key - keys = error_summary[column].unique() - for item in sorted(str(key) for key in keys if key is not None): - summary.append(["", f"{item} {self.table_heading}"]) - self._write_combined_table( - summary, - tables, - error_summary.filter(pl.col(column) == pl.lit(item)), - ) - summary.append([""]) - return summary - - @staticmethod - def get_submission_status(aggregates: DataFrame) -> str: - """Returns the status of the submission based on the error data""" - if aggregates.is_empty(): - return "Overall submission has been accepted, no issues to report" - failures = aggregates["Type"].unique() - if "Submission Failure" in failures: - status = "Submission Failures found, overall submission has been rejected" - elif "Warning" in failures: - status = "Overall submission has been accepted, warnings found" - else: - status = "Overall submission has been accepted, no issues to report" - return status - - def _write_combined_table( - self, - summary: Worksheet, - tables: list[str], - error_summary: DataFrame, - ): - try: - agg_types = error_summary[self.row_field].unique().to_list() - except ColumnNotFoundError: - agg_types = [] - - row_headings = self.row_headings or agg_types - difference = set(row_headings).difference(agg_types) - if difference: - row_headings.extend(difference) - - row_headings = filter(bool, row_headings) - - summary.append(["", self.row_field.capitalize(), *map(str.capitalize, tables)]) - for row_type in sorted(row_headings): - row: list[Any] = ["", row_type] - for table in tables: - count_field = self.table_mapping.get(table, "Count") - if table in self.table_columns: - column_filter = pl.col(self.column_field) == pl.lit(table) - else: - column_filter = True - if error_summary.is_empty(): - counts = error_summary - else: - counts = error_summary.filter( # type: ignore - column_filter & (pl.col(self.row_field) == pl.lit(row_type)) - )[count_field] - if counts.is_empty(): - row.append(0) - else: - row.append(counts[0]) - summary.append(row) - - class ExcelFormat: """Formats error data into an excel file""" diff --git 
a/tests/test_reporting/test_excel_report.py b/tests/test_reporting/test_excel_report.py
index 17c0dfd..d1393fa 100644
--- a/tests/test_reporting/test_excel_report.py
+++ b/tests/test_reporting/test_excel_report.py
@@ -5,13 +5,14 @@
 import pytest
 
 from dve.core_engine.message import FeedbackMessage
+from dve.pipeline.utils import SubmissionStatus
 from dve.reporting.error_report import (
     create_error_dataframe,
     generate_report_dataframes,
     get_error_codes,
     populate_error_codes,
 )
-from dve.reporting.excel_report import CombinedSummary, ExcelFormat, SummaryItems
+from dve.reporting.excel_report import ExcelFormat, SummaryItems
 
 from ..conftest import get_test_file_path
 from ..fixtures import temp_dir
@@ -137,41 +138,6 @@ def test_excel_report(report_dfs):
     ]
 
 
-def test_excel_combined_report(report_dfs):
-    error_df, aggregate_df = report_dfs
-    error_dfs = {
-        "MilkyWay": error_df,
-        "Andromeda": error_df,
-        "BlackEye": error_df,
-        "Cartwheel": error_df,
-    }
-    summary_df = aggregate_df.with_columns(file_name=pl.lit("filename"), Galaxy=pl.lit("galaxy"))
-    report = ExcelFormat(error_dfs, aggregate_df, summary_aggregates=summary_df)
-    summary_items = CombinedSummary(
-        summary_dict={
-            "Sender": "X26",
-            "Datetime_sent": datetime.datetime.now(),
-            "Datetime_processed": datetime.datetime.now(),
-        },
-        row_field="file_name",
-        column_field="Type",
-        table_columns=["Planet", "Derived"],
-        partition_key="Galaxy",
-        aggregations=[pl.sum("Count")],
-    )
-    workbook = report.excel_format(
-        summary_items=summary_items,
-    )
-    assert workbook.sheetnames == [
-        "Summary",
-        "Error Summary",
-        "MilkyWay",
-        "Andromeda",
-        "BlackEye",
-        "Cartwheel",
-    ]
-
-
 def test_excel_report_overflow(big_report_dfs):
     error_df, aggregate_df = big_report_dfs
     error_dfs = {"MilkyWay": error_df}
@@ -215,3 +181,21 @@ def test_excel_report_empty_dfs():
     assert workbook.sheetnames == ["Summary", "Error Summary", "Error Data"]
     assert not all(cell.value for cell in workbook["Error Data"]["2"])  # no errors
     assert not all(cell.value for cell in workbook["Error Summary"]["2"])  # no aggregates
+
+def test_sub_status_failed_processing():
+    """Check that the submission status is used to determine the summary report status message"""
+
+    summary_items = SummaryItems(
+        submission_status=SubmissionStatus(processing_failed=True),
+        summary_dict={
+            "Sender": "X26",
+            "Datetime_sent": datetime.datetime.now(),
+            "Datetime_processed": datetime.datetime.now(),
+        },
+        row_headings=["Submission Failure", "Warning"],
+        table_columns=["Planet", "Derived"],
+    )
+    assert summary_items.get_submission_status(pl.DataFrame()) == "There was an issue processing the submission. This will be investigated."
+    summary_items.submission_status = SubmissionStatus(validation_failed=True)
+    assert summary_items.get_submission_status(pl.DataFrame()) == "File has been rejected"
+

From 965cb4582a0bff6ca3e5de679abeb1e18cd15a6a Mon Sep 17 00:00:00 2001
From: stevenhsd <56357022+stevenhsd@users.noreply.github.com>
Date: Tue, 17 Feb 2026 22:53:39 +0000
Subject: [PATCH 4/6] fix: ensure that errors raised during business rule
 evaluation are captured and logged

---
 src/dve/core_engine/backends/base/rules.py | 31 ++++++++++++++++------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/src/dve/core_engine/backends/base/rules.py b/src/dve/core_engine/backends/base/rules.py
index b862c27..97a6b4d 100644
--- a/src/dve/core_engine/backends/base/rules.py
+++ b/src/dve/core_engine/backends/base/rules.py
@@ -172,7 +172,7 @@ def _step_metadata_to_location(step_metadata: "AbstractStep") -> str:
 
     def _handle_rule_error(self, error: Exception, config: AbstractStep) -> Messages:
         """Log an error and create appropriate error messages."""
-        return render_error(error, self._step_metadata_to_location(config))
+        return render_error(error, self._step_metadata_to_location(config), self.logger)
 
     def evaluate(self, entities, *, config: AbstractStep) -> tuple[Messages, StageSuccessful]:
         """Evaluate a step definition, applying it to the entities."""
@@ -411,7 +411,7 @@ def apply_sync_filters(
                     CriticalProcessingError(
                         "Issue occurred while applying filter logic",
                         messages=[
-                            msg.error_message
+                            msg.error_message  # type: ignore
                             for msg in temp_messages
                             if msg.error_message
                         ],
@@ -439,7 +439,10 @@ def apply_sync_filters(
                 [
                     CriticalProcessingError(
                         "Issue occurred while generating FeedbackMessages",
-                        [msg.error_message for msg in temp_messages],
+                        messages=[
+                            msg.error_message  # type: ignore
+                            for msg in temp_messages
+                        ],
                     )
                 ],
             )
@@ -467,7 +470,10 @@ def apply_sync_filters(
                 [
                     CriticalProcessingError(
                         "Issue occurred while generating FeedbackMessages",
-                        [msg.error_message for msg in temp_messages],
+                        messages=[
+                            msg.error_message  # type: ignore
+                            for msg in temp_messages
+                        ],
                     )
                 ],
             )
@@ -505,7 +511,9 @@ def apply_sync_filters(
                 [
                     CriticalProcessingError(
                         "Issue occurred while filtering error records",
-                        [msg.error_message for msg in temp_messages],
+                        messages=[
+                            msg.error_message for msg in temp_messages  # type: ignore
+                        ],
                     )
                 ],
             )
@@ -533,7 +541,10 @@ def apply_sync_filters(
                 [
                     CriticalProcessingError(
                         "Issue occurred while generating FeedbackMessages",
-                        [msg.error_message for msg in temp_messages],
+                        messages=[
+                            msg.error_message  # type: ignore
+                            for msg in temp_messages
+                        ],
                     )
                 ],
             )
@@ -592,7 +603,9 @@ def apply_rules(
                 [
                     CriticalProcessingError(
                         "Issue occurred while applying pre filter steps",
-                        [msg.error_message for msg in stage_messages],
+                        messages=[
+                            msg.error_message for msg in stage_messages  # type: ignore
+                        ],
                     )
                 ],
             )
@@ -644,7 +657,9 @@ def apply_rules(
                 [
                     CriticalProcessingError(
                         "Issue occurred while applying post filter steps",
-                        [msg.error_message for msg in stage_messages],
+                        messages=[
+                            msg.error_message for msg in stage_messages  # type: ignore
+                        ],
                     )
                 ],
             )

From 4dc0923ed5dc0e7a12632874c8dc613ea229125f Mon Sep 17 00:00:00 2001
From: stevenhsd <56357022+stevenhsd@users.noreply.github.com>
Date: Wed, 18 Feb 2026 12:51:58 +0000
Subject: [PATCH 5/6] style: amended report message following review

---
 src/dve/reporting/excel_report.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/dve/reporting/excel_report.py b/src/dve/reporting/excel_report.py
index 05e8d7c..ce7c76a 100644
--- 
a/src/dve/reporting/excel_report.py +++ b/src/dve/reporting/excel_report.py @@ -91,7 +91,7 @@ def create_summary_sheet( def get_submission_status(self, aggregates: DataFrame) -> str: """Returns the status of the submission based on the error data""" if self.submission_status.processing_failed: - return "There was an issue processing the submission. This will be investigated." + return "There was an issue processing the submission. Please contact support." if self.submission_status.validation_failed: return "File has been rejected" if aggregates.is_empty(): From 323522e1f721bcb95e6d5f47aaad2a158ba4632c Mon Sep 17 00:00:00 2001 From: stevenhsd <56357022+stevenhsd@users.noreply.github.com> Date: Wed, 18 Feb 2026 13:05:01 +0000 Subject: [PATCH 6/6] test: correct summary report expected message --- tests/test_reporting/test_excel_report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_reporting/test_excel_report.py b/tests/test_reporting/test_excel_report.py index d1393fa..bbbbca9 100644 --- a/tests/test_reporting/test_excel_report.py +++ b/tests/test_reporting/test_excel_report.py @@ -195,7 +195,7 @@ def test_sub_status_failed_processing(): row_headings=["Submission Failure", "Warning"], table_columns=["Planet", "Derived"], ) - assert summary_items.get_submission_status(pl.DataFrame()) == "There was an issue processing the submission. This will be investigated." + assert summary_items.get_submission_status(pl.DataFrame()) == "There was an issue processing the submission. Please contact support." summary_items.submission_status = SubmissionStatus(validation_failed=True) assert summary_items.get_submission_status(pl.DataFrame()) == "File has been rejected"
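
Appendix (illustration only, not part of the patches above): a minimal, self-contained sketch of the two behaviour changes in this series, for reviewers who want to try them outside the codebase. The helper names below are hypothetical; the real implementations are the validators on SubmissionInfo in src/dve/core_engine/models.py and SummaryItems.get_submission_status in src/dve/reporting/excel_report.py, and SubmissionStatus is assumed to be a simple flag holder, as its usage in the diffs suggests.

    from dataclasses import dataclass
    from pathlib import PurePath

    # PATCH 1/6: the old file_name validator returned PurePath(filename).stem,
    # which drops everything after the last dot, so a stem like
    # "my.file.perfect" was silently truncated on (re-)validation. The new
    # code keeps the stem verbatim and normalises only the extension.
    def normalise_extension(extension: str) -> str:
        # mirrors the new validator body: extension.rsplit(".", 1)[-1]
        return extension.rsplit(".", 1)[-1]

    assert PurePath("my.file.perfect").stem == "my.file"  # old behaviour: lossy
    assert normalise_extension(".csv.csv") == "csv"       # ".csv.csv" -> "csv"
    assert normalise_extension("csv") == "csv"            # already bare, unchanged

    # PATCHES 3/6, 5/6 and 6/6: get_submission_status now consults the
    # submission status flags before looking at the error aggregates,
    # with processing failures taking precedence over validation failures.
    @dataclass
    class SubmissionStatus:  # assumed shape, matching the flags used above
        processing_failed: bool = False
        validation_failed: bool = False

    def status_message(status: SubmissionStatus, aggregates_empty: bool) -> str:
        if status.processing_failed:
            return "There was an issue processing the submission. Please contact support."
        if status.validation_failed:
            return "File has been rejected"
        if aggregates_empty:
            return "File has been accepted, no issues to report"
        # The real method goes on to inspect aggregates["Type"] for
        # "Submission Failure" / "Warning" rows; simplified here.
        return "File has been accepted, issues found"

    assert status_message(SubmissionStatus(processing_failed=True), True).endswith("contact support.")
    assert status_message(SubmissionStatus(), True) == "File has been accepted, no issues to report"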