From 1bf29a96de350de4c9d4e75ae0e3537e7df598dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roc=C3=ADo=20Vega?= Date: Tue, 16 Dec 2025 11:40:11 -0300 Subject: [PATCH 1/2] [IMP] account_statement_import_sheet_file_bg: Add improvement to split attachment file when it is too big Part-of: ingadhoc/miscellaneous#320 Signed-off-by: Filoquin adhoc --- .../models/account_statement_import.py | 93 +++++++++++++++++-- 1 file changed, 87 insertions(+), 6 deletions(-) diff --git a/account_statement_import_sheet_file_bg/models/account_statement_import.py b/account_statement_import_sheet_file_bg/models/account_statement_import.py index 5a294fd6..ed281590 100644 --- a/account_statement_import_sheet_file_bg/models/account_statement_import.py +++ b/account_statement_import_sheet_file_bg/models/account_statement_import.py @@ -1,26 +1,59 @@ # Copyright 2020 CorporateHub (https://corporatehub.eu) # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). -import logging + +import base64 +from io import BytesIO from markupsafe import Markup from odoo import _, models - -_logger = logging.getLogger(__name__) +from openpyxl import Workbook, load_workbook class AccountStatementImport(models.TransientModel): _name = "account.statement.import" _inherit = ["account.statement.import", "base.bg"] - def import_file_button(self): + def import_file_button(self, wizard_data=None): """Process the file chosen in the wizard, create a bank statement and return a link to its reconciliation page.""" if not self._context.get("bg_job"): - return self.bg_enqueue("import_file_button") + if self.sheet_mapping_id: + header_column = self.sheet_mapping_id.header_lines_skip_count + files = self.split_base64_excel(header_column, 1000) + if files: + res = False + for file in files: + # Create wizard data to be passed to bg job + wizard_data = { + "statement_file": file, + "statement_filename": self.statement_filename, + "sheet_mapping_id": self.sheet_mapping_id.id, + } + # Call bg_enqueue on empty recordset and 
pass data as kwargs + res = self.env[self._name].bg_enqueue("import_file_button", wizard_data=wizard_data) + return res + # Pass wizard data for single file + wizard_data = { + "statement_file": self.statement_file, + "statement_filename": self.statement_filename, + "sheet_mapping_id": self.sheet_mapping_id.id if self.sheet_mapping_id else False, + } + return self.env[self._name].bg_enqueue("import_file_button", wizard_data=wizard_data) + # No sheet_mapping_id, pass basic data + wizard_data = { + "statement_file": self.statement_file, + "statement_filename": self.statement_filename, + } + return self.env[self._name].bg_enqueue("import_file_button", wizard_data=wizard_data) else: + # Running in background job - recreate wizard from passed data + if wizard_data: + wizard = self.create(wizard_data) + else: + wizard = self try: - result = super().import_file_button() + result = super(AccountStatementImport, wizard).import_file_button() statement_id = False @@ -46,3 +79,51 @@ def import_file_button(self): except Exception as e: return _("Error importing bank statement: %s") % str(e) return result + + def split_base64_excel(self, header_rows_count, rows_per_file_limit): + """Split Excel file into multiple parts to avoid overloading the system. 
+ Returns empty list if file is not a valid Excel or if split is not needed.""" + if not self.statement_file: + return [] + + output_base64_list = [] + try: + file_bytes = base64.b64decode(self.statement_file) + read_buffer = BytesIO(file_bytes) + input_workbook = load_workbook(read_buffer) + input_worksheet = input_workbook.active + except Exception: + return [] + + all_rows = list(input_worksheet.rows) + if not all_rows: + return [] + + header_rows = all_rows[:header_rows_count] + data_rows = all_rows[header_rows_count:] + start_row_index = 0 + total_data_rows = len(data_rows) + + while start_row_index < total_data_rows: + end_row_index = min(start_row_index + rows_per_file_limit, total_data_rows) + rows_for_current_part = data_rows[start_row_index:end_row_index] + + output_workbook = Workbook() + output_worksheet = output_workbook.active + + for header_row in header_rows: + row_values = [cell.value for cell in header_row] + output_worksheet.append(row_values) + + for data_row in rows_for_current_part: + row_values = [cell.value for cell in data_row] + output_worksheet.append(row_values) + + write_buffer = BytesIO() + output_workbook.save(write_buffer) + output_bytes = write_buffer.getvalue() + base64_content = base64.b64encode(output_bytes).decode("utf-8") + output_base64_list.append(base64_content) + + start_row_index = end_row_index + return output_base64_list From d5b8f289eff9cd518130512700edfa2fc4a93ab0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roc=C3=ADo=20Vega?= Date: Wed, 17 Dec 2025 10:54:21 -0300 Subject: [PATCH 2/2] [IMP] _bg*: Add new improvements when auto trigger closes ingadhoc/miscellaneous#320 Signed-off-by: Filoquin adhoc --- .../__manifest__.py | 6 +- .../data/ir_config_parameter_data.xml | 7 +++ .../models/account_statement_import.py | 58 ++++++++++++++++--- base_bg/models/bg_job.py | 5 ++ 4 files changed, 66 insertions(+), 10 deletions(-) create mode 100644 account_statement_import_sheet_file_bg/data/ir_config_parameter_data.xml diff --git 
a/account_statement_import_sheet_file_bg/__manifest__.py b/account_statement_import_sheet_file_bg/__manifest__.py index 5898a35a..4645a9e7 100644 --- a/account_statement_import_sheet_file_bg/__manifest__.py +++ b/account_statement_import_sheet_file_bg/__manifest__.py @@ -1,13 +1,15 @@ { "name": "Account statement import sheet file BG", - "version": "18.0.1.0.0", + "version": "18.0.1.0.1", "category": "Productivity/Documents", "summary": "Integration between Documents and Base BG modules", "depends": [ "base_bg", "account_statement_import_sheet_file", ], - "data": [], + "data": [ + "data/ir_config_parameter_data.xml", + ], "demo": [], "installable": True, "auto_install": False, diff --git a/account_statement_import_sheet_file_bg/data/ir_config_parameter_data.xml b/account_statement_import_sheet_file_bg/data/ir_config_parameter_data.xml new file mode 100644 index 00000000..54299c83 --- /dev/null +++ b/account_statement_import_sheet_file_bg/data/ir_config_parameter_data.xml @@ -0,0 +1,7 @@ + + + + account_statement_import_sheet_file_bg.rows_per_file_limit + 3000 + + diff --git a/account_statement_import_sheet_file_bg/models/account_statement_import.py b/account_statement_import_sheet_file_bg/models/account_statement_import.py index ed281590..0f192a1a 100644 --- a/account_statement_import_sheet_file_bg/models/account_statement_import.py +++ b/account_statement_import_sheet_file_bg/models/account_statement_import.py @@ -20,19 +20,51 @@ def import_file_button(self, wizard_data=None): if not self._context.get("bg_job"): if self.sheet_mapping_id: header_column = self.sheet_mapping_id.header_lines_skip_count - files = self.split_base64_excel(header_column, 1000) + # Get row limit from system parameter + rows_limit = ( + self.env["ir.config_parameter"] + .sudo() + .get_param("account_statement_import_sheet_file_bg.rows_per_file_limit") + ) + # Only split if parameter exists and has a valid value + files = [] + if rows_limit: + try: + rows_limit = int(rows_limit) + files = 
self.split_base64_excel(header_column, rows_limit) + except (ValueError, TypeError): + files = [] + if files: - res = False - for file in files: + for idx, file in enumerate(files): # Create wizard data to be passed to bg job wizard_data = { "statement_file": file, "statement_filename": self.statement_filename, "sheet_mapping_id": self.sheet_mapping_id.id, + "part_number": idx + 1, + "total_parts": len(files), } # Call bg_enqueue on empty recordset and pass data as kwargs - res = self.env[self._name].bg_enqueue("import_file_button", wizard_data=wizard_data) - return res + # Add part number to job name for clarity + job_name = f"{self._name}.import_file_button - Part {idx + 1}/{len(files)}" + self.env[self._name].bg_enqueue( + "import_file_button", + wizard_data=wizard_data, + name=job_name, + max_retries=5, + ) + # Return notification about all jobs enqueued + return { + "type": "ir.actions.client", + "tag": "display_notification", + "params": { + "title": _("Process sent to background successfully"), + "type": "success", + "message": _("Processing %s files. 
You will be notified when each is done.") % len(files), + "next": {"type": "ir.actions.act_window_close"}, + }, + } # Pass wizard data for single file wizard_data = { "statement_file": self.statement_file, @@ -48,7 +80,12 @@ def import_file_button(self, wizard_data=None): return self.env[self._name].bg_enqueue("import_file_button", wizard_data=wizard_data) else: # Running in background job - recreate wizard from passed data + part_number = None + total_parts = None if wizard_data: + # Extract part info before creating wizard + part_number = wizard_data.pop("part_number", None) + total_parts = wizard_data.pop("total_parts", None) wizard = self.create(wizard_data) else: wizard = self @@ -64,10 +101,15 @@ def import_file_button(self, wizard_data=None): break if statement_id: + statement = self.env["account.bank.statement"].browse(statement_id) + + # Add part info to statement name if split was done + if part_number and total_parts: + part_suffix = f" - Part {part_number}/{total_parts}" + statement.write({"name": statement.name + part_suffix}) + base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url") url = f"{base_url}/odoo/account.bank.statement/{statement_id}" - - statement = self.env["account.bank.statement"].browse(statement_id) name = statement.name or f"Statement {statement_id}" res_html = ( @@ -93,7 +135,7 @@ def split_base64_excel(self, header_rows_count, rows_per_file_limit): input_workbook = load_workbook(read_buffer) input_worksheet = input_workbook.active except Exception: - return [] + return [self.statement_file] all_rows = list(input_worksheet.rows) if not all_rows: diff --git a/base_bg/models/bg_job.py b/base_bg/models/bg_job.py index c5a9d8a8..2d4f5e2b 100644 --- a/base_bg/models/bg_job.py +++ b/base_bg/models/bg_job.py @@ -153,6 +153,7 @@ def run(self): } ) self.env.cr.commit() # pylint: disable=invalid-commit + try: context = self.context_json or {} context.update({"bg_job": True}) @@ -164,6 +165,7 @@ def run(self): record_ids = 
kwargs.pop("_record_ids", None) records = model.browse(record_ids).with_context(**context).with_user(self.create_uid) result = getattr(records, self.method)(*args, **kwargs) + self.write( { "state": "done", @@ -172,7 +174,9 @@ def run(self): ) if result: self._notify_user(result) + self.env.cr.commit() # pylint: disable=invalid-commit except Exception as e: + self.env.cr.rollback() # pylint: disable=invalid-commit self._handle_job_error(e) raise @@ -237,6 +241,7 @@ def _cron_run_enqueued_jobs(self, limit: int = 5): cron_ids = self.env["ir.cron"].search([], order="id").filtered(lambda c: c.code and code in c.code).ids index, total = cron_ids.index(cron_id), len(cron_ids) jobs = self.search([("state", "=", "enqueued")]).filtered(lambda r: r.id % total == index)[:limit] + for job in jobs: try: job.run()