diff --git a/.gitignore b/.gitignore
index ea1844e0bd..2158f957a3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -486,3 +486,4 @@ falk_stuff/
# ignore download libraries
**/test/regression/library/
local/
+.DS_Store
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/__init__.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/__init__.py
index c5f6181846..4a5800297c 100644
--- a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/__init__.py
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/__init__.py
@@ -33,4 +33,5 @@ class PluginEnergyPlus(Plugin):
ep_tasks.CreateResultDF,
# ep_tasks.VisualizeResults,
bps.PlotBEPSResults,
+ ep_tasks.FixEPHtml
]
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/__init__.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/__init__.py
index 1ee3e763dd..a47a8c8acb 100644
--- a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/__init__.py
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/__init__.py
@@ -6,3 +6,4 @@
from .create_result_df import CreateResultDF
from .load_energyplus_results import LoadEnergyPlusResults
from .ep_visualize_results import VisualizeResults
+from .fix_ep_html import FixEPHtml
\ No newline at end of file
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/create_result_df.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/create_result_df.py
index 300fe37338..cd02de2059 100644
--- a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/create_result_df.py
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/create_result_df.py
@@ -13,6 +13,16 @@
from bim2sim.tasks.base import ITask
from bim2sim.elements.mapping.units import ureg
from bim2sim.utilities.common_functions import filter_elements
+import re
+
+def _normalize_cols(df):
+ df = df.copy()
+ df.columns = (
+ df.columns
+ .str.replace(r'\s+', ' ', regex=True) # collapse multiple spaces
+ .str.strip() # trim leading/trailing spaces
+ )
+ return df
bim2sim_energyplus_mapping_base = {
"NOT_AVAILABLE": "heat_demand_total",
@@ -22,7 +32,22 @@
"SPACEGUID IDEAL LOADS AIR SYSTEM:Zone Ideal Loads Supply Air Total "
"Cooling Rate [W](Hourly)": "cool_demand_rooms",
"DistrictHeating:HVAC [J](Hourly)": "heat_energy_total",
- "DistrictCooling:HVAC [J](Hourly) ": "cool_energy_total",
+ "DistrictCooling:HVAC [J](Hourly)": "cool_energy_total",
+ "Electricity:Facility [J](Hourly)": "electricity_total",
+ "Electricity:Building [J](Hourly)": "electricity_building",
+ "InteriorLights:Electricity [J](Hourly)": "electricity_lighting",
+ "InteriorEquipment:Electricity [J](Hourly)": "electricity_equipment",
+ "Fans:Electricity [J](Hourly)": "electricity_fans",
+ "Pumps:Electricity [J](Hourly)": "electricity_pumps",
+ "ExteriorLighting:Electricity [J](Hourly)": "electricity_exterior_lighting",
+ "ExteriorEquipment:Electricity [J](Hourly)": "electricity_exterior_equipment",
+ "DistrictHeating:Facility [J](Hourly)": "dhw_energy_total", # purchased DHW+space heat; filter to WaterSystems if your model splits
+ "DistrictHeating:WaterSystems [J](Hourly)": "dhw_energy_watersystems", # if present
+ "PlantLoopHeatingDemand:WaterSystems [J](Hourly)": "dhw_energy_plantloop", # if DHW via plant loop
+ "SPACEGUID Water Use Equipment Heating Energy [J](Hourly)": "dhw_energy_rooms",
+ "SPACEGUID Water Use Equipment Hot Water Volume [m3](Hourly)": "dhw_volume_rooms",
+ "SPACEGUID Water Use Connections Plant Hot Water Energy [J](Hourly)": "dhw_energy_connections_rooms",
+ "SPACEGUID Water Use Connections Hot Water Volume [m3](Hourly)": "dhw_volume_connections_rooms",
"SPACEGUID IDEAL LOADS AIR SYSTEM:Zone Ideal Loads Supply Air Total "
"Heating Energy [J](Hourly)":
"heat_energy_rooms",
@@ -108,6 +133,7 @@ def run(self, idf: IDF, sim_results_path: Path, elements: dict) \
"DataFrame ist needed.")
return df_finals,
raw_csv_path = sim_results_path / self.prj_name / 'eplusout.csv'
+ mtr_csv_path = sim_results_path / self.prj_name / 'eplusmtr.csv'
# TODO @Veronika: the zone_dict.json can be removed and instead the
# elements structure can be used to get the zone guids
zone_dict_path = sim_results_path / self.prj_name / 'zone_dict.json'
@@ -135,19 +161,41 @@ def run(self, idf: IDF, sim_results_path: Path, elements: dict) \
space.guid)
space_bound_dict[space.guid] = space_guids
with open(sim_results_path / self.prj_name / 'space_bound_dict.json',
- 'w+') as file1:
- json.dump(space_bound_dict, file1, indent=4)
- with open(sim_results_path / self.prj_name /
- 'space_bound_renamed_dict.json',
- 'w+') as file2:
- json.dump(space_bound_renamed_dict, file2, indent=4)
-
- df_original = PostprocessingUtils.read_csv_and_format_datetime(
- raw_csv_path)
- df_original = (
- PostprocessingUtils.shift_dataframe_to_midnight(df_original))
+ 'w+') as file:
+ json.dump(space_bound_dict, file, indent=4)
+
+
+ df_original = PostprocessingUtils.read_csv_and_format_datetime(raw_csv_path)
+ df_original = (PostprocessingUtils.shift_dataframe_to_midnight(df_original))
+ df_original = _normalize_cols(df_original)
+ if mtr_csv_path.exists():
+ df_mtr = PostprocessingUtils.read_csv_and_format_datetime(mtr_csv_path)
+ df_mtr = PostprocessingUtils.shift_dataframe_to_midnight(df_mtr)
+ df_mtr = _normalize_cols(df_mtr)
+
+ # Determine overlaps after normalization
+ overlap = [c for c in df_mtr.columns if c in df_original.columns]
+
+ if overlap:
+ # Option A (recommended): keep whatever is already in df_original,
+ # and only add *new* meter columns
+ new_cols = [c for c in df_mtr.columns if c not in df_original.columns]
+ df_original = df_original.join(df_mtr[new_cols], how='outer')
+
+ # If you *instead* want to prefer mtr values where there’s overlap:
+ # df_original = df_original.drop(columns=overlap).join(df_mtr[overlap + new_cols], how='outer')
+ else:
+ df_original = df_original.join(df_mtr, how='outer')
+ else:
+ self.logger.warning(
+ "eplusmtr.csv not found; meter-based time-series (e.g., Electricity:Facility) unavailable."
+ )
+
df_final = self.format_dataframe(df_original, zone_dict,
space_bound_dict)
+        # snapshot matching columns first: we add kWh columns while looping
+        for col in [c for c in df_final.columns if str(c).endswith('[J](Hourly)')]:
+            df_final[col.replace('[J](Hourly)', '[kWh](Hourly)')] = df_final[col] / 3_600_000.0
df_finals[self.prj_name] = df_final
return df_finals,
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_create_idf.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_create_idf.py
index bdb8613457..fd38fc1c78 100644
--- a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_create_idf.py
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_create_idf.py
@@ -1819,14 +1819,21 @@ def set_output_variables(idf: IDF, sim_settings: EnergyPlusSimSettings):
)
idf.newidfobject(
"OUTPUT:METER",
- Key_Name="DistrictHeating:HVAC",
- Reporting_Frequency="Hourly",
- )
- idf.newidfobject(
- "OUTPUT:METER",
- Key_Name="DistrictCooling:HVAC",
+ Key_Name="Electricity:Facility",
Reporting_Frequency="Hourly",
)
+ idf.newidfobject("OUTPUT:METER", Key_Name="Electricity:Building", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="InteriorLights:Electricity", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="InteriorEquipment:Electricity", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="Fans:Electricity", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="Pumps:Electricity", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="ExteriorLighting:Electricity", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="ExteriorEquipment:Electricity", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="DistrictHeating:*", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="PlantLoopHeatingDemand:*", Reporting_Frequency="Hourly")
+ idf.newidfobject("OUTPUT:METER", Key_Name="MainsWater:*", Reporting_Frequency="Hourly")
+    idf.newidfobject("OUTPUT:METER", Key_Name="DistrictCooling:HVAC", Reporting_Frequency="Hourly")  # still mapped to cool_energy_total in create_result_df
+
if 'output_dxf' in sim_settings.output_keys:
idf.newidfobject("OUTPUT:SURFACES:DRAWING",
Report_Type="DXF")
@@ -2291,6 +2298,32 @@ def map_boundary_conditions(self, inst_obj: Union[SpaceBoundary,
Args:
inst_obj: SpaceBoundary instance
"""
+ # ─── Centroid-based “buried” test for true exterior walls/windows ─────────
+ # Only BUILDINGSURFACE or FENESTRATIONSURFACE that have no matching
+    # interzone partner (i.e. exterior on one side)
+ if self.key in ("BUILDINGSURFACE:DETAILED", "FENESTRATIONSURFACE:DETAILED") \
+ and (inst_obj.related_bound is None
+ or inst_obj.related_bound.ifc.RelatingSpace.is_a("IfcExternalSpatialElement")):
+ try:
+ pts = PyOCCTools.get_points_of_face(inst_obj.bound_shape)
+ # compute centroid Z
+ zc = sum(p.Coord()[2] for p in pts) / len(pts)
+ if zc < 0.0:
+ # fully buried → ground
+ self.out_bound_cond = "Ground"
+ self.sun_exposed = "NoSun"
+ self.wind_exposed = "NoWind"
+ return
+ # if centroid >= 0.0, treat as exposed → outdoors, and return
+ self.out_bound_cond = "Outdoors"
+ self.sun_exposed = "SunExposed"
+ self.wind_exposed = "WindExposed"
+ # for fenestration, parent GUID already set
+ return
+ except Exception:
+ # any geometry error: fall back
+ pass
+
if inst_obj.level_description == '2b' \
or inst_obj.related_adb_bound is not None:
self.out_bound_cond = 'Adiabatic'
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_idf_postprocessing.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_idf_postprocessing.py
index 9f04f41519..1f214335b9 100644
--- a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_idf_postprocessing.py
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_idf_postprocessing.py
@@ -1,6 +1,5 @@
import json
from pathlib import Path
-
import pandas as pd
from geomeppy import IDF
@@ -8,6 +7,65 @@
from bim2sim.tasks.base import ITask
from bim2sim.utilities.common_functions import filter_elements
+# — robust GUID→name & TOC fixer for any EP HTML file —
+def replace_guids_in_html(report_dir, zone_dict_path):
+ """
+ Finds whichever .htm contains the “People Internal Gains Nominal” table,
+    moves its TOC to the top of the document body,
+    replaces GUIDs in its "Zone Name"
+ column (and anywhere they occur) with human-readable labels from zone_dict.json,
+ and writes out a new file *_with_names.htm.
+ """
+ import json
+ from bs4 import BeautifulSoup
+
+ # load the mapping (normalize keys to uppercase)
+ raw = json.loads((zone_dict_path).read_text(encoding='utf-8'))
+ zone_map = {k.upper(): v for k, v in raw.items()}
+
+ # scan all .htm files until we find the right one
+ html_path = None
+ for f in report_dir.glob("*.htm"):
+ text = f.read_text(encoding='utf-8')
+ if "People Internal Gains Nominal" in text:
+ html_path = f
+ break
+ if html_path is None:
+ raise FileNotFoundError(f"No HTML file in {report_dir} contains the target table")
+
+    soup = BeautifulSoup(html_path.read_text(encoding='utf-8'), 'html.parser')
+
+    # 1) Move TOC: find all anchors pointing at "#toc"; drop the 2nd, move the 1st to the top of the body
+ toc_links = soup.find_all('a', href="#toc")
+ if len(toc_links) >= 2:
+ first_p = toc_links[0].find_parent('p')
+ second_p = toc_links[1].find_parent('p')
+ second_p.decompose()
+ first_p.extract()
+ soup.body.insert(1, first_p)
+
+ # 2) Replace GUIDs in the “People Internal Gains Nominal” table
+ header = soup.find('b', string="People Internal Gains Nominal")
+ if not header:
+        raise RuntimeError("Found HTML but no ‘People Internal Gains Nominal’ header")
+    tbl = header.find_next('table')  # table following the header; was missing (NameError)
+    idx = None
+    for i, cell in enumerate(tbl.find('tr').find_all(['td','th'])):
+ if "Zone Name" in cell.get_text(strip=True):
+ idx = i
+ break
+
+ if idx is not None:
+ for tr in tbl.find_all('tr')[1:]:
+ cols = tr.find_all('td')
+ if len(cols) > idx:
+ guid = cols[idx].get_text(strip=True).upper()
+ if guid in zone_map:
+                    cols[idx].string = zone_map[guid]  # works even when the cell has nested tags (.string may be None)
+
+ # write updated HTML
+ out = report_dir / f"{html_path.stem}_with_names{html_path.suffix}"
+ out.write_text(str(soup), encoding='utf-8')
+ return out
class IdfPostprocessing(ITask):
"""Idf Postprocessin task.
@@ -38,6 +96,7 @@ def run(self, elements: dict, idf: IDF, ifc_files: list,
self._export_boundary_report(elements, idf, ifc_files)
self.write_zone_names(idf, elements,
sim_results_path / self.prj_name)
+ self._export_combined_html_report()
self.logger.info("IDF Postprocessing finished!")
@@ -62,16 +121,15 @@ def write_zone_names(idf, elements, exportpath: Path):
ifc_zones = filter_elements(elements, ThermalZone)
zone_dict_ifc_names = {}
for zone in zones:
- usage = [z.usage for z in ifc_zones if z.guid == zone.Name]
- zone_dict.update({zone.Name: usage[0]})
- zone_dict_ifc_names.update({
- zone.Name: {
- 'ZoneUsage': usage[0],
- 'Name': elements[zone.Name].ifc.Name,
- 'LongName': elements[zone.Name].ifc.LongName,
- 'StoreyName': elements[zone.Name].storeys[0].ifc.Name,
- 'StoreyElevation': elements[zone.Name].storeys[
- 0].ifc.Elevation}})
+ # find the matching BIM2SIM ThermalZone element
+ matches = [z for z in ifc_zones if z.guid == zone.Name]
+ if matches:
+ # use the .name property (i.e. IFC Reference)
+ zone_dict[zone.Name] = matches[0].zone_name
+ else:
+ # fallback to GUID
+ zone_dict[zone.Name] = zone.Name
+
with open(exportpath / 'zone_dict.json', 'w+') as file:
json.dump(zone_dict, file, indent=4)
with open(exportpath / 'zone_dict_ifc_names.json', 'w+') as file:
@@ -187,6 +245,59 @@ def _export_space_info(self, elements, idf):
ignore_index=True)
space_df.to_csv(path_or_buf=str(self.paths.export) + "/space.csv")
+ def _export_combined_html_report(self):
+ """Create an HTML report combining area.csv and bound_count.csv data.
+
+ This method reads the previously exported CSV files and combines them
+ into a single HTML report with basic visualization.
+ The HTML file is saved in the same directory as the CSV files.
+ """
+ export_path = Path(str(self.paths.export))
+ area_file = export_path / "area.csv"
+ bound_count_file = export_path / "bound_count.csv"
+ html_file = export_path / "area_bound_count_energida.htm"
+
+ # Read the CSV files
+ area_df = pd.read_csv(area_file)
+ bound_count_df = pd.read_csv(bound_count_file)
+
+ # Convert DataFrames to HTML tables
+ area_table = area_df.to_html(index=False)
+ bound_count_table = bound_count_df.to_html(index=False)
+
+ # Create HTML content without complex formatting
+ html_content = """
+
+
+ BIM2SIM Export Report
+
+
+
+ BIM2SIM Export Report
+
+ Surface Areas
+ """ + area_table + """
+
+ Boundary Counts
+ """ + bound_count_table + """
+
+ """
+
+ # Save the HTML file
+ with open(html_file, 'w') as f:
+ f.write(html_content)
+
+ self.logger.info(f"Combined HTML report saved to {html_file}")
+
def _export_boundary_report(self, elements, idf, ifc_files):
"""Export a report on the number of space boundaries.
Creates a report as a DataFrame and exports it to csv.
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_run_simulation.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_run_simulation.py
index fd5c27e212..3d24d105ee 100644
--- a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_run_simulation.py
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/ep_run_simulation.py
@@ -15,6 +15,100 @@ class RunEnergyPlusSimulation(ITask):
"""
reads = ('idf', 'sim_results_path')
+ def _export_eplusout_html_report(self, csv_path):
+ """Create an HTML report for the eplusout.csv file.
+
+ This method reads the eplusout.csv file and creates an HTML report
+ with the simulation results for better visualization.
+
+ Args:
+ csv_path (Path): Path to the eplusout.csv file.
+ """
+ import pandas as pd
+
+ # Check if file exists
+ if not csv_path.exists():
+ self.logger.warning(f"eplusout.csv file not found at {csv_path}")
+ return
+
+ # Read the CSV file
+ try:
+ ep_df = pd.read_csv(csv_path)
+
+ # Create an HTML file path in the same directory
+ html_file = csv_path.parent / "eplusout_energida.htm"
+
+ # Create HTML content
+ html_content = """
+
+
+ EnergyPlus Simulation Results
+
+
+
+ EnergyPlus Simulation Results
+
+
+
Summary Statistics
+
+ | Total Rows | """ + str(len(ep_df)) + """ |
+ | Time Period | """ + str(ep_df.iloc[0]['Date/Time']) + """ to """ + str(ep_df.iloc[-1]['Date/Time']) + """ |
+ | Number of Variables | """ + str(len(ep_df.columns)) + """ |
+
+
+
+ Column Statistics
+
+
+ | Column |
+ Min |
+ Max |
+ Mean |
+ Std Dev |
+
+ """
+
+ # Add statistics for each numerical column
+ for col in ep_df.columns:
+ if col != 'Date/Time' and pd.api.types.is_numeric_dtype(ep_df[col]):
+ html_content += f"""
+
+ | {col} |
+ {ep_df[col].min():.4f} |
+ {ep_df[col].max():.4f} |
+ {ep_df[col].mean():.4f} |
+ {ep_df[col].std():.4f} |
+
"""
+
+ html_content += """
+
+
+ Data Preview (First 20 rows)
+ """ + ep_df.head(20).to_html(index=False) + """
+
+ """
+
+ # Save the HTML file
+ with open(html_file, 'w') as f:
+ f.write(html_content)
+
+ self.logger.info(f"Generated HTML report for eplusout.csv at {html_file}")
+
+ except Exception as e:
+ self.logger.error(f"Failed to create HTML report for eplusout.csv: {e}")
+
+
def run(self, idf: IDF, sim_results_path: Path):
"""Run EneryPlus simulation.
@@ -42,8 +136,10 @@ def run(self, idf: IDF, sim_results_path: Path):
self.playground.sim_settings.simulated = True
self.logger.info(f"Simulation successfully finished.")
if ep_full:
+ eplusout_csv_path = export_path / 'eplusout.csv'
webtool_df_ep = PostprocessingUtils.export_df_for_webtool(
- csv_name=export_path / 'eplusout.csv')
+ csv_name=eplusout_csv_path)
+ self._export_eplusout_html_report(eplusout_csv_path)
self.logger.info(f"Exported dataframe for postprocessing.")
else:
self.logger.info(f"No dataframe output for postprocessing "
@@ -51,4 +147,4 @@ def run(self, idf: IDF, sim_results_path: Path):
"'run_full_simulation' to True to enable the "
"postprocessing output.")
self.logger.info(f"You can find the results under "
- f"{str(export_path)}")
+ f"{str(export_path)}")
\ No newline at end of file
diff --git a/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/fix_ep_html.py b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/fix_ep_html.py
new file mode 100644
index 0000000000..45d0035939
--- /dev/null
+++ b/bim2sim/plugins/PluginEnergyPlus/bim2sim_energyplus/task/fix_ep_html.py
@@ -0,0 +1,59 @@
+from pathlib import Path
+import json
+from bs4 import BeautifulSoup
+from bim2sim.tasks.base import ITask
+
+class FixEPHtml(ITask):
+ """After EP runs, post-process the HTML to move the TOC and replace GUIDs."""
+ reads = ('sim_results_path',)
+ touches = ('sim_results_path',)
+
+ def run(self, sim_results_path: Path):
+ report_dir = sim_results_path / self.prj_name
+ zone_dict_path = report_dir / 'zone_dict.json'
+ zone_map = {k.upper(): v for k, v in json.loads(zone_dict_path.read_text()).items()}
+
+ # 1) find the HTML that contains our table
+ html_path = None
+ for f in report_dir.glob("*.htm"):
+ if "People Internal Gains Nominal" in f.read_text():
+ html_path = f
+ break
+ if html_path is None:
+ raise FileNotFoundError(f"No HTML contains the target table in {report_dir}")
+
+ soup = BeautifulSoup(html_path.read_text(), 'html.parser')
+
+ # 2) Move the second TOC up
+ tocs = soup.find_all('a', href="#toc")
+ if len(tocs) >= 2:
+ first_p = tocs[0].find_parent('p')
+ second_p = tocs[1].find_parent('p')
+ second_p.decompose()
+ first_p.extract()
+ soup.body.insert(1, first_p)
+
+ # 3) Replace GUIDs in the Zone Name column
+        header = soup.find('b', string="People Internal Gains Nominal")
+        if header is None:  # text matched the file but not inside a <b> header
+            raise RuntimeError(f"'People Internal Gains Nominal' header not found in {html_path}")
+        tbl = header.find_next('table')
+        idx = None
+        for i, cell in enumerate(tbl.find('tr').find_all(['td', 'th'])):
+            if "Zone Name" in cell.get_text(strip=True):
+                idx = i
+                break
+
+ if idx is not None:
+ for tr in tbl.find_all('tr')[1:]:
+ cols = tr.find_all('td')
+ if len(cols) > idx:
+ guid = cols[idx].get_text(strip=True).upper()
+ if guid in zone_map:
+                        cols[idx].string = zone_map[guid]  # works even when the cell has nested tags (.string may be None)
+
+ # 4) write out
+ out = report_dir / f"{html_path.stem}_with_names{html_path.suffix}"
+ out.write_text(str(soup))
+ self.logger.info(f"Wrote updated HTML: {out}")
+ return sim_results_path,
diff --git a/bim2sim/sim_settings.py b/bim2sim/sim_settings.py
index dd4bf3a946..3065a8953f 100644
--- a/bim2sim/sim_settings.py
+++ b/bim2sim/sim_settings.py
@@ -702,7 +702,7 @@ def __init__(self):
year_of_construction_overwrite = NumberSetting(
default=None,
min_value=0,
- max_value=2015,
+ max_value=2100,
description="Force an overwrite of the year of construction as a "
"base for the selected construction set.",
for_frontend=True,
@@ -1051,8 +1051,16 @@ def __init__(self):
"internal_gains_machines_rooms", "internal_gains_persons_rooms",
"internal_gains_lights_rooms", "n_persons_rooms",
"infiltration_rooms", "mech_ventilation_rooms",
- "heat_set_rooms", "cool_set_rooms"
-
+ "heat_set_rooms", "cool_set_rooms","electricity_total","electricity_building",
+ "electricity_lighting",
+ "electricity_equipment",
+ "electricity_fans",
+ "electricity_pumps",
+ "electricity_exterior_lighting",
+ "electricity_exterior_equipment",
+ "dhw_energy_total", "dhw_energy_watersystems", "dhw_energy_plantloop",
+ "dhw_energy_rooms", "dhw_energy_connections_rooms",
+ "dhw_volume_rooms", "dhw_volume_connections_rooms"
],
choices={
"heat_demand_total":
@@ -1093,6 +1101,22 @@ def __init__(self):
"Heating set point in °C time series data",
"cool_set_rooms":
"Cooling set point in °C time series data",
+ "electricity_total":
+ "Total electricity energy (meter) as time series data",
+ "electricity_building": "Electricity:Building [J](Hourly)",
+ "electricity_lighting": "InteriorLights:Electricity [J](Hourly)",
+ "electricity_equipment": "InteriorEquipment:Electricity [J](Hourly)",
+ "electricity_fans": "Fans:Electricity [J](Hourly)",
+ "electricity_pumps": "Pumps:Electricity [J](Hourly)",
+ "electricity_exterior_lighting": "ExteriorLighting:Electricity [J](Hourly)",
+ "electricity_exterior_equipment": "ExteriorEquipment:Electricity [J](Hourly)",
+ "dhw_energy_total": "Total DHW thermal energy (from meters) as time series",
+ "dhw_energy_watersystems": "DHW thermal energy (end-use = WaterSystems, meters)",
+ "dhw_energy_plantloop": "Plant loop DHW thermal energy (meters)",
+ "dhw_energy_rooms": "Per-room DHW thermal energy (fixtures, variables)",
+ "dhw_energy_connections_rooms": "Per-room DHW thermal energy (connections, variables)",
+ "dhw_volume_rooms": "Per-room DHW hot water volume (m³), variables",
+ "dhw_volume_connections_rooms": "Per-room DHW hot water volume (m³), connections",
},
multiple_choice=True,
)
diff --git a/bim2sim/tasks/bps/enrich_use_cond.py b/bim2sim/tasks/bps/enrich_use_cond.py
index 91efa0e87e..bbe97b313e 100644
--- a/bim2sim/tasks/bps/enrich_use_cond.py
+++ b/bim2sim/tasks/bps/enrich_use_cond.py
@@ -4,11 +4,12 @@
from bim2sim.elements.bps_elements import ThermalZone
from bim2sim.tasks.base import ITask
from bim2sim.utilities.common_functions import get_use_conditions_dict, \
- get_pattern_usage, wildcard_match, filter_elements
+ get_effective_usage_data, wildcard_match, filter_elements
from bim2sim.tasks.base import Playground
from bim2sim.sim_settings import BuildingSimSettings
from bim2sim.utilities.types import AttributeDataSource
-
+import re
+from typing import Dict
class EnrichUseConditions(ITask):
"""Enriches Use Conditions of thermal zones
@@ -20,7 +21,6 @@ def __init__(self, playground: Playground):
super().__init__(playground)
self.enriched_tz: list = []
self.use_conditions: dict = {}
-
def run(self, elements: dict):
"""Enriches Use Conditions of thermal zones and central AHU settings.
@@ -39,9 +39,7 @@ def run(self, elements: dict):
self.playground.sim_settings.prj_custom_usages
self.logger.info("enriches thermal zones usage")
- self.use_conditions = get_use_conditions_dict(custom_use_cond_path)
- pattern_usage = get_pattern_usage(self.use_conditions,
- custom_usage_path)
+ self.use_conditions, pattern_usage = get_effective_usage_data(custom_usage_path, custom_use_cond_path)
final_usages = yield from self.enrich_usages(
pattern_usage, tz_elements)
for tz, usage in final_usages.items():
@@ -207,6 +205,7 @@ def enrich_usages(
thermal_zones: Dict[str, ThermalZone]) -> Dict[str, ThermalZone]:
"""Sets the usage of the given thermal_zones and enriches them.
+
Looks for fitting usages in assets/enrichment/usage based on the given
usage of a zone in the IFC. The way the usage is obtained is described
in the ThermalZone classes attribute "usage".
@@ -215,6 +214,7 @@ def enrich_usages(
customUsages.json: project specific translations that can
be stored for easier simulation.
+
Args:
pattern_usage: Dict with custom and common pattern
thermal_zones: dict with tz elements guid as key and the element
@@ -222,6 +222,7 @@ def enrich_usages(
Returns:
final_usages: key: str of usage type, value: ThermalZone element
+
"""
# selected_usage = {}
final_usages = {}
@@ -229,46 +230,115 @@ def enrich_usages(
orig_usage = str(tz.usage)
if orig_usage in pattern_usage:
final_usages[tz] = orig_usage
- else:
- matches = []
- list_org = tz.usage.replace(' (', ' ').replace(')', ' '). \
- replace(' -', ' ').replace(', ', ' ').split()
- for usage in pattern_usage.keys():
- # check custom first
- if "custom" in pattern_usage[usage]:
- for cus_usage in pattern_usage[usage]["custom"]:
- # if cus_usage == tz.usage:
- if wildcard_match(cus_usage, tz.usage):
+ continue
+
+
+ matches = []
+ list_org = tz.usage.replace(' (', ' ').replace(')', ' ').replace(' -', ' ').replace(', ', ' ').split()
+
+
+ # --- 1. Build list of potential matches (same as before)
+ for usage in pattern_usage.keys():
+ # check custom first
+ if "custom" in pattern_usage[usage]:
+ for cus_usage in pattern_usage[usage]["custom"]:
+ if wildcard_match(cus_usage, tz.usage):
+ if usage not in matches:
+ matches.append(usage)
+ # if not found in custom, continue with common
+ if len(matches) == 0:
+ for i in pattern_usage[usage]["common"]:
+ for i_name in list_org:
+ if i.match(i_name):
if usage not in matches:
matches.append(usage)
- # if not found in custom, continue with common
- if len(matches) == 0:
- for i in pattern_usage[usage]["common"]:
- for i_name in list_org:
- if i.match(i_name):
- if usage not in matches:
- matches.append(usage)
- # if just one match
- if len(matches) == 1:
- # case its an office
- if 'office_function' == matches[0]:
- office_use = cls.office_usage(tz)
- if isinstance(office_use, list):
- final_usages[tz] = cls.list_decision_usage(
- tz, office_use)
- else:
- final_usages[tz] = office_use
- # other zone usage
+
+
+ # --- 2. Smart match logic (fixed)
+ if len(matches) > 1:
+ tz_usage_clean = tz.usage.lower().strip()
+
+
+ # ✅ Exact match (case-insensitive)
+ exact = [m for m in matches if m.lower().strip() == tz_usage_clean]
+ if len(exact) == 1:
+ final_usages[tz] = exact[0]
+ print(f"[AUTO-MATCH] Space '{tz.usage}' → '{exact[0]}' (perfect match)")
+ continue
+
+
+ # ✅ Check for exact alias matches in pattern_usage
+ print(f"[DEBUG] Checking usage '{tz.usage}' against potential matches: {matches}")
+ exact_alias_matches = []
+ partial_matches = []
+
+ for m in matches:
+ all_aliases = []
+ if "common" in pattern_usage[m]:
+ all_aliases += [
+ (a.pattern if hasattr(a, "pattern") else str(a)).lower()
+ for a in pattern_usage[m]["common"]
+ ]
+ if "custom" in pattern_usage[m]:
+ all_aliases += [
+ (a.pattern if hasattr(a, "pattern") else str(a)).lower()
+ for a in pattern_usage[m]["custom"]
+ ]
+
+ # Check for EXACT alias match first
+ if tz_usage_clean in all_aliases:
+ exact_alias_matches.append(m)
+ # Then check containment (both directions)
+ elif any(tz_usage_clean in alias or alias in tz_usage_clean for alias in all_aliases):
+ partial_matches.append(m)
+
+ # Prioritize exact alias matches
+ if len(exact_alias_matches) == 1:
+ chosen = exact_alias_matches[0]
+ final_usages[tz] = chosen
+ print(f"[AUTO-MATCH] Space '{tz.usage}' → '{chosen}' (exact alias match)")
+ continue
+ elif len(exact_alias_matches) > 1:
+ # Multiple exact matches - pick longest key name
+ chosen = sorted(exact_alias_matches, key=len, reverse=True)[0]
+ final_usages[tz] = chosen
+ print(f"[AUTO-MATCH] Space '{tz.usage}' → '{chosen}' (exact alias, longest key)")
+ continue
+
+ # Fall back to partial matches
+ if partial_matches:
+ if len(partial_matches) > 1:
+ # Sort by longest key name
+ chosen = sorted(partial_matches, key=len, reverse=True)[0]
+ final_usages[tz] = chosen
+ print(f"[AUTO-MATCH] Space '{tz.usage}' → '{chosen}' (longest containment match)")
+ continue
else:
- final_usages[tz] = matches[0]
- # if no matches given forward all (for decision)
- elif len(matches) == 0:
- matches = list(pattern_usage.keys())
- if len(matches) > 1:
- final_usages[tz] = cls.list_decision_usage(
- tz, matches)
- # selected_usage[orig_usage] = tz.usage
- # collect decisions
+ chosen = partial_matches[0]
+ final_usages[tz] = chosen
+ print(f"[AUTO-MATCH] Space '{tz.usage}' → '{chosen}' (partial match)")
+ continue
+
+ # Fallback to interactive decision if still ambiguous
+ final_usages[tz] = cls.list_decision_usage(tz, matches)
+ print(f"[ASK] Space '{tz.usage}' ambiguous → asking user decision.")
+ continue
+
+
+ # --- 3. Handle single or no matches (original logic)
+ if len(matches) == 1:
+ if 'office_function' == matches[0]:
+ office_use = cls.office_usage(tz)
+ if isinstance(office_use, list):
+ final_usages[tz] = cls.list_decision_usage(tz, office_use)
+ else:
+ final_usages[tz] = office_use
+ else:
+ final_usages[tz] = matches[0]
+ elif len(matches) == 0:
+ matches = list(pattern_usage.keys())
+ final_usages[tz] = cls.list_decision_usage(tz, matches)
+
usage_dec_bunch = DecisionBunch()
for tz, use_or_dec in final_usages.items():
if isinstance(use_or_dec, ListDecision):
diff --git a/bim2sim/utilities/common_functions.py b/bim2sim/utilities/common_functions.py
index 52dfa073f3..ab4a2ae45f 100644
--- a/bim2sim/utilities/common_functions.py
+++ b/bim2sim/utilities/common_functions.py
@@ -83,7 +83,7 @@ def get_use_conditions_dict(custom_use_cond_path: Path) -> dict:
else:
raise ValueError(f"Invalid JSON file {use_cond_path}")
-
+# obsolete
def get_common_pattern_usage() -> dict:
common_pattern_path = assets / 'enrichment/usage/commonUsages.json'
if validateJSON(common_pattern_path):
@@ -93,7 +93,7 @@ def get_common_pattern_usage() -> dict:
else:
raise ValueError(f"Invalid JSON file {common_pattern_path}")
-
+# obsolete
def get_custom_pattern_usage(custom_usages_path: Path) -> dict:
"""gets custom usages based on given json file."""
custom_usages = {}
@@ -108,19 +108,13 @@ def get_custom_pattern_usage(custom_usages_path: Path) -> dict:
raise ValueError(f"Invalid JSON file {custom_usages_path}")
-def get_pattern_usage(use_conditions: dict, custom_usages_path: Path):
- """get usage patterns to use it on the thermal zones get_usage"""
- common_usages = get_common_pattern_usage()
-
- custom_usages = get_custom_pattern_usage(custom_usages_path)
- usages = combine_usages(common_usages, custom_usages)
+def compile_usage_patterns(use_conditions, custom_usages, common_usages):
+ import collections
+ pattern_usage = collections.defaultdict(lambda: {"common": [], "custom": []})
+ combined = combine_usages(common_usages, custom_usages)
- pattern_usage_teaser = collections.defaultdict(dict)
-
- for i in use_conditions:
- pattern_usage_teaser[i]["common"] = []
- pattern_usage_teaser[i]["custom"] = []
- list_engl = re.sub(r'\((.*?)\)', '', i) \
+ for usage_label in use_conditions:
+ list_engl = re.sub(r'\((.*?)\)', '', usage_label) \
.replace(' - ', ', ') \
.replace(' and ', ', ') \
.replace(' in ', ', ') \
@@ -128,23 +122,64 @@ def get_pattern_usage(use_conditions: dict, custom_usages_path: Path):
.replace(' or ', ', ') \
.replace(' the ', ' ') \
.split(', ')
- for i_eng in list_engl:
- new_i_eng = i_eng.replace(' ', '(.*?)')
- pattern_usage_teaser[i]["common"].append(re.compile(
- '(.*?)%s' % new_i_eng, flags=re.IGNORECASE))
- if i in usages:
- for c_trans in usages[i]["common"]:
- pattern_usage_teaser[i]["common"].append(re.compile(
- '(.*?)%s' % c_trans, flags=re.IGNORECASE))
- if "custom" in usages[i]:
- for clear_usage in usages[i]["custom"]:
- pattern_usage_teaser[i]["custom"].append(clear_usage)
-
- pattern_usage_teaser['office_function']["common"] = [re.compile(
- '(.*?)%s' % c_trans, re.IGNORECASE)
- for c_trans in usages['office_function']["common"]]
-
- return pattern_usage_teaser
+ for word in list_engl:
+ regex = re.compile(f"(.*?){word.strip().replace(' ', '(.*?)')}", re.IGNORECASE)
+ pattern_usage[usage_label]["common"].append(regex)
+
+ if usage_label in combined:
+ for val in combined[usage_label].get("common", []):
+ pattern_usage[usage_label]["common"].append(re.compile(f"(.*?){val}", re.IGNORECASE))
+ for val in combined[usage_label].get("custom", []):
+ pattern_usage[usage_label]["custom"].append(val)
+
+ return pattern_usage
+
+
+def get_effective_usage_data(custom_usage_path: Path = None, custom_conditions_path: Path = None):
+ """
+ Determines which usage and condition data to use:
+ - If both custom files are present and valid → use only them.
+ - Else → fallback to defaults in `assets/enrichment/usage`.
+
+ Returns:
+ Tuple of:
+ - use_conditions (dict)
+ - pattern_usage (dict with compiled regex)
+ """
+ # Check custom use conditions
+ if custom_conditions_path and custom_conditions_path.exists():
+ use_conditions_path = custom_conditions_path
+ else:
+ use_conditions_path = assets / 'enrichment/usage/UseConditions.json'
+
+ if not validateJSON(use_conditions_path):
+ raise ValueError(f"Invalid use conditions file: {use_conditions_path}")
+
+ with open(use_conditions_path, 'r', encoding='utf-8') as f:
+ use_conditions = json.load(f)
+ if "version" in use_conditions:
+ del use_conditions["version"]
+
+ # Check custom usage definitions
+ custom_usages = {}
+ if custom_usage_path and custom_usage_path.exists() and validateJSON(custom_usage_path):
+ with open(custom_usage_path, 'r', encoding='utf-8') as f:
+ custom_data = json.load(f)
+ if custom_data.get("settings", {}).get("use"):
+ custom_usages = custom_data["usage_definitions"]
+
+ if custom_usages:
+        common_usages = {}  # valid custom usages fully replace the common defaults
+ else:
+ # fallback to defaults
+ common_usages_path = assets / 'enrichment/usage/commonUsages.json'
+ if not validateJSON(common_usages_path):
+ raise ValueError(f"Invalid fallback usage file: {common_usages_path}")
+ with open(common_usages_path, 'r', encoding='utf-8') as f:
+ common_usages = json.load(f)
+
+ pattern_usage = compile_usage_patterns(use_conditions, custom_usages, common_usages)
+ return use_conditions, pattern_usage
def combine_usages(common_usages, custom_usages) -> dict:
diff --git a/pyproject.toml b/pyproject.toml
index 7ff4f4113a..9083c00838 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,6 +55,7 @@ dependencies = [
"svglib==1.5.1",
"reportlab==4.2.2",
"scipy==1.11.3", # is a dependency from networkx, should installed automatically
+    "beautifulsoup4==4.12.3",  # pinned, consistent with the other dependencies
]
# root folder of the python/django not found automatically
diff --git a/test/resources b/test/resources
index 53715fffdd..66602ba24a 160000
--- a/test/resources
+++ b/test/resources
@@ -1 +1 @@
-Subproject commit 53715fffdde5fc2da67835e012f643047739e901
+Subproject commit 66602ba24a8b981caae709cda7ce0e904f77eaa3