diff --git a/DESCRIPTION b/DESCRIPTION
deleted file mode 100644
index 259b5d6..0000000
--- a/DESCRIPTION
+++ /dev/null
@@ -1,30 +0,0 @@
-Package: MyProjectDependencies
-Version: 1.0
-Title: Dependencies for GitHub Actions
-Description: Manages R package dependencies for GitHub Actions.
-License: GPL-3
-Imports:
- arrow,
- cluster,
- cppRouting,
- data.table,
- dbscan,
- dodgr,
- dplyr,
- duckdb,
- FNN,
- future,
- future.apply,
- geos,
- ggplot2,
- gtfsrouter,
- hms,
- jsonlite,
- log4r,
- lubridate,
- nngeo,
- osmdata,
- remotes,
- sf,
- sfheaders,
- readxl
\ No newline at end of file
diff --git a/diff.patch b/diff.patch
deleted file mode 100644
index 0142473..0000000
Binary files a/diff.patch and /dev/null differ
diff --git a/front/app/scenario/__init__.py b/front/__init__.py
similarity index 100%
rename from front/app/scenario/__init__.py
rename to front/__init__.py
diff --git a/front/app/components/features/map/__init__.py b/front/app/components/features/map/__init__.py
index e69de29..b0e3f0e 100644
--- a/front/app/components/features/map/__init__.py
+++ b/front/app/components/features/map/__init__.py
@@ -0,0 +1,4 @@
+# Re-exports Map from the new implementation.
+from .map_component import Map
+
+__all__ = ["Map"]
diff --git a/front/app/components/features/map/color_scale.py b/front/app/components/features/map/color_scale.py
new file mode 100644
index 0000000..007562b
--- /dev/null
+++ b/front/app/components/features/map/color_scale.py
@@ -0,0 +1,40 @@
+from dataclasses import dataclass
+import numpy as np
+import pandas as pd
+
+@dataclass(frozen=True)
+class ColorScale:
+ vmin: float
+ vmax: float
+
+ def _rng(self) -> float:
+ r = self.vmax - self.vmin
+ return r if r > 1e-9 else 1.0
+
+ def legend(self, v) -> str:
+ if pd.isna(v):
+ return "Donnée non disponible"
+ rng = self._rng()
+ t1 = self.vmin + rng / 3.0
+ t2 = self.vmin + 2 * rng / 3.0
+ v = float(v)
+ if v <= t1:
+ return "Accès rapide"
+ if v <= t2:
+ return "Accès moyen"
+ return "Accès lent"
+
+ def rgba(self, v) -> list[int]:
+ if pd.isna(v):
+ return [200, 200, 200, 140]
+ z = (float(v) - self.vmin) / self._rng()
+ z = max(0.0, min(1.0, z))
+ r = int(255 * z)
+ g = int(64 + 128 * (1 - z))
+ b = int(255 * (1 - z))
+ return [r, g, b, 180]
+
+def fit_color_scale(series: pd.Series) -> ColorScale:
+ s = pd.to_numeric(series, errors="coerce").replace([np.inf, -np.inf], np.nan).dropna()
+ vmin, vmax = (float(s.min()), float(s.max())) if not s.empty else (0.0, 1.0)
+ return ColorScale(vmin=vmin, vmax=vmax)
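Usage sketch (not part of the diff): how fit_color_scale and the frozen ColorScale are expected to behave on a small series; the import path below is an assumption based on the file location.

import pandas as pd

from front.app.components.features.map.color_scale import fit_color_scale  # assumed import path

times = pd.Series([12.0, 25.0, 41.0, None])  # minutes; None exercises the NaN fallback
scale = fit_color_scale(times)               # vmin=12.0, vmax=41.0

print(scale.legend(12.0))  # "Accès rapide" (lowest third of the range)
print(scale.rgba(41.0))    # [255, 64, 0, 180] (z == 1.0)
print(scale.rgba(None))    # [200, 200, 200, 140] (missing data -> grey)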
diff --git a/front/app/components/features/map/components.py b/front/app/components/features/map/components.py
new file mode 100644
index 0000000..d235f53
--- /dev/null
+++ b/front/app/components/features/map/components.py
@@ -0,0 +1,73 @@
+import dash_deck
+from dash import html
+import dash_mantine_components as dmc
+
+from .config import HEADER_OFFSET_PX, SIDEBAR_WIDTH
+from app.components.features.study_area_summary import StudyAreaSummary
+from app.components.features.scenario_controls import ScenarioControlsPanel
+
+from .tooltip import default_tooltip
+
+def DeckMap(id_prefix: str, deck_json: str) -> dash_deck.DeckGL:
+ return dash_deck.DeckGL(
+ id=f"{id_prefix}-deck-map",
+ data=deck_json,
+ tooltip=default_tooltip(),
+ mapboxKey="",
+ style={
+ "position": "absolute",
+ "inset": 0,
+ "height": "100vh",
+ "width": "100%",
+ },
+ )
+
+def SummaryPanelWrapper(zones_gdf, id_prefix: str):
+ return html.Div(
+ id=f"{id_prefix}-summary-wrapper",
+ children=StudyAreaSummary(zones_gdf, visible=True, id_prefix=id_prefix),
+ )
+
+def ControlsSidebarWrapper(id_prefix: str):
+ return html.Div(
+ dmc.Paper(
+ children=[
+ dmc.Stack(
+ [
+ ScenarioControlsPanel(
+ id_prefix=id_prefix,
+ min_radius=15,
+ max_radius=50,
+ step=1,
+ default=40,
+ default_insee="31555",
+ )
+ ],
+ gap="md",
+ )
+ ],
+ withBorder=True,
+ shadow="md",
+ radius="md",
+ p="md",
+ style={
+ "width": "100%",
+ "height": "100%",
+ "overflowY": "auto",
+ "overflowX": "hidden",
+ "background": "#ffffffee",
+ "boxSizing": "border-box",
+ },
+ ),
+ id=f"{id_prefix}-controls-sidebar",
+ style={
+ "position": "absolute",
+ "top": f"{HEADER_OFFSET_PX}px",
+ "left": "0px",
+ "bottom": "0px",
+ "width": f"{SIDEBAR_WIDTH}px",
+ "zIndex": 1200,
+ "pointerEvents": "auto",
+ "overflow": "hidden",
+ },
+ )
diff --git a/front/app/components/features/map/config.py b/front/app/components/features/map/config.py
new file mode 100644
index 0000000..9498682
--- /dev/null
+++ b/front/app/components/features/map/config.py
@@ -0,0 +1,16 @@
+from dataclasses import dataclass
+
+# ---------- CONSTANTS ----------
+CARTO_POSITRON_GL = "https://basemaps.cartocdn.com/gl/positron-gl-style/style.json"
+FALLBACK_CENTER = (1.4442, 43.6045) # Toulouse
+
+HEADER_OFFSET_PX = 80
+SIDEBAR_WIDTH = 340
+
+# ---------- OPTIONS ----------
+@dataclass(frozen=True)
+class DeckOptions:
+ zoom: float = 10
+ pitch: float = 35
+ bearing: float = -15
+ map_style: str = CARTO_POSITRON_GL
diff --git a/front/app/components/features/map/deck_factory.py b/front/app/components/features/map/deck_factory.py
new file mode 100644
index 0000000..93f3d67
--- /dev/null
+++ b/front/app/components/features/map/deck_factory.py
@@ -0,0 +1,46 @@
+import pydeck as pdk
+import pandas as pd
+import geopandas as gpd
+
+from .config import FALLBACK_CENTER, DeckOptions
+from .geo_utils import safe_center
+from .color_scale import fit_color_scale
+from .layers import build_zones_layer, build_flows_layer
+
+def make_layers(zones_gdf: gpd.GeoDataFrame, flows_df: pd.DataFrame, zones_lookup: gpd.GeoDataFrame):
+ scale = fit_color_scale(zones_gdf.get("average_travel_time", pd.Series(dtype="float64")))
+ layers = []
+ zl = build_zones_layer(zones_gdf, scale)
+ if zl is not None:
+ layers.append(zl)
+ fl = build_flows_layer(flows_df, zones_lookup)
+ if fl is not None:
+ layers.append(fl)
+ return layers
+
+def make_deck(scn: dict, opts: DeckOptions) -> pdk.Deck:
+ zones_gdf: gpd.GeoDataFrame = scn["zones_gdf"].copy()
+ flows_df: pd.DataFrame = scn["flows_df"].copy()
+ zones_lookup: gpd.GeoDataFrame = scn["zones_lookup"].copy()
+
+ layers = make_layers(zones_gdf, flows_df, zones_lookup)
+ lon, lat = safe_center(zones_gdf) or FALLBACK_CENTER
+
+ view_state = pdk.ViewState(
+ longitude=lon,
+ latitude=lat,
+ zoom=opts.zoom,
+ pitch=opts.pitch,
+ bearing=opts.bearing,
+ )
+
+ return pdk.Deck(
+ layers=layers,
+ initial_view_state=view_state,
+ map_provider="carto",
+ map_style=opts.map_style,
+ views=[pdk.View(type="MapView", controller=True)],
+ )
+
+def make_deck_json(scn: dict, opts: DeckOptions) -> str:
+ return make_deck(scn, opts).to_json()
diff --git a/front/app/components/features/map/geo_utils.py b/front/app/components/features/map/geo_utils.py
new file mode 100644
index 0000000..5be1055
--- /dev/null
+++ b/front/app/components/features/map/geo_utils.py
@@ -0,0 +1,62 @@
+import logging
+from typing import Optional, Tuple
+
+import geopandas as gpd
+import numpy as np
+import pandas as pd
+from shapely.geometry import Polygon, MultiPolygon
+
+logger = logging.getLogger(__name__)
+
+def ensure_wgs84(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
+ """Assure EPSG:4326 pour la sortie."""
+ g = gdf.copy()
+ if g.crs is None:
+ g = g.set_crs(4326, allow_override=True)
+ elif getattr(g.crs, "to_epsg", lambda: None)() != 4326:
+ g = g.to_crs(4326)
+ return g
+
+def centroids_lonlat(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
+ """Ajoute colonnes lon/lat calculées en mètres (EPSG:3857) puis reprojetées en 4326."""
+ g = gdf.copy()
+ if g.crs is None:
+ g = g.set_crs(4326, allow_override=True)
+ g_m = g.to_crs(3857)
+ pts_m = g_m.geometry.centroid
+ pts_ll = gpd.GeoSeries(pts_m, crs=g_m.crs).to_crs(4326)
+ g["lon"] = pts_ll.x.astype("float64")
+ g["lat"] = pts_ll.y.astype("float64")
+ return g
+
+def safe_center(gdf: gpd.GeoDataFrame) -> Optional[Tuple[float, float]]:
+ """Calcule un centroïde global robuste en WGS84, sinon None."""
+ try:
+ zvalid = gdf[gdf.geometry.notnull() & gdf.geometry.is_valid]
+ if zvalid.empty:
+ return None
+ centroid = ensure_wgs84(zvalid).geometry.unary_union.centroid
+ return float(centroid.x), float(centroid.y)
+ except Exception as e:
+ logger.warning("safe_center failed: %s", e)
+ return None
+
+def fmt_num(v, nd=1):
+ try:
+ return f"{round(float(v), nd):.{nd}f}"
+ except Exception:
+ return "N/A"
+
+def fmt_pct(v, nd=1):
+ try:
+ return f"{round(float(v) * 100.0, nd):.{nd}f} %"
+ except Exception:
+ return "N/A"
+
+def as_polygon_rings(geom):
+ """Retourne les anneaux extérieurs d’un Polygon/MultiPolygon sous forme de liste de coordonnées."""
+ if isinstance(geom, Polygon):
+ return [list(geom.exterior.coords)]
+ if isinstance(geom, MultiPolygon):
+ return [list(p.exterior.coords) for p in geom.geoms]
+ return []
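Usage sketch (not part of the diff): what safe_center and as_polygon_rings return for a single square polygon; the import path below is an assumption.

import geopandas as gpd
from shapely.geometry import Polygon

from front.app.components.features.map.geo_utils import as_polygon_rings, safe_center  # assumed path

square = Polygon([(1.0, 43.0), (1.0, 44.0), (2.0, 44.0), (2.0, 43.0)])
gdf = gpd.GeoDataFrame(geometry=[square], crs=4326)

print(safe_center(gdf))               # approximately (1.5, 43.5), always returned in WGS84
print(len(as_polygon_rings(square)))  # 1 exterior ring (interior holes are ignored)
print(as_polygon_rings(None))         # [] for anything that is not a (Multi)Polygon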
diff --git a/front/app/components/features/map/layers.py b/front/app/components/features/map/layers.py
new file mode 100644
index 0000000..f33ef05
--- /dev/null
+++ b/front/app/components/features/map/layers.py
@@ -0,0 +1,94 @@
+from typing import List, Dict
+import pandas as pd
+import numpy as np
+import pydeck as pdk
+import geopandas as gpd
+
+from .geo_utils import ensure_wgs84, as_polygon_rings, fmt_num, fmt_pct, centroids_lonlat
+from .color_scale import ColorScale
+
+def _polygons_records(zones_gdf: gpd.GeoDataFrame, scale: ColorScale) -> List[Dict]:
+ g = ensure_wgs84(zones_gdf)
+ out = []
+ for _, row in g.iterrows():
+ rings = as_polygon_rings(row.geometry)
+ if not rings:
+ continue
+
+ zone_id = row.get("transport_zone_id", "Zone inconnue")
+ insee = row.get("local_admin_unit_id", "N/A")
+ avg_tt = pd.to_numeric(row.get("average_travel_time", np.nan), errors="coerce")
+ total_dist_km = pd.to_numeric(row.get("total_dist_km", np.nan), errors="coerce")
+ total_time_min = pd.to_numeric(row.get("total_time_min", np.nan), errors="coerce")
+ share_car = pd.to_numeric(row.get("share_car", np.nan), errors="coerce")
+ share_bicycle = pd.to_numeric(row.get("share_bicycle", np.nan), errors="coerce")
+ share_walk = pd.to_numeric(row.get("share_walk", np.nan), errors="coerce")
+
+ for ring in rings:
+ out.append({
+ "geometry": [[float(x), float(y)] for x, y in ring],
+ "fill_rgba": scale.rgba(avg_tt),
+ "Unité INSEE": str(insee),
+ "Identifiant de zone": str(zone_id),
+ "Temps moyen de trajet (minutes)": fmt_num(avg_tt, 1),
+ "Niveau d’accessibilité": scale.legend(avg_tt),
+ "Distance totale parcourue (km/jour)": fmt_num(total_dist_km, 1),
+ "Temps total de déplacement (min/jour)": fmt_num(total_time_min, 1),
+ "Part des trajets en voiture (%)": fmt_pct(share_car, 1),
+ "Part des trajets à vélo (%)": fmt_pct(share_bicycle, 1),
+ "Part des trajets à pied (%)": fmt_pct(share_walk, 1),
+ })
+ return out
+
+def build_zones_layer(zones_gdf: gpd.GeoDataFrame, scale: ColorScale) -> pdk.Layer | None:
+ polys = _polygons_records(zones_gdf, scale)
+ if not polys:
+ return None
+ return pdk.Layer(
+ "PolygonLayer",
+ data=polys,
+ get_polygon="geometry",
+ get_fill_color="fill_rgba",
+ pickable=True,
+ filled=True,
+ stroked=True,
+ get_line_color=[0, 0, 0, 80],
+ lineWidthMinPixels=1.5,
+ elevation_scale=0,
+ opacity=0.4,
+ auto_highlight=True,
+ )
+
+def build_flows_layer(flows_df: pd.DataFrame, zones_lookup: gpd.GeoDataFrame) -> pdk.Layer | None:
+ if flows_df is None or flows_df.empty:
+ return None
+
+ lookup_ll = centroids_lonlat(zones_lookup)
+ f = flows_df.copy()
+ f["flow_volume"] = pd.to_numeric(f["flow_volume"], errors="coerce").fillna(0.0)
+ f = f[f["flow_volume"] > 0]
+
+ f = f.merge(
+ lookup_ll[["transport_zone_id", "lon", "lat"]],
+ left_on="from", right_on="transport_zone_id", how="left"
+ ).rename(columns={"lon": "lon_from", "lat": "lat_from"}).drop(columns=["transport_zone_id"])
+ f = f.merge(
+ lookup_ll[["transport_zone_id", "lon", "lat"]],
+ left_on="to", right_on="transport_zone_id", how="left"
+ ).rename(columns={"lon": "lon_to", "lat": "lat_to"}).drop(columns=["transport_zone_id"])
+ f = f.dropna(subset=["lon_from", "lat_from", "lon_to", "lat_to"])
+ if f.empty:
+ return None
+
+ f["flow_width"] = (1.0 + np.log1p(f["flow_volume"])).astype("float64").clip(0.5, 6.0)
+
+ return pdk.Layer(
+ "ArcLayer",
+ data=f,
+ get_source_position=["lon_from", "lat_from"],
+ get_target_position=["lon_to", "lat_to"],
+ get_source_color=[255, 140, 0, 180],
+ get_target_color=[0, 128, 255, 180],
+ get_width="flow_width",
+ pickable=True,
+ )
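Minimal input sketch (not part of the diff) for build_flows_layer, showing the expected 'from'/'to'/'flow_volume' schema and the join against the zones lookup; the import path is an assumption.

import geopandas as gpd
import pandas as pd
from shapely.geometry import Point

from front.app.components.features.map.layers import build_flows_layer  # assumed path

zones_lookup = gpd.GeoDataFrame(
    {"transport_zone_id": [1, 2]},
    geometry=[Point(1.44, 43.60), Point(1.50, 43.65)],
    crs=4326,
)
flows = pd.DataFrame({"from": [1, 2], "to": [2, 1], "flow_volume": [120.0, 0.0]})

layer = build_flows_layer(flows, zones_lookup)
print(layer is not None)  # True: the zero-volume row is dropped, one arc remains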
diff --git a/front/app/components/features/map/map.py b/front/app/components/features/map/map.py
deleted file mode 100644
index d4cd02a..0000000
--- a/front/app/components/features/map/map.py
+++ /dev/null
@@ -1,267 +0,0 @@
-import json
-import pydeck as pdk
-import dash_deck
-from dash import html
-import geopandas as gpd
-import pandas as pd
-import numpy as np
-from shapely.geometry import Polygon, MultiPolygon
-from app.scenario.scenario_001_from_docs import load_scenario
-
-
-# ---------- CONSTANTES ----------
-CARTO_POSITRON_GL = "https://basemaps.cartocdn.com/gl/positron-gl-style/style.json"
-FALLBACK_CENTER = (1.4442, 43.6045) # Toulouse
-
-
-# ---------- HELPERS ----------
-
-def _centroids_lonlat(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
- """Calcule les centroides en coordonnées géographiques (lon/lat)."""
- g = gdf.copy()
- if g.crs is None:
- g = g.set_crs(4326, allow_override=True)
- g_m = g.to_crs(3857)
- pts_m = g_m.geometry.centroid
- pts_ll = gpd.GeoSeries(pts_m, crs=g_m.crs).to_crs(4326)
- g["lon"] = pts_ll.x.astype("float64")
- g["lat"] = pts_ll.y.astype("float64")
- return g
-
-
-def _fmt_num(v, nd=1):
- try:
- return f"{round(float(v), nd):.{nd}f}"
- except Exception:
- return "N/A"
-
-
-def _fmt_pct(v, nd=1):
- try:
- return f"{round(float(v) * 100.0, nd):.{nd}f} %"
- except Exception:
- return "N/A"
-
-
-def _polygons_for_layer(zones_gdf: gpd.GeoDataFrame):
- """
- Prépare les polygones pour Deck.gl :
- - geometry / fill_rgba : nécessaires au rendu
- - champs “métier” (INSEE/Zone/Temps/Niveau + stats & parts modales) : pour le tooltip
- """
- g = zones_gdf
- if g.crs is None or getattr(g.crs, "to_epsg", lambda: None)() != 4326:
- g = g.to_crs(4326)
-
- polygons = []
- for _, row in g.iterrows():
- geom = row.geometry
- zone_id = row.get("transport_zone_id", "Zone inconnue")
- insee = row.get("local_admin_unit_id", "N/A")
- travel_time = _fmt_num(row.get("average_travel_time", np.nan), 1)
- legend = row.get("__legend", "")
-
- # Stats “par personne et par jour”
- total_dist_km = _fmt_num(row.get("total_dist_km", np.nan), 1)
- total_time_min = _fmt_num(row.get("total_time_min", np.nan), 1)
-
- # Parts modales
- share_car = _fmt_pct(row.get("share_car", np.nan), 1)
- share_bicycle = _fmt_pct(row.get("share_bicycle", np.nan), 1)
- share_walk = _fmt_pct(row.get("share_walk", np.nan), 1)
-
- color = row.get("__color", [180, 180, 180, 160])
-
- if isinstance(geom, Polygon):
- rings = [list(geom.exterior.coords)]
- elif isinstance(geom, MultiPolygon):
- rings = [list(p.exterior.coords) for p in geom.geoms]
- else:
- continue
-
- for ring in rings:
- polygons.append({
- # ⚙️ Champs techniques pour le rendu
- "geometry": [[float(x), float(y)] for x, y in ring],
- "fill_rgba": color,
- # ✅ Champs métier visibles dans le tooltip (clés FR)
- "Unité INSEE": str(insee),
- "Identifiant de zone": str(zone_id),
- "Temps moyen de trajet (minutes)": travel_time,
- "Niveau d’accessibilité": legend,
- "Distance totale parcourue (km/jour)": total_dist_km,
- "Temps total de déplacement (min/jour)": total_time_min,
- "Part des trajets en voiture (%)": share_car,
- "Part des trajets à vélo (%)": share_bicycle,
- "Part des trajets à pied (%)": share_walk,
- })
- return polygons
-
-
-# ---------- DECK FACTORY ----------
-
-def _deck_json():
- layers = []
- lon_center, lat_center = FALLBACK_CENTER
-
- try:
- scn = load_scenario()
- zones_gdf = scn["zones_gdf"].copy()
- flows_df = scn["flows_df"].copy()
- zones_lookup = scn["zones_lookup"].copy()
-
- # Centrage robuste
- if not zones_gdf.empty:
- zvalid = zones_gdf[zones_gdf.geometry.notnull() & zones_gdf.geometry.is_valid]
- if not zvalid.empty:
- c = zvalid.to_crs(4326).geometry.unary_union.centroid
- lon_center, lat_center = float(c.x), float(c.y)
-
- # Palette couleur
- at = pd.to_numeric(zones_gdf.get("average_travel_time", pd.Series(dtype="float64")), errors="coerce")
- zones_gdf["average_travel_time"] = at
- finite_at = at.replace([np.inf, -np.inf], np.nan).dropna()
- vmin, vmax = (finite_at.min(), finite_at.max()) if not finite_at.empty else (0.0, 1.0)
- rng = vmax - vmin if (vmax - vmin) > 1e-9 else 1.0
- t1, t2 = vmin + rng / 3.0, vmin + 2 * rng / 3.0
-
- def _legend(v):
- if pd.isna(v):
- return "Donnée non disponible"
- if v <= t1:
- return "Accès rapide"
- elif v <= t2:
- return "Accès moyen"
- return "Accès lent"
-
- def _colorize(v):
- if pd.isna(v):
- return [200, 200, 200, 140]
- z = (float(v) - vmin) / rng
- z = max(0.0, min(1.0, z))
- r = int(255 * z)
- g = int(64 + 128 * (1 - z))
- b = int(255 * (1 - z))
- return [r, g, b, 180]
-
- zones_gdf["__legend"] = zones_gdf["average_travel_time"].map(_legend)
- zones_gdf["__color"] = zones_gdf["average_travel_time"].map(_colorize)
-
- # Appliquer la palette au jeu transmis au layer
- polys = []
- for p, v in zip(_polygons_for_layer(zones_gdf), zones_gdf["average_travel_time"].tolist() or []):
- p["fill_rgba"] = _colorize(v)
- polys.append(p)
-
- # Polygones (zones)
- if polys:
- zones_layer = pdk.Layer(
- "PolygonLayer",
- data=polys,
- get_polygon="geometry",
- get_fill_color="fill_rgba",
- pickable=True,
- filled=True,
- stroked=True,
- get_line_color=[0, 0, 0, 80],
- lineWidthMinPixels=1.5,
- elevation_scale=0,
- opacity=0.4,
- auto_highlight=True,
- )
- layers.append(zones_layer)
-
- # --- Arcs de flux ---
- lookup_ll = _centroids_lonlat(zones_lookup)
- flows_df["flow_volume"] = pd.to_numeric(flows_df["flow_volume"], errors="coerce").fillna(0.0)
- flows_df = flows_df[flows_df["flow_volume"] > 0]
- flows = flows_df.merge(
- lookup_ll[["transport_zone_id", "lon", "lat"]],
- left_on="from", right_on="transport_zone_id", how="left"
- ).rename(columns={"lon": "lon_from", "lat": "lat_from"}).drop(columns=["transport_zone_id"])
- flows = flows.merge(
- lookup_ll[["transport_zone_id", "lon", "lat"]],
- left_on="to", right_on="transport_zone_id", how="left"
- ).rename(columns={"lon": "lon_to", "lat": "lat_to"}).drop(columns=["transport_zone_id"])
- flows = flows.dropna(subset=["lon_from", "lat_from", "lon_to", "lat_to"])
- flows["flow_width"] = (1.0 + np.log1p(flows["flow_volume"])).astype("float64").clip(0.5, 6.0)
-
- arcs_layer = pdk.Layer(
- "ArcLayer",
- data=flows,
- get_source_position=["lon_from", "lat_from"],
- get_target_position=["lon_to", "lat_to"],
- get_source_color=[255, 140, 0, 180],
- get_target_color=[0, 128, 255, 180],
- get_width="flow_width",
- pickable=True,
- )
- layers.append(arcs_layer)
-
- except Exception as e:
- print("Overlay scénario désactivé (erreur):", e)
-
- # Vue centrée
- view_state = pdk.ViewState(
- longitude=lon_center,
- latitude=lat_center,
- zoom=10,
- pitch=35,
- bearing=-15,
- )
-
- deck = pdk.Deck(
- layers=layers,
- initial_view_state=view_state,
- map_provider="carto",
- map_style=CARTO_POSITRON_GL,
- views=[pdk.View(type="MapView", controller=True)],
- )
-
- return deck.to_json()
-
-
-# ---------- DASH COMPONENT ----------
-
-def Map():
- deckgl = dash_deck.DeckGL(
- id="deck-map",
- data=_deck_json(),
- # Tooltip personnalisé (aucun champ technique)
- tooltip={
- "html": (
- "
"
- "Zone d’étude
"
- "Unité INSEE : {Unité INSEE}
"
- "Identifiant de zone : {Identifiant de zone}
"
- "Mobilité moyenne
"
- "Temps moyen de trajet : {Temps moyen de trajet (minutes)} min/jour
"
- "Distance totale parcourue : {Distance totale parcourue (km/jour)} km/jour
"
- "Niveau d’accessibilité : {Niveau d’accessibilité}
"
- "Répartition modale
"
- "Part des trajets en voiture : {Part des trajets en voiture (%)}
"
- "Part des trajets à vélo : {Part des trajets à vélo (%)}
"
- "Part des trajets à pied : {Part des trajets à pied (%)}"
- "
"
- ),
- "style": {
- "backgroundColor": "rgba(255,255,255,0.9)",
- "color": "#111",
- "fontSize": "12px",
- "padding": "8px",
- "borderRadius": "6px",
- },
- },
- mapboxKey="",
- style={"position": "absolute", "inset": 0},
- )
-
- return html.Div(
- deckgl,
- style={
- "position": "relative",
- "width": "100%",
- "height": "100%",
- "background": "#fff",
- },
- )
diff --git a/front/app/components/features/map/map_component.py b/front/app/components/features/map/map_component.py
new file mode 100644
index 0000000..0288449
--- /dev/null
+++ b/front/app/components/features/map/map_component.py
@@ -0,0 +1,41 @@
+from dash import html
+from .config import DeckOptions
+from .components import DeckMap, ControlsSidebarWrapper, SummaryPanelWrapper
+
+# — Option A: via map_service if it exists
+try:
+ from front.app.services.map_service import get_map_deck_json, get_map_zones_gdf
+ _USE_SERVICE = True
+except Exception:
+ _USE_SERVICE = False
+
+# — Option B: direct fallback if map_service is missing
+if not _USE_SERVICE:
+ from front.app.services.scenario_service import get_scenario
+ from .deck_factory import make_deck_json
+ def get_map_deck_json(id_prefix: str, opts: DeckOptions) -> str:
+ scn = get_scenario()
+ return make_deck_json(scn, opts)
+ def get_map_zones_gdf():
+ scn = get_scenario()
+ return scn["zones_gdf"]
+
+def Map(id_prefix: str = "map"):
+ opts = DeckOptions()
+ deck_json = get_map_deck_json(id_prefix=id_prefix, opts=opts)
+ zones_gdf = get_map_zones_gdf()
+
+ deckgl = DeckMap(id_prefix=id_prefix, deck_json=deck_json)
+ summary = SummaryPanelWrapper(zones_gdf, id_prefix=id_prefix)
+ controls_sidebar = ControlsSidebarWrapper(id_prefix=id_prefix)
+
+ return html.Div(
+ [deckgl, summary, controls_sidebar],
+ style={
+ "position": "relative",
+ "width": "100%",
+ "height": "100vh",
+ "background": "#fff",
+ "overflow": "hidden",
+ },
+ )
diff --git a/front/app/components/features/map/tooltip.py b/front/app/components/features/map/tooltip.py
new file mode 100644
index 0000000..ef32370
--- /dev/null
+++ b/front/app/components/features/map/tooltip.py
@@ -0,0 +1,25 @@
+def default_tooltip() -> dict:
+ return {
+ "html": (
+ ""
+ "Zone d’étude
"
+ "Unité INSEE : {Unité INSEE}
"
+ "Identifiant de zone : {Identifiant de zone}
"
+ "Mobilité moyenne
"
+ "Temps moyen de trajet : {Temps moyen de trajet (minutes)} min/jour
"
+ "Distance totale parcourue : {Distance totale parcourue (km/jour)} km/jour
"
+ "Niveau d’accessibilité : {Niveau d’accessibilité}
"
+ "Répartition modale
"
+ "Part des trajets en voiture : {Part des trajets en voiture (%)}
"
+ "Part des trajets à vélo : {Part des trajets à vélo (%)}
"
+ "Part des trajets à pied : {Part des trajets à pied (%)}"
+ "
"
+ ),
+ "style": {
+ "backgroundColor": "rgba(255,255,255,0.9)",
+ "color": "#111",
+ "fontSize": "12px",
+ "padding": "8px",
+ "borderRadius": "6px",
+ },
+ }
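Coupling note (not part of the diff): the {placeholders} in default_tooltip() must match the French record keys emitted by layers._polygons_records. A rough Python approximation of the client-side substitution performed by the deck.gl tooltip, for illustration only:

from front.app.components.features.map.tooltip import default_tooltip  # assumed path

record = {"Unité INSEE": "31555", "Identifiant de zone": "12"}
html = default_tooltip()["html"]
for key, value in record.items():
    html = html.replace("{" + key + "}", str(value))  # unmatched placeholders are left as-is

print("31555" in html)  # True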
diff --git a/front/app/components/features/scenario_controls/__init__.py b/front/app/components/features/scenario_controls/__init__.py
new file mode 100644
index 0000000..0ce24cc
--- /dev/null
+++ b/front/app/components/features/scenario_controls/__init__.py
@@ -0,0 +1,6 @@
+from .panel import ScenarioControlsPanel
+from .radius import RadiusControl
+from .lau_input import LauInput
+from .run_button import RunButton
+
+__all__ = ["ScenarioControlsPanel", "RadiusControl", "LauInput", "RunButton"]
diff --git a/front/app/components/features/scenario_controls/lau_input.py b/front/app/components/features/scenario_controls/lau_input.py
new file mode 100644
index 0000000..044e7a4
--- /dev/null
+++ b/front/app/components/features/scenario_controls/lau_input.py
@@ -0,0 +1,14 @@
+import dash_mantine_components as dmc
+
+def LauInput(id_prefix: str, *, default_insee: str = "31555"):
+ """
+ Input field for the study area (INSEE/LAU code).
+ Keeps the existing component ID.
+ """
+ return dmc.TextInput(
+ id=f"{id_prefix}-lau-input",
+ value=default_insee,
+ label="Zone d’étude (INSEE)",
+ placeholder="ex: 31555",
+ w=250,
+ )
diff --git a/front/app/components/features/scenario_controls/panel.py b/front/app/components/features/scenario_controls/panel.py
new file mode 100644
index 0000000..be48398
--- /dev/null
+++ b/front/app/components/features/scenario_controls/panel.py
@@ -0,0 +1,38 @@
+import dash_mantine_components as dmc
+from .radius import RadiusControl
+from .lau_input import LauInput
+from .run_button import RunButton
+
+def ScenarioControlsPanel(
+ id_prefix: str = "scenario",
+ *,
+ min_radius: int = 15,
+ max_radius: int = 50,
+ step: int = 1,
+ default: int | float = 40,
+ default_insee: str = "31555",
+):
+ """
+ Vertical controls panel:
+ - Radius (slider + input)
+ - Study area (INSEE code)
+ - 'Lancer la simulation' run button
+ """
+ return dmc.Stack(
+ [
+ RadiusControl(
+ id_prefix,
+ min_radius=min_radius,
+ max_radius=max_radius,
+ step=step,
+ default=default,
+ ),
+ LauInput(id_prefix, default_insee=default_insee),
+ RunButton(id_prefix),
+ ],
+ gap="sm",
+ style={
+ "width": "fit-content",
+ "padding": "8px",
+ },
+ )
diff --git a/front/app/components/features/scenario_controls/radius.py b/front/app/components/features/scenario_controls/radius.py
new file mode 100644
index 0000000..0e4ad48
--- /dev/null
+++ b/front/app/components/features/scenario_controls/radius.py
@@ -0,0 +1,45 @@
+import dash_mantine_components as dmc
+
+def RadiusControl(
+ id_prefix: str,
+ *,
+ min_radius: int = 15,
+ max_radius: int = 50,
+ step: int = 1,
+ default: int | float = 40,
+):
+ """
+ Radius control: slider + number input.
+ Keeps EXACTLY the same component IDs as before.
+ """
+ return dmc.Group(
+ [
+ dmc.Text("Rayon (km)", fw=600, w=100, ta="right"),
+ dmc.Slider(
+ id=f"{id_prefix}-radius-slider",
+ value=default,
+ min=min_radius,
+ max=max_radius,
+ step=step,
+ w=280,
+ marks=[
+ {"value": min_radius, "label": str(min_radius)},
+ {"value": default, "label": str(default)},
+ {"value": max_radius, "label": str(max_radius)},
+ ],
+ ),
+ dmc.NumberInput(
+ id=f"{id_prefix}-radius-input",
+ value=default,
+ min=min_radius,
+ max=max_radius,
+ step=step,
+ w=90,
+ styles={"input": {"textAlign": "center", "marginTop": "10px"}},
+ ),
+ ],
+ gap="md",
+ align="center",
+ justify="flex-start",
+ wrap=False,
+ )
diff --git a/front/app/components/features/scenario_controls/run_button.py b/front/app/components/features/scenario_controls/run_button.py
new file mode 100644
index 0000000..5132bf0
--- /dev/null
+++ b/front/app/components/features/scenario_controls/run_button.py
@@ -0,0 +1,16 @@
+import dash_mantine_components as dmc
+
+def RunButton(id_prefix: str, *, label: str = "Lancer la simulation"):
+ """
+ Primary action button. Keeps the existing component ID.
+ """
+ return dmc.Button(
+ label,
+ id=f"{id_prefix}-run-btn",
+ variant="filled",
+ style={
+ "marginTop": "10px",
+ "width": "fit-content",
+ "alignSelf": "flex-start",
+ },
+ )
diff --git a/front/app/components/features/scenario_controls/scenario.py b/front/app/components/features/scenario_controls/scenario.py
new file mode 100644
index 0000000..dba72bd
--- /dev/null
+++ b/front/app/components/features/scenario_controls/scenario.py
@@ -0,0 +1,18 @@
+from .panel import ScenarioControlsPanel
+
+def ScenarioControls(
+ id_prefix: str = "scenario",
+ min_radius: int = 15,
+ max_radius: int = 50,
+ step: int = 1,
+ default: int | float = 40,
+ default_insee: str = "31555",
+):
+ return ScenarioControlsPanel(
+ id_prefix=id_prefix,
+ min_radius=min_radius,
+ max_radius=max_radius,
+ step=step,
+ default=default,
+ default_insee=default_insee,
+ )
diff --git a/front/app/components/features/study_area_summary/__init__.py b/front/app/components/features/study_area_summary/__init__.py
new file mode 100644
index 0000000..b76816a
--- /dev/null
+++ b/front/app/components/features/study_area_summary/__init__.py
@@ -0,0 +1,3 @@
+from .panel import StudyAreaSummary
+
+__all__ = ["StudyAreaSummary"]
diff --git a/front/app/components/features/study_area_summary/kpi.py b/front/app/components/features/study_area_summary/kpi.py
new file mode 100644
index 0000000..4a1e722
--- /dev/null
+++ b/front/app/components/features/study_area_summary/kpi.py
@@ -0,0 +1,18 @@
+from dash import html
+import dash_mantine_components as dmc
+from .utils import fmt_num
+
+def KPIStat(label: str, value: str):
+ return dmc.Group(
+ [dmc.Text(label, size="sm"), dmc.Text(value, fw=600, size="sm")],
+ gap="xs",
+ )
+
+def KPIStatGroup(avg_time_min: float | None, avg_dist_km: float | None):
+ return dmc.Stack(
+ [
+ KPIStat("Temps moyen de trajet :", f"{fmt_num(avg_time_min, 1)} min/jour"),
+ KPIStat("Distance totale moyenne :", f"{fmt_num(avg_dist_km, 1)} km/jour"),
+ ],
+ gap="xs",
+ )
diff --git a/front/app/components/features/study_area_summary/legend.py b/front/app/components/features/study_area_summary/legend.py
new file mode 100644
index 0000000..b4d0ea7
--- /dev/null
+++ b/front/app/components/features/study_area_summary/legend.py
@@ -0,0 +1,87 @@
+from dash import html
+import dash_mantine_components as dmc
+import pandas as pd
+from .utils import safe_min_max, colorize_from_range, rgb_str, fmt_num
+
+def _chip(color_rgb, label: str):
+ r, g, b = color_rgb
+ return dmc.Group(
+ [
+ html.Div(
+ style={
+ "width": "14px",
+ "height": "14px",
+ "borderRadius": "3px",
+ "background": f"rgb({r},{g},{b})",
+ "border": "1px solid rgba(0,0,0,0.2)",
+ "flexShrink": 0,
+ }
+ ),
+ dmc.Text(label, size="sm"),
+ ],
+ gap="xs",
+ align="center",
+ wrap="nowrap",
+ )
+
+def LegendCompact(avg_series):
+ """
+ Compact legend:
+ - 3 classes: fast / medium / slow access (same thresholds as the map)
+ - continuous gradient bar + min/max labels
+ """
+ vmin, vmax = safe_min_max(avg_series)
+ if pd.isna(vmin) or pd.isna(vmax) or vmax - vmin <= 1e-9:
+ return dmc.Alert(
+ "Légende indisponible (valeurs manquantes).",
+ color="gray", variant="light", radius="sm",
+ styles={"root": {"padding": "8px"}}
+ )
+
+ rng = vmax - vmin
+ t1 = vmin + rng / 3.0
+ t2 = vmin + 2.0 * rng / 3.0
+
+ # representative colours taken at the midpoint of each class
+ c1 = colorize_from_range((vmin + t1) / 2.0, vmin, vmax)
+ c2 = colorize_from_range((t1 + t2) / 2.0, vmin, vmax)
+ c3 = colorize_from_range((t2 + vmax) / 2.0, vmin, vmax)
+
+ # continuous gradient
+ left = rgb_str(colorize_from_range(vmin + 1e-6, vmin, vmax))
+ mid = rgb_str(colorize_from_range((vmin + vmax) / 2.0, vmin, vmax))
+ right = rgb_str(colorize_from_range(vmax - 1e-6, vmin, vmax))
+
+ return dmc.Stack(
+ [
+ dmc.Text("Légende — temps moyen (min)", fw=600, size="sm"),
+ _chip(c1, f"Accès rapide — {fmt_num(vmin, 1)}–{fmt_num(t1, 1)} min"),
+ _chip(c2, f"Accès moyen — {fmt_num(t1, 1)}–{fmt_num(t2, 1)} min"),
+ _chip(c3, f"Accès lent — {fmt_num(t2, 1)}–{fmt_num(vmax, 1)} min"),
+ html.Div(
+ style={
+ "height": "10px",
+ "width": "100%",
+ "borderRadius": "6px",
+ "background": f"linear-gradient(to right, {left}, {mid}, {right})",
+ "border": "1px solid rgba(0,0,0,0.15)",
+ "marginTop": "6px",
+ }
+ ),
+ dmc.Group(
+ [
+ dmc.Text(f"{fmt_num(vmin, 1)}", size="xs", style={"opacity": 0.8}),
+ dmc.Text("→", size="xs", style={"opacity": 0.6}),
+ dmc.Text(f"{fmt_num(vmax, 1)}", size="xs", style={"opacity": 0.8}),
+ ],
+ justify="space-between",
+ align="center",
+ gap="xs",
+ ),
+ dmc.Text(
+ "Plus la teinte est chaude, plus le déplacement moyen est long.",
+ size="xs", style={"opacity": 0.75},
+ ),
+ ],
+ gap="xs",
+ )
diff --git a/front/app/components/features/study_area_summary/modal_split.py b/front/app/components/features/study_area_summary/modal_split.py
new file mode 100644
index 0000000..19eb798
--- /dev/null
+++ b/front/app/components/features/study_area_summary/modal_split.py
@@ -0,0 +1,12 @@
+import dash_mantine_components as dmc
+from .utils import fmt_pct
+
+def ModalSplitList(share_car, share_bike, share_walk):
+ return dmc.Stack(
+ [
+ dmc.Group([dmc.Text("Voiture :", size="sm"), dmc.Text(fmt_pct(share_car, 1), fw=600, size="sm")], gap="xs"),
+ dmc.Group([dmc.Text("Vélo :", size="sm"), dmc.Text(fmt_pct(share_bike, 1), fw=600, size="sm")], gap="xs"),
+ dmc.Group([dmc.Text("À pied :", size="sm"), dmc.Text(fmt_pct(share_walk, 1), fw=600, size="sm")], gap="xs"),
+ ],
+ gap="xs",
+ )
diff --git a/front/app/components/features/study_area_summary/panel.py b/front/app/components/features/study_area_summary/panel.py
new file mode 100644
index 0000000..34d3c45
--- /dev/null
+++ b/front/app/components/features/study_area_summary/panel.py
@@ -0,0 +1,77 @@
+from dash import html
+import dash_mantine_components as dmc
+from .utils import safe_mean
+from .kpi import KPIStatGroup
+from .modal_split import ModalSplitList
+from .legend import LegendCompact
+
+def StudyAreaSummary(
+ zones_gdf,
+ visible: bool = True,
+ id_prefix: str = "map",
+ header_offset_px: int = 80,
+ width_px: int = 340,
+):
+ """
+ Right-hand side panel showing the study area's global aggregates,
+ with an enriched legend (continuous gradient) and context (INSEE/LAU code).
+ API unchanged compared with the previous component.
+ """
+ comp_id = f"{id_prefix}-study-summary"
+
+ if zones_gdf is None or getattr(zones_gdf, "empty", True):
+ content = dmc.Text(
+ "Données globales indisponibles.",
+ size="sm",
+ style={"fontStyle": "italic", "opacity": 0.8},
+ )
+ else:
+ avg_time = safe_mean(zones_gdf.get("average_travel_time"))
+ avg_dist = safe_mean(zones_gdf.get("total_dist_km"))
+ share_car = safe_mean(zones_gdf.get("share_car"))
+ share_bike = safe_mean(zones_gdf.get("share_bicycle"))
+ share_walk = safe_mean(zones_gdf.get("share_walk"))
+
+ content = dmc.Stack(
+ [
+ dmc.Text("Résumé global de la zone d'étude", fw=700, size="md"),
+ dmc.Divider(),
+ KPIStatGroup(avg_time_min=avg_time, avg_dist_km=avg_dist),
+ dmc.Divider(),
+ dmc.Text("Répartition modale", fw=600, size="sm"),
+ ModalSplitList(share_car=share_car, share_bike=share_bike, share_walk=share_walk),
+ dmc.Divider(),
+ LegendCompact(zones_gdf.get("average_travel_time")),
+ ],
+ gap="md",
+ )
+
+ return html.Div(
+ id=comp_id,
+ children=dmc.Paper(
+ content,
+ withBorder=True,
+ shadow="md",
+ radius="md",
+ p="md",
+ style={
+ "width": "100%",
+ "height": "100%",
+ "overflowY": "auto",
+ "overflowX": "hidden",
+ "background": "#ffffffee",
+ "boxSizing": "border-box",
+ },
+ ),
+ style={
+ "display": "block" if visible else "none",
+ "position": "absolute",
+ "top": f"{header_offset_px}px",
+ "right": "0px",
+ "bottom": "0px",
+ "width": f"{width_px}px",
+ "zIndex": 1200,
+ "pointerEvents": "auto",
+ "overflow": "hidden",
+ },
+ )
diff --git a/front/app/components/features/study_area_summary/utils.py b/front/app/components/features/study_area_summary/utils.py
new file mode 100644
index 0000000..fb41de5
--- /dev/null
+++ b/front/app/components/features/study_area_summary/utils.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+from typing import Tuple
+import numpy as np
+import pandas as pd
+
+def fmt_num(v, nd: int = 1) -> str:
+ try:
+ return f"{round(float(v), nd):.{nd}f}"
+ except Exception:
+ return "N/A"
+
+def fmt_pct(v, nd: int = 1) -> str:
+ try:
+ return f"{round(float(v) * 100.0, nd):.{nd}f} %"
+ except Exception:
+ return "N/A"
+
+def safe_mean(series) -> float:
+ if series is None:
+ return float("nan")
+ s = pd.to_numeric(series, errors="coerce")
+ return float(np.nanmean(s)) if s.size else float("nan")
+
+def safe_min_max(series) -> Tuple[float, float]:
+ if series is None:
+ return float("nan"), float("nan")
+ s = pd.to_numeric(series, errors="coerce").replace([np.inf, -np.inf], np.nan).dropna()
+ if s.empty:
+ return float("nan"), float("nan")
+ return float(s.min()), float(s.max())
+
+def colorize_from_range(value, vmin, vmax):
+ """Même rampe que la carte : r=255*z ; g=64+128*(1-z) ; b=255*(1-z)"""
+ if value is None or pd.isna(value) or vmin is None or vmax is None or (vmax - vmin) <= 1e-9:
+ return (200, 200, 200)
+ rng = max(vmax - vmin, 1e-9)
+ z = (float(value) - vmin) / rng
+ z = max(0.0, min(1.0, z))
+ r = int(255 * z)
+ g = int(64 + 128 * (1 - z))
+ b = int(255 * (1 - z))
+ return (r, g, b)
+
+def rgb_str(rgb) -> str:
+ r, g, b = rgb
+ return f"rgb({r},{g},{b})"
diff --git a/front/app/pages/main/main.py b/front/app/pages/main/main.py
index 6653579..b07201f 100644
--- a/front/app/pages/main/main.py
+++ b/front/app/pages/main/main.py
@@ -1,42 +1,137 @@
+# app/pages/main/main.py
from pathlib import Path
-from dash import Dash
+import os
+
+from dash import Dash, html, no_update
import dash_mantine_components as dmc
+from dash.dependencies import Input, Output, State
+
from app.components.layout.header.header import Header
-from app.components.features.map.map import Map
+from app.components.features.map import Map
from app.components.layout.footer.footer import Footer
+from app.components.features.study_area_summary import StudyAreaSummary
+from app.components.features.map.config import DeckOptions
+from front.app.services.scenario_service import get_scenario
+# Use map_service if available: pass it the scenario that was already built
+try:
+ from front.app.services.map_service import get_map_deck_json_from_scn
+ USE_MAP_SERVICE = True
+except Exception:
+ from app.components.features.map.deck_factory import make_deck_json
+ USE_MAP_SERVICE = False
ASSETS_PATH = Path(__file__).resolve().parents[3] / "assets"
-HEADER_HEIGHT = 60
-
-
-
-app = Dash(
- __name__,
- suppress_callback_exceptions=True,
- assets_folder=str(ASSETS_PATH),
- assets_url_path="/assets",
-)
-
-app.layout = dmc.MantineProvider(
- dmc.AppShell(
- children=[
- Header("MOBILITY"),
- dmc.AppShellMain(
- Map(),
- style={
- "height": f"calc(100vh - {HEADER_HEIGHT}px)",
- "padding": 0,
- "margin": 0,
- "overflow": "hidden",
- },
- ),
- Footer(),
- ],
- padding=0,
- styles={"main": {"padding": 0}},
+MAPP = "map" # doit matcher Map(id_prefix="map")
+
+def _make_deck_json_from_scn(scn: dict) -> str:
+ if USE_MAP_SERVICE:
+ return get_map_deck_json_from_scn(scn, DeckOptions())
+ return make_deck_json(scn, DeckOptions())
+
+def create_app() -> Dash:
+ app = Dash(
+ __name__,
+ suppress_callback_exceptions=True,
+ assets_folder=str(ASSETS_PATH),
+ assets_url_path="/assets",
+ )
+
+ app.layout = dmc.MantineProvider(
+ dmc.AppShell(
+ children=[
+ Header("MOBILITY"),
+
+ dmc.AppShellMain(
+ html.Div(
+ Map(id_prefix=MAPP),
+ style={
+ "height": "100%",
+ "width": "100%",
+ "position": "relative",
+ "overflow": "hidden",
+ "margin": 0,
+ "padding": 0,
+ },
+ ),
+ style={
+ "flex": "1 1 auto",
+ "minHeight": 0,
+ "padding": 0,
+ "margin": 0,
+ "overflow": "hidden",
+ },
+ ),
+
+ html.Div(
+ Footer(),
+ style={
+ "flexShrink": "0",
+ "display": "flex",
+ "alignItems": "center",
+ },
+ ),
+ ],
+ padding=0,
+ styles={
+ "root": {"height": "100vh", "overflow": "hidden"},
+ "main": {"padding": 0, "margin": 0, "overflow": "hidden"},
+ },
+ style={"height": "100vh", "overflow": "hidden"},
+ )
)
-)
-if __name__ == "__main__":
- app.run(debug=True, dev_tools_ui=False)
+ # --------- CALLBACKS ---------
+ @app.callback(
+ Output(f"{MAPP}-radius-input", "value"),
+ Input(f"{MAPP}-radius-slider", "value"),
+ State(f"{MAPP}-radius-input", "value"),
+ )
+ def _sync_input_from_slider(slider_val, current_input):
+ if slider_val is None or slider_val == current_input:
+ return no_update
+ return slider_val
+
+ @app.callback(
+ Output(f"{MAPP}-radius-slider", "value"),
+ Input(f"{MAPP}-radius-input", "value"),
+ State(f"{MAPP}-radius-slider", "value"),
+ )
+ def _sync_slider_from_input(input_val, current_slider):
+ if input_val is None or input_val == current_slider:
+ return no_update
+ return input_val
+
+ @app.callback(
+ Output(f"{MAPP}-deck-map", "data"),
+ Output(f"{MAPP}-summary-wrapper", "children"),
+ Input(f"{MAPP}-run-btn", "n_clicks"),
+ State(f"{MAPP}-radius-input", "value"),
+ State(f"{MAPP}-lau-input", "value"),
+ prevent_initial_call=True,
+ )
+ def _run_simulation(n_clicks, radius_val, lau_val):
+ r = 40 if radius_val is None else int(radius_val)
+ lau = (lau_val or "").strip() or "31555"
+ try:
+ scn = get_scenario(radius=r, local_admin_unit_id=lau)
+ deck_json = _make_deck_json_from_scn(scn)
+ summary = StudyAreaSummary(scn["zones_gdf"], visible=True, id_prefix=MAPP)
+ return deck_json, summary
+ except Exception as e:
+ err = dmc.Alert(
+ f"Une erreur est survenue pendant la simulation : {e}",
+ color="red",
+ variant="filled",
+ radius="md",
+ )
+ return no_update, err
+
+ return app
+
+# Local execution
+app = create_app()
+
+if __name__ == "__main__": #pragma: no cover
+ port = int(os.environ.get("PORT", "8050"))
+ app.run(debug=True, dev_tools_ui=False, port=port, host="127.0.0.1")
diff --git a/file_upstream.py b/front/app/services/__init__.py
similarity index 100%
rename from file_upstream.py
rename to front/app/services/__init__.py
diff --git a/front/app/services/map_service.py b/front/app/services/map_service.py
new file mode 100644
index 0000000..eebcf4a
--- /dev/null
+++ b/front/app/services/map_service.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+from functools import lru_cache
+
+from front.app.services.scenario_service import get_scenario
+from app.components.features.map.config import DeckOptions
+from app.components.features.map.deck_factory import make_deck_json
+
+@lru_cache(maxsize=8)
+def _scenario_snapshot_key() -> int:
+ """
+ Coarse cache key: a scenario version/timestamp can be plugged in here
+ if `get_scenario()` exposes one; otherwise return 0 to disable fine-grained caching.
+ """
+ return 0
+
+def get_map_deck_json_from_scn(scn: dict, opts: DeckOptions | None = None) -> str:
+ opts = opts or DeckOptions()
+ return make_deck_json(scn, opts)
+
+def get_map_deck_json(id_prefix: str, opts: DeckOptions) -> str:
+ # optionally invalidate the cache based on _scenario_snapshot_key()
+ scn = get_scenario()
+ return make_deck_json(scn, opts)
+
+def get_map_zones_gdf():
+ scn = get_scenario()
+ return scn["zones_gdf"]
diff --git a/front/app/scenario/scenario_001_from_docs.py b/front/app/services/scenario_service.py
similarity index 61%
rename from front/app/scenario/scenario_001_from_docs.py
rename to front/app/services/scenario_service.py
index 0fefa61..4da290c 100644
--- a/front/app/scenario/scenario_001_from_docs.py
+++ b/front/app/services/scenario_service.py
@@ -1,12 +1,17 @@
-# app/scenario/scenario_001_from_docs.py
from __future__ import annotations
-import os
+
+from functools import lru_cache
+from typing import Dict, Any, Tuple
+
import pandas as pd
import geopandas as gpd
import numpy as np
from shapely.geometry import Point
+# --------------------------------------------------------------------
+# Helpers & fallback
+# --------------------------------------------------------------------
def _to_wgs84(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
if gdf.crs is None:
return gdf.set_crs(4326, allow_override=True)
@@ -17,8 +22,10 @@ def _to_wgs84(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
return gdf if epsg == 4326 else gdf.to_crs(4326)
-def _fallback_scenario() -> dict:
- """Scénario minimal de secours (Paris–Lyon)."""
+def _fallback_scenario() -> Dict[str, Any]:
+ """
+ Minimal fallback scenario (Paris–Lyon), used when the mobility lib fails.
+ """
paris = (2.3522, 48.8566)
lyon = (4.8357, 45.7640)
@@ -29,6 +36,7 @@ def _fallback_scenario() -> dict:
zones = pts.to_crs(3857)
zones["geometry"] = zones.geometry.buffer(5000) # 5 km
zones = zones.to_crs(4326)
+
# Indicateurs d'exemple (minutes, km/personne/jour)
zones["average_travel_time"] = [18.0, 25.0]
zones["total_dist_km"] = [15.0, 22.0]
@@ -46,13 +54,28 @@ def _fallback_scenario() -> dict:
}
-def load_scenario() -> dict:
+def _normalize_lau_code(code: str) -> str:
+ """
+ Normalise the commune code for the 'mobility' lib:
+ - '31555' -> 'fr-31555'
+ - 'fr-31555' -> unchanged
+ - otherwise -> stripped, lower-cased string
"""
- Charge un scénario de mobilité (Toulouse = fr-31555) et calcule:
- - average_travel_time (minutes)
- - total_dist_km (km/personne/jour)
- - parts modales share_car / share_bicycle / share_walk
- Bascule sur un fallback si la lib échoue.
+ s = str(code).strip().lower()
+ if s.startswith("fr-"):
+ return s
+ if s.isdigit() and len(s) == 5:
+ return f"fr-{s}"
+ return s
+
+
+# --------------------------------------------------------------------
+# Core computation (extracted & hardened)
+# --------------------------------------------------------------------
+def _compute_scenario(local_admin_unit_id: str = "31555", radius: float = 40.0) -> Dict[str, Any]:
+ """
+ Compute a scenario for one commune (INSEE/LAU) and a radius (km).
+ Returns a dict: { zones_gdf, flows_df, zones_lookup } in WGS84.
"""
try:
import mobility
@@ -60,27 +83,27 @@ def load_scenario() -> dict:
mobility.set_params(debug=True, r_packages_download_method="wininet")
- # Patch **instanciation** : fournir cache_path si attendu (certaines versions)
+ # Normalise the code for the lib
+ lau_norm = _normalize_lau_code(local_admin_unit_id)
+
+ # Some versions require cache_path=None; others do not.
def _safe_instantiate(cls, *args, **kwargs):
try:
return cls(*args, **kwargs)
except TypeError as e:
- if "takes 2 positional arguments but 3 were given" in str(e):
- raise
- elif "missing 1 required positional argument: 'cache_path'" in str(e):
+ if "missing 1 required positional argument: 'cache_path'" in str(e):
return cls(*args, cache_path=None, **kwargs)
- else:
- raise
+ raise
- # --- Création des assets (Toulouse) ---
+ # --- Transport zones (study area) ---
transport_zones = _safe_instantiate(
mobility.TransportZones,
- local_admin_unit_id="fr-31555", # Toulouse
- radius=40,
+ local_admin_unit_id=lau_norm, # e.g., "fr-31555"
+ radius=float(radius),
level_of_detail=0,
)
- # Modes
+ # --- Modes ---
car = _safe_instantiate(
mobility.CarMode,
transport_zones=transport_zones,
@@ -92,16 +115,11 @@ def _safe_instantiate(cls, *args, **kwargs):
generalized_cost_parameters=mobility.GeneralizedCostParameters(cost_of_distance=0.0),
)
- # 🚶 Marche : on l'active si la classe existe, avec une fenêtre plus permissive
+ # Walk mode: the class name varies across versions
walk = None
for cls_name in ("WalkMode", "PedestrianMode", "WalkingMode", "Pedestrian"):
if walk is None and hasattr(mobility, cls_name):
- walk_params = PathRoutingParameters(
- # La lib sort time en HEURES -> autorise 2h de marche max
- filter_max_time=2.0,
- # Vitesse 5 km/h (marche urbaine)
- filter_max_speed=5.0
- )
+ walk_params = PathRoutingParameters(filter_max_time=2.0, filter_max_speed=5.0)
walk = _safe_instantiate(
getattr(mobility, cls_name),
transport_zones=transport_zones,
@@ -111,22 +129,18 @@ def _safe_instantiate(cls, *args, **kwargs):
modes = [m for m in (car, bicycle, walk) if m is not None]
- work_choice_model = _safe_instantiate(
- mobility.WorkDestinationChoiceModel,
- transport_zones,
- modes=modes,
- )
+ # --- Models ---
+ work_choice_model = _safe_instantiate(mobility.WorkDestinationChoiceModel, transport_zones, modes=modes)
mode_choice_model = _safe_instantiate(
- mobility.TransportModeChoiceModel,
- destination_choice_model=work_choice_model,
+ mobility.TransportModeChoiceModel, destination_choice_model=work_choice_model
)
- # Résultats des modèles
+ # Fetch results
work_choice_model.get()
- mode_df = mode_choice_model.get() # colonnes attendues: from, to, mode, prob
+ mode_df = mode_choice_model.get() # columns: from, to, mode, prob
comparison = work_choice_model.get_comparison()
- # --- Harmoniser les labels de mode (canonisation) ---
+ # Canonicalise the mode labels
def _canon_mode(label: str) -> str:
s = str(label).strip().lower()
if s in {"bike", "bicycle", "velo", "cycling"}:
@@ -140,44 +154,34 @@ def _canon_mode(label: str) -> str:
if "mode" in mode_df.columns:
mode_df["mode"] = mode_df["mode"].map(_canon_mode)
- # ---- Coûts de déplacement par mode ----
+ # Travel costs by mode
def _get_costs(m, label):
df = m.travel_costs.get().copy()
df["mode"] = label
return df
- costs_list = [
- _get_costs(car, "car"),
- _get_costs(bicycle, "bicycle"),
- ]
+ costs_list = [_get_costs(car, "car"), _get_costs(bicycle, "bicycle")]
if walk is not None:
costs_list.append(_get_costs(walk, "walk"))
travel_costs = pd.concat(costs_list, ignore_index=True)
travel_costs["mode"] = travel_costs["mode"].map(_canon_mode)
- # --- Normaliser les unités ---
- # 1) TEMPS : la lib renvoie des HEURES -> convertir en MINUTES
+ # Normalise units (time -> minutes, distance -> km)
if "time" in travel_costs.columns:
t_hours = pd.to_numeric(travel_costs["time"], errors="coerce")
travel_costs["time_min"] = t_hours * 60.0
else:
travel_costs["time_min"] = np.nan
- # 2) DISTANCE :
- # - si max > 200 -> probablement des mètres -> /1000 en km
- # - sinon c'est déjà des km
if "distance" in travel_costs.columns:
d_raw = pd.to_numeric(travel_costs["distance"], errors="coerce")
d_max = d_raw.replace([np.inf, -np.inf], np.nan).max()
- if pd.notna(d_max) and d_max > 200:
- travel_costs["dist_km"] = d_raw / 1000.0
- else:
- travel_costs["dist_km"] = d_raw
+ travel_costs["dist_km"] = d_raw / 1000.0 if (pd.notna(d_max) and d_max > 200) else d_raw
else:
travel_costs["dist_km"] = np.nan
- # ---- Jointures d'identifiants zones ----
+ # ID joins
ids = transport_zones.get()[["local_admin_unit_id", "transport_zone_id"]].copy()
ori_dest_counts = (
@@ -189,12 +193,12 @@ def _get_costs(m, label):
ori_dest_counts["flow_volume"] = pd.to_numeric(ori_dest_counts["flow_volume"], errors="coerce").fillna(0.0)
ori_dest_counts = ori_dest_counts[ori_dest_counts["flow_volume"] > 0]
- # Parts modales OD (pondération par proba)
+ # OD modal shares
modal_shares = mode_df.merge(ori_dest_counts, on=["from", "to"], how="inner")
modal_shares["prob"] = pd.to_numeric(modal_shares["prob"], errors="coerce").fillna(0.0)
modal_shares["flow_volume"] *= modal_shares["prob"]
- # Joindre les coûts par mode (from, to, mode)
+ # Join travel costs
costs_cols = ["from", "to", "mode", "time_min", "dist_km"]
available = [c for c in costs_cols if c in travel_costs.columns]
travel_costs_norm = travel_costs[available].copy()
@@ -205,16 +209,12 @@ def _get_costs(m, label):
# Agrégats par origine ("from")
den = od_mode.groupby("from", as_index=True)["flow_volume"].sum().replace(0, np.nan)
+ num_time = (od_mode["time_min"] * od_mode["flow_volume"]).groupby(od_mode["from"]).sum(min_count=1)
+ num_dist = (od_mode["dist_km"] * od_mode["flow_volume"]).groupby(od_mode["from"]).sum(min_count=1)
- # Temps moyen (minutes) par trajet
- num_time = (od_mode["time_min"] * od_mode["flow_volume"]).groupby(od_mode["from"]).sum(min_count=1)
- avg_time_min = (num_time / den).rename("average_travel_time")
-
- # Distance totale par personne et par jour (sans fréquence explicite -> distance moyenne pondérée)
- num_dist = (od_mode["dist_km"] * od_mode["flow_volume"]).groupby(od_mode["from"]).sum(min_count=1)
- per_person_dist_km = (num_dist / den).rename("total_dist_km")
+ avg_time_min = (num_time / den).rename("average_travel_time")
+ per_person_dist_km = (num_dist / den).rename("total_dist_km")
- # Parts modales par origine (car / bicycle / walk)
mode_flow_by_from = od_mode.pivot_table(
index="from", columns="mode", values="flow_volume", aggfunc="sum", fill_value=0.0
)
@@ -222,11 +222,11 @@ def _get_costs(m, label):
if col not in mode_flow_by_from.columns:
mode_flow_by_from[col] = 0.0
- share_car = (mode_flow_by_from["car"] / den).rename("share_car")
+ share_car = (mode_flow_by_from["car"] / den).rename("share_car")
share_bicycle = (mode_flow_by_from["bicycle"] / den).rename("share_bicycle")
- share_walk = (mode_flow_by_from["walk"] / den).rename("share_walk")
+ share_walk = (mode_flow_by_from["walk"] / den).rename("share_walk")
- # ---- Construction du GeoDataFrame des zones ----
+ # Zones GeoDataFrame
zones = transport_zones.get()[["transport_zone_id", "geometry", "local_admin_unit_id"]].copy()
zones_gdf = gpd.GeoDataFrame(zones, geometry="geometry")
@@ -241,34 +241,45 @@ def _get_costs(m, label):
zones_lookup = gpd.GeoDataFrame(zones[["transport_zone_id", "geometry"]], geometry="geometry", crs=zones_gdf.crs)
flows_df = ori_dest_counts.groupby(["from", "to"], as_index=False)["flow_volume"].sum()
- # Logs utiles (désactiver si trop verbeux)
- try:
- md_modes = sorted(pd.unique(mode_df["mode"]).tolist())
- tc_modes = sorted(pd.unique(travel_costs["mode"]).tolist())
- print("Modes (mode_df):", md_modes)
- print("Modes (travel_costs):", tc_modes)
- print("time_min (min) – min/med/max:",
- np.nanmin(travel_costs["time_min"]),
- np.nanmedian(travel_costs["time_min"]),
- np.nanmax(travel_costs["time_min"]))
- print("dist_km (km) – min/med/max:",
- np.nanmin(travel_costs["dist_km"]),
- np.nanmedian(travel_costs["dist_km"]),
- np.nanmax(travel_costs["dist_km"]))
- except Exception:
- pass
-
+ # Useful log line
print(
- f"SCENARIO_META: source=mobility zones={len(zones_gdf)} "
- f"flows={len(flows_df)} time_unit=minutes distance_unit=kilometers"
+ f"SCENARIO_META: source=mobility lau={lau_norm} radius={radius} "
+ f"zones={len(zones_gdf)} flows={len(flows_df)} time_unit=minutes distance_unit=kilometers"
)
- return {
- "zones_gdf": zones_gdf, # average_travel_time, total_dist_km, share_car, share_bicycle, share_walk, local_admin_unit_id
- "flows_df": flows_df,
- "zones_lookup": _to_wgs84(zones_lookup),
- }
+ return {"zones_gdf": zones_gdf, "flows_df": flows_df, "zones_lookup": _to_wgs84(zones_lookup)}
except Exception as e:
print(f"[Fallback used due to error: {e}]")
return _fallback_scenario()
+
+
+# --------------------------------------------------------------------
+# Public API with LRU cache
+# --------------------------------------------------------------------
+def _normalized_key(local_admin_unit_id: str, radius: float) -> Tuple[str, float]:
+ """
+ Normalise the cache key:
+ - INSEE/LAU -> 'fr-XXXXX'
+ - radius rounded (avoids 40.0000001 vs 40.0)
+ """
+ lau = _normalize_lau_code(local_admin_unit_id)
+ rad = round(float(radius), 4)
+ return (lau, rad)
+
+
+@lru_cache(maxsize=8)
+def get_scenario(local_admin_unit_id: str = "31555", radius: float = 40.0) -> Dict[str, Any]:
+ """
+ Fetch a scenario with an LRU cache (up to 8 recent combinations).
+ - Uses a normalised (local_admin_unit_id, radius) pair.
+ - Returns { zones_gdf, flows_df, zones_lookup } in WGS84.
+ """
+ lau, rad = _normalized_key(local_admin_unit_id, radius)
+ # Pass the normalised values to the compute step so logs and calls stay consistent.
+ return _compute_scenario(local_admin_unit_id=lau, radius=rad)
+
+
+def clear_scenario_cache() -> None:
+ """Vide le cache LRU (utile si les données sous-jacentes changent)."""
+ get_scenario.cache_clear()
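Usage sketch (not part of the diff): expected behaviour of the cached public API. Note that lru_cache keys on the raw arguments (normalisation happens inside), so callers should pass a consistent spelling of the LAU code; the import path is an assumption.

from front.app.services.scenario_service import clear_scenario_cache, get_scenario

scn_a = get_scenario(local_admin_unit_id="31555", radius=40.0)  # computes (or falls back)
scn_b = get_scenario(local_admin_unit_id="31555", radius=40.0)  # cache hit: same dict object
assert scn_a is scn_b

clear_scenario_cache()  # force a recomputation on the next call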
diff --git a/mobility/choice_models/population_trips.py b/mobility/choice_models/population_trips.py
index e624277..bf2a9bb 100644
--- a/mobility/choice_models/population_trips.py
+++ b/mobility/choice_models/population_trips.py
@@ -675,4 +675,4 @@ def get_prominent_cities(self, n_cities=20, n_levels=3, distance_km=2):
xy_coords = geoflows["geometry"].centroid.get_coordinates()
geoflows = geoflows.merge(xy_coords, left_index=True, right_index=True)
- return geoflows
\ No newline at end of file
+ return geoflows
diff --git a/mobility/choice_models/results.py b/mobility/choice_models/results.py
index 90d8b7a..9ae0c57 100644
--- a/mobility/choice_models/results.py
+++ b/mobility/choice_models/results.py
@@ -8,6 +8,7 @@
from mobility.choice_models.evaluation.car_traffic_evaluation import CarTrafficEvaluation
from mobility.choice_models.evaluation.routing_evaluation import RoutingEvaluation
+
class Results:
def __init__(
@@ -806,7 +807,3 @@ def travel_costs(self, *args, **kwargs):
def routing(self, *args, **kwargs):
return RoutingEvaluation(self).get(*args, **kwargs)
-
-
-
-
diff --git a/mobility/choice_models/state_updater.py b/mobility/choice_models/state_updater.py
index 0f9566c..902ba7b 100644
--- a/mobility/choice_models/state_updater.py
+++ b/mobility/choice_models/state_updater.py
@@ -95,29 +95,7 @@ def get_possible_states_steps(
min_activity_time_constant,
tmp_folders
):
- """Enumerate candidate state steps and compute per-step utilities.
-
- Joins latest spatialized chains and mode sequences, merges costs and
- mean activity durations, filters out saturated destinations, and
- computes per-step utility = activity utility − travel cost.
-
- Args:
- current_states (pl.DataFrame): Current aggregate states (used for scoping).
- demand_groups (pl.DataFrame): Demand groups with csp and sizes.
- chains (pl.DataFrame): Chain steps with durations per person.
- costs_aggregator (TravelCostsAggregator): Per-mode OD costs.
- sinks (pl.DataFrame): Remaining sinks per (motive,to).
- motive_dur (pl.DataFrame): Mean durations per (csp,motive).
- iteration (int): Current iteration to pick latest artifacts.
- activity_utility_coeff (float): Coefficient for activity utility.
- tmp_folders (dict[str, pathlib.Path]): Must contain "spatialized-chains" and "modes".
-
- Returns:
- pl.DataFrame: Candidate per-step rows with columns including
- ["demand_group_id","csp","motive_seq_id","dest_seq_id","mode_seq_id",
- "seq_step_index","motive","from","to","mode",
- "duration_per_pers","utility"].
- """
+ """Enumerate candidate state steps and compute per-step utilities."""
cost_by_od_and_modes = (
@@ -197,21 +175,7 @@ def get_possible_states_utility(
stay_home_state,
min_activity_time_constant
):
- """Aggregate per-step utilities to state-level utilities (incl. home-night).
-
- Sums step utilities per state, adds home-night utility, prunes dominated
- states, and appends the explicit stay-home baseline.
-
- Args:
- possible_states_steps (pl.DataFrame): Candidate step rows with per-step utility.
- home_night_dur (pl.DataFrame): Mean home-night duration by csp.
- stay_home_utility_coeff (float): Coefficient for home-night utility.
- stay_home_state (pl.DataFrame): Baseline state rows to append.
-
- Returns:
- pl.DataFrame: State-level utilities with
- ["demand_group_id","motive_seq_id","mode_seq_id","dest_seq_id","utility"].
- """
+ """Aggregate per-step utilities to state-level utilities (incl. home-night)."""
possible_states_utility = (
@@ -263,21 +227,7 @@ def get_possible_states_utility(
def get_transition_probabilities(self, current_states, possible_states_utility):
- """Compute transition probabilities from current to candidate states.
-
- Uses softmax over Δutility (with stabilization and pruning) within each
- demand group and current state key.
-
- Args:
- current_states (pl.DataFrame): Current states with utilities.
- possible_states_utility (pl.DataFrame): Candidate states with utilities.
-
- Returns:
- pl.DataFrame: Transitions with
- ["demand_group_id","motive_seq_id","dest_seq_id","mode_seq_id",
- "motive_seq_id_trans","dest_seq_id_trans","mode_seq_id_trans",
- "utility_trans","p_transition"].
- """
+ """Compute transition probabilities from current to candidate states."""
state_cols = ["demand_group_id", "motive_seq_id", "dest_seq_id", "mode_seq_id"]
@@ -347,21 +297,7 @@ def get_transition_probabilities(self, current_states, possible_states_utility):
def apply_transitions(self, current_states, transition_probabilities):
- """Apply transition probabilities to reweight populations and update states.
-
- Left-joins transitions onto current states, defaults to self-transition
- when absent, redistributes `n_persons` by `p_transition`, and aggregates
- by the new state keys.
-
- Args:
- current_states (pl.DataFrame): Current states with ["n_persons","utility"].
- transition_probabilities (pl.DataFrame): Probabilities produced by
- `get_transition_probabilities`.
-
- Returns:
- pl.DataFrame: Updated `current_states` aggregated by
- ["demand_group_id","motive_seq_id","dest_seq_id","mode_seq_id"].
- """
+ """Apply transition probabilities to reweight populations and update states."""
state_cols = ["demand_group_id", "motive_seq_id", "dest_seq_id", "mode_seq_id"]
@@ -392,20 +328,7 @@ def apply_transitions(self, current_states, transition_probabilities):
def get_current_states_steps(self, current_states, possible_states_steps):
- """Expand aggregate states to per-step rows (flows).
-
- Joins selected states back to their step sequences and converts
- per-person durations to aggregate durations.
-
- Args:
- current_states (pl.DataFrame): Updated aggregate states.
- possible_states_steps (pl.DataFrame): Candidate steps universe.
-
- Returns:
- pl.DataFrame: Per-step flows with columns including
- ["demand_group_id","motive_seq_id","dest_seq_id","mode_seq_id",
- "seq_step_index","motive","from","to","mode","n_persons","duration"].
- """
+ """Expand aggregate states to per-step rows (flows)."""
current_states_steps = (
current_states.lazy()
@@ -430,27 +353,10 @@ def get_current_states_steps(self, current_states, possible_states_steps):
return current_states_steps
-
-
def get_new_costs(self, costs, iteration, n_iter_per_cost_update, current_states_steps, costs_aggregator):
- """Optionally recompute congested costs from current flows.
-
- Aggregates OD flows by mode, updates network/user-equilibrium in the
- `costs_aggregator`, and returns refreshed costs when the cadence matches.
-
- Args:
- costs (pl.DataFrame): Current OD costs.
- iteration (int): Current iteration (1-based).
- n_iter_per_cost_update (int): Update cadence; 0 disables updates.
- current_states_steps (pl.DataFrame): Step-level flows (by mode).
- costs_aggregator (TravelCostsAggregator): Cost updater.
-
- Returns:
- pl.DataFrame: Updated OD costs (or original if no update ran).
- """
+ """Optionally recompute congested costs from current flows."""
if n_iter_per_cost_update > 0 and (iteration-1) % n_iter_per_cost_update == 0:
-
logging.info("Updating costs...")
od_flows_by_mode = (
@@ -469,24 +375,10 @@ def get_new_costs(self, costs, iteration, n_iter_per_cost_update, current_states
def get_new_sinks(self, current_states_steps, sinks):
- """Recompute remaining opportunities per (motive, destination).
-
- Subtracts assigned durations from capacities, computes availability and a
- saturation utility factor.
-
- Args:
- current_states_steps (pl.DataFrame): Step-level assigned durations.
- sinks (pl.DataFrame): Initial capacities per (motive,to).
-
- Returns:
- pl.DataFrame: Updated sinks with
- ["motive","to","sink_capacity","sink_available","k_saturation_utility"].
- """
+ """Recompute remaining opportunities per (motive, destination)."""
logging.info("Computing remaining opportunities at destinations...")
- # Compute the remaining number of opportunities by motive and destination
- # once assigned flows are accounted for
remaining_sinks = (
current_states_steps
@@ -517,4 +409,4 @@ def get_new_sinks(self, current_states_steps, sinks):
)
- return remaining_sinks
\ No newline at end of file
+ return remaining_sinks
diff --git a/pyproject.toml b/pyproject.toml
index f5550bd..91e7d98 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,7 +25,10 @@ dependencies = [
"psutil",
"networkx",
"plotly",
- "scikit-learn"
+ "dash",
+ "dash-deck",
+ "pydeck",
+ "dash-mantine-components"
]
requires-python = ">=3.11"
@@ -50,8 +53,9 @@ dev = [
"pytest",
"pytest-cov",
"pytest-dependency",
+ "dash[testing]",
"sphinxcontrib-napoleon",
- "myst_parser"
+ "myst_parser"
]
spyder = [
@@ -76,4 +80,11 @@ mobility = [
[tool.setuptools.packages.find]
where = ["."]
include = ["mobility*"]
-exclude = ["certs", "certs.*"]
\ No newline at end of file
+exclude = ["certs", "certs.*"]
+
+[tool.pytest.ini_options]
+pythonpath = [
+ ".",
+ "front",
+ "mobility"
+]
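The new [tool.pytest.ini_options] block puts the repository root, front/ and mobility/ on pytest's import path, which is why the test modules below can mix the front.app.* form with the shorter app.* form. A minimal sketch of the two equivalent import styles this enables (module names taken from the tests that follow):

    # Resolvable because "." is on the pytest path:
    import front.app.services.scenario_service as scn_mod
    # Resolvable because "front" is on the pytest path:
    from app.components.features.map.color_scale import fit_color_scale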
diff --git a/tests/front/conftest.py b/tests/front/conftest.py
new file mode 100644
index 0000000..63f926e
--- /dev/null
+++ b/tests/front/conftest.py
@@ -0,0 +1,63 @@
+# tests/front/conftest.py
+import pytest
+import pandas as pd
+import geopandas as gpd
+from shapely.geometry import Polygon
+import sys
+from pathlib import Path
+
+REPO_ROOT = Path(__file__).resolve().parents[2] # -> repository root
+FRONT_DIR = REPO_ROOT / "front"
+if str(FRONT_DIR) not in sys.path:
+ sys.path.insert(0, str(FRONT_DIR))
+
+@pytest.fixture
+def sample_scn():
+ poly = Polygon([
+ (1.43, 43.60), (1.45, 43.60),
+ (1.45, 43.62), (1.43, 43.62),
+ (1.43, 43.60)
+ ])
+
+ zones_gdf = gpd.GeoDataFrame(
+ {
+ "transport_zone_id": ["Z1"],
+ "local_admin_unit_id": ["31555"],
+ "average_travel_time": [32.4],
+ "total_dist_km": [7.8],
+ "total_time_min": [45.0],
+ "share_car": [0.52],
+ "share_bicycle": [0.18],
+ "share_walk": [0.30],
+ "geometry": [poly],
+ },
+ crs="EPSG:4326",
+ )
+
+ flows_df = pd.DataFrame({"from": [], "to": [], "flow_volume": []})
+ zones_lookup = zones_gdf[["transport_zone_id", "geometry"]].copy()
+
+ return {"zones_gdf": zones_gdf, "flows_df": flows_df, "zones_lookup": zones_lookup}
+
+@pytest.fixture(autouse=True)
+def patch_services(monkeypatch, sample_scn):
+ # Patch the scenario service for the integration tests
+ import front.app.services.scenario_service as scn_mod
+ def fake_get_scenario(radius=40, local_admin_unit_id="31555"):
+ return sample_scn
+ monkeypatch.setattr(scn_mod, "get_scenario", fake_get_scenario, raising=True)
+
+ # Patch map_service, option B (if present)
+ try:
+ import front.app.services.map_service as map_service
+ from app.components.features.map.config import DeckOptions
+ from app.components.features.map.deck_factory import make_deck_json
+ def fake_get_map_deck_json_from_scn(scn, opts=None):
+ opts = opts or DeckOptions()
+ return make_deck_json(scn, opts)
+ monkeypatch.setattr(map_service, "get_map_deck_json_from_scn",
+ fake_get_map_deck_json_from_scn, raising=False)
+ except Exception:
+ pass
+
+ yield
diff --git a/tests/front/integration/test_001_main_app.py b/tests/front/integration/test_001_main_app.py
new file mode 100644
index 0000000..ba00e8c
--- /dev/null
+++ b/tests/front/integration/test_001_main_app.py
@@ -0,0 +1,76 @@
+import json
+import pandas as pd
+import geopandas as gpd
+from shapely.geometry import Polygon
+from dash.development.base_component import Component
+
+# Import the "front" building blocks used by the callback
+import front.app.services.scenario_service as scn_mod
+from app.components.features.map.config import DeckOptions
+from app.components.features.map.deck_factory import make_deck_json
+from app.components.features.study_area_summary import StudyAreaSummary
+
+MAPP = "map" # doit matcher l'id_prefix de la Map
+
+def compute_simulation_outputs_test(radius_val, lau_val, id_prefix=MAPP):
+ """
+ Test-local helper: reproduces the logic of the _run_simulation callback
+ without requiring Selenium / dash_duo.
+ """
+ r = 40 if radius_val is None else int(radius_val)
+ lau = (lau_val or "").strip() or "31555"
+ scn = scn_mod.get_scenario(radius=r, local_admin_unit_id=lau)
+ deck_json = make_deck_json(scn, DeckOptions())
+ summary = StudyAreaSummary(scn["zones_gdf"], visible=True, id_prefix=id_prefix)
+ return deck_json, summary
+
+
+def test_compute_simulation_outputs_smoke(monkeypatch):
+ # --- 1) stable scenario via monkeypatch ---
+ poly = Polygon([
+ (1.43, 43.60), (1.45, 43.60),
+ (1.45, 43.62), (1.43, 43.62),
+ (1.43, 43.60),
+ ])
+
+ zones_gdf = gpd.GeoDataFrame(
+ {
+ "transport_zone_id": ["Z1"],
+ "local_admin_unit_id": ["31555"],
+ "average_travel_time": [32.4],
+ "total_dist_km": [7.8],
+ "total_time_min": [45.0],
+ "share_car": [0.52],
+ "share_bicycle": [0.18],
+ "share_walk": [0.30],
+ "geometry": [poly],
+ },
+ crs="EPSG:4326",
+ )
+ flows_df = pd.DataFrame({"from": [], "to": [], "flow_volume": []})
+ zones_lookup = zones_gdf[["transport_zone_id", "geometry"]].copy()
+
+ def fake_get_scenario(radius=40, local_admin_unit_id="31555"):
+ return {
+ "zones_gdf": zones_gdf,
+ "flows_df": flows_df,
+ "zones_lookup": zones_lookup,
+ }
+
+ monkeypatch.setattr(scn_mod, "get_scenario", fake_get_scenario, raising=True)
+
+ # --- 2) run the callback-like logic ---
+ deck_json, summary = compute_simulation_outputs_test(30, "31555", id_prefix=MAPP)
+
+ # --- 3) assertions: deck_json is valid DeckGL JSON ---
+ assert isinstance(deck_json, str)
+ deck = json.loads(deck_json)
+ assert "initialViewState" in deck
+ assert isinstance(deck.get("layers", []), list)
+
+ # --- 4) assertions: summary is a serializable Dash component ---
+ assert isinstance(summary, Component)
+ payload = summary.to_plotly_json()
+ assert isinstance(payload, dict)
+ # Check the root ID used in StudyAreaSummary
+ assert payload.get("props", {}).get("id", "").endswith("-study-summary")
diff --git a/tests/front/unit/main/test_000_import_main.py b/tests/front/unit/main/test_000_import_main.py
new file mode 100644
index 0000000..0516adb
--- /dev/null
+++ b/tests/front/unit/main/test_000_import_main.py
@@ -0,0 +1,5 @@
+from front.app.pages.main import main
+
+def test_import_main_and_create_app():
+ app = main.create_app()
+ assert app is not None
diff --git a/tests/front/unit/main/test_004_main_import_branches.py b/tests/front/unit/main/test_004_main_import_branches.py
new file mode 100644
index 0000000..dd5a441
--- /dev/null
+++ b/tests/front/unit/main/test_004_main_import_branches.py
@@ -0,0 +1,56 @@
+import importlib
+import builtins
+import types
+import sys
+
+def test_main_uses_service_branch(monkeypatch):
+ # Ensure the service module exists
+ mod = types.ModuleType("front.app.services.map_service")
+ def fake_get_map_deck_json_from_scn(scn, opts=None):
+ return "__deck_from_service__"
+ mod.get_map_deck_json_from_scn = fake_get_map_deck_json_from_scn
+
+ # Register the package hierarchy in sys.modules (if needed)
+ sys.modules.setdefault("front", types.ModuleType("front"))
+ sys.modules.setdefault("front.app", types.ModuleType("front.app"))
+ sys.modules.setdefault("front.app.services", types.ModuleType("front.app.services"))
+ sys.modules["front.app.services.map_service"] = mod
+
+ from front.app.pages.main import main
+ importlib.reload(main)
+
+ assert main.USE_MAP_SERVICE is True
+ out = main._make_deck_json_from_scn({"k": "v"})
+ assert out == "__deck_from_service__"
+
+
+def test_main_uses_fallback_branch(monkeypatch):
+ # Force the import of map_service to fail during the reload
+ import front.app.pages.main.main as main
+ importlib.reload(main)  # reload to a clean baseline
+
+ real_import = builtins.__import__
+ def fake_import(name, *a, **kw):
+ if name == "front.app.services.map_service":
+ raise ImportError("Simulated ImportError for test")
+ return real_import(name, *a, **kw)
+
+ monkeypatch.setattr(builtins, "__import__", fake_import)
+ importlib.reload(main)
+
+ assert main.USE_MAP_SERVICE is False
+
+ # Monkeypatch the fallback factory to avoid depending on the geo stack
+ called = {}
+ def fake_make_deck_json(scn, opts):
+ called["ok"] = True
+ return "__deck_from_fallback__"
+
+ monkeypatch.setattr(
+ "front.app.pages.main.main.make_deck_json",
+ fake_make_deck_json,
+ raising=True,
+ )
+ out = main._make_deck_json_from_scn({"k": "v"})
+ assert out == "__deck_from_fallback__"
+ assert called.get("ok") is True
diff --git a/tests/front/unit/main/test_cover_sync_slider_from_input.py b/tests/front/unit/main/test_cover_sync_slider_from_input.py
new file mode 100644
index 0000000..472e385
--- /dev/null
+++ b/tests/front/unit/main/test_cover_sync_slider_from_input.py
@@ -0,0 +1,35 @@
+import importlib
+import dash
+from dash import no_update
+import front.app.pages.main.main as main
+
+
+def test_cover_sync_slider_from_input(monkeypatch):
+ captured = [] # (outputs_tuple, kwargs, func)
+
+ real_callback = dash.Dash.callback
+
+ def recording_callback(self, *outputs, **kwargs):
+ def decorator(func):
+ captured.append((outputs, kwargs, func))
+ return func  # important: let Dash register the same function
+ return decorator
+
+ # 1) Capture the callback registrations during create_app()
+ monkeypatch.setattr(dash.Dash, "callback", recording_callback, raising=True)
+ importlib.reload(main)
+ app = main.create_app()
+ monkeypatch.setattr(dash.Dash, "callback", real_callback, raising=True)
+
+ # 2) Look the function up directly by its name
+ target = None
+ for _outs, _kw, func in captured:
+ if getattr(func, "__name__", "") == "_sync_slider_from_input":
+ target = func
+ break
+
+ assert target is not None, "Callback _sync_slider_from_input not found"
+
+ assert target(None, 10) is no_update  # branch: input_val is None
+ assert target(10, 10) is no_update  # branch: input_val == current_slider
+ assert target(8, 10) == 8  # branch: return input_val
diff --git a/tests/front/unit/main/test_main_callbacks_sync_via_decorator_capture.py b/tests/front/unit/main/test_main_callbacks_sync_via_decorator_capture.py
new file mode 100644
index 0000000..8ea9f9d
--- /dev/null
+++ b/tests/front/unit/main/test_main_callbacks_sync_via_decorator_capture.py
@@ -0,0 +1,130 @@
+import importlib
+import json
+from typing import List, Tuple
+
+import pandas as pd
+import geopandas as gpd
+from shapely.geometry import Polygon
+import dash
+from dash import no_update
+from dash.development.base_component import Component
+
+import front.app.pages.main.main as main
+
+
+def _output_pairs(outputs_obj) -> List[Tuple[str, str]]:
+ pairs: List[Tuple[str, str]] = []
+
+ for out in outputs_obj:
+ cid = getattr(out, "component_id", None)
+ if cid is None and hasattr(out, "get"):
+ cid = out.get("id")
+ prop = getattr(out, "component_property", None)
+ if prop is None and hasattr(out, "get"):
+ prop = out.get("property")
+ if cid is not None and prop is not None:
+ pairs.append((cid, prop))
+ return pairs
+
+
+def _find_callback(captured, want_pairs: List[Tuple[str, str]]):
+
+ want = set(want_pairs)
+ for outputs_obj, _kwargs, func in captured:
+ outs = set(_output_pairs(outputs_obj))
+ if want.issubset(outs):
+ return func
+ raise AssertionError(f"Callback not found for outputs {want_pairs}")
+
+
+def test_callbacks_via_decorator_capture(monkeypatch):
+ captured = [] # list of tuples: (outputs_obj, kwargs, func)
+
+ # Wrap Dash.callback to record every callback registration
+ real_callback = dash.Dash.callback
+
+ def recording_callback(self, *outputs, **kwargs):
+ def decorator(func):
+ captured.append((outputs, kwargs, func))
+ return func # important: return the original for Dash
+ return decorator
+
+ monkeypatch.setattr(dash.Dash, "callback", recording_callback, raising=True)
+
+ # Reload module & build app (this registers all callbacks and gets captured)
+ importlib.reload(main)
+ app = main.create_app()
+
+ # Restore the original callback (optional hygiene)
+ monkeypatch.setattr(dash.Dash, "callback", real_callback, raising=True)
+
+ # -------- find the 3 callbacks by their outputs --------
+ cb_slider_to_input = _find_callback(
+ captured, [(f"{main.MAPP}-radius-input", "value")]
+ )
+ cb_input_to_slider = _find_callback(
+ captured, [(f"{main.MAPP}-radius-slider", "value")]
+ )
+ cb_run_sim = _find_callback(
+ captured,
+ [
+ (f"{main.MAPP}-deck-map", "data"),
+ (f"{main.MAPP}-summary-wrapper", "children"),
+ ],
+ )
+
+ # -------- test sync callbacks --------
+ # slider -> input: args order = [Inputs..., States...]
+ assert cb_slider_to_input(40, 40) is no_update
+ assert cb_slider_to_input(42, 40) == 42
+
+ # input -> slider
+ assert cb_input_to_slider(40, 40) is no_update
+ assert cb_input_to_slider(38, 40) == 38
+
+ # -------- success path for run simulation --------
+ poly = Polygon([(1.43, 43.60), (1.45, 43.60), (1.45, 43.62), (1.43, 43.62), (1.43, 43.60)])
+ zones_gdf = gpd.GeoDataFrame(
+ {
+ "transport_zone_id": ["Z1"],
+ "local_admin_unit_id": ["31555"],
+ "average_travel_time": [32.4],
+ "total_dist_km": [7.8],
+ "total_time_min": [45.0],
+ "share_car": [0.52],
+ "share_bicycle": [0.18],
+ "share_walk": [0.30],
+ "geometry": [poly],
+ },
+ crs="EPSG:4326",
+ )
+ flows_df = pd.DataFrame({"from": [], "to": [], "flow_volume": []})
+ zones_lookup = zones_gdf[["transport_zone_id", "geometry"]].copy()
+
+ def fake_get_scenario(radius=40, local_admin_unit_id="31555"):
+ return {"zones_gdf": zones_gdf, "flows_df": flows_df, "zones_lookup": zones_lookup}
+
+ monkeypatch.setattr("front.app.pages.main.main.get_scenario", fake_get_scenario, raising=True)
+
+ def fake_make(scn):
+ return json.dumps({"initialViewState": {}, "layers": []})
+ monkeypatch.setattr("front.app.pages.main.main._make_deck_json_from_scn", fake_make, raising=True)
+
+ deck_json, summary = cb_run_sim(1, 30, "31555")
+ assert isinstance(deck_json, str)
+ parsed = json.loads(deck_json)
+ assert "initialViewState" in parsed and "layers" in parsed
+ assert isinstance(summary, Component)
+ props_id = summary.to_plotly_json().get("props", {}).get("id", "")
+ assert props_id.endswith("-study-summary")
+
+ # -------- error path for run simulation --------
+ def boom_get_scenario(*a, **k):
+ raise RuntimeError("boom")
+
+ monkeypatch.setattr("front.app.pages.main.main.get_scenario", boom_get_scenario, raising=True)
+
+ deck_json2, panel = cb_run_sim(1, 40, "31555")
+ assert deck_json2 is no_update
+ assert isinstance(panel, Component)
diff --git a/tests/front/unit/test_002_color_scale.py b/tests/front/unit/test_002_color_scale.py
new file mode 100644
index 0000000..32aa4b4
--- /dev/null
+++ b/tests/front/unit/test_002_color_scale.py
@@ -0,0 +1,19 @@
+import pandas as pd
+from app.components.features.map.color_scale import fit_color_scale
+
+def test_fit_color_scale_basic():
+ s = pd.Series([10.0, 20.0, 25.0, 30.0])
+ scale = fit_color_scale(s)
+ assert scale.vmin == 10.0
+ assert scale.vmax == 30.0
+
+ # Color at vmin ~ cool, at vmax ~ warm
+ c_min = scale.rgba(10.0)
+ c_mid = scale.rgba(20.0)
+ c_max = scale.rgba(30.0)
+ assert isinstance(c_min, list) and len(c_min) == 4
+ assert c_min[0] < c_max[0]  # red increases
+ assert c_min[2] > c_max[2]  # blue decreases
+
+ # Legend stays consistent
+ assert scale.legend(11.0) in {"Accès rapide", "Accès moyen", "Accès lent"}
diff --git a/tests/front/unit/test_003_geo_utils.py b/tests/front/unit/test_003_geo_utils.py
new file mode 100644
index 0000000..20bdad8
--- /dev/null
+++ b/tests/front/unit/test_003_geo_utils.py
@@ -0,0 +1,24 @@
+import geopandas as gpd
+from shapely.geometry import Polygon
+from app.components.features.map.geo_utils import safe_center, ensure_wgs84
+
+def test_safe_center_simple_square():
+ gdf = gpd.GeoDataFrame(
+ {"id": [1]},
+ geometry=[Polygon([(0,0),(1,0),(1,1),(0,1),(0,0)])],
+ crs="EPSG:4326",
+ )
+ center = safe_center(gdf)
+ assert isinstance(center, tuple) and len(center) == 2
+ lon, lat = center
+ assert 0.4 < lon < 0.6
+ assert 0.4 < lat < 0.6
+
+def test_ensure_wgs84_no_crs():
+ gdf = gpd.GeoDataFrame(
+ {"id": [1]},
+ geometry=[Polygon([(0,0),(1,0),(1,1),(0,1),(0,0)])],
+ crs=None,
+ )
+ out = ensure_wgs84(gdf)
+ assert out.crs is not None
\ No newline at end of file