diff --git a/.gitignore b/.gitignore index 43cf552..b83760f 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,6 @@ coverage.xml /sim_data/*.png /sim_data/mc_results/*.log /sim_data/mc_results/*.png +/sim_data/mc_results_1000km +/sim_data/mc_results_2000km +/sim_data/comparison diff --git a/README.md b/README.md index 3068bc8..8cd6069 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,14 @@ The project is currently at TRL 0. The PoISE consensus mechanism is in the early ## Related Publications +* [B. Probert, R. A. Clark, E. Blasch, and M. Macdonald, “Cooperative Orbit Determination for Trusted, Autonomous, and Decentralised Satellite Operations,” in AIAA SCITECH 2026 Forum, in AIAA SciTech Forum. Orlando, Florida: American Institute of Aeronautics and Astronautics, Jan. 2026. doi: 10.2514/6.2026-0825](https://arc.aiaa.org/doi/10.2514/6.2026-0825) + * [B. Probert, R. A. Clark, E. Blasch, and M. Macdonald, “A Review of Distributed Ledger Technologies for Satellite Operations,” IEEE Access, vol. 13, pp. 123230–123258, 2025, doi: 10.1109/ACCESS.2025.3588688](https://ieeexplore.ieee.org/document/11079570) +## Citation +If you use this work, please cite it as: +> B. Probert, bprobert97/accord: v2.2. (Feb. 25, 2026). Python. University of Strathclyde, Glasgow. [DOI: 10.5281/zenodo.18776049](https://doi.org/10.5281/zenodo.18776049) + # Repository Layout @@ -34,6 +40,7 @@ The project is currently at TRL 0. 
The PoISE consensus mechanism is in the early │ └── dag.py # Code for the Directed Acyclic Graph ledger structure │ └── filter.py # Code for the orbit determination calculations │ └── logger.py # Code for the app logger +│ └── mc_comparison.py # Code for generating comparison plots for different Monte Carlo data sets | └── plotting.py # Code for plotting simulation results │ └── reputation.py # Code for the satellite reputation manager │ └── satellite_node.py # Code representing a satellite in the network diff --git a/src/mc_comparison.py b/src/mc_comparison.py new file mode 100644 index 0000000..4215d22 --- /dev/null +++ b/src/mc_comparison.py @@ -0,0 +1,220 @@ +# pylint: disable=protected-access, too-many-locals, too-many-statements, too-many-arguments, too-many-positional-arguments, broad-exception-caught, duplicate-code +""" +The Autonomous Cooperative Consensus Orbit Determination (ACCORD) framework. +Author: Beth Probert +Email: beth.probert@strath.ac.uk + +Copyright (C) 2025 Applied Space Technology Laboratory + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see <https://www.gnu.org/licenses/>. 
+ +""" +import os +from typing import Dict, List, Any +import numpy as np +import matplotlib.pyplot as plt + +# Paths +PATH_100KM = "sim_data/mc_results/mc_results.npz" +PATH_2000KM = "sim_data/mc_results_2000km/mc_results.npz" +OUTPUT_DIR = "sim_data/comparison" + +def load_results(path: str) -> List[Dict[str, Any]]: + """ + Loads Monte Carlo results from a compressed .npz file. + + Args: + path: The file path to the .npz results. + + Returns: + A list of result dictionaries, excluding any failed runs (None). + """ + if not os.path.exists(path): + print(f"Warning: File not found at {path}") + return [] + try: + with np.load(path, allow_pickle=True) as data: + results = list(data['results']) + return [res for res in results if res is not None] + except Exception as e: + print(f"Error loading {path}: {e}") + return [] + +def plot_reputation_comparison(results_100km: List[Dict[str, Any]], + results_2000km: List[Dict[str, Any]]) -> None: + """ + Plots reputation history for both datasets on the same graph. 
+ """ + plt.figure(figsize=(12, 7)) + + def get_aggregated_reps(results): + honest_means = [] + faulty_means = [] + for kpi in results: + honest_means.append(np.mean(kpi["honest_matrix"], axis=0)) + faulty_means.append(np.mean(kpi["faulty_matrix"], axis=0)) + return np.array(honest_means), np.array(faulty_means) + + if results_100km: + h_100, f_100 = get_aggregated_reps(results_100km) + steps_100 = np.arange(h_100.shape[1]) + + h_mean_100 = np.mean(h_100, axis=0) + h_std_100 = np.std(h_100, axis=0) + plt.plot(steps_100, h_mean_100, color="green", label="Honest (100km ISL)") + plt.fill_between(steps_100, h_mean_100 - h_std_100, + h_mean_100 + h_std_100, color="green", alpha=0.1) + + f_mean_100 = np.mean(f_100, axis=0) + f_std_100 = np.std(f_100, axis=0) + plt.plot(steps_100, f_mean_100, color="red", label="Faulty (100km ISL)") + plt.fill_between(steps_100, f_mean_100 - f_std_100, + f_mean_100 + f_std_100, color="red", alpha=0.1) + + if results_2000km: + h_2000, f_2000 = get_aggregated_reps(results_2000km) + steps_2000 = np.arange(h_2000.shape[1]) + + h_mean_2000 = np.mean(h_2000, axis=0) + h_std_2000 = np.std(h_2000, axis=0) + plt.plot(steps_2000, h_mean_2000, color="green", + linestyle="--", label="Honest (2000km ISL)") + plt.fill_between(steps_2000, h_mean_2000 - h_std_2000, + h_mean_2000 + h_std_2000, color="green", alpha=0.1) + + f_mean_2000 = np.mean(f_2000, axis=0) + f_std_2000 = np.std(f_2000, axis=0) + plt.plot(steps_2000, f_mean_2000, color="red", linestyle="--", + label="Faulty (2000km ISL)") + plt.fill_between(steps_2000, f_mean_2000 - f_std_2000, + f_mean_2000 + f_std_2000, color="red", alpha=0.1) + + plt.axhline(0.5, color="gray", linestyle=":", label="Neutral") + plt.xlabel("Timestep [-]", fontsize=14) + plt.ylabel("Reputation [-]", fontsize=14) + plt.legend(loc='upper left') + plt.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(os.path.join(OUTPUT_DIR, "reputation_comparison.png")) + plt.show() + +def plot_kpi_comparison(results_100km: 
List[Dict[str, Any]], + results_2000km: List[Dict[str, Any]]) -> None: + """ + Plots a bar chart comparison of key KPIs, including TTD as a percentage of runtime. + """ + metrics = ["recall", "precision", "fpr"] + labels = ["Recall", "Precision", "False Positive Rate", "Normalised TTD"] + + # Calculate means for standard metrics + means_100 = [np.mean([k[m] for k in results_100km]) for m in metrics] + means_2000 = [np.mean([k[m] for k in results_2000km]) for m in metrics] + + # Calculate Normalised TTD (as % of total steps) + def get_ttd_percent(results): + ttd_pcts = [] + for k in results: + if k.get("avg_ttd") is not None: + total_steps = k["honest_matrix"].shape[1] + ttd_pcts.append((k["avg_ttd"] / total_steps) * 100) + return np.mean(ttd_pcts) if ttd_pcts else 0 + + means_100.append(get_ttd_percent(results_100km)) + means_2000.append(get_ttd_percent(results_2000km)) + + x = np.arange(len(labels)) + width = 0.35 + + _, ax = plt.subplots(figsize=(12, 6)) + ax.bar(x - width/2, means_100, width, label='100km ISL', color='skyblue') + ax.bar(x + width/2, means_2000, width, label='2000km ISL', color='salmon') + + ax.set_ylabel('Percentage [%]') + ax.set_xticks(x) + ax.set_xticklabels(labels) + ax.legend() + ax.grid(axis='y', alpha=0.3) + ax.set_ylim(0, 105) # Metrics are percentages + + plt.tight_layout() + plt.savefig(os.path.join(OUTPUT_DIR, "kpi_percentage_comparison.png")) + plt.show() + + # TTD Comparison + ttds_100 = [float(k.get("avg_ttd", 0)) for k in results_100km if k.get("avg_ttd") is not None] + ttds_2000 = [float(k.get("avg_ttd", 0)) for k in results_2000km if k.get("avg_ttd") is not None] + + if ttds_100 or ttds_2000: + plt.figure(figsize=(6, 6)) + data_to_plot = [] + labels_ttd = [] + if ttds_100: + data_to_plot.append(ttds_100) + labels_ttd.append("100km ISL") + if ttds_2000: + data_to_plot.append(ttds_2000) + labels_ttd.append("2000km ISL") + + # Reduce gap by setting positions closer and increasing widths + positions = [1, 1.5] + 
plt.boxplot(data_to_plot, tick_labels=labels_ttd, + positions=positions[:len(data_to_plot)], widths=0.35) + plt.xlim(0.5, 2.0) + plt.ylabel("Time to Detection [Timesteps]") + plt.grid(axis='y', alpha=0.3) + plt.tight_layout() + plt.savefig(os.path.join(OUTPUT_DIR, "ttd_comparison.png")) + plt.show() + +def main(): + """ + Main entry point for the comparison script. + Loads results for both 100km and 2000km ISL ranges, generates + comparison plots, and prints a summary to the console. + """ + os.makedirs(OUTPUT_DIR, exist_ok=True) + + print("Loading 100km results...") + res_100 = load_results(PATH_100KM) + print("Loading 2000km results...") + res_2000 = load_results(PATH_2000KM) + + if not res_100 and not res_2000: + print("No results found to compare.") + return + + print(f"Comparing {len(res_100)} runs (100km) vs {len(res_2000)} runs (2000km)") + + plot_reputation_comparison(res_100, res_2000) + plot_kpi_comparison(res_100, res_2000) + + # Also print summary + def print_summary(label, results): + if not results: + return + print(f"\n--- {label} Summary ---") + print(f"Mean Recall: {np.mean([k['recall'] for k in results]):.2f}%") + print(f"Mean Precision: {np.mean([k['precision'] for k in results]):.2f}%") + print(f"Mean FPR: {np.mean([k['fpr'] for k in results]):.2f}%") + ttds = [float(k.get("avg_ttd", 0)) for k in results if k.get("avg_ttd") is not None] + if ttds: + print(f"Mean TTD: {np.mean(ttds):.2f} steps") + print(f"Avg Detection Margin: {np.mean([k.get('detection_margin', 0) \ + for k in results]):.4f}") + + print_summary("100km ISL", res_100) + print_summary("2000km ISL", res_2000) + +if __name__ == "__main__": + main() diff --git a/streamlit_app.py b/streamlit_app.py index b6211bc..f051fa9 100644 --- a/streamlit_app.py +++ b/streamlit_app.py @@ -25,20 +25,68 @@ # --- Global Styling --- st.markdown(""" """, unsafe_allow_html=True) @@ -124,8 +193,8 @@ def load_mc_results(path: str) -> List[Any] | None: st.sidebar.warning("MC Data (mc_results.npz) not 
found.") # --- Tabs --- -tab0, tab1, tab2, tab3 = st.tabs(["📋 Configuration", "📊 Results Explorer", - "📈 Sensitivity Analysis", "🕸️ DAG Viewer"]) +tab0, tab1, tab2, tab3, tab4 = st.tabs(["📋 Configuration", "📊 Results Explorer", + "📈 Sensitivity Analysis", "🕸️ DAG Viewer", "📚 Resources"]) # --- Tab 0: Configuration --- with tab0: @@ -158,8 +227,8 @@ def display_config_item(item_label: str, item_value: Any) -> None: """Display a single configuration item with custom styling.""" st.markdown(f"""
-

{item_label}

-

{item_value}

+

{item_label}

+

{item_value}

""", unsafe_allow_html=True) @@ -176,54 +245,53 @@ def display_config_item(item_label: str, item_value: Any) -> None: if sim_data: st.header("Simulation Analysis") - col1, col2 = st.columns([1, 1]) - - with col1: - st.subheader("Satellite Reputation") - rep_history = sim_data["rep_history"] - faulty_ids = sim_data["faulty_ids"] - - # Convert to DataFrame for Plotly - rep_df_list = [] - for sid, history in rep_history.items(): - is_faulty = int(sid) in faulty_ids - for step, rep in enumerate(history): - rep_df_list.append({ - "Timestep": step, - "Reputation [-]": rep, - "Satellite": f"Sat {sid}", - "Status": "Faulty" if is_faulty else "Honest" - }) - rep_df = pd.DataFrame(rep_df_list) - - fig_rep = px.line( - rep_df, x="Timestep", y="Reputation [-]", color="Satellite", - line_dash="Status", - color_discrete_sequence=px.colors.qualitative.Safe + st.subheader("Satellite Reputation") + rep_history = sim_data["rep_history"].item() + faulty_ids = sim_data["faulty_ids"] + + # Convert to DataFrame for Plotly + rep_df_list = [] + for sid, history in rep_history.items(): + is_faulty = int(sid) in faulty_ids + for step, rep in enumerate(history): + rep_df_list.append({ + "Timestep": step, + "Reputation [-]": rep, + "Satellite": f"Sat {sid}", + "Status": "Faulty" if is_faulty else "Honest" + }) + rep_df = pd.DataFrame(rep_df_list) + + fig_rep = px.line( + rep_df, x="Timestep", y="Reputation [-]", color="Satellite", + line_dash="Status", + color_discrete_sequence=px.colors.qualitative.Safe + ) + fig_rep.add_hline( + y=0.5, line_dash="dot", annotation_text="Neutral", line_color="gray" + ) + st.plotly_chart(fig_rep, width="stretch") + + st.divider() + + st.subheader("Satellite Ground Tracks") + if sim_data and "truth" in sim_data: + # Calculate number of satellites (each state is 6 elements) + num_sats = sim_data["truth"].shape[1] // 6 + fig_map = plot_ground_tracks_plotly(sim_data["truth"], num_sats) + st.plotly_chart(fig_map, width="stretch") + elif 
os.path.exists("sim_data/orbit_map.png"): + st.image( + "sim_data/orbit_map.png", + caption="Satellite Ground Tracks (Static Backup)", + width="stretch" ) - fig_rep.add_hline( - y=0.5, line_dash="dot", annotation_text="Neutral", line_color="gray" + else: + st.info( + "Showing current positions. For full ground tracks, " + "see 'accord_demo.py' outputs." ) - st.plotly_chart(fig_rep, width="stretch") - - with col2: - st.subheader("Satellite Ground Tracks") - if sim_data and "truth" in sim_data: - # Calculate number of satellites (each state is 6 elements) - num_sats = sim_data["truth"].shape[1] // 6 - fig_map = plot_ground_tracks_plotly(sim_data["truth"], num_sats) - st.plotly_chart(fig_map, width="stretch") - elif os.path.exists("sim_data/orbit_map.png"): - st.image( - "sim_data/orbit_map.png", - caption="Satellite Ground Tracks (Static Backup)" - ) - else: - st.info( - "Showing current positions. For full ground tracks, " - "see 'accord_demo.py' outputs." - ) - st.warning("Orbit data not found.") + st.warning("Orbit data not found.") else: st.info("Please run `python accord.py` to generate simulation data.") @@ -352,44 +420,43 @@ def display_config_item(item_label: str, item_value: Any) -> None: ) st.plotly_chart(fig_mc, width="stretch") - # Plot 2: Distribution Row - col_p1, col_p2, col_p3 = st.columns(3) - - with col_p1: - # Reliability Scatter - recalls = [k.get("recall", 0) for k in valid_kpis_plot] - precisions = [k.get("precision", 0) for k in valid_kpis_plot] - - fig_rel = px.scatter(x=recalls, y=precisions, labels={'x': 'Recall (%)', - 'y': 'Precision (%)'}, - title="Reliability (Recall vs Precision)", - range_x=[-5, 105], range_y=[-5, 105]) - fig_rel.add_vline(x=np.mean(recalls), line_dash="dot", - line_color="purple", opacity=0.5) - fig_rel.add_hline(y=np.mean(precisions), line_dash="dot", - line_color="purple", opacity=0.5) - st.plotly_chart(fig_rel, width='stretch') - - with col_p2: - # TTD Histogram - ttds_flat = [k.get("avg_ttd") for k in 
valid_kpis_plot \ - if k.get("avg_ttd") is not None] - if ttds_flat: - fig_ttd = px.histogram(x=ttds_flat, nbins=15, labels={'x': 'Steps'}, - title="Time to Detection Distribution") - st.plotly_chart(fig_ttd, width='stretch') - else: - st.info("No detections occurred.") - - with col_p3: - # FPR Histogram - fprs_flat = [k.get("fpr", 0) for k in valid_kpis_plot] - fig_fpr = px.histogram( - x=fprs_flat, nbins=15, labels={'x': 'FPR (%)'}, - title="False Positive Rate Distribution", - color_discrete_sequence=['salmon'] - ) - st.plotly_chart(fig_fpr, width='stretch') + # Plot 2: Distributions in Rows + # Reliability Scatter + recalls = [k.get("recall", 0) for k in valid_kpis_plot] + precisions = [k.get("precision", 0) for k in valid_kpis_plot] + + fig_rel = px.scatter(x=recalls, y=precisions, labels={'x': 'Recall (%)', + 'y': 'Precision (%)'}, + title="Reliability (Recall vs Precision)", + range_x=[-5, 105], range_y=[-5, 105]) + fig_rel.add_vline(x=np.mean(recalls), line_dash="dot", + line_color="purple", opacity=0.5) + fig_rel.add_hline(y=np.mean(precisions), line_dash="dot", + line_color="purple", opacity=0.5) + st.plotly_chart(fig_rel, width="stretch") + + st.divider() + + # TTD Histogram + ttds_flat = [k.get("avg_ttd") for k in valid_kpis_plot \ + if k.get("avg_ttd") is not None] + if ttds_flat: + fig_ttd = px.histogram(x=ttds_flat, nbins=15, labels={'x': 'Steps'}, + title="Time to Detection Distribution") + st.plotly_chart(fig_ttd, width="stretch") + else: + st.info("No detections occurred.") + + st.divider() + + # FPR Histogram + fprs_flat = [k.get("fpr", 0) for k in valid_kpis_plot] + fig_fpr = px.histogram( + x=fprs_flat, nbins=15, labels={'x': 'FPR (%)'}, + title="False Positive Rate Distribution", + color_discrete_sequence=['salmon'] + ) + st.plotly_chart(fig_fpr, width="stretch") else: st.info("Please run `python mc_demo.py` to generate Monte Carlo results.") @@ -401,7 +468,7 @@ def display_config_item(item_label: str, item_value: Any) -> None: st.header("DAG 
Topology") st.markdown("Visualising the structure of the Distributed Ledger.") - ledger = sim_data["dag_ledger"] + ledger = sim_data["dag_ledger"].item() # Flatten ledger into a chronological list of transactions all_txs = [] @@ -476,6 +543,43 @@ def display_config_item(item_label: str, item_value: Any) -> None: # but respects the chronological layout (genesis on left) dot.edge(p_hash, tx_hash, dir='back') - st.graphviz_chart(dot, width='stretch') + st.graphviz_chart(dot, width="stretch") else: st.info("Simulation data required to visualise DAG.") + +# --- Tab 4: Resources --- +with tab4: + st.header("Project Resources") + + st.subheader("Source Code") + st.markdown(""" + The complete source code for the ACCORD project is available on GitHub. + + [![GitHub](https://img.shields.io/badge/GitHub-Repository-blue?logo=github)](https://github.com/bprobert97/accord) + + **License:** GNU General Public License v3.0 + """) + + st.divider() + + st.subheader("Cite this Work") + st.markdown(""" + If you use this work in your research, please cite it as: + + > B. Probert, bprobert97/accord: v2.2. (Feb. 25, 2026). Python. University of Strathclyde, Glasgow. [DOI: 10.5281/zenodo.18776049](https://doi.org/10.5281/zenodo.18776049) + """) + + st.divider() + + st.subheader("Related Publications") + st.markdown(""" + * B. Probert, R. A. Clark, E. Blasch, and M. Macdonald, + “Cooperative Orbit Determination for Trusted, Autonomous, and Decentralised Satellite Operations,” + in AIAA SCITECH 2026 Forum, in AIAA SciTech Forum. Orlando, + Florida: American Institute of Aeronautics and Astronautics, Jan. 2026. + [doi: 10.2514/6.2026-0825](https://arc.aiaa.org/doi/10.2514/6.2026-0825) + * B. Probert, R. A. Clark, E. Blasch, and M. Macdonald, + “A Review of Distributed Ledger Technologies for Satellite Operations,” + IEEE Access, vol. 13, pp. 123230–123258, 2025, + [doi: 10.1109/ACCESS.2025.3588688](https://ieeexplore.ieee.org/document/11079570) + """)