diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d446339 --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +.* +!.gitignore +/.idea +.DS_Store +__pycache__/ +*.ipynb +/examples +/starter_model +/mesa +site/ +sorted-out-tests +/benchmarks +/notes +/docs/work_in_progress_exclude +# short term: +Dockerfile +docker-compose.yml +Singularity.def +/app.py +/main.py +templates +/docs/images/CI-images +.coverage* +*.cache +ai_info.txt +convert_docstrings.py +TODO.txt +democracy_sim/simulation_output diff --git a/README.md b/README.md index 09752bf..111744b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![Pages](https://github.com/jurikane/DemocracySim/actions/workflows/ci.yml/badge.svg)](https://jurikane.github.io/DemocracySim/) [![pytest main](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml/badge.svg?branch=main)](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml) -[![codecov](https://codecov.io/gh/jurikane/DemocracySim/graph/badge.svg?token=QVNSXWIGNE)](https://codecov.io/gh/jurikane/DemocracySim) +[![codecov](https://codecov.io/gh/jurikane/DemocracySim/branch/main/graph/badge.svg)](https://codecov.io/gh/jurikane/DemocracySim) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [//]: # ([![pytest dev](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml/badge.svg?branch=dev)](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml)) @@ -18,53 +18,32 @@ This project is kindly supported by [OpenPetition](https://osd.foundation). For details see the [documentation](https://jurikane.github.io/DemocracySim/) on GitHub-pages. 
-## Short overview in German - -• Multi-Agenten Simulation - - untersucht werden soll die Partizipation der Agenten an den Wahlen - - Auswirkung von verschiedenen Wahlverfahren auf die Partizipation - - Verlauf der Partizipation über die Zeit (Umgebungsänderung, Änderung der Vermögensverteilung, ...) - -• Umgebung: - - Gitterstruktur ohne Ränder - - jedes Viereck im Gitter ist ein Feld, eine Menge von (zusammenhängenden) Feldern ist ein Gebiet - - Jedes Feld im Gitter hat eine von vier Farben (r, g, b, w) diese verändert sich mit einer bestimmten - "Mutationsrate", die Änderung ist abhängig von den Ergebnissen der letzten Gruppenentscheidung - über ein Gebiet, welches das Feld einschließt (d.h. die Umgebung reagiert auf die Gruppenentscheidungen) - -• Agenten - - Intelligenz: top-down approach d.h. die Agenten bekommen durch ein training eine einfache KI - (auf Basis von Entscheidungsbäumen um das Verhalten nachvollziehen zu können) - - Haben bestimmtes Budget (Motivation) - - Entscheidungen (Agenten können): - - umliegende Felder erkunden ("sich bilden") - kostet - - an Wahlen teilnehmen - kostet - - abwarten - geringe kosten - - "Agentenpersönlichkeit": jede Agentin besitzt zwei Präferenzrelationen über die Farben r, g und b - - ⇨ 15 Agententypen (zufällig und normalverteilt) - - diese wirken sich auf die Belohnungen der Abstimmungsergebnisse aus - -• Wahlen - - Abgestimmt wird über die Häufigkeitsverteilung der 4 Farben im Wahlgebiet (objektive Wahrheit soll - "kluge Gruppenentscheidung" simulieren) - - Belohnung wird an alle Agenten im Wahlgebiet ausgeschüttet: - - je näher (kendal-tau Distanz) die abgestimmte Verteilung (Wahlergebnis) an der wahren Verteilung liegt, - umso größer die Belohnung - - eine Hälfte der Belohnung geht an alle zu gleichen Teilen - - zweite Hälfte wird entsprechend der "Agentenpersönlichkeit" (siehe oben) ausgeschüttet - - - ⇨ abstimmende Agenten im Zwiespalt: - - so abstimmen wie die Verteilung vermutlich wirklich ist (gute 
Entscheidung für alle - nach bestem wissen) - - oder eher egoistisch, sodass jetzt und in Zukunft möglichst viel an den Agenten selbst geht - (beachte: Das Ergebnis beeinflusst auch die zukünftige Verteilung im Gebiet) - -Interessante Fragen: -- machen die verschiedenen Wahlverfahren einen Unterschied und wenn ja welchen? -- wie verhalten sich Agententypen die in der Minderheit/Mehrheit sind? -- wie wirkt sich (nicht) Partizipation langfristig aus? -- wie wirkt sich die Verteilung von Vermögen auf die Partizipation aus und umgekehrt? -- welche Muster entstehen in den Gebieten (lokal, regional, global)? -Auch interessant: -- was passiert, wenn die Gruppe der abstimmenden Agenten zufällig ausgewählt wird -(„Bürgerräte“ also kostenlose oder vergütete Teilnahme von x% aller Agenten)? -(werden die Entscheidungen „besser“, wie verteilt sich der Wohlstand, …) +## Overview + +**DemocracySim** is a multi-agent simulation framework designed to study democratic participation +and group decision-making in a dynamic, evolving environment. +Agents interact within a grid-based world, form beliefs about their surroundings, +and vote in elections that influence both their individual outcomes and the state of the system. + +The environment consists of a toroidal grid of colored fields, where neighboring cells form territories. +Each territory holds regular elections in which agents vote on the observed color distribution. +The results of these elections not only influence how agents are rewarded +but also shape the environment itself through controlled mutation processes. + +Agents have limited resources and face decisions about whether to participate in elections, or remain inactive. +Each agent belongs to a personality type defined by preferences over the possible field colors, +with types distributed to create majority and minority dynamics. 
+During elections, agents face a strategic trade-off between voting for what benefits them personally +and voting for what they believe to be the most accurate representation of their territory—decisions +that impact both immediate rewards and the system’s future state. + +The simulation tracks a range of metrics including participation rates, collective accuracy, +reward inequality (Gini index), and behavioral indicators such as altruism and diversity of expressed opinions. +**DemocracySim** also allows for the evaluation of group performance under different normative goals—utilitarian, +egalitarian, or Rawlsian—by comparing actual outcomes to theoretically optimal decisions. + +By modeling participation dilemmas, reward mechanisms, and personality-driven behavior, +**DemocracySim** provides a controlled environment for investigating how democratic systems +respond to different institutional rules and individual incentives. +It is intended both as a research tool and as a foundation for future explorations into deliberation, representation, +and fairness in collective choice. diff --git a/democracy_sim/__init__.py b/democracy_sim/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/democracy_sim/app.py b/democracy_sim/app.py new file mode 100644 index 0000000..f726047 --- /dev/null +++ b/democracy_sim/app.py @@ -0,0 +1,86 @@ +from mesa.experimental import JupyterViz, make_text, Slider +import solara +from model_setup import * +# Data visualization tools. +from matplotlib.figure import Figure + + +def get_agents_assets(model: ParticipationModel): + """ + Display a text count of how many happy agents there are. 
+ """ + all_assets = list() + # Store the results + for agent in model.voting_agents: + all_assets.append(agent.assets) + return f"Agents wealth: {all_assets}" + + +def agent_portrayal(agent: VoteAgent): + # Construct and return the portrayal dictionary + portrayal = { + "size": agent.assets, + "color": "tab:orange", + } + return portrayal + + +def space_drawer(model, agent_portrayal): + fig = Figure(figsize=(8, 5), dpi=100) + ax = fig.subplots() + + # Set plot limits and aspect + ax.set_xlim(0, model.grid.width) + ax.set_ylim(0, model.grid.height) + ax.set_aspect("equal") + ax.invert_yaxis() # Match grid's origin + + fig.tight_layout() + + return solara.FigureMatplotlib(fig) + + +model_params = { + "height": grid_rows, + "width": grid_cols, + "draw_borders": False, + "num_agents": Slider("# Agents", 200, 10, 9999999, 10), + "num_colors": Slider("# Colors", 4, 2, 100, 1), + "color_adj_steps": Slider("# Color adjustment steps", 5, 0, 9, 1), + "heterogeneity": Slider("Color-heterogeneity factor", color_heterogeneity, 0.0, 0.9, 0.1), + "num_areas": Slider("# Areas", num_areas, 4, min(grid_cols, grid_rows)//2, 1), + "av_area_height": Slider("Av. Area Height", area_height, 2, grid_rows//2, 1), + "av_area_width": Slider("Av. 
Area Width", area_width, 2, grid_cols//2, 1), + "area_size_variance": Slider("Area Size Variance", area_var, 0.0, 1.0, 0.1), +} + + +def agent_portrayal(agent): + portrayal = participation_draw(agent) + if portrayal is None: + return {} + else: + return portrayal + +def agent_portrayal(agent): + portrayal = { + "Shape": "circle", + "Color": "red", + "Filled": "true", + "Layer": 0, + "r": 0.5, + } + return portrayal + +grid = mesa.visualization.CanvasGrid(agent_portrayal, 10, 10, 500, 500) + + +page = JupyterViz( + ParticipationModel, + model_params, + #measures=["wealth", make_text(get_agents_assets),], + agent_portrayal=agent_portrayal, + #agent_portrayal=participation_draw, + #space_drawer=space_drawer, +) +page # noqa diff --git a/democracy_sim/distance_functions.py b/democracy_sim/distance_functions.py new file mode 100644 index 0000000..90af2fa --- /dev/null +++ b/democracy_sim/distance_functions.py @@ -0,0 +1,168 @@ +from math import comb +import numpy as np +from numpy.typing import NDArray +from typing import TypeAlias + +FloatArray: TypeAlias = NDArray[np.float64] + + +def kendall_tau_on_ranks(rank_arr_1, rank_arr_2, search_pairs, color_vec): + """ + DON'T USE + (don't use this for orderings!) + + This function calculates the kendal tau distance between two rank vektors. + (The Kendall tau rank distance is a metric that counts the number + of pairwise disagreements between two ranking lists. + The larger the distance, the more dissimilar the two lists are. + Kendall tau distance is also called bubble-sort distance). + Rank vectors hold the rank of each option (option = index). + Not to be confused with an ordering (or sequence) where the vector + holds options and the index is the rank. 
+ + Args: + rank_arr_1: First (NumPy) array containing the ranks of each option + rank_arr_2: The second rank array + search_pairs: The pairs of indices (for efficiency) + color_vec: The vector of colors (for efficiency) + + Returns: + The kendall tau distance + """ + # Get the ordering (option names being 0 to length) + ordering_1 = np.argsort(rank_arr_1) + ordering_2 = np.argsort(rank_arr_2) + # print("Ord1:", list(ordering_1), " Ord2:", list(ordering_2)) + # Create the mapping array + mapping_array = np.empty_like(ordering_1) # Empty array with same shape + mapping_array[ordering_1] = color_vec # Fill the mapping + # Use the mapping array to rename elements in ordering_2 + renamed_arr_2 = mapping_array[ordering_2] # Uses NumPys advanced indexing + # print("Ren1:",list(range(len(color_vec))), " Ren2:", list(renamed_arr_2)) + # Count inversions using precomputed pairs + kendall_distance = 0 + # inversions = [] + for i, j in search_pairs: + if renamed_arr_2[i] > renamed_arr_2[j]: + # inversions.append((renamed_arr_2[i], renamed_arr_2[j])) + kendall_distance += 1 + # print("Inversions:\n", inversions) + return kendall_distance + + +def unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs): + """ + This function calculates the kendal tau distance on two orderings. + An ordering holds the option names in the order of their rank (rank=index). 
+ + Args: + ordering_1: First (NumPy) array containing ranked options + ordering_2: The second ordering array + search_pairs: Containing search pairs of indices (for efficiency) + + Returns: + The kendall tau distance + """ + # Rename the elements to reduce the problem to counting inversions + mapping = {option: idx for idx, option in enumerate(ordering_1)} + renamed_arr_2 = np.array([mapping[option] for option in ordering_2]) + # Count inversions using precomputed pairs + kendall_distance = 0 + for i, j in search_pairs: + if renamed_arr_2[i] > renamed_arr_2[j]: + kendall_distance += 1 + return kendall_distance + + +def kendall_tau(ordering_1, ordering_2, search_pairs): + """ + This calculates the normalized Kendall tau distance of two orderings. + The Kendall tau rank distance is a metric that counts the number + of pairwise disagreements between two ranking lists. + The larger the distance, the more dissimilar the two lists are. + Kendall tau distance is also called bubble-sort distance. + An ordering holds the option names in the order of their rank (rank=index). + + Args: + ordering_1: First (NumPy) array containing ranked options + ordering_2: The second ordering array + search_pairs: Containing the pairs of indices (for efficiency) + + Returns: + The kendall tau distance + """ + # TODO: remove these tests (comment out) on actual simulations to speed up + n = ordering_1.size + if n > 0: + expected_arr = np.arange(n) + assert (np.array_equal(np.sort(ordering_1), expected_arr) + and np.array_equal(np.sort(ordering_2), expected_arr)) , \ + f"Error: Sequences {ordering_1}, {ordering_2} aren't comparable." 
+ + # Get the unnormalized Kendall tau distance + dist = unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs) + # Maximum possible Kendall tau distance + max_distance = comb(n, 2) # This is n choose 2, or n(n-1)/2 + # Normalize the distance + normalized_distance = dist / max_distance + + return normalized_distance + + +def spearman_distance(rank_arr_1, rank_arr_2): + """ + Beware: don't use this for orderings! + + This function calculates the Spearman distance between two rank vektors. + Spearman's foot rule is a measure of the distance between ranked lists. + It is given as the sum of the absolute differences between the ranks + of the two lists. + This function is meant to work with numeric values as well. + Hence, we only assume the rank values to be comparable (e.q. normalized). + + Args: + rank_arr_1: First (NumPy) array containing the ranks of each option + rank_arr_2: The second rank array + + Returns: + The Spearman distance + """ + # TODO: remove these tests (comment out) on actual simulations + assert rank_arr_1.size == rank_arr_2.size, \ + "Rank arrays must have the same length" + if rank_arr_1.size > 0: + assert (rank_arr_1.min() == rank_arr_2.min() + and rank_arr_1.max() == rank_arr_2.max()), \ + f"Error: Sequences {rank_arr_1}, {rank_arr_2} aren't comparable." + return np.sum(np.abs(rank_arr_1 - rank_arr_2)) + + +def spearman(ordering_1, ordering_2, _search_pairs=None): + """ + This calculates the normalized Spearman distance between two orderings. + Spearman's foot rule is a measure of the distance between ranked lists. + It is given as the sum of the absolute differences between the ranks + of the two orderings (values from 0 to n-1 in any order). + + Args: + ordering_1: The first (NumPy) array containing the option's ranks. + ordering_2: The second rank array. + _search_pairs: This parameter is intentionally unused. 
+ + Returns: + The Spearman distance + """ + # TODO: remove these tests (comment out) on actual simulations to speed up + n = ordering_1.size + if n > 0: + expected_arr = np.arange(n) + assert (np.array_equal(np.sort(ordering_1), expected_arr) + and np.array_equal(np.sort(ordering_2), expected_arr)) , \ + f"Error: Sequences {ordering_1}, {ordering_2} aren't comparable." + distance = np.sum(np.abs(ordering_1 - ordering_2)) + # Normalize + if n % 2 == 0: # Even number of elements + max_dist = n**2 / 2 + else: # Odd number of elements + max_dist = n * (n - 1) / 2 + return distance / max_dist diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py new file mode 100644 index 0000000..29348d1 --- /dev/null +++ b/democracy_sim/model_setup.py @@ -0,0 +1,255 @@ +""" +This file handles the definition of the canvas and model parameters. +""" +from typing import TYPE_CHECKING, cast +from mesa.visualization.modules import ChartModule +from democracy_sim.participation_agent import ColorCell +from democracy_sim.participation_model import (ParticipationModel, + distance_functions, + social_welfare_functions) +from math import factorial +import mesa + +# Parameters + +############# +# Elections # +############# +election_costs = 1 +max_reward = 50 +election_impact_on_mutation = 1.8 # 0.1-5.0 +mu = 0.05 # 0.001-0.5 +# Voting rules (see social_welfare_functions.py) +rule_idx = 1 +# Distance functions (see distance_functions.py) +distance_idx = 1 +#################### +# Model parameters # +#################### +num_agents = 800 +common_assets = 40000 +# Colors +num_colors = 3 +color_patches_steps = 3 +patch_power = 1.0 +color_heterogeneity = 0.3 +known_cells = 10 +# Voting Agents +num_personalities = 4 +# Grid +grid_rows = 100 # height +grid_cols = 80 # width +cell_size = 10 +canvas_height = grid_rows * cell_size +canvas_width = grid_cols * cell_size +draw_borders = True +# Voting Areas +num_areas = 16 +av_area_height = 25 +# area_height = grid_rows // 
int(sqrt(num_areas)) +av_area_width = 20 +# area_width = grid_cols // int(sqrt(num_areas)) +# num_areas = 4 +# av_area_height = 50 +# av_area_width = 40 +area_size_variance = 0.0 +######################## +# Statistics and Views # +######################## +show_area_stats = True + + +_COLORS = [ + "White", + "Red", + "Green", + "Blue", + "Yellow", + "Aqua", + "Fuchsia", + #"Lavender", + "Lime", + "Maroon", + #"Navy", + #"Olive", + "Orange", + #"Purple", + #"Silver", + #"Teal", + # "Pink", + # "Brown", + # "Gold", + # "Coral", + # "Crimson", + # "DarkBlue", + # "DarkRed", + # "DarkGreen", + # "DarkKhaki", + # "DarkMagenta", + # "DarkOliveGreen", + # "DarkOrange", + # "DarkTurquoise", + # "DarkViolet", + # "DeepPink", +] # 10 colors + + +def participation_draw(cell: ColorCell): + """ + This function is registered with the visualization server to be called + each tick to indicate how to draw the cell in its current color. + + Args: + cell: The cell in the simulation + + Returns: + The portrayal dictionary. 
+ """ + if cell is None: + raise AssertionError + color = _COLORS[cell.color] + portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, + "x": cell.row, "y": cell.col, + "Color": color} + # TODO: maybe: draw the agent number in the opposing color + # If the cell is a border cell, change its appearance + if TYPE_CHECKING: # Type hint for IDEs + cell.model = cast(ParticipationModel, cell.model) + if cell.is_border_cell and cell.model.draw_borders: + portrayal["Shape"] = "circle" + portrayal["r"] = 0.9 # Adjust the radius to fit within the cell + if color == "White": + portrayal["Color"] = "LightGrey" + # Add position (x, y) to the hover-text + portrayal["Position"] = f"{cell.position}" + portrayal["Color - text"] = _COLORS[cell.color] + if cell.num_agents_in_cell > 0: + portrayal[f"text"] = str(cell.num_agents_in_cell) + portrayal["text_color"] = "Black" + for a in cell.areas: + unique_id = a.unique_id + if unique_id == -1: + unique_id = "global" + text = f"{a.num_agents} agents, color dist: {a.color_distribution}" + portrayal[f"Area {unique_id}"] = text + for voter in cell.agents: + text = f"personality: {voter.personality}, assets: {voter.assets}" + portrayal[f"Agent {voter.unique_id}"] = text + return portrayal + + +canvas_element = mesa.visualization.CanvasGrid( + participation_draw, grid_cols, grid_rows, canvas_width, canvas_height +) + + +wealth_chart = mesa.visualization.modules.ChartModule( + [{"Label": "Collective assets", "Color": "Black"}], + data_collector_name='datacollector' +) + + +color_distribution_chart = mesa.visualization.modules.ChartModule( + [{"Label": f"Color {i}", + "Color": "LightGrey" if _COLORS[i] == "White" else _COLORS[i]} + for i in range(len(_COLORS))], + data_collector_name='datacollector' + ) + +voter_turnout = mesa.visualization.ChartModule( + [{"Label": "Voter turnout globally (in percent)", "Color": "Black"}, + {"Label": "Gini Index (0-100)", "Color": "Red"}], + data_collector_name='datacollector') + + 
+model_params = { + "height": grid_rows, + "width": grid_cols, + "draw_borders": mesa.visualization.Checkbox( + name="Draw border cells", value=draw_borders + ), + "rule_idx": mesa.visualization.Slider( + name=f"Rule index {[r.__name__ for r in social_welfare_functions]}", + value=rule_idx, min_value=0, max_value=len(social_welfare_functions)-1, + ), + "distance_idx": mesa.visualization.Slider( + name=f"Dist-Function index {[f.__name__ for f in distance_functions]}", + value=distance_idx, min_value=0, max_value=len(distance_functions)-1, + ), + "election_costs": mesa.visualization.Slider( + name="Election costs", value=election_costs, min_value=0, max_value=100, + step=1, description="The costs for participating in an election" + ), + "max_reward": mesa.visualization.Slider( + name="Maximal reward", value=max_reward, min_value=0, + max_value=election_costs*100, + step=1, description="The costs for participating in an election" + ), + "mu": mesa.visualization.Slider( + name="Mutation rate", value=mu, min_value=0.001, max_value=0.5, + step=0.001, description="Probability of a color cell to mutate" + ), + "election_impact_on_mutation": mesa.visualization.Slider( + name="Election impact on mutation", value=election_impact_on_mutation, + min_value=0.1, max_value=5.0, step=0.1, + description="Factor determining how strong mutation accords to election" + ), + "num_agents": mesa.visualization.Slider( + name="# Agents", value=num_agents, min_value=10, max_value=99999, + step=10 + ), + "num_colors": mesa.visualization.Slider( + name="# Colors", value=num_colors, min_value=2, max_value=len(_COLORS), + step=1 + ), + "num_personalities": mesa.visualization.Slider( + name="# different personalities", value=num_personalities, + min_value=1, max_value=factorial(num_colors), step=1 + ), + "common_assets": mesa.visualization.Slider( + name="Initial common assets", value=common_assets, + min_value=num_agents, max_value=1000*num_agents, step=10 + ), + "known_cells": 
mesa.visualization.Slider( + name="# known fields", value=known_cells, + min_value=1, max_value=100, step=1 + ), + "color_patches_steps": mesa.visualization.Slider( + name="Patches size (# steps)", value=color_patches_steps, + min_value=0, max_value=9, step=1, + description="More steps lead to bigger color patches" + ), + "patch_power": mesa.visualization.Slider( + name="Patches power", value=patch_power, min_value=0.0, max_value=3.0, + step=0.2, description="Increases the power/radius of the color patches" + ), + "heterogeneity": mesa.visualization.Slider( + name="Global color distribution heterogeneity", + value=color_heterogeneity, min_value=0.0, max_value=0.9, step=0.1, + description="The higher the heterogeneity factor the greater the" + + "difference in how often some colors appear overall" + ), + "num_areas": mesa.visualization.Slider( + name=f"# Areas within the {grid_rows}x{grid_cols} world", step=1, + value=num_areas, min_value=4, max_value=min(grid_cols, grid_rows)//2 + ), + "av_area_height": mesa.visualization.Slider( + name="Av. area height", value=av_area_height, + min_value=2, max_value=grid_rows//2, + step=1, description="Select the average height of an area" + ), + "av_area_width": mesa.visualization.Slider( + name="Av. 
area width", value=av_area_width, + min_value=2, max_value=grid_cols//2, + step=1, description="Select the average width of an area" + ), + "area_size_variance": mesa.visualization.Slider( + name="Area size variance", value=area_size_variance, + # TODO there is a division by zero error for value=1.0 - check this + min_value=0.0, max_value=0.99, step=0.1, + description="Select the variance of the area sizes" + ), + "show_area_stats": mesa.visualization.Checkbox( + name="Show all statistics", value=show_area_stats + ), +} diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py new file mode 100644 index 0000000..adae33c --- /dev/null +++ b/democracy_sim/participation_agent.py @@ -0,0 +1,306 @@ +from typing import TYPE_CHECKING, cast, List, Optional +import numpy as np +from mesa import Agent +if TYPE_CHECKING: # Type hint for IDEs + from democracy_sim.participation_model import ParticipationModel + + +def combine_and_normalize(arr_1: np.array, arr_2: np.array, factor: float): + """ + Combine two arrays weighted by a factor favoring arr_1. + The first array is to be the estimated real distribution. + And the other is to be the personality vector of the agent. + + Args: + arr_1: The first array to be combined (real distribution). + arr_2: The second array to be combined (personality vector). + factor: The factor to weigh the two arrays. + + Returns: + result (np.array): The normalized weighted linear combination. + + Example: + TODO + """ + # Ensure f is between 0 and 1 TODO: remove this on simulations to speed up + if not (0 <= factor <= 1): + raise ValueError("Factor f must be between 0 and 1") + # Linear combination + res = factor * arr_1 + (1 - factor) * arr_2 + # Normalize/scale result s. t. 
it resembles a distribution vector (sum=1) + total = sum(res) + # assert total == 1.0, f"Sum of result is {total} and not 1.0" + return res / total + + +class VoteAgent(Agent): + """An agent that has limited knowledge and resources and + can decide to use them to participate in elections. + """ + + def __init__(self, unique_id, model, pos, personality, assets=1, add=True): + """ Create a new agent. + + Attributes: + unique_id: The unique identifier of the agent. + model: The simulation model of which the agent is part of. + pos: The position of the agent in the grid. + personality: Represents the agent's preferences among colors. + assets: The wealth/assets/motivation of the agent. + """ + super().__init__(unique_id=unique_id, model=model) + # The "pos" variable in mesa is special, so I avoid it here + try: + row, col = pos + except ValueError: + raise ValueError("Position must be a tuple of two integers.") + self._position = row, col + self._assets = assets + self._num_elections_participated = 0 + self.personality = personality + self.cell = model.grid.get_cell_list_contents([(row, col)])[0] + # ColorCell objects the agent knows (knowledge) + self.known_cells: List[Optional[ColorCell]] = [None] * model.known_cells + # Add the agent to the models' agent list and the cell + if add: + model.voting_agents.append(self) + cell = model.grid.get_cell_list_contents([(row, col)])[0] + cell.add_agent(self) + # Election relevant variables + self.est_real_dist = np.zeros(self.model.num_colors) + self.confidence = 0.0 + + def __str__(self): + return (f"Agent(id={self.unique_id}, pos={self.position}, " + f"personality={self.personality}, assets={self.assets})") + + @property + def position(self): + """Return the location of the agent.""" + return self._position + + @property + def row(self): + """Return the row location of the agent.""" + return self._position[0] + + @property + def col(self): + """Return the col location of the agent.""" + return self._position[1] + + @property 
+ def assets(self): + """Return the assets of this agent.""" + return self._assets + + @assets.setter + def assets(self, value): + self._assets = value + + @assets.deleter + def assets(self): + del self._assets + + @property + def num_elections_participated(self): + return self._num_elections_participated + + @num_elections_participated.setter + def num_elections_participated(self, value): + self._num_elections_participated = value + + def update_known_cells(self, area): + """ + This method is to update the list of known cells before casting a vote. + + Args: + area: The area that holds the pool of cells in question + """ + n_cells = len(area.cells) + k = len(self.known_cells) + self.known_cells = ( + self.random.sample(area.cells, k) + if n_cells >= k + else area.cells + ) + + def ask_for_participation(self, area): + """ + The agent decides + whether to participate in the upcoming election of a given area. + + Args: + area: The area in which the election takes place. + + Returns: + True if the agent decides to participate, False otherwise + """ + #print("Agent", self.unique_id, "decides whether to participate", + # "in election of area", area.unique_id) + # TODO Implement this (is to be decided upon a learned decision tree) + return np.random.choice([True, False]) + + def decide_altruism_factor(self, area): + """ + Uses a trained decision tree to decide on the altruism factor. + """ + # TODO Implement this (is to be decided upon a learned decision tree) + # This part is important - also for monitoring - save/plot a_factors + a_factor = np.random.uniform(0.0, 1.0) + #print(f"Agent {self.unique_id} has an altruism factor of: {a_factor}") + return a_factor + + def compute_assumed_opt_dist(self, area): + """ + Computes a color distribution that the agent assumes to be an optimal + choice in any election (regardless of whether it exists as a real option + to vote for or not). It takes "altruistic" concepts into consideration. 
+ + Args: + area (Area): The area in which the election takes place. + + Returns: + ass_opt: The assumed optimal color distribution (normalized). + """ + # Compute the "altruism_factor" via a decision tree + a_factor = self.decide_altruism_factor(area) # TODO: Implement this + # Compute the preference ranking vector as a mix between the agent's own + # preferences/personality traits and the estimated real distribution. + est_dist, conf = self.estimate_real_distribution(area) + ass_opt = combine_and_normalize(est_dist, self.personality, a_factor) + return ass_opt + + def vote(self, area): + """ + The agent votes in the election of a given area, + i.e., she returns a preference ranking vector over all options. + (Ranking: `index = option`, `value proportional to rank`) + The available options are set in the model. + + Args: + area (Area): The area in which the election takes place. + """ + # TODO Implement this (is to be decided upon a learned decision tree) + # Compute the color distribution that is assumed to be the best choice. + est_best_dist = self.compute_assumed_opt_dist(area) + # Make sure that r= is normalized! + # (r.min()=0.0 and r.max()=1.0 and all vals x are within [0.0, 1.0]!) + ############## + if TYPE_CHECKING: # Type hint for IDEs + self.model = cast(ParticipationModel, self.model) + + options = self.model.options + dist_func = self.model.distance_func + ranking = np.zeros(options.shape[0]) + color_search_pairs = self.model.color_search_pairs + for i, option in enumerate(options): + # TODO: is it possible to leave out white? + ranking[i] = dist_func(self.personality, option, color_search_pairs) + ranking /= ranking.sum() # Normalize the preference vector + return ranking + + def estimate_real_distribution(self, area): + """ + The agent estimates the real color distribution in the area based on + her own knowledge (self.known_cells). + + Args: + area (Area): The area the agent uses to estimate. 
+ """ + known_colors = np.array([cell.color for cell in self.known_cells]) + # Get the unique color ids present and count their occurrence + unique, counts = np.unique(known_colors, return_counts=True) + # Update the est_real_dist and confidence values of the agent + self.est_real_dist.fill(0) # To ensure the ones not in unique are 0 + self.est_real_dist[unique] = counts / known_colors.size + self.confidence = len(self.known_cells) / area.num_cells + return self.est_real_dist, self.confidence + + +class ColorCell(Agent): + """ + Represents a single cell (a field in the grid) with a specific color. + + Attributes: + color (int): The color of the cell. + """ + + def __init__(self, unique_id, model, pos: tuple, initial_color: int): + """ + Initializes a ColorCell, at the given row, col position. + + Args: + unique_id (int): The unique identifier of the cell. + model (mesa.Model): The mesa model of which the cell is part of. + pos (Tuple[int, int]): The position of the cell in the grid. + initial_color (int): The initial color of the cell. + """ + super().__init__(unique_id, model) + # The "pos" variable in mesa is special, so I avoid it here + self._row = pos[0] + self._col = pos[1] + self.color = initial_color # The cell's current color (int) + self._next_color = None + self.agents = [] + self.areas = [] + self.is_border_cell = False + + def __str__(self): + return (f"Cell ({self.unique_id}, pos={self.position}, " + f"color={self.color}, num_agents={self.num_agents_in_cell})") + + @property + def col(self): + """The col location of this cell.""" + return self._col + + @property + def row(self): + """The row location of this cell.""" + return self._row + + @property + def position(self): # The variable pos is special in mesa! 
+ """The location of this cell.""" + return self._row, self._col + + @property + def num_agents_in_cell(self): + """The number of agents in this cell.""" + return len(self.agents) + + def add_agent(self, agent): + self.agents.append(agent) + + def remove_agent(self, agent): + self.agents.remove(agent) + + def add_area(self, area): + self.areas.append(area) + + def color_step(self): + """ + Determines the cells' color for the next step. + TODO + """ + # _neighbor_iter = self.model.grid.iter_neighbors( + # (self._row, self._col), True) + # neighbors_opinion = Counter(n.get_state() for n in _neighbor_iter) + # # Following is a tuple (attribute, occurrences) + # polled_opinions = neighbors_opinion.most_common() + # tied_opinions = [] + # for neighbor in polled_opinions: + # if neighbor[1] == polled_opinions[0][1]: + # tied_opinions.append(neighbor) + # + # self._next_color = self.random.choice(tied_opinions)[0] + pass + + def advance(self): + """ + Set the state of the agent to the next state. + TODO + """ + # self._color = self._next_color + pass diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py new file mode 100644 index 0000000..8555dc5 --- /dev/null +++ b/democracy_sim/participation_model.py @@ -0,0 +1,998 @@ +from typing import TYPE_CHECKING, cast, List, Optional +import mesa +from democracy_sim.participation_agent import VoteAgent, ColorCell +from democracy_sim.social_welfare_functions import majority_rule, approval_voting +from democracy_sim.distance_functions import spearman, kendall_tau +from itertools import permutations, product, combinations +from math import sqrt +import numpy as np + +# Voting rules to be accessible by index +social_welfare_functions = [majority_rule, approval_voting] +# Distance functions +distance_functions = [spearman, kendall_tau] + + +class Area(mesa.Agent): + def __init__(self, unique_id, model, height, width, size_variance): + """ + Create a new area. 
+ + Attributes: + unique_id (int): The unique identifier of the area. + model (ParticipationModel): The simulation model of which the area is part of. + height (int): The average height of the area (see size_variance). + width (int): The average width of the area (see size_variance). + size_variance (float): A variance factor applied to height and width. + """ + if TYPE_CHECKING: # Type hint for IDEs + model = cast(ParticipationModel, model) + super().__init__(unique_id=unique_id, model=model) + self._set_dimensions(width, height, size_variance) + self.agents = [] + self._personality_distribution = None + self.cells = [] + self._idx_field = None # An indexing position of the area in the grid + self._color_distribution = np.zeros(model.num_colors) # Initialize to 0 + self._voted_ordering = None + self._voter_turnout = 0 # In percent + self._dist_to_reality = None # Elected vs. actual color distribution + + def __str__(self): + return (f"Area(id={self.unique_id}, size={self._height}x{self._width}, " + f"at idx_field={self._idx_field}, " + f"num_agents={self.num_agents}, num_cells={self.num_cells}, " + f"color_distribution={self.color_distribution})") + + def _set_dimensions(self, width, height, size_var): + """ + Sets the area's dimensions based on the provided width, height, and variance factor. + + This function adjusts the width and height by a random factor drawn from + the range [1 - size_var, 1 + size_var]. If size_var is zero, no variance + is applied. + + Args: + width (int): The average width of the area. + height (int): The average height of the area. + size_var (float): A variance factor applied to width and height. + Must be in [0, 1]. + + Raises: + ValueError: If size_var is not between 0 and 1. 
+ """ + if size_var == 0: + self._width = width + self._height = height + self.width_off, self.height_off = 0, 0 + elif size_var > 1 or size_var < 0: + raise ValueError("Size variance must be between 0 and 1") + else: # Apply variance + w_var_factor = self.random.uniform(1 - size_var, 1 + size_var) + h_var_factor = self.random.uniform(1 - size_var, 1 + size_var) + self._width = int(width * w_var_factor) + self.width_off = abs(width - self._width) + self._height = int(height * h_var_factor) + self.height_off = abs(height - self._height) + + @property + def num_agents(self): + return len(self.agents) + + @property + def num_cells(self): + return self._width * self._height + + @property + def personality_distribution(self): + return self._personality_distribution + + @property + def color_distribution(self): + return self._color_distribution + + @property + def voted_ordering(self): + return self._voted_ordering + + @property + def voter_turnout(self): + return self._voter_turnout + + @property + def dist_to_reality(self): + return self._dist_to_reality + + @property + def idx_field(self): + return self._idx_field + + @idx_field.setter + def idx_field(self, pos: tuple): + """ + Sets the indexing field (cell coordinate in the grid) of the area. + + This method sets the areas indexing-field (top-left cell coordinate) + which determines which cells and agents on the grid belong to the area. + The cells and agents are added to the area's lists of cells and agents. + + Args: + pos: (x, y) representing the areas top-left coordinates. + """ + # TODO: Check - isn't it better to make sure agents are added to the area when they are created? + # TODO -- There is something wrong here!!! 
(Agents are not added to the areas) + if TYPE_CHECKING: # Type hint for IDEs + self.model = cast(ParticipationModel, self.model) + try: + x_val, y_val = pos + except ValueError: + raise ValueError("The idx_field must be a tuple") + # Check if the values are within the grid + if x_val < 0 or x_val >= self.model.width: + raise ValueError(f"The x={x_val} value must be within the grid") + if y_val < 0 or y_val >= self.model.height: + raise ValueError(f"The y={y_val} value must be within the grid") + x_off = self.width_off // 2 + y_off = self.height_off // 2 + # Adjusting indices with offset and ensuring they wrap around the grid + adjusted_x = (x_val + x_off) % self.model.width + adjusted_y = (y_val + y_off) % self.model.height + # Assign the cells to the area + for x_area in range(self._width): + for y_area in range(self._height): + x = (adjusted_x + x_area) % self.model.width + y = (adjusted_y + y_area) % self.model.height + cell = self.model.grid.get_cell_list_contents([(x, y)])[0] + if TYPE_CHECKING: + cell = cast(ColorCell, cell) + self.add_cell(cell) # Add the cell to the area + # Add all voting agents to the area + for agent in cell.agents: + self.add_agent(agent) + cell.add_area(self) # Add the area to the color-cell + # Mark as a border cell if true + if (x_area == 0 or y_area == 0 + or x_area == self._width - 1 + or y_area == self._height - 1): + cell.is_border_cell = True + self._idx_field = (adjusted_x, adjusted_y) + self._update_color_distribution() + self._update_personality_distribution() + + def _update_personality_distribution(self) -> None: + """ + This method calculates the areas current distribution of personalities. 
+ """ + personalities = list(self.model.personalities) + p_counts = {str(i): 0 for i in personalities} + # Count the occurrence of each personality + for agent in self.agents: + p_counts[str(agent.personality)] += 1 + # Normalize the counts + self._personality_distribution = [p_counts[str(p)] / self.num_agents + for p in personalities] + + def add_agent(self, agent: VoteAgent) -> None: + """ + Appends an agent to the areas agents list. + + Args: + agent (VoteAgent): The agent to be added to the area. + """ + self.agents.append(agent) + + def add_cell(self, cell: ColorCell) -> None: + """ + Appends a cell to the areas cells list. + + Args: + cell (ColorCell): The agent to be added to the area. + """ + self.cells.append(cell) + + + def _conduct_election(self) -> int: + """ + Simulates the election within the area and manages rewards. + + The election process asks agents to participate, collects votes, + aggregates preferences using the model's voting rule, + and saves the elected option as the latest winning option. + Agents incur costs for participation + and may receive rewards based on the outcome. + + Returns: + int: The voter turnout in percent. Returns 0 if no agent participates. + """ + # Ask agents for participation and their votes + preference_profile = self._tally_votes() + # Check for the case that no agent participated + if preference_profile.ndim != 2: + print("Area", self.unique_id, "no one participated in the election") + return 0 # TODO: What to do in this case? Cease the simulation? 
+ # Aggregate the preferences ⇒ returns an option ordering + aggregated = self.model.voting_rule(preference_profile) + # Save the "elected" ordering in self._voted_ordering + winning_option = aggregated[0] + self._voted_ordering = self.model.options[winning_option] + # Calculate and distribute rewards + self._distribute_rewards() + # TODO check whether the current color dist and the mutation of the + # colors is calculated and applied correctly and does not interfere + # in any way with the election process + # Statistics + n = preference_profile.shape[0] # Number agents participated + return int((n / self.num_agents) * 100) # Voter turnout in percent + + def _tally_votes(self): + """ + Gathers votes from agents who choose to (and can afford to) participate. + + Each participating agent contributes a vector of dissatisfaction values with + respect to the available options. These values are combined into a NumPy array. + + Returns: + np.ndarray: A 2D array representing the preference profiles of all + participating agents. Each row corresponds to an agent's vote. + """ + preference_profile = [] + for agent in self.agents: + model = self.model + el_costs = model.election_costs + # Give agents their (new) known fields + agent.update_known_cells(area=self) + if (agent.assets >= el_costs + and agent.ask_for_participation(area=self)): + agent.num_elections_participated += 1 + # Collect the participation fee + agent.assets = agent.assets - el_costs + # Ask the agent for her preference + preference_profile.append(agent.vote(area=self)) + # agent.vote returns an array containing dissatisfaction values + # between 0 and 1 for each option, interpretable as rank values. + return np.array(preference_profile) + + def _distribute_rewards(self) -> None: + """ + Calculates and distributes rewards (or penalties) to agents based on outcomes. + + The function measures the difference between the actual color distribution + and the elected outcome using a distance metric. 
It then increments or reduces + agent assets accordingly, ensuring assets do not fall below zero. + """ + model = self.model + # Calculate the distance to the real distribution using distance_func + real_color_ord = np.argsort(self.color_distribution)[::-1] # Descending + dist_func = model.distance_func + self._dist_to_reality = dist_func(real_color_ord, self.voted_ordering, + model.color_search_pairs) + # Calculate the rpa - rewards per agent (can be negative) + rpa = (0.5 - self.dist_to_reality) * model.max_reward # TODO: change this (?) + # Distribute the two types of rewards + color_search_pairs = model.color_search_pairs + for a in self.agents: + # Personality-based reward factor + p = dist_func(a.personality, real_color_ord, color_search_pairs) + # + common reward (reward_pa) for all agents + a.assets = int(a.assets + (0.5 - p) * model.max_reward + rpa) + if a.assets < 0: # Correct wealth if it fell below zero + a.assets = 0 + + def _update_color_distribution(self) -> None: + """ + Recalculates the area's color distribution and updates the _color_distribution attribute. + + This method counts how many cells of each color belong to the area, normalizes + the counts by the total number of cells, and stores the result internally. + """ + color_count = {} + for cell in self.cells: + color = cell.color + color_count[color] = color_count.get(color, 0) + 1 + for color in range(self.model.num_colors): + dist_val = color_count.get(color, 0) / self.num_cells # Float + self._color_distribution[color] = dist_val + + def _filter_cells(self, cell_list): + """ + This method is used to filter a given list of cells to return only + those which are within the area. + + Args: + cell_list: A list of ColorCell cells to be filtered. + + Returns: + A list of ColorCell cells that are within the area. + """ + cell_set = set(self.cells) + return [c for c in cell_list if c in cell_set] + + def step(self) -> None: + """ + Run one step of the simulation. 
+ + Conduct an election in the area, + mutate the cells' colors according to the election outcome + and update the color distribution of the area. + """ + self._voter_turnout = self._conduct_election() # The main election logic! + if self.voter_turnout == 0: + return # TODO: What to do if no agent participated..? + + # Mutate colors in cells + # Take some number of cells to mutate (i.e., 5 %) + n_to_mutate = int(self.model.mu * self.num_cells) + # TODO/Idea: What if the voter_turnout determines the mutation rate? + # randomly select x cells + cells_to_mutate = self.random.sample(self.cells, n_to_mutate) + # Use voted ordering to pick colors in descending order + # To pre-select colors for all cells to mutate + # TODO: Think about this: should we take local color-structure + # into account - like in color patches - to avoid colors mutating into + # very random structures? # Middendorf + colors = np.random.choice(self.voted_ordering, size=n_to_mutate, + p=self.model.color_probs) + # Assign the newly selected colors to the cells + for cell, color in zip(cells_to_mutate, colors): + cell.color = color + # Important: Update the color distribution (because colors changed) + self._update_color_distribution() + + +def compute_collective_assets(model): + sum_assets = sum(agent.assets for agent in model.voting_agents) + return sum_assets + + +def compute_gini_index(model): + # TODO: separate to be able to calculate it zone-wise as well as globally + # TODO: Unit-test this function + # Extract the list of assets for all agents + assets = [agent.assets for agent in model.voting_agents] + n = len(assets) + if n == 0: + return 0 # No agents, no inequality + # Sort the assets + sorted_assets = sorted(assets) + # Calculate the Gini Index + cumulative_sum = sum((i + 1) * sorted_assets[i] for i in range(n)) + total_sum = sum(sorted_assets) + if total_sum == 0: + return 0 # No agent has any assets => view as total equality + gini_index = (2 * cumulative_sum) / (n * total_sum) - (n + 
1) / n + return int(gini_index * 100) # Return in "percent" (0-100) + + +def get_voter_turnout(model): + voter_turnout_sum = 0 + num_areas = model.num_areas + for area in model.areas: + voter_turnout_sum += area.voter_turnout + if not model.global_area is None: + # TODO: Check the correctness and whether it makes sense to include the global area here + voter_turnout_sum += model.global_area.voter_turnout + num_areas += 1 + elif num_areas == 0: + return 0 + return voter_turnout_sum / num_areas + + +def create_personality(num_colors): + """ NOT USED + Creates and returns a list of 'personalities' that are to be assigned + to agents. Each personality is a NumPy array of length 'num_colors' + but it is not a full ranking vector since the number of colors influencing + the personality is limited. The array is therefore not normalized. + White (color 0) is never part of a personality. + + Args: + num_colors: The number of colors in the simulation. + """ + # TODO add unit tests for this function + personality = np.random.randint(0, 100, num_colors) # TODO low=0 or 1? + # Save the sum to "normalize" the values later (no real normalization) + sum_value = sum(personality) + 1e-8 # To avoid division by zero + # Select only as many features as needed (num_personality_colors) + # to_del = num_colors - num_personality_colors # How many to be deleted + # if to_del > 0: + # # The 'replace=False' ensures that indexes aren't chosen twice + # indices = np.random.choice(num_colors, to_del, replace=False) + # personality[indices] = 0 # 'Delete' the values + personality[0] = 0 # White is never part of the personality + # "Normalize" the rest of the values + personality = personality / sum_value + return personality + + +def get_color_distribution_function(color): + """ + This method returns a lambda function for the color distribution chart. + + Args: + color: The color number (used as index). 
+ """ + return lambda m: m.av_area_color_dst[color] + + +def get_area_voter_turnout(area): + if isinstance(area, Area): + return area.voter_turnout + return None + +def get_area_dist_to_reality(area): + if isinstance(area, Area): + return area.dist_to_reality + return None + +def get_area_color_distribution(area): + if isinstance(area, Area): + return area.color_distribution.tolist() + return None + +def get_election_results(area): + """ + Returns the voted ordering as a list or None if not available. + + Returns: + List of voted ordering or None. + """ + if isinstance(area, Area) and area.voted_ordering is not None: + return area.voted_ordering.tolist() + return None + + +class CustomScheduler(mesa.time.BaseScheduler): + def step(self): + """ + Execute the step function for all area- and cell-agents by type, + first for Areas then for ColorCells. + """ + model = self.model + if TYPE_CHECKING: + model = cast(ParticipationModel, model) + # Step through Area agents first (and in "random" order) + # TODO think about randomization process + model.random.shuffle(model.areas) + for area in model.areas: + area.step() + # TODO: add global election? + self.steps += 1 + self.time += 1 + + +class ParticipationModel(mesa.Model): + """ + The ParticipationModel class provides a base environment for + multi-agent simulations within a grid-based world (split into territories) + that reacts dynamically to frequently held collective decision-making + processes ("elections"). It incorporates voting agents with personalities, + color cells (grid fields), and areas (election territories). This model is + designed to analyze different voting rules and their impact. + + This class provides mechanisms for creating and managing cells, agents, + and areas, along with data collection for analysis. Colors in the model + mutate depending on a predefined mutation rate and are influenced by + elections. Agents interact based on their personalities, knowledge, and + experiences. 
+ + Attributes: + grid (mesa.space.SingleGrid): Grid representing the environment + with a single occupancy per cell (the color). + height (int): The height of the grid. + width (int): The width of the grid. + colors (ndarray): Array containing the unique color identifiers. + voting_rule (Callable): A function defining the social welfare + function to aggregate agent preferences. This callable typically + takes agent rankings as input and returns a single aggregate result. + distance_func (Callable): A function used to calculate a + distance metric when comparing rankings. It takes two rankings + and returns a numeric distance score. + mu (float): Mutation rate; the probability of each color cell to mutate + after an elections. + color_probs (ndarray): + Probabilities used to determine individual color mutation outcomes. + options (ndarray): Matrix (array of arrays) where each subarray + represents an option (color-ranking) available to agents. + option_vec (ndarray): Array holding the indices of the available options + for computational efficiency. + color_cells (list[ColorCell]): List of all color cells. + Initialized during the model setup. + voting_agents (list[VoteAgent]): List of all voting agents. + Initialized during the model setup. + personalities (list): List of unique personalities available for agents. + personality_distribution (ndarray): The (global) probability + distribution of personalities among all agents. + areas (list[Area]): List of areas (regions or territories within the + grid) in which elections take place. Initialized during model setup. + global_area (Area): The area encompassing the entire grid. + av_area_height (int): Average height of areas in the simulation. + av_area_width (int): Average width of areas created in the simulation. + area_size_variance (float): Variance in area sizes to introduce + non-uniformity among election territories. + common_assets (int): Total resources to be distributed among all agents. 
+ av_area_color_dst (ndarray): Current (area)-average color distribution. + election_costs (float): Cost associated with participating in elections. + max_reward (float): Maximum reward possible for an agent each election. + known_cells (int): Number of cells each agent knows the color of. + datacollector (mesa.DataCollector): A tool for collecting data + (metrics and statistics) at each simulation step. + scheduler (CustomScheduler): The scheduler responsible for executing the + step function. + draw_borders (bool): Only for visualization (no effect on simulation). + _preset_color_dst (ndarray): A predefined global color distribution + (set randomly) that affects cell initialization globally. + """ + + def __init__(self, height, width, num_agents, num_colors, num_personalities, + mu, election_impact_on_mutation, common_assets, known_cells, + num_areas, av_area_height, av_area_width, area_size_variance, + patch_power, color_patches_steps, draw_borders, heterogeneity, + rule_idx, distance_idx, election_costs, max_reward, + show_area_stats): + super().__init__() + # TODO clean up class (public/private variables) + self.height = height + self.width = width + self.colors = np.arange(num_colors) + # Create a scheduler that goes through areas first then color cells + self.scheduler = CustomScheduler(self) + # The grid + # SingleGrid enforces at most one agent per cell; + # MultiGrid allows multiple agents to be in the same cell. 
+ self.grid = mesa.space.SingleGrid(height=height, width=width, torus=True) + # Random bias factors that affect the initial color distribution + self._vertical_bias = self.random.uniform(0, 1) + self._horizontal_bias = self.random.uniform(0, 1) + self.draw_borders = draw_borders + # Color distribution (global) + self._preset_color_dst = self.create_color_distribution(heterogeneity) + self._av_area_color_dst = self._preset_color_dst + # Elections + self.election_costs = election_costs + self.max_reward = max_reward + self.known_cells = known_cells # Integer + self.voting_rule = social_welfare_functions[rule_idx] + self.distance_func = distance_functions[distance_idx] + self.options = self.create_all_options(num_colors) + # Simulation variables + self.mu = mu # Mutation rate for the color cells (0.1 = 10 % mutate) + self.common_assets = common_assets + # Election impact factor on color mutation through a probability array + self.color_probs = self.init_color_probs(election_impact_on_mutation) + # Create search pairs once for faster iterations when comparing rankings + self.search_pairs = list(combinations(range(0, self.options.size), 2)) # TODO check if correct! + self.option_vec = np.arange(self.options.size) # Also to speed up + self.color_search_pairs = list(combinations(range(0, num_colors), 2)) + # Create color cells + self.color_cells: List[Optional[ColorCell]] = [None] * (height * width) + self._initialize_color_cells() + # Create agents + # TODO: Where do the agents get there known cells from and how!? 
+ self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents + self.personalities = self.create_personalities(num_personalities) + self.personality_distribution = self.pers_dist(num_personalities) + self.initialize_voting_agents() + # Area variables + self.global_area = self.initialize_global_area() # TODO create bool variable to make this optional + self.areas: List[Optional[Area]] = [None] * num_areas + self.av_area_height = av_area_height + self.av_area_width = av_area_width + self.area_size_variance = area_size_variance + # Adjust the color pattern to make it less random (see color patches) + self.adjust_color_pattern(color_patches_steps, patch_power) + # Create areas + self.initialize_all_areas() + # Data collector + self.datacollector = self.initialize_datacollector() + # Collect initial data + self.datacollector.collect(self) + # Statistics + self.show_area_stats = show_area_stats + + @property + def num_colors(self): + return len(self.colors) + + @property + def av_area_color_dst(self): + return self._av_area_color_dst + + @av_area_color_dst.setter + def av_area_color_dst(self, value): + self._av_area_color_dst = value + + @property + def num_agents(self): + return len(self.voting_agents) + + @property + def num_areas(self): + return len(self.areas) + + @property + def preset_color_dst(self): + return len(self._preset_color_dst) + + def _initialize_color_cells(self): + """ + This method initializes a color cells for each cell in the model's grid. + """ + # Create a color cell for each cell in the grid + for unique_id, (_, (row, col)) in enumerate(self.grid.coord_iter()): + # The colors are chosen by a predefined color distribution + color = self.color_by_dst(self._preset_color_dst) + # Create the cell + cell = ColorCell(unique_id, self, (row, col), color) + # Add it to the grid + self.grid.place_agent(cell, (row, col)) + # Add the color cell to the scheduler + #self.scheduler.add(cell) # TODO: check speed diffs using this.. 
+        # And to the 'model.color_cells' list (for faster access)
+        self.color_cells[unique_id] = cell  # TODO: check if it's not better to simply use the grid when finally changing the grid type to SingleGrid
+
+    def initialize_voting_agents(self):
+        """
+        This method initializes as many voting agents as set in the model with
+        a randomly chosen personality. It places them randomly on the grid.
+        It also ensures that each agent is assigned to the color cell it is
+        standing on.
+        """
+        dist = self.personality_distribution
+        rng = np.random.default_rng()
+        assets = self.common_assets // self.num_agents
+        for a_id in range(self.num_agents):
+            # Get a random position
+            x = self.random.randrange(self.width)
+            y = self.random.randrange(self.height)
+            personality = rng.choice(self.personalities, p=dist)
+            # Create agent without appending (add to the pre-defined list)
+            agent = VoteAgent(a_id, self, (x, y), personality,
+                              assets=assets, add=False)  # TODO: initial assets?!
+            self.voting_agents[a_id] = agent  # Add using the index (faster)
+            # Add the agent to the grid by placing it on a cell
+            cell = self.grid.get_cell_list_contents([(x, y)])[0]
+            if TYPE_CHECKING:
+                cell = cast(ColorCell, cell)
+            cell.add_agent(agent)
+
+    def init_color_probs(self, election_impact):
+        """
+        This method initializes a probability array for the mutation of colors.
+        The probabilities reflect the election outcome with some impact factor.
+
+        Args:
+            election_impact (float): The impact the election has on the mutation.
+        """
+        p = (np.arange(self.num_colors, 0, -1)) ** election_impact
+        # Normalize
+        p = p / sum(p)
+        return p
+
+    def initialize_area(self, a_id: int, x_coord, y_coord):
+        """
+        This method initializes one area in the model's grid.
+ """ + area = Area(a_id, self, self.av_area_height, self.av_area_width, + self.area_size_variance) + # Place the area in the grid using its indexing field + # this adds the corresponding color cells and voting agents to the area + area.idx_field = (x_coord, y_coord) + # Save in the models' areas-list + self.areas[a_id] = area + + def initialize_all_areas(self) -> None: + """ + Initializes all areas on the grid in the model. + + This method divides the grid into approximately evenly distributed areas, + ensuring that the areas are spaced as uniformly as possible based + on the grid dimensions and the average area size specified by + `av_area_width` and `av_area_height`. + + The grid may contain more or fewer areas than an exact square + grid arrangement due to `num_areas` not always being a perfect square. + If the number of areas is not a perfect square, the remaining areas + are placed randomly on the grid to ensure that `num_areas` + areas are initialized. + + Args: + None. + + Returns: + None. initializes `num_areas` and places them directly on the grid. + + Raises: + None, but if `self.num_areas == 0`, the method exits early. + + Example: + - Given `num_areas = 4` and `grid.width = grid.height = 10`, + this method might initialize areas with approximate distances + to maximize uniform distribution (like a 2x2 grid). + - For `num_areas = 5`, four areas will be initialized evenly, and + the fifth will be placed randomly due to the uneven distribution. 
+ """ + if self.num_areas == 0: + return + # Calculate the number of areas in each direction + roo_apx = round(sqrt(self.num_areas)) + nr_areas_x = self.grid.width // self.av_area_width + nr_areas_y = self.grid.width // self.av_area_height + # Calculate the distance between the areas + area_x_dist = self.grid.width // roo_apx + area_y_dist = self.grid.height // roo_apx + print(f"roo_apx: {roo_apx}, nr_areas_x: {nr_areas_x}, " + f"nr_areas_y: {nr_areas_y}, area_x_dist: {area_x_dist}, " + f"area_y_dist: {area_y_dist}") # TODO rm print + x_coords = range(0, self.grid.width, area_x_dist) + y_coords = range(0, self.grid.height, area_y_dist) + # Add additional areas if necessary (num_areas not a square number) + additional_x, additional_y = [], [] + missing = self.num_areas - len(x_coords) * len(y_coords) + for _ in range(missing): + additional_x.append(self.random.randrange(self.grid.width)) + additional_y.append(self.random.randrange(self.grid.height)) + # Create the area's ids + a_ids = iter(range(self.num_areas)) + # Initialize all areas + for x_coord in x_coords: + for y_coord in y_coords: + a_id = next(a_ids, -1) + if a_id == -1: + break + self.initialize_area(a_id, x_coord, y_coord) + for x_coord, y_coord in zip(additional_x, additional_y): + self.initialize_area(next(a_ids), x_coord, y_coord) + + + def initialize_global_area(self): + """ + This method initializes the global area spanning the whole grid. + + Returns: + Area: The global area (with unique_id set to -1 and idx to (0, 0)). + """ + global_area = Area(-1, self, self.height, self.width, 0) + # Place the area in the grid using its indexing field + # this adds the corresponding color cells and voting agents to the area + global_area.idx_field = (0, 0) + return global_area + + + def create_personalities(self, n: int): + """ + Creates n unique "personalities," where a "personality" is a specific + permutation of self.num_colors color indices. + + Args: + n (int): Number of unique personalities to generate. 
+ + Returns: + np.ndarray: Array of shape `(n, num_colors)`. + + Raises: + ValueError: If `n` exceeds the possible unique permutations. + + Example: + for n=2 and self.num_colors=3, the function could return: + + [[1, 0, 2], + [2, 1, 0]] + """ + # p_colors = range(1, self.num_colors) # Personalities exclude white + max_permutations = np.math.factorial(self.num_colors) + if n > max_permutations or n < 1: + raise ValueError(f"Cannot generate {n} unique personalities: " + f"only {max_permutations} unique ones exist.") + selected_permutations = set() + while len(selected_permutations) < n: + # Sample a permutation lazily and add it to the set + perm = tuple(self.random.sample(range(self.num_colors), + self.num_colors)) + selected_permutations.add(perm) + + return np.array(list(selected_permutations)) + + + def initialize_datacollector(self): + color_data = {f"Color {i}": get_color_distribution_function(i) for i in + range(self.num_colors)} + return mesa.DataCollector( + model_reporters={ + "Collective assets": compute_collective_assets, + "Gini Index (0-100)": compute_gini_index, + "Voter turnout globally (in percent)": get_voter_turnout, + **color_data + }, + agent_reporters={ + # "Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, + # "Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else None, + # + #"VoterTurnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, + "VoterTurnout": get_area_voter_turnout, + "DistToReality": get_area_dist_to_reality, + "ColorDistribution": get_area_color_distribution, + "ElectionResults": get_election_results, + # "Personality-Based Reward": get_area_personality_based_reward, + # "Gini Index": get_area_gini_index + }, + # tables={ + # "AreaData": ["Step", "AreaID", "ColorDistribution", + # "VoterTurnout"] + # } + ) + + + def step(self): + """ + Advance the model by one step. 
+ """ + + # Conduct elections in the areas + # and then mutate the color cells according to election outcomes + self.scheduler.step() + # Update the global color distribution + self.update_av_area_color_dst() + # Collect data for monitoring and data analysis + self.datacollector.collect(self) + + + def adjust_color_pattern(self, color_patches_steps: int, patch_power: float): + """Adjusting the color pattern to make it less predictable. + + Args: + color_patches_steps: How often to run the color-patches step. + patch_power: The power of the patching (like a radius of impact). + """ + cells = self.color_cells + for _ in range(color_patches_steps): + print(f"Color adjustment step {_}") + self.random.shuffle(cells) + for cell in cells: + most_common_color = self.color_patches(cell, patch_power) + cell.color = most_common_color + + + def create_color_distribution(self, heterogeneity: float): + """ + This method is used to create a color distribution that has a bias + according to the given heterogeneity factor. + + Args: + heterogeneity (float): Factor used as sigma in 'random.gauss'. + """ + colors = range(self.num_colors) + values = [abs(self.random.gauss(1, heterogeneity)) for _ in colors] + # Normalize (with float division) + total = sum(values) + dst_array = [value / total for value in values] + return dst_array + + + def color_patches(self, cell: ColorCell, patch_power: float): + """ + This method is used to create a less random initial color distribution + using a similar logic to the color patches model. + It uses a (normalized) bias coordinate to center the impact of the + color patches structures impact around. + + Args: + cell: The cell that may change its color accordingly + patch_power: Like a radius of impact around the bias point. + + Returns: + int: The consensus color or the cell's own color if no consensus. 
+ """ + # Calculate the normalized position of the cell + normalized_x = cell.row / self.height + normalized_y = cell.col / self.width + # Calculate the distance of the cell to the bias point + bias_factor = (abs(normalized_x - self._horizontal_bias) + + abs(normalized_y - self._vertical_bias)) + # The closer the cell to the bias-point, the less often it is + # to be replaced by a color chosen from the initial distribution: + if abs(self.random.gauss(0, patch_power)) < bias_factor: + return self.color_by_dst(self._preset_color_dst) + # Otherwise, apply the color patches logic + neighbor_cells = self.grid.get_neighbors((cell.row, cell.col), + moore=True, + include_center=False) + color_counts = {} # Count neighbors' colors + for neighbor in neighbor_cells: + if isinstance(neighbor, ColorCell): + color = neighbor.color + color_counts[color] = color_counts.get(color, 0) + 1 + if color_counts: + max_count = max(color_counts.values()) + most_common_colors = [color for color, count in color_counts.items() + if count == max_count] + return self.random.choice(most_common_colors) + return cell.color # Return the cell's own color if no consensus + + + def update_av_area_color_dst(self): + """ + This method updates the av_area_color_dst attribute of the model. + Beware: On overlapping areas, cells are counted several times. + """ + sums = np.zeros(self.num_colors) + for area in self.areas: + sums += area.color_distribution + # Return the average color distributions + self.av_area_color_dst = sums / self.num_areas + + + @staticmethod + def pers_dist(size): + """ + This method creates a normalized normal distribution array for picking + and depicting the distribution of personalities in the model. + + Args: + size: The mean value of the normal distribution. + + Returns: + np.array: Normalized (sum is one) array mimicking a gaussian curve. 
+ """ + # Generate a normal distribution + rng = np.random.default_rng() + dist = rng.normal(0, 1, size) + dist.sort() # To create a gaussian curve like array + dist = np.abs(dist) # Flip negative values "up" + # Normalize the distribution to sum to one + dist /= dist.sum() + return dist + + + @staticmethod + def create_all_options(n: int, include_ties=False): + """ + Creates a matrix (an array of all possible ranking vectors), + if specified including ties. + Rank values start from 0. + + Args: + n (int): The number of items to rank (number of colors in our case) + include_ties (bool): If True, rankings include ties. + + Returns: + np.array: A matrix containing all possible ranking vectors. + """ + if include_ties: + # Create all possible combinations and sort out invalid rankings + # i.e. [1, 1, 1] or [1, 2, 2] aren't valid as no option is ranked first. + r = np.array([np.array(comb) for comb in product(range(n), repeat=n) + if set(range(max(comb))).issubset(comb)]) + else: + r = np.array([np.array(p) for p in permutations(range(n))]) + return r + + @staticmethod + def color_by_dst(color_distribution: np.array) -> int: + """ + Selects a color (int) from range(len(color_distribution)) + based on the given color_distribution array, where each entry represents + the probability of selecting that index. + + Args: + color_distribution: Array determining the selection probabilities. + + Returns: + int: The selected index based on the given probabilities. + + Example: + color_distribution = [0.2, 0.3, 0.5] + Color 1 will be selected with a probability of 0.3. 
+ """ + if abs(sum(color_distribution) -1) > 1e-8: + raise ValueError("The color_distribution array must sum to 1.") + r = np.random.random() # Random float between 0 and 1 + cumulative_sum = 0.0 + for color_idx, prob in enumerate(color_distribution): + if prob < 0: + raise ValueError("color_distribution contains negative value.") + cumulative_sum += prob + if r < cumulative_sum: # Compare r against the cumulative probability + return color_idx + + # This point should never be reached. + raise ValueError("Unexpected error in color_distribution.") diff --git a/democracy_sim/run.py b/democracy_sim/run.py new file mode 100644 index 0000000..2e6f0fa --- /dev/null +++ b/democracy_sim/run.py @@ -0,0 +1,43 @@ +from mesa.visualization.ModularVisualization import ModularServer +from democracy_sim.participation_model import ParticipationModel +from democracy_sim.model_setup import (model_params as params, canvas_element, + voter_turnout, wealth_chart, + color_distribution_chart) +from democracy_sim.visualisation_elements import * + + +class CustomModularServer(ModularServer): + """ This is to prevent double initialization of the model. + For some reason, the Server resets the model once on initialization + and again on server launch. 
+ """ + def __init__(self, model_cls, visualization_elements, + name="Mesa Model", model_params=None, port=None): + self.initialized = False + super().__init__(model_cls, visualization_elements, name, model_params, + port) + + def reset_model(self): + if not self.initialized: + self.initialized = True + return # This ensures that the first reset-call is ignored + super().reset_model() + + +personality_distribution = PersonalityDistribution() +area_stats = AreaStats() +vto_areas = VoterTurnoutElement() +area_personality_dists = AreaPersonalityDists() + +server = CustomModularServer( + model_cls=ParticipationModel, + visualization_elements=[canvas_element, color_distribution_chart, + wealth_chart, voter_turnout, vto_areas, + personality_distribution, area_stats, + area_personality_dists], + name="DemocracySim", + model_params=params, +) + +if __name__ == "__main__": + server.launch(open_browser=True) diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py new file mode 100644 index 0000000..ff933f1 --- /dev/null +++ b/democracy_sim/social_welfare_functions.py @@ -0,0 +1,194 @@ +""" +Here we define the social welfare functions that can be used in the simulation. +Beware: +We assume the preference relation in the following (unconventional) way +on purpose. +pref_table: numpy matrix with one row per agent, column number is option number + and the values (each in [0,1]) are normalized ranking values. +The purpose of this is to allow for non-discrete and non-equidistant rankings. +""" +from typing import TYPE_CHECKING + +import numpy as np + + +def complete_ranking(ranking: np.array, num_options: int): + """ + This function adds options that are not in the ranking in a random order. + + Args: + ranking: The ranking to be completed with the missing options. + num_options: The total number of options. + + Returns: + The completed ranking. 
+ """ + all_options = np.arange(num_options) + mask = np.isin(all_options, ranking, invert=True) + non_included_options = all_options[mask] + np.random.shuffle(non_included_options) + return np.concatenate((ranking, non_included_options)) + +def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): + """ + This function prepares the preference table for majority rule such that + it handles ties in the voters' preferences. + Because majority rule cannot usually deal with ties. + The tie breaking is randomized to ensure anonymity and neutrality. + + Args: + pref_table: The agent's preferences. + noise_factor: Influences the amount of noise to be added + + Returns: + The preference table without ties for first choices. + """ + # Add some random noise to break ties (based on the variances) + variances = np.var(pref_table, axis=1) + # If variances are zero, all values are equal, then select a random option + mask = (variances == 0) + # Split + pref_tab_var_zero = pref_table[mask] + pref_tab_var_non_zero = pref_table[~mask] + n, m = pref_tab_var_non_zero.shape + + # Set exactly one option to 0 (the first choice) and the rest to 1/(m-1) + pref_tab_var_zero.fill(1 / (m-1)) + for i in range(pref_tab_var_zero.shape[0]): + rand_option = np.random.randint(0, m) + pref_tab_var_zero[i, rand_option] = 0 + # On the non-zero part, add some noise to the values to break ties + non_zero_variances = variances[~mask] + # Generate noise based on the variances + noise_eps = non_zero_variances / noise_factor + noise = np.random.uniform(-noise_eps[:, np.newaxis], + noise_eps[:, np.newaxis], (n, m)) + # `noise_eps[:, np.newaxis]` reshapes noise_eps from shape `(n,)` to (n, 1) + pref_tab_var_non_zero += noise + + # Put the parts back together + return np.concatenate((pref_tab_var_non_zero, pref_tab_var_zero)) + +def majority_rule(pref_table): + """ + This function implements the majority rule social welfare function. 
+ Beware: Input is a preference table (values define a ranking, index=option), + but the output is a ranking/an ordering (values represent options). + + Args: + pref_table: The agent's preferences (disagreement) as a NumPy matrix + + Returns: + The resulting preference ranking (beware: its not a pref. relation) + """ + n, m = pref_table.shape # n agents, m options + # Break ties if they exist + pref_table = run_tie_breaking_preparation_for_majority(pref_table) + # Count how often an option is ranked first (indexes of the min values) + first_choices = np.argmin(pref_table, axis=1) + # To avoid bias toward voters of low indices in the counting, we shuffle + np.random.shuffle(first_choices) # (crucial when counting shows ties later) + first_choice_counts = {} + for choice in first_choices: + first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 + # Get the ranking from the counts + option_count_pairs = list(first_choice_counts.items()) + option_count_pairs.sort(key=lambda x: x[1], reverse=True) + ranking = np.array([pair[0] for pair in option_count_pairs]) + # Faster: + # count_pairs = np.array(option_count_pairs) + # # Sort the array by the second element in descending order + # sorted_indices = np.argsort(count_pairs[:, 1])[::-1] + # count_pairs = count_pairs[sorted_indices] + # ranking = count_pairs[:, 0].astype(int) + # Fill up the ranking with the missing options (if any) + if ranking.shape[0] < m: + ranking = complete_ranking(ranking, m) + return ranking + +def preprocessing_for_approval(pref_table, threshold=None): + """ + This function prepares the preference table for approval voting + by interpreting every value below a threshold as an approval. + Beware: the values are distance/disagreement => smaller = less disagreement + The standard threshold is 1/m (m = number of options). + The reasoning is that if the preferences are normalized, + 1/m ensures the threshold to be proportionate to the number of options. 
+ It also ensures that, on average, half of the options will be approved. + The actual number of approved options, however, + can still vary depending on the specific values in the preference table. + + Args: + pref_table: The agent's preferences. + threshold: The threshold for approval. + + Returns: + The preference table with the options approved or not. + """ + if threshold is None: + threshold = 1 / pref_table.shape[1] + return (pref_table < threshold).astype(int) + + +def imp_prepr_for_approval(pref_table): + """ + This is just like preprocessing_for_approval, but more intelligent. + It sets the threshold depending on the variances. + + Args: + pref_table: The agent's preferences. + + Returns: + The preference table with the options approved or not. + """ + # The threshold is set according to the variances + threshold = np.mean(pref_table, axis=1) - np.var(pref_table, axis=1) + if TYPE_CHECKING: + assert isinstance(threshold, np.ndarray) + return (pref_table < threshold.reshape(-1, 1)).astype(int) + + +def approval_voting(pref_table): + """ TODO: does this take the meaning of the values into account? value = dist. = disagreement ! + This function implements the approval voting social welfare function. + Beware: Input is a preference table (values define a ranking, index=option), + but the output is a ranking/an ordering (values represent options). + + Args: + pref_table: The agent's preferences (disagreement) as a NumPy matrix + + Returns: + The resulting preference ranking (beware: not a pref. relation). 
+ """ + pref_table = imp_prepr_for_approval(pref_table) + # Count how often each option is approved + approval_counts = np.sum(pref_table, axis=0) + # Add noise to break ties TODO check for bias + eps = 1e-4 + noise = np.random.uniform(-eps, eps, len(approval_counts)) + #option_count_pairs = list(enumerate(approval_counts + noise)) + #option_count_pairs.sort(key=lambda x: x[1], reverse=True) + #return [pair[0] for pair in option_count_pairs] + return np.argsort(-(approval_counts + noise)) # TODO: check order (ascending/descending) - np.argsort sorts ascending + + +def continuous_score_voting(pref_table): + """ + TODO: integrate and test + This function implements a continuous score voting based on disagreement. + Beware: Input is a preference table (values define a ranking, index=option), + but the output is a ranking/an ordering (values represent options). + + Args: + pref_table: The agent's preferences (disagreement) as a NumPy matrix + + Returns: + The resulting preference ranking (beware: not a pref. relation). 
+ """ + # Sum up the disagreement for each option + scores = np.sum(pref_table, axis=0) + # Add noise to break ties + eps = 1e-8 + noise = np.random.uniform(-eps, eps, len(scores)) + ranking = np.argsort(-(scores + noise)) + return ranking diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py new file mode 100644 index 0000000..bbc4bdf --- /dev/null +++ b/democracy_sim/visualisation_elements.py @@ -0,0 +1,244 @@ +import matplotlib.pyplot as plt +from typing import TYPE_CHECKING, cast +from mesa.visualization import TextElement +import matplotlib.patches as patches +from model_setup import _COLORS +import base64 +import math +import io + +_COLORS[0] = "LightGray" + +def save_plot_to_base64(fig): + buf = io.BytesIO() + plt.savefig(buf, format='png') + plt.close(fig) + buf.seek(0) + image_base64 = base64.b64encode(buf.read()).decode('utf-8') + buf.close() + return f'' + + +class AreaStats(TextElement): + def render(self, model): + # Only render if show_area_stats is enabled + step = model.scheduler.steps + if not model.show_area_stats or step == 0: + return "" + + # Fetch data from the datacollector + data = model.datacollector.get_agent_vars_dataframe() + color_distribution = data['ColorDistribution'].dropna() + dist_to_reality = data['DistToReality'].dropna() + election_results = data['ElectionResults'].dropna() + + # Extract unique area IDs (excluding the global area) + area_ids = color_distribution.index.get_level_values(1).unique()[1:] + num_colors = len(color_distribution.iloc[0]) + num_areas = len(area_ids) + + # Create subplots with two columns (two plots per area). 
+ fig, axes = plt.subplots(nrows=num_areas, ncols=2, + figsize=(8, 4 * num_areas), sharex=True) + + for area_id in area_ids: + row = area_id + # Left plot: distance to reality value and color distribution + ax1 = axes[row, 0] + area_data = color_distribution.xs(area_id, level=1) + a_data = dist_to_reality.xs(area_id, level=1) + ax1.plot(a_data.index, a_data.values, color='Black', linestyle='--') + for color_idx in range(num_colors): + color_data = area_data.apply(lambda x: x[color_idx]) + ax1.plot(color_data.index, color_data.values, + color=_COLORS[color_idx]) + ax1.set_title(f'Area {area_id} \n' + f'--- deviation from voted distribution') + ax1.set_xlabel('Step') + ax1.set_ylabel('Color Distribution') + + # Right plot: election result + ax2 = axes[row, 1] + area_data = election_results.xs(area_id, level=1) + for color_id in range(num_colors): + color_data = area_data.apply(lambda x: list(x).index( + color_id) if color_id in x else None) + ax2.plot(color_data.index, color_data.values, marker='o', + label=f'Color {color_id}', color=_COLORS[color_id], + linewidth=0.2) + ax2.set_title(f'Area {area_id} \n') + ax2.set_xlabel('Step') + ax2.set_ylabel('Elected ranking (rank values)') + ax2.invert_yaxis() + + plt.tight_layout() + combined_plot = save_plot_to_base64(fig) + + return combined_plot + + +class PersonalityDistribution(TextElement): + + def __init__(self): + super().__init__() + self.personality_distribution = None + self.pers_dist_plot = None + + def create_once(self, model): + if TYPE_CHECKING: + model = cast('ParticipationModel', model) + # Fetch data + dists = model.personality_distribution + personalities = model.personalities + num_personalities = personalities.shape[0] + num_agents = model.num_agents + colors = _COLORS[:model.num_colors] + num_colors = len(personalities[0]) + + fig, ax = plt.subplots(figsize=(6, 4)) + heights = dists * num_agents + bars = ax.bar(range(num_personalities), heights, width=0.6) + + for bar, personality in zip(bars, 
personalities): + height = bar.get_height() + width = bar.get_width() + + for i, color_idx in enumerate(personality): + rect_width = width / num_colors + coords = (bar.get_x() + i * rect_width, 0) + rect = patches.Rectangle(coords, rect_width, height, + color=colors[color_idx]) + ax.add_patch(rect) + + ax.set_xlabel('"Personality" ID') + ax.set_ylabel('Number of Agents') + ax.set_title('Global distribution of personalities among agents') + + plt.tight_layout() + self.pers_dist_plot = save_plot_to_base64(fig) + + def render(self, model): + # Only create a new plot at the start of a simulation + if model.scheduler.steps == 0: + self.create_once(model) + return self.pers_dist_plot + + +class VoterTurnoutElement(TextElement): + def render(self, model): + # Only render if show_area_stats is enabled + step = model.scheduler.steps + if not model.show_area_stats or step == 0: + return "" + # Fetch data from the datacollector + data = model.datacollector.get_agent_vars_dataframe() + voter_turnout = data['VoterTurnout'].dropna() + + # Extract unique area IDs + area_ids = voter_turnout.index.get_level_values(1).unique() + + # Create a single plot + fig, ax = plt.subplots(figsize=(8, 6)) + + for i, area_id in enumerate(area_ids): + area_data = voter_turnout.xs(area_id, level=1) + if i < 10: + line_style = '-' + elif i < 20: + line_style = ':' + else: + line_style = '--' + ax.plot(area_data.index, area_data.values, label=f'Area {area_id}', + linestyle=line_style) + ax.set_title('Voter Turnout by Area Over Time') + ax.set_xlabel('Step') + ax.set_ylabel('Voter Turnout (%)') + ax.legend() + + return save_plot_to_base64(fig) + + +class MatplotlibElement(TextElement): + def render(self, model): + # Only render if show_area_stats is enabled + step = model.scheduler.steps + if not model.show_area_stats or step == 0: + return "" + # Fetch data from the datacollector + data = model.datacollector.get_model_vars_dataframe() + collective_assets = data["Collective assets"] + + # Create a 
plot + fig, ax = plt.subplots() + ax.plot(collective_assets, label="Collective assets") + ax.set_title("Collective Assets Over Time") + ax.set_xlabel("Time") + ax.set_ylabel("Collective Assets") + ax.legend() + + return save_plot_to_base64(fig) + +class StepsTextElement(TextElement): + def render(self, model): + step = model.scheduler.steps + # TODO clean up + first_agents = [str(a) for a in model.voting_agents[:5]] + text = (f"Step: {step} | cells: {len(model.color_cells)} | " + f"areas: {len(model.areas)} | First 5 voters of " + f"{len(model.voting_agents)}: {first_agents}") + return text + + +class AreaPersonalityDists(TextElement): + + def __init__(self): + super().__init__() + self.personality_distributions = None + self.areas_pers_dist_plot = None + + def create_once(self, model): + if TYPE_CHECKING: + model = cast('ParticipationModel', model) + + colors = _COLORS[:model.num_colors] + personalities = model.personalities + num_colors = len(personalities[0]) + num_personalities = personalities.shape[0] + + # Create subplots within a single figure + num_areas = len(model.areas) + num_cols = math.ceil(math.sqrt(num_areas)) + num_rows = math.ceil(num_areas / num_cols) + fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, + figsize=(8, 8), sharex=True) + for ax, area in zip(axes.flatten(), model.areas): + # Fetch data + p_dist = area.personality_distribution + num_agents = area.num_agents + # Subplot + heights = [int(val * num_agents) for val in p_dist] + bars = ax.bar(range(num_personalities), heights, color='skyblue') + + for bar, personality in zip(bars, personalities): + height = bar.get_height() + width = bar.get_width() + + for i, color_idx in enumerate(personality): + rect_width = width / num_colors + coords = (bar.get_x() + i * rect_width, height) + rect = patches.Rectangle(coords, rect_width, 2, + color=colors[color_idx]) + ax.add_patch(rect) + + ax.set_xlabel('"Personality" ID') + ax.set_ylabel('Number of Agents') + ax.set_title(f'Area 
{area.unique_id}') + + plt.tight_layout() + self.areas_pers_dist_plot = save_plot_to_base64(fig) + + def render(self, model): + # Only create a new plot at the start of a simulation + if model.scheduler.steps == 0: + self.create_once(model) + return self.areas_pers_dist_plot diff --git a/docs/de/QA.md b/docs/de/QA.md new file mode 100644 index 0000000..f3cbc1e --- /dev/null +++ b/docs/de/QA.md @@ -0,0 +1,91 @@ +### Warum ist die Granularität so grob, bzw. die Komplexität so niedrig? + +Die Granularität ist weit weg von der Realität. +Das ist Absicht und hat vor allem zwei Gründe. +Zum einen ist die Arbeit noch sehr grundlegend, +weil es in der Literatur zu simulativen Vergleichen von Wahlverfahren noch nicht viel gibt, +auf dem eine höere Komplexität oder Granularität aufgebaut werden kann. +Zum zweiten schwindet mit einer höheren Komplexität/Granularität sehr schnell die Interpretierbarkeit +(und möglicherweise auch Reproduzierbarkeit). +In Simulationen können aber nicht selten schon anhand sehr einfacher Modelle unerwartete Effekte +und Mechanismen auftauchen. +Das ist auch hier die Hoffnung. + + +### Was ist der Hauptkonflikt, den die Simulation untersucht? +Der Hauptkonflikt ist die Teilnahme an der Wahl. +Da die einzelne Agentin zunächst (auf kurze Sicht) i.d.R. nicht erwarten kann +ihre assets durch die Teilnahme zu steigern (außer alle Agenten denken so +und die Wahlbeteiligung ist entsprechend niedrig), +hat sie einen Anreiz sich die Kosten zu sparen und auf die Teilnahme zu verzichten +(nach dem Motto "meine Stimme macht eh keinen Unterschied, dann muss ich auch nicht abstimmen"). +Auf lange Sicht würde sich eine dauerhafte Nichtteilnahme aber vermutlich negativ für die Agentin auswirken, +da zu erwarten ist, dass sich die Umgebung entgegen ihrer Interessen entwickelt. 
+ + +Ein weiterer Konflikt ist die Abstimmung der Agentin selbst, +also ob sie vorrangig ihr Wissen für die tatsächliche Verteilung (der Farben) in die Abstimmung einbringt +(und damit allen hilft), oder eher ihren Interessen (eigenen Präferenzen) nach abstimmt +(um einerseits selbst einen höheren Anteil an der Belohnung zu bekommen +und andererseits die Umgebung zu ihren Gunsten zu beeinflussen). +Ob sie also eher "egoistisch" oder eher "altruistisch" bzw. "Gemeinwohl-orientiert" abstimmt. + + +### Wonach wird optimiert? +Für die Partizipation gibt es hoffentlich kein leicht zu berechnendes Optimum, +da eine Simulation sonst überflüssig wäre, also das müssen wir meinem Verständnis nach verhindern +(in dem Fall müssten wir das Modell komplexer machen). +Die Optimierungsfunktion für das Training der Agenten ist nicht ganz leicht zu lösende Aufgabe. +Gut wäre, wenn es ausreichte, die eigene Belohnung zu maximieren, +weil das i.d.R. die Standardannahme ist. +Ob das ausreicht oder die Agenten Modelle dann zu simpel werden ist noch nicht klar. +Auf jeden Fall dürfen die Agenten weder zu intelligent, noch zu simpel sein. +Vor allem weder zu kurz, noch zu weitsichtig. +Das dürfte aber nicht nur eine Frage der Optimierungsfunktion sein, +sondern auch der genauen Ausgestaltung des Trainings und der Input-Variablen. +Auf jeden Fall ist das Training sehr wahrscheinlich der heikelste Part. + +### Wie funktioniert die Ausschüttung der Belohnung(en)? +1. Nähe des Konsenses an der "Realität": + Jede Agentin (nicht nur die Teilnehmenden) erhält eine Belohnung $b_1$, + welche von dem Ergebnis der Wahl abhängt. + Je näher das Ergebnis der Wahl (die durch die Wahl geschätzte Häufigkeitsreihenfolge der Feldfarben) + an der tatsächlichen Häufigkeitsreihenfolge der Feldfarben ist, + desto größer $b_1$. +2. 
Nähe des Ergebnisses zur (fixen) persönlichen Präferenz (Persönlichkeit): + Jede Agentin, bekommt eine Belohnung $b_2$ + (wahrscheinlich mit $0 ≤ b_2 ≤ b_1$ oder sogar $-b_1 ≤ b_2 ≤ b_1$), + je nachdem wie gut das Ergebnis mit ihrer fixen persönlichen Präferenz + (also ihrer "Persönlichkeit", nicht der von ihr abgegebenen Präferenz) übereinstimmt. + +Dabei soll $b_1$ den Umstand abbilden, dass die Beteiligung an einer Wahl einen +(zwar eigentlich in seiner Höhe sehr subjektiven, aber dennoch vorhandenen) Aufwand bedeutet. +Und dass das Ergebnis bzw. die Folgen des Wahlausganges für alle Personen gleichermaßen gültig sind, +egal ob diese an der Wahl teilgenommen haben oder nicht. + +Durch $b_2$ soll die Tatsache abgebildet werden, dass die Agenten auch eigene Vorlieben oder Bedürfnisse haben, +dass also das Ergebnis für sie persönlich lebensqualitätsbeeinflussend sein kann. +Außerdem ermöglicht $b_2$ die konfliktive Situation, +dass die Wählenden eine Abwägung zwischen einer eher nach persönlicher Präferenz geprägten Stimmabgabe +und einer eher nach eigenem Wissen geprägten (tendenziell eher dem Gemeinwohl dienenden) Stimmabgabe treffen müssen. + +### Welche Wahlverfahren werden untersucht? +Die Wahl (und Anzahl) der Wahlverfahren steht noch nicht ganz fest. +Im Moment ist geplant die folgenden Wahlverfahren zu untersuchen: +- "Plurality" als Standardverfahren +- "Approval-Voting" da weitläufig als bestes Verfahren unter ComSoc-WissenschaftlerInnen angesehen +- "Kemeny" (Ebenfalls oft als bestes Verfahren angesehen, allerdings NP-Schwer). + +Und möglicherweise noch einige Standardverfahren. +Interessant wären auch "exotischere" (weniger gut mathematisch untersuchte oder verbreitete) Verfahren +wie "Systemisches-Konsensieren", "liquid-democracy" +oder repräsentative Wahlverfahren (Wahl eines Gremiums) zu untersuchen. + +### Weitere bzw. 
weiter führende Forschungsfragen +Ebenfalls interessant wäre am Ende der Vergleiche zu untersuchen, +wie sich die Simulation verändert, wenn stets ein fixer Anteil an Agenten zufällig bestimmt wird, +um (kostenlos oder sogar mit Aufwandsentschädigung) an der Wahr teilzunehmen +(anstelle einer Freiwilligkeit welche mit Kosten verbunden ist). + +Des Weiteren könnte untersucht werden was passiert, wenn Agenten zusätzliches "Wissen" (über Feldfarben) kaufen +oder durch "laufen" bzw. springen "erkunden" können. diff --git a/docs/de/teaser.md b/docs/de/teaser.md index 788ae6a..b0c43e7 100644 --- a/docs/de/teaser.md +++ b/docs/de/teaser.md @@ -1,8 +1,8 @@ -# Zukunft gestalten: Demokratische Forschung zur Bewältigung globaler Herausforderungen +# Zukunft gestalten: Demokratie-Forschung zur Bewältigung globaler Herausforderungen In einer Welt voller Komplexität und Unsicherheit stehen wir an einem Scheideweg. Dringende globale Herausforderungen wie der Klimawandel[^9], soziale Ungleichheit[^8] und die ethischen Dilemmata[^4], -die mit dem rasanten Fortschritt der künstlichen Intelligenz (KI) einhergehen, drängen nach Lösungen. +die mit dem rasanten Fortschritt künstlicher Intelligenz (KI) einhergehen, drängen nach Lösungen. Eine Zukunft, in der KI die menschliche Intelligenz in vielen Bereichen übertrifft, erscheint zunehmend wahrscheinlich[^3]. Können wir diese Technologie sinnvoll und sicher in bestehende Governance-Verfahren und Strukturen integrieren? @@ -47,13 +47,17 @@ Reale demokratische Verwaltung geht weit über das bloße Ineinklangbringen indi beinhaltet Pfadabhängigkeiten vergangener Entscheidungen, Desinformation und mangelnde Beteiligung, um nur einige offensichtliche Herausforderungen zu nennen. -Diese Herausforderungen zu bewältigen, erfordert innovative Ansätze. -Dieses Projekt zielt darauf ab, langfristig möglichst viele dieser Einflüsse durch Multi-Agenten-basierte Simulationen integriert zu untersuchen. 
- -Während das Modell und die Forschungsfragen der vorgeschlagenen Masterarbeit -diesen Ansatz nur in seinen absoluten Grundzügen darstellen können, sollte das Potenzial, durch Multi-Agenten-basiertes Modellieren -langfristig alle wesentlichen Aspekte der realen demokratischen Selbstverwaltung untersuchen zu können, -nicht unterschätzt werden. +Um diese Herausforderungen zu bewältigen, sind innovative Ansätze erforderlich. +Das vorliegende Projekt zielt darauf ab, langfristig möglichst viele dieser Einflüsse durch Multi-Agenten-basierte +Simulationen integriert zu untersuchen. + +Das Modell und die Forschungsfragen einer Masterarbeit +können diesen Ansatz natürlich nur in ihren absoluten Grundzügen darstellen. +Soweit uns bekannt, wurde dieser Ansatz noch nicht systematisch in der +Erforschung kollektiver Entscheidungsfindung angewandt, weshalb wir auch sehr grundlegend beginnen. +Dennoch sollte das Potenzial von Multi-Agenten-basiertem Modellieren nicht unterschätzt werden. +Langfristig wird dadurch sehr wahrscheinlich ermöglicht, +alle wesentlichen Aspekte der realen demokratischen Selbstverwaltung zu untersuchen[^10]. Die kollektive Intelligenz einer Gesellschaft durch verbesserte Governance-Verfahren zu stärken, ist wahrscheinlich der effektivste Weg und unser größter Hebel, die Herausforderungen unserer Zeit zu meistern. @@ -76,3 +80,5 @@ ist wahrscheinlich der effektivste Weg und unser größter Hebel, die Herausford [^8]: Thomas Piketty. Das Kapital im 21. Jahrhundert. CH Beck, 2014. [^9]: Hans-Otto Pörtner, Debra C Roberts, H Adams, C Adler, P Aldunce, E Ali, R Ara Begum, R Betts, R Bezner Kerr, R Biesbroek, et al. [Climate change 2022: Impacts, adaptation and vulnerability](https://hal.science/hal-03774939/document). IPCC Sixth Assessment Report, 2022. + +[^10]: Robert L Axtell and J Doyne Farmer. [Agent-based modeling in economics and finance](https://oms-inet.files.svdcdn.com/production/files/JEL-v2.0.pdf): Past, present, and future. 
In: Journal of Economic Literature (2022), pp. 1–10 \ No newline at end of file diff --git a/docs/mesa_docs.md b/docs/mesa_docs.md index 03e2ef6..5c2f93b 100644 --- a/docs/mesa_docs.md +++ b/docs/mesa_docs.md @@ -10,7 +10,7 @@ Mesa is highly flexible, allowing to simulate complex systems and observe emerge ### Agent-Based Modeling and Complex Societal Questions -Multi-agent based simulation is a valuable tool to research voting rules and collective decision-making +Multi-agent-based simulation is a valuable tool to research voting rules and collective decision-making as it allows for the modeling of very complex interactions that are challenging to capture with traditional methods[^3]. ABM is mainly used to research and analyze complex relationships. The focus is on understanding how individual behaviors and interactions lead to collective outcomes. diff --git a/docs/research/goals.md b/docs/research/goals.md new file mode 100644 index 0000000..70caa24 --- /dev/null +++ b/docs/research/goals.md @@ -0,0 +1,20 @@ +**DemocracySim** is a multi-agent simulation framework designed to explore the effects of different voting rules on democratic participation and welfare. +Its broader aim is to be able to include complex and diverse factors via simulation to investigate how collective decision-making processes can be improved. +With a focus on agent-based modeling, the simulation ties together elements of participatory dynamics, resource allocation, and group decision effects in a controlled, evolving system. + +--- + +## Research Questions + +DemocracySim seeks to answer several critical questions: + +- Do different voting procedures produce varying dynamics, and if so, how? +- How do minority and majority agent types behave in collective decision-making? +- What are the long-term effects of (non-)participation on the system? +- How does wealth distribution impact participation and welfare in the simulation? 
+ +--- + +## Broader Implications + +This project offers a controlled testbed for understanding the complex interplay of individual and collective interest in democratic systems. DemocracySim has the potential to reveal valuable insights into real-world voting dynamics. diff --git a/docs/research/metrics.md b/docs/research/metrics.md new file mode 100644 index 0000000..7def853 --- /dev/null +++ b/docs/research/metrics.md @@ -0,0 +1,39 @@ +## Simulation Metrics / Indicators + +### **Participation Rate** *(Aggregate Behavioral Variable)* +- Measures the percentage of agents actively participating in elections at a given time. +- Helps evaluate the *participation dilemma* by analyzing participation across the group and comparing rates for majority vs. minority groups. + +### **Altruism Factor** *(Individual Behavioral Variable)* +- Quantifies the extent to which agents prioritize the **collective good** (e.g., the group's accuracy in guessing) over **individual preferences**, including cases of non-cooperation with a majority they belong to when it conflicts with the (expected) collective good. +- Additionally, tracking the average altruism factor of personality groups can provide insights, though this may be misleading if agents/groups do not participate. + +### **Gini Index** *(Inequality Metric)* +- Measures the inequality in asset distribution among agents within the system. +- Ranges from **0** (perfect equality) to **1** (maximum inequality, where one agent holds all assets). +- Offers insights into how electoral decisions impact wealth/resource distribution over time. + +### **Collective Accuracy** +- Measures how accurately the group, as a collective, estimates the actual color distribution. +- This directly influences rewards and serves as a metric for evaluating group performance against a ground truth. + +### **Diversity of Shared Opinions** +- Evaluates the variation in agents' expressed preferences. 
+- To track whether participating agents provide diverse input or converge on overly similar opinions (e.g., due to majority influence). + +### **Distance to Optimum** +In principle, the optimal decision can be determined based on a predefined goal, allowing the distance between this optimum and the group's actual decision to be measured. + +**Possible predefined goals include:** + +1. **Utilitarian**: + - *Maximize the total sum of distributed rewards.* + - Focus on the *total reward*, regardless of how it is distributed. + +2. **Egalitarian**: + - *Minimize the overall inequality in individual rewards.* + - Focus on **fairness**, aiming for a more just distribution of rewards among members. + +3. **Rawlsian**: + - *Maximize the rewards for the poorest (personality-based) group.* + - Inspired by **John Rawls' Difference Principle**, the focus is on improving the well-being of the least advantaged group while tolerating inequalities elsewhere. diff --git a/docs/research/research_concept.md b/docs/research/research_concept.md new file mode 100644 index 0000000..0f78663 --- /dev/null +++ b/docs/research/research_concept.md @@ -0,0 +1,27 @@ +DemocracySim is set in a grid-based environment where agents interact with their surroundings and participate in group decision-making through elections. The system explores various scenarios and voting rules to understand key dynamics and challenges in democratic participation. + +## Key Features + +### Simulated Environment: +- The grid is designed without boundaries, and each unit (field) within it adopts one of **x** colors. Fields change color based on election results, with a mutation rate affected by prior outcomes. +- Groups of fields form **territories**, which serve as the basis for elections and influence grid evolution. + +### Agents: +- Agents are equipped with a basic artificial intelligence system and operate under a **"top-down" model**, learning decision-making strategies via training. 
+- Each agent has a **limited budget** and must decide whether to participate in elections. +- Agents have individual **preferences** over colors (called *personalities*) and are divided into **y** randomly distributed personality types. + *(The distribution of types forms majority-minority situations.)* + +### Elections and Rewards (Two Dilemmas): +1. **Elections:** + - Elections concern the frequency distribution of field colors in a given territory, representing an "objective truth" aimed at emulating wise group decisions. + - For an intuitive understanding, the election addresses the question: + *"What is — or should be — the current color distribution within your territory?"* + +2. **Rewards:** + - Rewards are distributed to all agents in the territory, regardless of participation (*participation dilemma*). + These rewards consist of: + - **Base reward:** Distributed equally based on how well agents guess the true color distribution. + - **Personal reward:** Allocated based on the alignment between election results and agent preferences, introducing a second dilemma: + - *Should agents vote selfishly (favoring their preferences) or vote with a focus on the group's accuracy (collective good)?* + diff --git a/docs/teaser.md b/docs/teaser.md index 2196f7f..923cbbf 100644 --- a/docs/teaser.md +++ b/docs/teaser.md @@ -48,7 +48,9 @@ This research aims to pioneer a new path by incorporating these complexities thr While the model and research inquiries within the proposed master thesis can only represent this approach in its infancy, the potential of multi-agent-based modeling -to eventually encapsulate all essential facets of real-world democratic governance can hardly be overstated. +to eventually encapsulate all essential facets of real-world democratic governance can hardly be overstated[^10]. +To the best of our knowledge, this approach has not been systematically applied to researching social choice +or collective decision-making. 
It stands poised to boost what may be the most underestimated cornerstone of human society: our collective intelligence. @@ -72,3 +74,5 @@ our collective intelligence. [^8]: Thomas Piketty. Das Kapital im 21. Jahrhundert. CH Beck, 2014. [^9]: Hans-Otto Pörtner, Debra C Roberts, H Adams, C Adler, P Aldunce, E Ali, R Ara Begum, R Betts, R Bezner Kerr, R Biesbroek, et al. [Climate change 2022: Impacts, adaptation and vulnerability](https://hal.science/hal-03774939/document). IPCC Sixth Assessment Report, 2022. + +[^10]: Robert L Axtell and J Doyne Farmer. [Agent-based modeling in economics and finance](https://oms-inet.files.svdcdn.com/production/files/JEL-v2.0.pdf): Past, present, and future. In: Journal of Economic Literature (2022), pp. 1–10 \ No newline at end of file diff --git a/docs/technical/api/Area.md b/docs/technical/api/Area.md new file mode 100644 index 0000000..e43d86c --- /dev/null +++ b/docs/technical/api/Area.md @@ -0,0 +1,7 @@ +# Class `Area` + +::: democracy_sim.participation_model.Area + +## Private Method + +::: democracy_sim.participation_model.Area._conduct_election diff --git a/docs/technical/api/ColorCell.md b/docs/technical/api/ColorCell.md new file mode 100644 index 0000000..7152413 --- /dev/null +++ b/docs/technical/api/ColorCell.md @@ -0,0 +1,3 @@ +# Class `ColorCell` + +::: democracy_sim.participation_agent.ColorCell \ No newline at end of file diff --git a/docs/technical/api/Model.md b/docs/technical/api/Model.md new file mode 100644 index 0000000..c8cb990 --- /dev/null +++ b/docs/technical/api/Model.md @@ -0,0 +1,3 @@ +# Class `ParticipationModel` + +::: democracy_sim.participation_model.ParticipationModel diff --git a/docs/technical/api/Utility_functions.md b/docs/technical/api/Utility_functions.md new file mode 100644 index 0000000..0f333ad --- /dev/null +++ b/docs/technical/api/Utility_functions.md @@ -0,0 +1,3 @@ +# Utility functions + +::: democracy_sim.participation_agent.combine_and_normalize \ No newline at end of file diff 
--git a/docs/technical/api/VoteAgent.md b/docs/technical/api/VoteAgent.md
new file mode 100644
index 0000000..c4c8375
--- /dev/null
+++ b/docs/technical/api/VoteAgent.md
@@ -0,0 +1,3 @@
+# Class `VoteAgent`
+
+::: democracy_sim.participation_agent.VoteAgent
diff --git a/docs/technical/api/inherited.md b/docs/technical/api/inherited.md
new file mode 100644
index 0000000..007aeff
--- /dev/null
+++ b/docs/technical/api/inherited.md
@@ -0,0 +1,11 @@
+## Mesa Base Model Class
+
+:::mesa.Model
+
+---
+---
+
+## Mesa Base Agent Class
+
+:::mesa.Agent
+
diff --git a/docs/technical/approval_voting.md b/docs/technical/approval_voting.md
new file mode 100644
index 0000000..9b5d0ef
--- /dev/null
+++ b/docs/technical/approval_voting.md
@@ -0,0 +1,56 @@
+# Problem of threshold in approval voting
+
+If we choose an architecture in which voters always provide a sum-normalized preference vector
+for all voting rules, then approval voting has to have a threshold value to determine which options are approved.
+This may take autonomy away from the voters, but it ensures that every voting rule is based on the same conditions,
+increasing comparability. It may also help to add more rules later on.
+
+### Idea
+
+Setting a fixed threshold of $ \frac{1}{m} $ for approval voting, where $m$ is the number of options.
+
+### Definitions and Setup
+
+- **Sum-normalized vector**: A preference vector $ \mathbf{p} = (p_1, p_2, \ldots, p_m) $ where each entry $ p_i $ represents the preference score for option $ i $, with the constraint $ \sum_{i=1}^m p_i = 1 $.
+- **Threshold**: A fixed threshold of $ \frac{1}{m} $ is used to determine approval. If $ p_i \geq \frac{1}{m} $, the option $ i $ is considered "approved."
+
+### Average Number of Approved Values
+
+To find the average number of values approved, let's consider how many entries $ p_i $ would meet the threshold $ p_i \geq \frac{1}{m} $.
+
+1. 
**Expectation Calculation**: + - The expected number of approvals can be found by looking at the expected value of each $ p_i $ being greater than or equal to $ \frac{1}{m} $. + - For a sum-normalized vector, the average value of any $ p_i $ is $ \frac{1}{m} $. This is because the sum of all entries equals 1, and there are $ m $ entries. + +2. **Probability of Approval**: + - If the vector entries are randomly distributed, the probability of any given $ p_i $ being above the threshold is approximately 50%. This stems from the fact that the mean is $ \frac{1}{m} $, and assuming a uniform or symmetric distribution around this mean, half the entries would be above, and half below, in expectation. + +3. **Expected Number of Approvals**: + - Since each entry has a 50% chance of being above $ \frac{1}{m} $ in a uniform random distribution, the expected number of approved values is $ \frac{m}{2} $. + +Therefore, **on average, $ \frac{m}{2} $ values will be approved**. + +### Range of the Number of Approved Values + +The number of approved values can vary depending on how the preference scores are distributed. Here's the possible range: + +1. **Minimum Approved Values**: + - If all entries are below $ \frac{1}{m} $, then none would be approved. However, given the constraint that the vector sums to 1, at least one entry must be $ \frac{1}{m} $ or higher. Hence, the minimum number of approved values is **1**. + +2. **Maximum Approved Values**: + - The maximum occurs when as many values as possible are at least $ \frac{1}{m} $. In the extreme case, you could have all $ m $ entries equal $ \frac{1}{m} $ exactly, making them all approved. Thus, the maximum number of approved values is **m**. + +### Conclusion + +- **Average number of approved values**: $ \frac{m}{2} $. +- **Range of approved values**: From 1 (minimum) to $ m $ (maximum). 
+
+Hence, in theory, voters can still approve between 1 and $ m $ options,
+giving them the whole range of flexibility that approval voting offers.
+
+### Possibility for improvement
+
+We should consider implementing rule-specific voting into the agent's decision-making process
+instead of leaving all rule-specifics to the aggregation process.
+This would allow for a more realistic comparison of the rules.
+For some rules, it would also give opportunities to significantly speed up the simulation process.
\ No newline at end of file
diff --git a/docs/technical/preference_relations.md b/docs/technical/preference_relations.md
new file mode 100644
index 0000000..9bb916e
--- /dev/null
+++ b/docs/technical/preference_relations.md
@@ -0,0 +1,23 @@
+# How preference relations are defined and represented in the system
+
+## Introduction
+
+...
+
+## Definition
+
+A preference relation $\tau\in\mathbb{R}_{\geq 0}^m$ is a numpy vector of length $m$,
+where $m$ is the number of options and each element $\tau[i]$ represents the normalized preference for option $i$,
+with $\sum_{i=1}^{m}\tau[i]=1$.
+
+### Why use sum normalization?
+
+In computational social choice, **sum normalization** is more common than magnitude normalization.
+This is because sum normalization aligns well with the interpretation of preference vectors as distributions
+or weighted votes, which are prevalent in social choice scenarios.
+
+### Why use non-negative values?
+
+The preference values $\tau[i]$ are non-negative because they represent the strength of preference for each option.
+Equivalently, they can be interpreted as the probability of selecting each option
+or the (inverted or negative) distance of an option to the agents' ideal solution.
\ No newline at end of file diff --git a/docs/technical/technical_overview.md b/docs/technical/technical_overview.md new file mode 100644 index 0000000..5d0fb3f --- /dev/null +++ b/docs/technical/technical_overview.md @@ -0,0 +1,31 @@ +# Technical overview + +**DemocracySim** is a multi-agent simulation framework designed to examine democratic participation. +This project models agents (with personal interests forming majority-minority groups), environments +(evolving under the influence of the collective behavior of the agents), +and elections to analyze how voting rules influence participation, +welfare, system dynamics and overall collective outcomes. + +Key features: + +- Multi-agent system simulation using **Mesa framework**. +- **Grid-based environment** with wrap-around support (toroidal topology). +- Explore societal outcomes under different voting rules. + +--- + +### Features +- **Agents**: + - Independently acting entities modeled with preferences, budgets, and decision-making strategies. + - Can participate in elections, have personal preferences and limited information about surroundings. + - Trained with decision-tree methods to simulate behavior. + +- **Environment**: + - Structured as a grid divided into "territories" or "areas." + - A single unit of the grid is a "cell" or "field." + - Each cell has a specific "color" representing a state. Elections influence these states, and areas mutate over time. + +- **Metrics**: + - Participation rates, altruism factors, and metrics such as the Gini Index to analyze inequalities and long-term trends. + +Learn more in the following sections. 
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index c1aceb8..6523426 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,6 +13,26 @@ edit_uri: edit/dev/docs/ nav: - Home: index.md - Teaser: teaser.md + - Research: + - Concept: research/research_concept.md + - Metrics: research/metrics.md + - Goals: research/goals.md + - Technical: + - Overview: technical/technical_overview.md + #- Installation Instructions: technical/installation_instructions.md + - API Reference: + - Model (Environment): technical/api/Model.md + - Area (Territory): technical/api/Area.md + - Grid Cell: technical/api/ColorCell.md + - Voting Agent: technical/api/VoteAgent.md + - Inherited Classes: technical/api/inherited.md + - Utility Functions: technical/api/Utility_functions.md + #- User Guide: technical/user_guide.md #1. Provide step-by-step guides for common project usage. + #- Examples: technical/examples.md #1. Show key use cases via practical code examples or interactive demos. + #- Developer Docs: technical/dev_docs.md #Offer guidelines for contributing or extending the project (e.g., folder structure, conventions, CI/CD pipelines). 
+ #- Architecture Overview: technical/architecture_overview.md + #- Overview: overview.md + #- Code: the_voting_process_step_by_step.md - Mesa: mesa_docs.md theme: @@ -48,6 +68,8 @@ theme: - toc.follow - toc.integrate - search.share + - search.highlight + - search.suggest - content.action.edit # Plugins plugins: @@ -62,6 +84,8 @@ plugins: name: Deutsch build: true - search + - mkdocstrings: + default_handler: python # Extensions markdown_extensions: @@ -70,4 +94,10 @@ markdown_extensions: - attr_list - def_list - footnotes - - md_in_html \ No newline at end of file + - md_in_html + - pymdownx.arithmatex + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format diff --git a/np_performance_test_1.py b/np_performance_test_1.py deleted file mode 100644 index d2d677a..0000000 --- a/np_performance_test_1.py +++ /dev/null @@ -1,15 +0,0 @@ -import time -import numpy as np -np.random.seed(42) -a = np.random.uniform(size=(300, 300)) -runtimes = 10 - -timecosts = [] -for _ in range(runtimes): - s_time = time.time() - for i in range(100): - a += 1 - np.linalg.svd(a) - timecosts.append(time.time() - s_time) - -print(f'mean of {runtimes} runs: {np.mean(timecosts):.5f}s') diff --git a/np_performance_test_2.py b/np_performance_test_2.py deleted file mode 100644 index 743283a..0000000 --- a/np_performance_test_2.py +++ /dev/null @@ -1,64 +0,0 @@ -# SOURCE: https://gist.github.com/markus-beuckelmann/8bc25531b11158431a5b09a45abd6276 - -import numpy as np -from time import time -from datetime import datetime - -start_time = datetime.now() - -# Let's take the randomness out of random numbers (for reproducibility) -np.random.seed(0) - -size = 4096 -A, B = np.random.random((size, size)), np.random.random((size, size)) -C, D = np.random.random((size * 128,)), np.random.random((size * 128,)) -E = np.random.random((int(size / 2), int(size / 4))) -F = np.random.random((int(size / 2), int(size / 2))) -F = 
np.dot(F, F.T) -G = np.random.random((int(size / 2), int(size / 2))) - -# Matrix multiplication -N = 20 -t = time() -for i in range(N): - np.dot(A, B) -delta = time() - t -print('Dotted two %dx%d matrices in %0.2f s.' % (size, size, delta / N)) -del A, B - -# Vector multiplication -N = 5000 -t = time() -for i in range(N): - np.dot(C, D) -delta = time() - t -print('Dotted two vectors of length %d in %0.2f ms.' % (size * 128, 1e3 * delta / N)) -del C, D - -# Singular Value Decomposition (SVD) -N = 3 -t = time() -for i in range(N): - np.linalg.svd(E, full_matrices = False) -delta = time() - t -print("SVD of a %dx%d matrix in %0.2f s." % (size / 2, size / 4, delta / N)) -del E - -# Cholesky Decomposition -N = 3 -t = time() -for i in range(N): - np.linalg.cholesky(F) -delta = time() - t -print("Cholesky decomposition of a %dx%d matrix in %0.2f s." % (size / 2, size / 2, delta / N)) - -# Eigendecomposition -t = time() -for i in range(N): - np.linalg.eig(G) -delta = time() - t -print("Eigendecomposition of a %dx%d matrix in %0.2f s." 
% (size / 2, size / 2, delta / N)) - -print('') -end_time = datetime.now() -print(f'TOTAL TIME = {(end_time - start_time).seconds} seconds') diff --git a/requirements.txt b/requirements.txt index 13585c3..cdd98e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ Mesa~=2.3.0 numpy~=1.26.4 -solara~=1.32.1 +solara~=1.35.1 matplotlib~=3.9.0 ipyvuetify~=1.9.4 seaborn~=0.13.2 @@ -12,7 +12,10 @@ pytest-cov~=5.0.0 toml~=0.10.2 Flask~=3.0.3 altair~=5.3.0 -streamlit~=1.34.0 +streamlit~=1.37.0 mkdocs-git-revision-date-localized-plugin~=0.9.0 mkdocs-static-i18n -mkdocs-static-i18n[material] \ No newline at end of file +mkdocs-static-i18n[material] +mkdocstrings +mkdocstrings[python] +git+https://github.com/Logende/mesa-replay@main#egg=Mesa-Replay \ No newline at end of file diff --git a/tests/factory.py b/tests/factory.py new file mode 100644 index 0000000..3379e66 --- /dev/null +++ b/tests/factory.py @@ -0,0 +1,31 @@ +from democracy_sim.participation_model import ParticipationModel + + +def create_default_model(**overrides): + """Create a ParticipationModel instance, with optional parameter overrides.""" + params = { + "height": 100, + "width": 80, + "num_agents": 800, + "num_colors": 3, + "num_personalities": 4, + "mu": 0.05, + "election_impact_on_mutation": 1.8, + "common_assets": 40000, + "known_cells": 10, + "num_areas": 16, + "av_area_height": 25, + "av_area_width": 20, + "area_size_variance": 0.0, + "patch_power": 1.0, + "color_patches_steps": 3, + "draw_borders": True, + "heterogeneity": 0.3, + "rule_idx": 1, + "distance_idx": 1, + "election_costs": 1, + "max_reward": 50, + "show_area_stats": False + } + params.update(overrides) + return ParticipationModel(**params) diff --git a/tests/test_approval_voting.py b/tests/test_approval_voting.py new file mode 100644 index 0000000..60ec936 --- /dev/null +++ b/tests/test_approval_voting.py @@ -0,0 +1,177 @@ +from democracy_sim.social_welfare_functions import approval_voting +from tests.test_majority_rule 
import simple, paradoxical +import numpy as np + +# TODO adapt to approval voting (state = merely copied from majority_rule.py) + +# Simple and standard cases +approval_simple_cases = [ + (simple, [[2, 1, 0]]), # TODO: Whats the expected result? + (paradoxical, [[2, 1, 0, 3, 4], [2, 1, 3, 0, 4]]) # TODO '' '' +] + +# Following "paradoxical" example is taken from +# https://pub.dss.in.tum.de/brandt-research/minpara.pdf +# +# 5 4 3 2 +# ------- +# a e d b +# c b c d +# b c b e +# d d e c +# e a a a + +def test_approval_voting(): + # Test predefined cases + for pref_table, expected in approval_simple_cases: + res_ranking = approval_voting(pref_table) + is_correct = False + for exp in expected: + if list(res_ranking) == exp: + is_correct = True + assert is_correct + +# Cases with ties - "all equally possible" + +with_ties_all = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) + +with_overall_tie = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.3, 0.2, 0.1, 0.4], +]) + +with_ties_mixed = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.25, 0.25, 0.25, 0.25], + [0.3, 0.2, 0.1, 0.4], +]) + +all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] + +def test_equally_possible(): + for pref_rel in all_equally_possible: + winners = set() + for _ in range(500): + winner = approval_voting(pref_rel) + winners.add(winner[0]) + assert set(winners) == {0, 1, 2, 3} + +# # Cases with ties - "not all equally possible" +# with_ties_unequal = np.array([ +# [0.25, 0.25, 0.25, 0.25], +# [0.4, 0.3, 0.2, 0.1], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25] +# ]) +# +# with_ties_all_ab = np.array([ +# [0.3, 0.3, 0.2, 0.2], +# [0.25, 0.25, 0.25, 0.25] +# ]) # all possible (a or b up first is more 
likely) +# +# with_ties_ab = np.array([ +# [0.3, 0.3, 0.2, 0.2], +# [0.3, 0.3, 0.2, 0.2], +# [0.25, 0.25, 0.25, 0.25] +# ]) # all possible (a or b up first is more likely) +# +# with_ties_unequal = [with_ties_unequal, with_ties_all_ab, with_ties_ab] +# +# def test_with_ties_unequal(): +# for pref_rel in with_ties_unequal: +# cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) +# print(f"CV: {cv}") +# assert cv > 0.125 +# +# # Random matrix +# +# def random_pref_profile(num_agents, num_options): +# rand_matrix = np.random.rand(num_agents, num_options) +# # Normalize the matrix +# matrix_rand = rand_matrix / rand_matrix.sum(axis=1, keepdims=True) +# return matrix_rand +# +# def majority_rule_with_rand_matrix(num_agents, num_options, iterations=1000): +# """ +# Run majority rule with ties multiple times, check winners +# and calculate the coefficient of variation (CV) of the winners. +# :param num_agents: Number of agents. +# :param num_options: Number of options. +# :param iterations: Number of iterations. +# ------- +# :return: Dictionary of winner counts {option: count}. +# """ +# winner_counts = {} +# for _ in range(iterations): +# # Create random matrix +# matrix_rand = random_pref_profile(num_agents, num_options) +# ranking = majority_rule(matrix_rand) +# winner = ranking[0] +# # Count winners +# winner_counts[winner] = winner_counts.get(winner, 0) + 1 +# return winner_counts +# +# +# def test_with_random_matrix_small(): +# """ +# Test majority rule on a small random matrix with many iterations. +# """ +# num_agents = np.random.randint(2, 200) +# # Keep num options small to expect all options to win at least once. 
+# num_options = np.random.randint(2, 90) +# iterations = 100*num_options +# start_time = time.time() +# wc = majority_rule_with_rand_matrix(num_agents, num_options, iterations) +# stop_time = time.time() +# # Extract winners from winner-counts dictionary and sort them +# sorted_winners = list(wc.keys()) +# sorted_winners.sort() +# assert sorted_winners == list(range(num_options)) +# # Extract count values +# counts = np.array(list(wc.values())) +# # Calculate the coefficient of variation (CV) +# cv = np.std(counts) / np.mean(counts) +# assert cv < 0.125 +# print(f"\nCV: {cv}") +# # Print the time taken +# elapsed_time = stop_time - start_time +# print(f"\nTime taken: {elapsed_time:.2f} sec. On {iterations} iterations." +# f"With {num_agents} agents and {num_options} options.") +# +# +# def test_with_random_matrix_large(): +# """ +# Test majority rule on a large random matrix (many agents, many options). +# """ +# num_its = 100 +# num_agents = np.random.randint(1000, 3000) +# num_options = np.random.randint(2000, 3000) +# # Run majority rule test with random matrix +# start_time = time.time() +# wc = majority_rule_with_rand_matrix(num_agents, num_options, num_its) +# stop_time = time.time() +# # Len of winners should be approximately equal to the number of iterations +# # because with a large number of options, winners should be mostly unique. +# winners, counts = list(wc.keys()), list(wc.values()) +# assert abs(np.mean(counts) - 1) < 0.1 +# assert abs((len(winners) / num_its) - 1) < 0.1 +# # Calculate the coefficient of variation (CV) +# cv = np.std(counts) / np.mean(counts) +# assert cv < 0.2 +# # Print the time taken +# elapsed_time = stop_time - start_time +# print(f"\nTime taken: {elapsed_time:.2f} sec. On {num_its} iterations." 
+# f"With {num_agents} agents and {num_options} options.") diff --git a/tests/test_color_by_dst.py b/tests/test_color_by_dst.py new file mode 100644 index 0000000..ce80cc0 --- /dev/null +++ b/tests/test_color_by_dst.py @@ -0,0 +1,65 @@ +import unittest +import numpy as np +from democracy_sim.participation_model import ParticipationModel + +class TestColorByDst(unittest.TestCase): + + def test_valid_output(self): + """Test that the function always returns a valid index.""" + color_distribution = np.array([0.2, 0.3, 0.5]) + for _ in range(1000): + result = ParticipationModel.color_by_dst(color_distribution) + self.assertIn(result, range(len(color_distribution)), + "Output index is out of range") + + def test_sum_to_one(self): + """Test that it correctly handles distributions summing to one.""" + color_distribution = np.array([0.1, 0.2, 0.1, 0.4, 0.2]) + for _ in range(50): + result = ParticipationModel.color_by_dst(color_distribution) + self.assertIn(result, range(len(color_distribution))) + + def test_single_color(self): + """Test that a single-color distribution always returns index 0.""" + color_distribution = np.array([1.0]) + for _ in range(10): + self.assertEqual( + ParticipationModel.color_by_dst(color_distribution), 0) + + def test_edge_cases(self): + """Test edge cases like a uniform distribution.""" + color_distribution = np.array([0.5, 0.5]) + results = [ParticipationModel.color_by_dst( + color_distribution) for _ in range(1000)] + unique, counts = np.unique(results, return_counts=True) + self.assertEqual(set(unique), {0, 1}, + "Function should only return 0 or 1") + self.assertGreater(int(counts[0]), 400, "Dst not uniform") + self.assertGreater(int(counts[1]), 400, "Dst not uniform") + + def test_invalid_distribution(self): + """Test that an invalid distribution raises an error.""" + with self.assertRaises(ValueError): # Negative probability + ParticipationModel.color_by_dst(np.array([-0.1, 0.3, 0.8])) + + with self.assertRaises(ValueError): # Doesn't sum 
to 1 + ParticipationModel.color_by_dst(np.array([0.2, 0.3])) + + with self.assertRaises(ValueError): # All zeros + ParticipationModel.color_by_dst(np.array([0.0, 0.0, 0.0])) + + def test_probability_distribution(self): + """Test if the function follows the given probability distribution.""" + color_distribution = np.array([0.2, 0.3, 0.5]) + num_samples = 10000 + results = [ParticipationModel.color_by_dst( + color_distribution) for _ in range(num_samples)] + + counts = np.bincount(results, + minlength=len(color_distribution)) / num_samples + err_message = "Generated samples do not match expected distribution" + np.testing.assert_almost_equal(counts, color_distribution, decimal=1, + err_msg=err_message) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_conduct_election.py b/tests/test_conduct_election.py new file mode 100644 index 0000000..38a883a --- /dev/null +++ b/tests/test_conduct_election.py @@ -0,0 +1,22 @@ +import unittest +from unittest.mock import MagicMock +from tests.factory import create_default_model + +# TODO add more complex tests + +class TestConductElection(unittest.TestCase): + def setUp(self): + self.model = create_default_model(num_areas=1) + self.model.initialize_area = MagicMock() + + def test_election_returns_integer_turnout(self): + area = self.model.areas[0] + turnout = area._conduct_election() + self.assertIsInstance(turnout, int) + + def test_no_participation_scenario(self): + for agent in self.model.voting_agents: + agent.assets = 0 + area = self.model.areas[0] + turnout = area._conduct_election() + self.assertEqual(turnout, 0) \ No newline at end of file diff --git a/tests/test_create_personalities.py b/tests/test_create_personalities.py new file mode 100644 index 0000000..98a90cd --- /dev/null +++ b/tests/test_create_personalities.py @@ -0,0 +1,58 @@ +import unittest +import numpy as np +from itertools import permutations +from tests.factory import create_default_model +from unittest.mock import MagicMock + + 
+class TestParticipationModel(unittest.TestCase): + + def setUp(self): + """Create a fresh model instance before each test and mock `initialize_area`.""" + self.model = create_default_model( + height=10, width=10, num_agents=100, num_colors=4, + num_personalities=10, area_size_variance=0.2, + num_areas=4, av_area_height=5, av_area_width=5, + heterogeneity=0.5, + ) + self.model.initialize_area = MagicMock() + + + def test_create_personalities_shape(self): + """Test that the generated personalities array has the correct shape.""" + for n_personalities in range(2, 15): + personalities = self.model.create_personalities(n_personalities) + self.assertEqual(personalities.shape, + (n_personalities, self.model.num_colors)) + + def test_create_personalities_uniqueness(self): + """Test that the generated personalities are unique.""" + n_personalities = 12 + personalities = self.model.create_personalities(n_personalities) + unique_personalities = set(map(tuple, personalities)) + self.assertEqual(len(unique_personalities), n_personalities) + + def test_create_personalities_max_limit(self): + """Test that the method raises an error when + n exceeds the total number of permutations.""" + assert self.model.num_colors == 4 # 4! 
class TestKendallTauDistance(unittest.TestCase):
    """Tests for the unnormalized Kendall-tau distance variants."""

    # TODO test normalized version

    def test_kendall_tau_on_ranks(self):
        """kendall_tau_on_ranks counts inversions between rank-vectors."""
        print("TEST kendall_tau_on_ranks function")

        # Each case: (rank_vector_1, rank_vector_2, expected inversions).
        cases = [
            ([1, 2, 3, 4], [1, 2, 3, 4], 0),  # equal sequences
            ([1], [1], 0),                    # single element
            ([], [], 0),                      # empty sequences
            # After converting to orderings and renaming so the first
            # vector is sorted, the second becomes [3,0,5,1,2,4,6]
            # -> 6 inversions.
            ([0, 3, 1, 6, 2, 5, 4], [1, 0, 3, 6, 4, 2, 5], 6),
            # Renaming yields a fully reversed vector -> 15 inversions.
            ([0, 5, 2, 3, 1, 4], [5, 0, 3, 2, 4, 1], 15),
            ([2, 3, 1], [2, 1, 3], 3),
            ([3, 1, 2], [2, 1, 3], 1),
            ([0.5, 1.0, 0.0], [0.5, 0.0, 1.0], 3),  # using floats
            ([0.5, 1.0, 0.0], [0.2, 0.1, 0.8], 3),  # floats, unequal values
            # Ties break the metric property here, see 10.1137/05063088X.
            ([1, 2, 2, 3], [2, 1, 3, 2], 2),
            ([2, 1, 1, 1, 3], [2, 2, 3, 3, 1], 7),
            ([3, 1, 1, 2, 2, 1, 3], [2, 2, 1, 3, 1, 1, 1], 10),
            ([0.1, 0.2, 0.2, 0.3], [0.2, 0.01, 0.9, 0.2], 2),  # float ties
        ]

        for seq1, seq2, expected in cases:
            print(f"# Next #\nSeq1: {seq1}, Seq2: {seq2}")
            n = len(seq1)
            assert n == len(seq2), \
                "Test failed: sequences must have the same length"
            pairs = combinations(range(0, n), 2)
            item_vec = np.arange(n)
            d = kendall_tau_on_ranks(np.array(seq1), np.array(seq2),
                                     pairs, item_vec)
            print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: {expected}, Got: {d}")
            assert d == expected, f"Test failed for input {seq1}, {seq2}"

    def test_kendall_tau_on_orderings(self):
        """unnormalized_kendall_tau counts inversions between orderings."""
        print("\nTEST kendall_tau_on_orderings (not normalized) function\n")

        # Each case: (ordering_1, ordering_2, expected inversions).
        cases = [
            ([1, 2, 3, 4], [1, 2, 3, 4], 0),  # equal sequences
            ([1], [1], 0),                    # single element
            ([], [], 0),                      # empty sequences
            # Renamed second ordering is [2,0,1,3,6,4,5]
            # -> 4 inversions: (2,0), (2,1), (6,4), (6,5) (as on Wikipedia).
            ([0, 3, 1, 6, 2, 5, 4], [1, 0, 3, 6, 4, 2, 5], 4),
            # Renamed second ordering is [1,0,3,2,5,4]
            # -> 3 inversions: (1,0), (3,2), (5,4).
            ([0, 5, 2, 3, 1, 4], [5, 0, 3, 2, 4, 1], 3),
            ([2, 3, 1], [2, 1, 3], 1),
            ([3, 1, 2], [2, 1, 3], 3),
        ]

        for seq1, seq2, expected in cases:
            print(f"# Next #\nSeq1: {seq1}, Seq2: {seq2}")
            n = len(seq1)
            assert n == len(seq2), \
                "Test failed: sequences must have the same length"
            pairs = list(combinations(range(0, n), 2))
            d = unnormalized_kendall_tau(np.array(seq1), np.array(seq2), pairs)
            print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: {expected}, Got: {d}")
            assert d == expected, f"Test failed for input {seq1}, {seq2}"


class TestSpearmanDistance(unittest.TestCase):
    """Tests for the (unnormalized) Spearman footrule distance."""

    # TODO test normalized version

    def test_spearman_distance(self):
        """spearman_distance returns the expected footrule values."""
        print("\nTEST spearman_distance function\n")

        cases = [
            ([1, 2, 3, 4], [1, 2, 3, 4], 0),  # equal sequences
            ([1], [1], 0),                    # single element
            ([], [], 0),                      # empty sequences
            ([1, 2, 3], [3, 2, 1], 4),        # reversed sequences
            ([1, 2, 3], [2, 3, 1], 4),        # different ranks
            ([1, 1, 1], [1, 1, 1], 0),        # sequences with ties
            ([1, 2, 2, 3], [2, 1, 3, 2], 4),  # sequences with ties
            ([0.0, 0.2, 1.0], [1.0, 0.2, 0.0], 2),   # reversed (floats)
            ([0.5, 1.0, 0.0], [0.5, 0.0, 1.0], 2.0), # using floats
            ([0.5, 1.0, 0.0], [0.2, 0.0, 1.0], 2.3), # non-equidistant ranks
        ]

        for seq1, seq2, expected in cases:
            distance = spearman_distance(np.array(seq1), np.array(seq2))
            self.assertEqual(distance, expected,
                             f"Test failed for input {seq1}, {seq2}")
class TestDistributeRewards(unittest.TestCase):
    """Tests for ``Area._distribute_rewards``."""

    def setUp(self):
        self.model = create_default_model(num_areas=1)
        self.model.initialize_area = MagicMock()

    def test_distribute(self):
        """After an election, reward distribution leaves no negative assets."""
        area = self.model.areas[0]
        area._conduct_election()  # Ensure there's a result
        area._distribute_rewards()
        for agent in area.agents:
            self.assertGreaterEqual(agent.assets, 0)


class TestParticipationModelInitializeAllAreas(unittest.TestCase):
    """Tests for ``ParticipationModel.initialize_all_areas``."""

    def setUp(self):
        """Build a 10x10 model with four 5x5 areas; mock `initialize_area`."""
        self.model = create_default_model(
            num_areas=4, height=10, width=10,
            av_area_height=5, av_area_width=5,
        )
        self.model.initialize_area = MagicMock()

    def test_initialize_all_areas_uniform_distribution(self):
        """A perfect-square area count yields uniform grid-like anchors."""
        expected_anchors = [(0, 0), (5, 0), (0, 5), (5, 5)]
        self.assertEqual(self.model.num_areas, 4)
        for anchor in (area.idx_field for area in self.model.areas):
            assert anchor in expected_anchors

    def test_initialize_all_areas_with_non_square_number(self):
        """Non-square counts are handled by adding extra areas randomly."""
        model = create_default_model(num_areas=5)
        # initialize_all_areas runs during model construction.
        self.assertEqual(model.num_areas, 5)

    def test_initialize_all_areas_no_areas(self):
        """num_areas == 0 initializes nothing."""
        model = create_default_model(num_areas=0)
        assert model.num_areas == 0

    def test_initialize_all_areas_random_additional_areas(self):
        """Areas beyond the uniform grid capacity are placed off-pattern."""
        model = create_default_model(num_areas=5, height=10, width=10,
                                     av_area_height=5, av_area_width=5)
        self.assertEqual(model.num_areas, 5)
        uniform_anchors = [(0, 0), (5, 0), (0, 5), (5, 5)]
        anchors = [area.idx_field for area in model.areas]
        # At least one anchor must fall outside the uniform pattern.
        self.assertTrue(any(a not in uniform_anchors for a in anchors))

    def test_initialize_all_areas_handles_non_square_distribution(self):
        """The number of areas matches num_areas even for non-square cases."""
        model = create_default_model(num_areas=6)
        self.assertEqual(model.num_areas, 6)

    def test_initialize_all_areas_calculates_distances_correctly(self):
        """Anchor spacing equals grid size // round(sqrt(num_areas))."""
        model = create_default_model(num_areas=4, height=10, width=10,
                                     av_area_height=5, av_area_width=5)
        roo_apx = round(sqrt(model.num_areas))
        self.assertEqual(model.grid.width // roo_apx, 5)
        self.assertEqual(model.grid.height // roo_apx, 5)


if __name__ == '__main__':
    unittest.main()
# Simple and standard cases (lower values = higher rank)

simple = np.array([
    [0.1, 0.4, 0.5],
    [0.5, 0.1, 0.4],
    [0.4, 0.1, 0.5],
    [0.5, 0.4, 0.1],
    [0.5, 0.4, 0.1],
    [0.5, 0.4, 0.1]
])  # => c, b, a ~ 2, 1, 0

# The following "paradoxical" example is taken from
# https://pub.dss.in.tum.de/brandt-research/minpara.pdf
#
#  5 4 3 2
#  -------
#  a e d b
#  c b c d
#  b c b e
#  d d e c
#  e a a a

paradoxical = np.array([
    # 5 times a,c,b,d,e --> 0., 0.2, 0.1, 0.3, 0.4
    [0., 0.2, 0.1, 0.3, 0.4],
    [0., 0.2, 0.1, 0.3, 0.4],
    [0., 0.2, 0.1, 0.3, 0.4],
    [0., 0.2, 0.1, 0.3, 0.4],
    [0., 0.2, 0.1, 0.3, 0.4],
    # 4 times e,b,c,d,a
    [0.4, 0.1, 0.2, 0.3, 0.],
    [0.4, 0.1, 0.2, 0.3, 0.],
    [0.4, 0.1, 0.2, 0.3, 0.],
    [0.4, 0.1, 0.2, 0.3, 0.],
    # 3 times d,c,b,e,a
    [0.4, 0.2, 0.1, 0., 0.3],
    [0.4, 0.2, 0.1, 0., 0.3],
    [0.4, 0.2, 0.1, 0., 0.3],
    # 2 times b,d,e,c,a
    [0.4, 0., 0.3, 0.1, 0.2],
    [0.4, 0., 0.3, 0.1, 0.2]
])  # Plurality => a, e, d, b, c ~ 0, 4, 3, 1, 2

majority_simple_cases = [
    (simple, [2, 1, 0]),
    (paradoxical, [0, 4, 3, 1, 2])
]


def test_majority_rule():
    """Majority rule reproduces the expected rankings on fixed profiles."""
    for pref_table, expected in majority_simple_cases:
        assert list(majority_rule(pref_table)) == expected


def majority_rule_with_ties_all(pref_rel, expected_winners, iterations=1000):
    """Run majority rule repeatedly on a profile containing ties.

    Asserts that exactly `expected_winners` (an ordered list, e.g. [0, 1])
    occur as winners across the runs.

    :param pref_rel: Preference relation matrix.
    :param expected_winners: Ordered list of expected winners.
    :param iterations: Number of iterations.
    :return: Coefficient of variation (CV) of the winner counts.
    """
    winners_from_ties = {}
    for _ in range(iterations):
        winner = majority_rule(pref_rel)[0]
        winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1
    assert sorted(winners_from_ties) == expected_winners
    counts = np.array(list(winners_from_ties.values()))
    return np.std(counts) / np.mean(counts)


# Cases with ties - "all equally possible"

with_ties_all = np.array([
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25]
])

with_overall_tie = np.array([
    [0.4, 0.3, 0.2, 0.1],
    [0.1, 0.4, 0.3, 0.2],
    [0.2, 0.1, 0.4, 0.3],
    [0.3, 0.2, 0.1, 0.4],
])

with_ties_mixed = np.array([
    [0.4, 0.3, 0.2, 0.1],
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25],
    [0.1, 0.4, 0.3, 0.2],
    [0.2, 0.1, 0.4, 0.3],
    [0.25, 0.25, 0.25, 0.25],
    [0.3, 0.2, 0.1, 0.4],
])

all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed]


def test_equally_possible(cv_threshold=0.125):
    """Winner frequencies should be near-uniform on fully tied profiles."""
    for pref_rel in all_equally_possible:
        cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3])
        print(f"CV: {cv}")
        assert cv < cv_threshold


# Cases with ties - "not all equally possible"
with_ties_unequal = np.array([
    [0.25, 0.25, 0.25, 0.25],
    [0.4, 0.3, 0.2, 0.1],
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25],
    [0.25, 0.25, 0.25, 0.25]
])

with_ties_all_ab = np.array([
    [0.3, 0.3, 0.2, 0.2],
    [0.25, 0.25, 0.25, 0.25]
])  # all possible (a or b up first is more likely)

with_ties_ab = np.array([
    [0.3, 0.3, 0.2, 0.2],
    [0.3, 0.3, 0.2, 0.2],
    [0.25, 0.25, 0.25, 0.25]
])  # all possible (a or b up first is more likely)

# NOTE: deliberately rebinds the name -- the case list shadows the single
# array defined above and is what test_with_ties_unequal iterates.
with_ties_unequal = [with_ties_unequal, with_ties_all_ab, with_ties_ab]


def test_with_ties_unequal():
    """Winner frequencies should be clearly non-uniform on skewed profiles."""
    for pref_rel in with_ties_unequal:
        cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3])
        print(f"CV: {cv}")
        assert cv > 0.125


# Random matrix

def random_pref_profile(num_agents, num_options):
    """Return a random row-normalized (num_agents x num_options) profile."""
    rand_matrix = np.random.rand(num_agents, num_options)
    return rand_matrix / rand_matrix.sum(axis=1, keepdims=True)


def majority_rule_with_rand_matrix(num_agents, num_options, iterations=1000):
    """Run majority rule on freshly drawn random profiles.

    :param num_agents: Number of agents.
    :param num_options: Number of options.
    :param iterations: Number of iterations.
    :return: Dictionary of winner counts {option: count}.
    """
    winner_counts = {}
    for _ in range(iterations):
        winner = majority_rule(random_pref_profile(num_agents, num_options))[0]
        winner_counts[winner] = winner_counts.get(winner, 0) + 1
    return winner_counts


def test_with_random_matrix_small():
    """Majority rule on a small random matrix with many iterations."""
    num_agents = np.random.randint(2, 200)
    # Keep num options small to expect all options to win at least once.
    num_options = np.random.randint(2, 90)
    iterations = 100 * num_options
    start_time = time.time()
    wc = majority_rule_with_rand_matrix(num_agents, num_options, iterations)
    stop_time = time.time()
    # Every option must have won at least once.
    assert sorted(wc.keys()) == list(range(num_options))
    counts = np.array(list(wc.values()))
    cv = np.std(counts) / np.mean(counts)
    assert cv < 0.15
    print(f"\nCV: {cv}")
    elapsed_time = stop_time - start_time
    print(f"\nTime taken: {elapsed_time:.2f} sec. On {iterations} iterations."
          f"With {num_agents} agents and {num_options} options.")


def test_with_random_matrix_large():
    """Majority rule on a large random matrix (many agents, many options)."""
    num_its = 100
    num_agents = np.random.randint(1000, 3000)
    num_options = np.random.randint(2000, 3000)
    start_time = time.time()
    wc = majority_rule_with_rand_matrix(num_agents, num_options, num_its)
    stop_time = time.time()
    # With many options, winners should be mostly unique, so the number of
    # distinct winners approximately equals the number of iterations.
    winners, counts = list(wc.keys()), list(wc.values())
    assert abs(np.mean(counts) - 1) < 0.1
    assert abs((len(winners) / num_its) - 1) < 0.1
    cv = np.std(counts) / np.mean(counts)
    assert cv < 0.2
    elapsed_time = stop_time - start_time
    print(f"\nTime taken: {elapsed_time:.2f} sec. On {num_its} iterations."
          f"With {num_agents} agents and {num_options} options.")
class TestArea(unittest.TestCase):
    """Tests for the Area agent: color distribution updates, cell
    filtering, elections, and agents' distribution estimates."""

    def setUp(self):
        """Reuse TestParticipationModel's fully configured model."""
        test_model = TestParticipationModel()
        test_model.setUp()
        self.model = test_model.model

    def test_update_color_distribution(self):
        """Reassigning an area's cells must change its color distribution."""
        rand_area = random.sample(self.model.areas, 1)[0]
        init_dst = rand_area.color_distribution.copy()
        print(f"Area {rand_area.unique_id}s initial color dist.: {init_dst}")
        # Assign new (randomly chosen) cells to the area
        all_color_cells = self.model.color_cells
        rand_area.cells = random.sample(all_color_cells, len(rand_area.cells))
        # Run/test the update_color_distribution method
        rand_area._update_color_distribution()
        new_dst = rand_area.color_distribution
        print(f"Area {rand_area.unique_id}s new color distribution: {new_dst}")
        # Check if the distribution has changed
        # NOTE(review): could in principle be flaky if the random resample
        # reproduces the exact same distribution -- TODO confirm.
        assert not np.array_equal(init_dst, new_dst), \
            "Error: The color distribution did not change"

    def test_filter_cells(self):
        """_filter_cells keeps exactly the cells belonging to the area."""
        # Get existing area
        existing_area = random.sample(self.model.areas, 1)[0]
        print(f"The areas color-cells: "
              f"{[c.unique_id for c in existing_area.cells]}")
        area_cell_sample = random.sample(existing_area.cells, 4)
        other_cells = random.sample(self.model.color_cells, 4)
        raw_cell_list = area_cell_sample + other_cells
        print(f"Cells to be filtered: {[c.unique_id for c in raw_cell_list]}")
        filtered_cells = existing_area._filter_cells(raw_cell_list)
        print(f"Filtered cells: {[c.unique_id for c in filtered_cells]}")
        # Check if the cells are filtered correctly
        # (other_cells may coincidentally contain cells of this area)
        add_cells = existing_area._filter_cells(other_cells)
        if len(add_cells) > 0:
            print(f"Additional cells: {[c.unique_id for c in add_cells]}")
            area_cell_sample += add_cells
        self.assertEqual(area_cell_sample, filtered_cells)

    def test_conduct_election(self):
        """_conduct_election runs under each voting-rule/distance combo."""
        area = random.sample(self.model.areas, 1)[0]
        # Test with majority_rule and spearman
        self.model.voting_rule = majority_rule
        self.model.distance_func = spearman
        area._conduct_election()
        # Test with approval_voting and spearman
        self.model.voting_rule = approval_voting
        area._conduct_election()
        # Test with approval_voting and kendall_tau
        self.model.distance_func = kendall_tau
        area._conduct_election()
        # Test with majority_rule and kendall_tau
        self.model.voting_rule = majority_rule
        area._conduct_election()
        # TODO

    def test_adding_new_area_and_agent_within_it(self):
        """An agent placed at (0, 0) is registered in a new area there."""
        # Additional area and agent
        personality = random.choice(self.model.personalities)
        a = VoteAgent(num_agents + 1, self.model, pos=(0, 0),
                      personality=personality, assets=25)
        additional_test_area = Area(self.model.num_areas + 1,
                                    model=self.model, height=5,
                                    width=5, size_variance=0)
        additional_test_area.idx_field = (0, 0)  # Place the area at (0, 0)
        test_area = additional_test_area
        print(f"Test-Area: id={test_area.unique_id}, width={test_area._width},"
              f" height={test_area._height}, idx={test_area.idx_field}")
        assert a in test_area.agents  # Test if agent is present
        print(f"Agent {a.unique_id} is in area {test_area.unique_id}")
        print(f"Areas color-cells: {[c.unique_id for c in test_area.cells]}")

    def test_estimate_real_distribution(self):
        """An agent's estimate matches the color counts of its known cells."""
        # Get any existing area
        rnd_area = random.sample(self.model.areas, 1)[0]
        a = random.sample(rnd_area.agents, 1)[0]
        # Test the estimate_real_distribution method
        a.update_known_cells(area=rnd_area)
        k = len(a.known_cells)
        print(f"Sample size: {k}")
        a_colors = [c.color for c in a.known_cells]  # To test against
        print(f"Cells that agent {a.unique_id} knows of:\n"
              f"{[c.unique_id for c in a.known_cells]} with colors: {a_colors}")
        # Sanity check: all known cells must belong to the area.
        filtered = rnd_area._filter_cells(a.known_cells)
        select_wrong = [c not in filtered for c in a.known_cells]
        wrong = [c.unique_id for i, c in enumerate(a.known_cells)
                 if select_wrong[i]]
        assert not any(wrong), f"Error: Cells {wrong} are not part of the area!"
        est_distribution, conf = a.estimate_real_distribution(rnd_area)
        assert 0.0 < conf < 1.0, "Error: Confidence out of range [0, 1]!"
        print(f"{a.unique_id}s' estimated color dist is: {est_distribution}",
              f"with confidence: {conf}")
        # The estimate must be a proper distribution over all colors...
        self.assertAlmostEqual(sum(est_distribution), 1.0, places=7)
        len_colors = self.model.num_colors
        self.assertEqual(len(est_distribution), len_colors)
        # ...and equal the empirical frequencies of the known cells' colors.
        counts = [a_colors.count(color) for color in range(len_colors)]
        print(f"Color counts: {counts}")
        s = sum(counts)
        expected_distribution = [i / s for i in counts]
        print(f"Expected distribution: {expected_distribution}")
        self.assertEqual(list(est_distribution), expected_distribution)
class TestParticipationModel(unittest.TestCase):
    """Integration tests for ParticipationModel constructed with the full
    parameter set imported from democracy_sim.model_setup."""

    def setUp(self):
        """Build a model from all configuration values in model_setup."""
        self.model = ParticipationModel(height=height, width=width,
                                        num_agents=num_agents,
                                        num_colors=num_colors,
                                        num_personalities=num_personalities,
                                        known_cells=known_cells,
                                        common_assets=common_assets, mu=mu,
                                        election_impact_on_mutation=e_impact,
                                        num_areas=num_areas,
                                        draw_borders=draw_borders,
                                        election_costs=election_costs,
                                        rule_idx=rule_idx,
                                        distance_idx=distance_idx,
                                        heterogeneity=heterogeneity,
                                        color_patches_steps=color_patches_steps,
                                        av_area_height=av_area_height,
                                        av_area_width=av_area_width,
                                        area_size_variance=area_size_variance,
                                        patch_power=patch_power,
                                        max_reward=max_reward,
                                        show_area_stats=False)

    # def test_empty_model(self):
    #     # TODO: Test empty model
    #     model = ParticipationModel(10, 10, 0, 1, 0, 1, 0, 1, 1, 0.1, 1, 0, False, 1, 1, 1, 1, 1, False)
    #     self.assertEqual(model.num_agents, 0)

    def test_initialization(self):
        """The model creates the expected areas and a data collector."""
        areas_count = len([area for area in self.model.areas
                           if isinstance(area, Area)])
        self.assertEqual(areas_count, self.model.num_areas)
        self.assertIsInstance(self.model.datacollector, mesa.DataCollector)
        # TODO ... more tests

    def test_model_options(self):
        """Constructor arguments are reflected in the model attributes."""
        self.assertEqual(self.model.num_agents, num_agents)
        self.assertEqual(self.model.num_colors, num_colors)
        self.assertEqual(self.model.num_areas, num_areas)
        self.assertEqual(self.model.area_size_variance, area_size_variance)
        self.assertEqual(self.model.draw_borders, draw_borders)
        v_rule = social_welfare_functions[rule_idx]
        dist_func = distance_functions[distance_idx]
        self.assertEqual(self.model.common_assets, common_assets)
        self.assertEqual(self.model.voting_rule, v_rule)
        self.assertEqual(self.model.distance_func, dist_func)
        self.assertEqual(self.model.election_costs, election_costs)

    def test_create_color_distribution(self):
        """Heterogeneity 0 yields a uniform distribution; higher values differ."""
        eq_dst = self.model.create_color_distribution(heterogeneity=0)
        self.assertEqual([1/num_colors for _ in eq_dst], eq_dst)
        print(f"Color distribution with heterogeneity=0: {eq_dst}")
        het_dst = self.model.create_color_distribution(heterogeneity=1)
        print(f"Color distribution with heterogeneity=1: {het_dst}")
        mid_dst = self.model.create_color_distribution(heterogeneity=0.5)
        print(f"Color distribution with heterogeneity=0.5: {mid_dst}")
        # NOTE(review): distributions are random, so distinct heterogeneity
        # values are expected (but not strictly guaranteed) to differ.
        assert het_dst != eq_dst
        assert mid_dst != eq_dst
        assert het_dst != mid_dst

    def test_distribution_of_personalities(self):
        """Agents' empirical personality distribution tracks the target one."""
        p_dist = self.model.personality_distribution
        self.assertAlmostEqual(sum(p_dist), 1.0)
        self.assertEqual(len(p_dist), num_personalities)
        voting_agents = self.model.voting_agents
        nr_agents = self.model.num_agents
        personalities = list(self.model.personalities)
        p_counts = {str(i): 0 for i in personalities}
        # Count the occurrence of each personality
        for agent in voting_agents:
            p_counts[str(agent.personality)] += 1
        # Normalize the counts to get the real personality distribution
        real_dist = [p_counts[str(p)] / nr_agents for p in personalities]
        # Simple tests
        self.assertEqual(len(real_dist), len(p_dist))
        self.assertAlmostEqual(float(sum(real_dist)), 1.0)
        # Compare each value
        my_delta = 0.4 / num_personalities  # The more personalities, the smaller the delta
        for p_dist_val, real_p_dist_val in zip(p_dist, real_dist):
            self.assertAlmostEqual(p_dist_val, real_p_dist_val, delta=my_delta)


    def test_initialize_areas(self):
        # TODO (very non-trivial) - has been tested manually so far.
        pass

    def test_step(self):
        pass
        # TODO add test_step
        # def test_step(self):
        #     initial_data = self.model.datacollector.get_model_vars_dataframe().copy()
        #     self.model.step()
        #     new_data = self.model.datacollector.get_model_vars_dataframe()
        #     self.assertNotEqual(initial_data, new_data)
class TestVotingAgent(unittest.TestCase):
    """Tests for VoteAgent: combining personal preferences with the
    estimated real color distribution."""

    def setUp(self):
        """Reuse the full model; add one extra agent and a 5x5 area at (0, 0)."""
        test_model = TestParticipationModel()
        test_model.setUp()
        self.model = test_model.model
        personality = random.choice(self.model.personalities)
        self.agent = VoteAgent(num_agents + 1, self.model, pos=(0, 0),
                               personality=personality, assets=25)
        self.additional_test_area = Area(self.model.num_areas + 1,
                                         model=self.model, height=5,
                                         width=5, size_variance=0)
        self.additional_test_area.idx_field = (0, 0)  # Place the area at (0, 0)

    def test_combine_and_normalize_rank_arrays(self):
        """Combine two rank arrays with several weighting factors.

        NOTE(review): this test only prints the resulting rankings and
        asserts nothing -- consider adding assertions on the expected
        rankings for f=0.0 and f=1.0.
        """
        print("Test function combine_and_normalize_estimates")
        a = np.array([0.0, 0.2, 0.7, 0.5, 0.1, 0.8, 1.0])
        a_rank = np.argsort(a)
        print(f"Ranking of a: {a_rank}")
        b = np.array([1.0, 0.2, 0.7, 0.5, 0.1, 0.8, 0.0])
        b_rank = np.argsort(b)
        print(f"Ranking of b: {b_rank}")
        factors = [0.0, 0.2, 0.5, 1.0]
        for f in factors:
            result = combine_and_normalize(a, b, f)
            result_rank = np.argsort(result)
            print(f"Ranking of r with factor {f}: {result_rank}")

    def test_combine_and_normalize(self):
        """At factor 0 the estimate wins, at factor 1 the (normalized)
        personal preference wins; the result always sums to one."""
        a = self.agent
        test_area = self.additional_test_area
        assert a in test_area.agents  # Test if agent is present
        # Give the agent some cells to know of
        k = random.sample(range(2, len(test_area.cells)), 1)[0]
        print(f"Sample size: {k}")
        a.known_cells = random.sample(test_area.cells, k)
        est_dist, conf = a.estimate_real_distribution(test_area)
        own_prefs = a.personality
        # own_prefs = np.array([0.25, 0.5, 0.0, 0.0])  # Should also work..
        print(f"Agent {a.unique_id}s' personality: {own_prefs}"
              f" estimated color dist: {est_dist} with confidences: {conf}")
        for a_factor in [0.0, 0.2, 0.5, 1.0]:
            comb = combine_and_normalize(own_prefs, est_dist, a_factor)
            print(f"Assumed opt. distribution with factor {a_factor}: \n{comb}")
            # Validation
            if a_factor == 0.0:
                self.assertEqual(list(comb), list(est_dist))
            elif a_factor == 1.0:
                if sum(own_prefs) != 1.0:
                    own_prefs = own_prefs / sum(own_prefs)
                self.assertEqual(list(comb), list(own_prefs))
            self.assertTrue(np.isclose(sum(comb), 1.0, atol=1e-8))

    def test_compute_assumed_opt_dist(self):
        """The assumed optimal distribution is a proper distribution."""
        a = self.agent
        test_area = self.additional_test_area
        # Give the agent some cells to know of
        max_size = len(test_area.cells)
        k = random.sample(range(2, max_size), 1)[0]
        a.known_cells = random.sample(test_area.cells, k=k)
        est_dist, conf = a.estimate_real_distribution(test_area)
        own_prefs = a.personality
        print(f"The agents\npersonality: {own_prefs} \n"
              f"est_dist : {est_dist} and confidences: {conf}")
        r = a.compute_assumed_opt_dist(test_area)
        print(f"Assumed optimal distribution: {r}")
        self.assertTrue(np.isclose(sum(r), 1.0, atol=1e-8))
def create_gaussian_distribution(size):
    """Return a `size`-long normalized array shaped like a Gaussian curve.

    Draws `size` samples from N(0, 1), sorts them (producing a smooth
    curve-like array), flips negative values "up" with abs(), and
    normalizes so the entries sum to one.

    :param size: Number of entries in the returned distribution.
    :return: 1-D numpy array of non-negative values summing to 1.
    """
    rng = np.random.default_rng()
    dist = rng.normal(0, 1, size)
    dist.sort()  # To create a gaussian curve like array
    dist = np.abs(dist)  # Flip negative values "up"
    # Normalize the distribution to sum to one
    dist /= dist.sum()
    return dist


if __name__ == "__main__":
    # Fix: this demo/visualisation code used to run at module level, so
    # pytest collection opened plot windows and required matplotlib.
    # It is now guarded and only runs when executed as a script.
    import matplotlib.pyplot as plt

    # Example usage
    nr_options = 20
    gaussian_dist = create_gaussian_distribution(nr_options)

    nr_zeroes = gaussian_dist.size - np.count_nonzero(gaussian_dist)
    print("There are", nr_zeroes, "zero values in the distribution")

    # Plot the distribution
    plt.plot(gaussian_dist)
    plt.title("Normalized Gaussian Distribution")
    plt.show()

    sample_size = 800
    pool = np.arange(nr_options)
    rng = np.random.default_rng()
    print(pool.shape)
    chosen = rng.choice(pool, sample_size, p=gaussian_dist)

    plt.hist(chosen)
    plt.show()


class TestSetDimensions(unittest.TestCase):
    """Tests for Area dimension setup (size-variance handling)."""

    def setUp(self):
        """Create a single-area model with zero size variance."""
        self.model = create_default_model(
            num_areas=1,
            height=10,
            width=10,
            av_area_height=5,
            av_area_width=5,
            area_size_variance=0
        )

    def test_dimensions_no_variance(self):
        """With zero variance the area gets exactly the average size."""
        area = self.model.areas[0]
        self.assertEqual(area._width, 5)
        self.assertEqual(area._height, 5)

    def test_dimensions_out_of_range(self):
        """A size variance greater than 1 is rejected with a ValueError."""
        with self.assertRaises(ValueError):
            bad_model = create_default_model(
                num_areas=1,
                av_area_width=5,
                av_area_height=5,
                area_size_variance=2
            )
            _ = bad_model.areas[0]
class TestTallyVotes(unittest.TestCase):
    """Tests for ``Area._tally_votes``."""

    def setUp(self):
        self.model = create_default_model(num_areas=1)
        self.model.initialize_area = MagicMock()

    def test_tally_votes_array(self):
        """Tallying returns a numpy array."""
        votes = self.model.areas[0]._tally_votes()
        self.assertIsInstance(votes, np.ndarray)

    def test_tally_votes_empty(self):
        """With no agent able to pay, the tally is empty."""
        for agent in self.model.voting_agents:
            agent.assets = 0
        votes = self.model.areas[0]._tally_votes()
        self.assertEqual(votes.size, 0)


class TestUpdateColorDistribution(unittest.TestCase):
    """Tests for ``Area._update_color_distribution``."""

    def setUp(self):
        self.model = create_default_model(
            num_areas=1,
            num_colors=3
        )
        self.model.initialize_area = MagicMock()

    def test_color_distribution(self):
        """Recoloring cells changes the (still normalized) distribution."""
        area = self.model.areas[0]
        old_dist = np.copy(area._color_distribution)
        # Manually change some cell colors
        for cell in area.cells[:3]:
            cell.color = 1
        area._update_color_distribution()
        new_dist = area._color_distribution
        self.assertFalse(np.array_equal(old_dist, new_dist))
        self.assertAlmostEqual(np.sum(new_dist), 1.0, places=5)