From 6a5a37ab14149f05aa80b847604f2f6dd21c311f Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 14 Jun 2024 18:20:40 +0200 Subject: [PATCH 01/38] docs - fixed typos and made small adjustments --- docs/de/teaser.md | 24 +++++++++++++++--------- docs/teaser.md | 6 +++++- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/docs/de/teaser.md b/docs/de/teaser.md index 788ae6a..b0c43e7 100644 --- a/docs/de/teaser.md +++ b/docs/de/teaser.md @@ -1,8 +1,8 @@ -# Zukunft gestalten: Demokratische Forschung zur Bewältigung globaler Herausforderungen +# Zukunft gestalten: Demokratie-Forschung zur Bewältigung globaler Herausforderungen In einer Welt voller Komplexität und Unsicherheit stehen wir an einem Scheideweg. Dringende globale Herausforderungen wie der Klimawandel[^9], soziale Ungleichheit[^8] und die ethischen Dilemmata[^4], -die mit dem rasanten Fortschritt der künstlichen Intelligenz (KI) einhergehen, drängen nach Lösungen. +die mit dem rasanten Fortschritt künstlicher Intelligenz (KI) einhergehen, drängen nach Lösungen. Eine Zukunft, in der KI die menschliche Intelligenz in vielen Bereichen übertrifft, erscheint zunehmend wahrscheinlich[^3]. Können wir diese Technologie sinnvoll und sicher in bestehende Governance-Verfahren und Strukturen integrieren? @@ -47,13 +47,17 @@ Reale demokratische Verwaltung geht weit über das bloße Ineinklangbringen indi beinhaltet Pfadabhängigkeiten vergangener Entscheidungen, Desinformation und mangelnde Beteiligung, um nur einige offensichtliche Herausforderungen zu nennen. -Diese Herausforderungen zu bewältigen, erfordert innovative Ansätze. -Dieses Projekt zielt darauf ab, langfristig möglichst viele dieser Einflüsse durch Multi-Agenten-basierte Simulationen integriert zu untersuchen. 
- -Während das Modell und die Forschungsfragen der vorgeschlagenen Masterarbeit -diesen Ansatz nur in seinen absoluten Grundzügen darstellen können, sollte das Potenzial, durch Multi-Agenten-basiertes Modellieren -langfristig alle wesentlichen Aspekte der realen demokratischen Selbstverwaltung untersuchen zu können, -nicht unterschätzt werden. +Um diese Herausforderungen zu bewältigen, sind innovative Ansätze erforderlich. +Das vorliegende Projekt zielt darauf ab, langfristig möglichst viele dieser Einflüsse durch Multi-Agenten-basierte +Simulationen integriert zu untersuchen. + +Das Modell und die Forschungsfragen einer Masterarbeit +können diesen Ansatz natürlich nur in ihren absoluten Grundzügen darstellen. +Soweit uns bekannt, wurde dieser Ansatz noch nicht systematisch in der +Erforschung kollektiver Entscheidungsfindung angewandt, weshalb wir auch sehr grundlegend beginnen. +Dennoch sollte das Potenzial von Multi-Agenten-basiertem Modellieren nicht unterschätzt werden. +Langfristig wird dadurch sehr wahrscheinlich ermöglicht, +alle wesentlichen Aspekte der realen demokratischen Selbstverwaltung zu untersuchen[^10]. Die kollektive Intelligenz einer Gesellschaft durch verbesserte Governance-Verfahren zu stärken, ist wahrscheinlich der effektivste Weg und unser größter Hebel, die Herausforderungen unserer Zeit zu meistern. @@ -76,3 +80,5 @@ ist wahrscheinlich der effektivste Weg und unser größter Hebel, die Herausford [^8]: Thomas Piketty. Das Kapital im 21. Jahrhundert. CH Beck, 2014. [^9]: Hans-Otto Pörtner, Debra C Roberts, H Adams, C Adler, P Aldunce, E Ali, R Ara Begum, R Betts, R Bezner Kerr, R Biesbroek, et al. [Climate change 2022: Impacts, adaptation and vulnerability](https://hal.science/hal-03774939/document). IPCC Sixth Assessment Report, 2022. + +[^10]: Robert L Axtell and J Doyne Farmer. [Agent-based modeling in economics and finance](https://oms-inet.files.svdcdn.com/production/files/JEL-v2.0.pdf): Past, present, and future. 
In: Journal of Economic Literature (2022), pp. 1–10 \ No newline at end of file diff --git a/docs/teaser.md b/docs/teaser.md index 2196f7f..923cbbf 100644 --- a/docs/teaser.md +++ b/docs/teaser.md @@ -48,7 +48,9 @@ This research aims to pioneer a new path by incorporating these complexities thr While the model and research inquiries within the proposed master thesis can only represent this approach in its infancy, the potential of multi-agent-based modeling -to eventually encapsulate all essential facets of real-world democratic governance can hardly be overstated. +to eventually encapsulate all essential facets of real-world democratic governance can hardly be overstated[^10]. +To the best of our knowledge, this approach has not been systematically applied to researching social choice +or collective decision-making. It stands poised to boost what may be the most underestimated cornerstone of human society: our collective intelligence. @@ -72,3 +74,5 @@ our collective intelligence. [^8]: Thomas Piketty. Das Kapital im 21. Jahrhundert. CH Beck, 2014. [^9]: Hans-Otto Pörtner, Debra C Roberts, H Adams, C Adler, P Aldunce, E Ali, R Ara Begum, R Betts, R Bezner Kerr, R Biesbroek, et al. [Climate change 2022: Impacts, adaptation and vulnerability](https://hal.science/hal-03774939/document). IPCC Sixth Assessment Report, 2022. + +[^10]: Robert L Axtell and J Doyne Farmer. [Agent-based modeling in economics and finance](https://oms-inet.files.svdcdn.com/production/files/JEL-v2.0.pdf): Past, present, and future. In: Journal of Economic Literature (2022), pp. 
1–10 \ No newline at end of file From 876b39ab867f37140905bfc44ca8cc5953a1a84e Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 21 Jun 2024 18:19:45 +0200 Subject: [PATCH 02/38] add my democracy-sim model, server version in run.py rudimentary working, app.py with solara not yet --- democracy_sim/__init__.py | 0 democracy_sim/model_setup.py | 91 ++++++++++++++++++++++++++++ democracy_sim/participation_agent.py | 90 +++++++++++++++++++++++++++ democracy_sim/participation_model.py | 66 ++++++++++++++++++++ democracy_sim/run.py | 13 ++++ requirements.txt | 3 +- 6 files changed, 262 insertions(+), 1 deletion(-) create mode 100644 democracy_sim/__init__.py create mode 100644 democracy_sim/model_setup.py create mode 100644 democracy_sim/participation_agent.py create mode 100644 democracy_sim/participation_model.py create mode 100644 democracy_sim/run.py diff --git a/democracy_sim/__init__.py b/democracy_sim/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py new file mode 100644 index 0000000..4bcb535 --- /dev/null +++ b/democracy_sim/model_setup.py @@ -0,0 +1,91 @@ +""" +handles the definition of the canvas parameters and +the drawing of the model representation on the canvas +""" +# import webbrowser +import mesa +from mesa.visualization.modules import ChartModule +from participation_model import ParticipationModel +from participation_agent import ColorCell + +_COLORS = [ + "White", + "Red", + "Green", + "Blue", + "Yellow", + "Aqua", + "Fuchsia", + "Gray", + "Lime", + "Maroon", + "Navy", + "Olive", + "Orange", + "Purple", + "Silver", + "Teal", +] + + +def participation_draw(cell: ColorCell): + """ + This function is registered with the visualization server to be called + each tick to indicate how to draw the cell in its current color. + + :param cell: the cell in the simulation + + :return: the portrayal dictionary. 
+ """ + if cell is None: + raise AssertionError + # num_agents_at_cell = cell.model.grid.get_cell_list_contents([cell.pos]) + portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, + "x": cell.row, "y": cell.col, + "Color": _COLORS[cell.color]} + return portrayal + + +grid_rows = 160 +grid_cols = 200 +cell_size = 5 +canvas_width = grid_rows * cell_size +canvas_height = grid_cols * cell_size + +canvas_element = mesa.visualization.CanvasGrid( + participation_draw, grid_rows, grid_cols, canvas_width, canvas_height +) + +happy_chart = mesa.visualization.ChartModule([{"Label": "happy", + "Color": "Black"}]) + + +def compute_wealth(model: ParticipationModel): + agents_wealth = [agent.wealth for agent in model.agent_scheduler.agents] + return {"wealth": agents_wealth} + + +wealth_chart = mesa.visualization.modules.ChartModule( + [{"Label": "wealth", "Color": "Black"}], + data_collector_name='datacollector' +) + +model_params = { + "height": mesa.visualization.Slider( + name="World Height", value=200, min_value=10, max_value=1000, step=10, + description="Select the height of the world" + ), + "width": mesa.visualization.Slider( + name="World Width", value=160, min_value=10, max_value=1000, step=10, + description="Select the width of the world" + ), + "num_agents": mesa.visualization.Slider( + name="# Agents", value=200, min_value=10, max_value=9999999, step=10 + ), + "num_colors": mesa.visualization.Slider( + name="# Colors", value=4, min_value=2, max_value=100, step=1 + ), + # "num_regions": mesa.visualization.Slider( + # name="# Regions", value=4, min_value=4, max_value=500, step=1 + # ), +} diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py new file mode 100644 index 0000000..61b60ba --- /dev/null +++ b/democracy_sim/participation_agent.py @@ -0,0 +1,90 @@ +from typing import TYPE_CHECKING, cast +import mesa +from mesa import Agent + +if TYPE_CHECKING: + from participation_model import ParticipationModel + + 
+class VoteAgent(Agent): + """An agent with fixed initial wealth.""" + + def __init__(self, unique_id, model: mesa.Model): + # Pass the parameters to the parent class. + super().__init__(unique_id, model) + + # Create the agent's variable and set the initial values. + self.wealth = 1 + + def step(self): + # Verify agent has some wealth + if self.wealth > 0: + other_agent = self.random.choice(self.model.agent_scheduler.agents) + if other_agent is not None: + other_agent.wealth += 1 + self.wealth -= 1 + + def move(self): + if TYPE_CHECKING: # Type hint for IDEs + self.model = cast(ParticipationModel, self.model) + possible_steps = self.model.grid.get_neighborhood( + self.pos, + moore=True, # Moore vs. von neumann + include_center=False) + new_position = self.random.choice(possible_steps) + self.model.grid.move_agent(self, new_position) + + +class ColorCell(mesa.Agent): + """ + Represents a cell's color + """ + + def __init__(self, pos, model, initial_color: int, num_colors: int): + """ + Create a cell, in the given state, at the given row, col position. 
+ """ + super().__init__(pos, model) + self._row = pos[0] + self._col = pos[1] + self._color = initial_color + self._next_color = None + self.color_ids = list(range(num_colors)) # Colors as integers + + @property + def col(self): + """Return the col location of this cell.""" + return self._col + + @property + def row(self): + """Return the row location of this cell.""" + return self._row + + @property + def color(self): + """Return the current color of this cell.""" + return self._color + + def color_step(self): + """ + Determines the cells' color for the next step + """ + # _neighbor_iter = self.model.grid.iter_neighbors((self._row, self._col), True) + # neighbors_opinion = Counter(n.get_state() for n in _neighbor_iter) + # # Following is a a tuple (attribute, occurrences) + # polled_opinions = neighbors_opinion.most_common() + # tied_opinions = [] + # for neighbor in polled_opinions: + # if neighbor[1] == polled_opinions[0][1]: + # tied_opinions.append(neighbor) + # + # self._next_color = self.random.choice(tied_opinions)[0] + pass + + def advance(self): + """ + Set the state of the agent to the next state + """ + # self._color = self._next_color + pass diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py new file mode 100644 index 0000000..9abd596 --- /dev/null +++ b/democracy_sim/participation_model.py @@ -0,0 +1,66 @@ +import random + +import mesa +from mesa.time import StagedActivation +from participation_agent import VoteAgent, ColorCell + + +class Area: + def __init__(self, model, width, height): + self.model = model + self.width = width + self.height = height + self.schedule = mesa.time.RandomActivation(self.model) + self.agents = [] + + def add_agent(self, agent): + self.schedule.add(agent) + self.agents.append(agent) + + def step(self): + self.schedule.step() + + +class ParticipationModel(mesa.Model): + """A model with some number of agents.""" + + def __init__(self, num_agents, num_colors, width, height): + 
super().__init__() + self.num_agents = num_agents + self.num_colors = num_colors + self.height = height + self.width = width + # Create schedulers and assign it to the model + self.color_scheduler = mesa.time.RandomActivation(self) + self.agent_scheduler = mesa.time.RandomActivation(self) + # self.schedule = StagedActivation(self, + # stage_list=['color_step', 'step']) + # The grid + # SingleGrid enforces at most one agent per cell; + # MultiGrid allows multiple agents to be in the same cell. + self.grid = mesa.space.MultiGrid(width, height, torus=True) + self.datacollector = mesa.DataCollector( + model_reporters={"wealth": "wealth"}, + # Model-level count of agents' wealth + ) + # Create colors for the cells + for _, (row, col) in self.grid.coord_iter(): + color = random.choice(range(num_colors)) + cell = ColorCell((row, col), self, color, num_colors) + self.grid.place_agent(cell, (row, col)) + # Add the cell color to the scheduler + self.color_scheduler.add(cell) + # Create agents + for a_id in range(self.num_agents): + a = VoteAgent(a_id, self) + # Add the agent to the scheduler + self.agent_scheduler.add(a) + + def step(self): + """Advance the model by one step.""" + + # The model's step will go here for now + # this will call the step method of each agent + # and print the agent's unique_id + self.color_scheduler.step() + self.agent_scheduler.step() diff --git a/democracy_sim/run.py b/democracy_sim/run.py new file mode 100644 index 0000000..c25c3c7 --- /dev/null +++ b/democracy_sim/run.py @@ -0,0 +1,13 @@ +# import webbrowser +import mesa +from participation_model import ParticipationModel +from model_setup import model_params, canvas_element, happy_chart, wealth_chart + +server = mesa.visualization.ModularServer( + model_cls=ParticipationModel, + visualization_elements=[canvas_element, wealth_chart, happy_chart], + name="DemocracySim", + model_params=model_params, +) + +server.launch(open_browser=True) diff --git a/requirements.txt b/requirements.txt index 
13585c3..31b89a5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,4 +15,5 @@ altair~=5.3.0 streamlit~=1.34.0 mkdocs-git-revision-date-localized-plugin~=0.9.0 mkdocs-static-i18n -mkdocs-static-i18n[material] \ No newline at end of file +mkdocs-static-i18n[material] +git+https://github.com/Logende/mesa-replay@main#egg=Mesa-Replay \ No newline at end of file From ae43861d9f55c19480b0b72c1542e705611e7ebf Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 24 Jun 2024 14:18:42 +0200 Subject: [PATCH 03/38] depiction of colors and count valules of agents in cells work --- democracy_sim/app.py | 60 ++++++++++++++++++++++++++++ democracy_sim/model_setup.py | 47 ++++++++++++++++------ democracy_sim/participation_agent.py | 38 ++++++++++++++---- democracy_sim/participation_model.py | 22 ++++++---- requirements.txt | 2 +- 5 files changed, 142 insertions(+), 27 deletions(-) create mode 100644 democracy_sim/app.py diff --git a/democracy_sim/app.py b/democracy_sim/app.py new file mode 100644 index 0000000..b16989b --- /dev/null +++ b/democracy_sim/app.py @@ -0,0 +1,60 @@ +from mesa.experimental import JupyterViz, make_text, Slider +from participation_model import ParticipationModel +from numpy import arange +import solara +from model_setup import participation_draw +# Data visualization tools. +from matplotlib.figure import Figure + + +def get_agents_wealth(model: ParticipationModel): + """ + Display a text count of how many happy agents there are. 
+ """ + all_wealth = list() + # Store the results + for agent in model.agent_scheduler.agents: + all_wealth.append(agent.wealth) + return f"Agents wealth: {all_wealth}" + + +def agent_portrayal(agent): + # Construct and return the portrayal dictionary + portrayal = { + "size": agent.wealth, + "color": "tab:orange", + } + return portrayal + + +def space_drawer(model, agent_portrayal): + fig = Figure(figsize=(8, 5), dpi=100) + ax = fig.subplots() + + # Set plot limits and aspect + ax.set_xlim(0, model.grid.width) + ax.set_ylim(0, model.grid.height) + ax.set_aspect("equal") + ax.invert_yaxis() # Match grid's origin + + fig.tight_layout() + + return solara.FigureMatplotlib(fig) + + +model_params = { + "height": Slider("World Height", 200, 10, 1000, 10), + "width": Slider("World Width", 160, 10, 1000, 10), + "num_agents": Slider("# Agents", 200, 10, 9999999, 10), + "num_colors": Slider("# Colors", 4, 2, 100, 1), +} + +page = JupyterViz( + ParticipationModel, + model_params, + measures=["wealth", make_text(get_agents_wealth),], + # agent_portrayal=agent_portrayal, + agent_portrayal=participation_draw, + space_drawer=space_drawer, +) +page # noqa diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 4bcb535..56ee34b 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -6,7 +6,7 @@ import mesa from mesa.visualization.modules import ChartModule from participation_model import ParticipationModel -from participation_agent import ColorCell +from participation_agent import ColorCell, VoteAgent _COLORS = [ "White", @@ -25,7 +25,22 @@ "Purple", "Silver", "Teal", -] + "Pink", + "Brown", + "Gold", + "Coral", + "Crimson", + "DarkBlue", + "DarkRed", + "DarkGreen", + "DarkKhaki", + "DarkMagenta", + "DarkOliveGreen", + "DarkOrange", + "DarkTurquoise", + "DarkViolet", + "DeepPink", +] # 30 colors def participation_draw(cell: ColorCell): @@ -39,16 +54,24 @@ def participation_draw(cell: ColorCell): """ if cell is None: raise 
AssertionError - # num_agents_at_cell = cell.model.grid.get_cell_list_contents([cell.pos]) + if isinstance(cell, VoteAgent): + return None + # # Retrieve the agents of the cell + # agents = cell.model.grid.get_cell_list_contents([cell.pos]) + # # Count the number of ParticipationAgents (subtracting the color cell) + # nr_agents = len(agents) portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, "x": cell.row, "y": cell.col, "Color": _COLORS[cell.color]} + if cell.num_agents_in_cell > 0: + portrayal["text"] = str(cell.num_agents_in_cell) + portrayal["text_color"] = "Black" return portrayal -grid_rows = 160 -grid_cols = 200 -cell_size = 5 +grid_rows = 80 +grid_cols = 100 +cell_size = 10 canvas_width = grid_rows * cell_size canvas_height = grid_cols * cell_size @@ -72,18 +95,18 @@ def compute_wealth(model: ParticipationModel): model_params = { "height": mesa.visualization.Slider( - name="World Height", value=200, min_value=10, max_value=1000, step=10, - description="Select the height of the world" + name="World Height", value=grid_cols, min_value=10, max_value=1000, + step=10, description="Select the height of the world" ), "width": mesa.visualization.Slider( - name="World Width", value=160, min_value=10, max_value=1000, step=10, - description="Select the width of the world" + name="World Width", value=grid_rows, min_value=10, max_value=1000, + step=10, description="Select the width of the world" ), "num_agents": mesa.visualization.Slider( - name="# Agents", value=200, min_value=10, max_value=9999999, step=10 + name="# Agents", value=800, min_value=10, max_value=99999, step=10 ), "num_colors": mesa.visualization.Slider( - name="# Colors", value=4, min_value=2, max_value=100, step=1 + name="# Colors", value=4, min_value=2, max_value=len(_COLORS), step=1 ), # "num_regions": mesa.visualization.Slider( # name="# Regions", value=4, min_value=4, max_value=500, step=1 diff --git a/democracy_sim/participation_agent.py 
b/democracy_sim/participation_agent.py index 61b60ba..fbb3a23 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -9,13 +9,24 @@ class VoteAgent(Agent): """An agent with fixed initial wealth.""" - def __init__(self, unique_id, model: mesa.Model): + def __init__(self, unique_id, pos, model: mesa.Model): # Pass the parameters to the parent class. super().__init__(unique_id, model) - + self._row = pos[0] + self._col = pos[1] # Create the agent's variable and set the initial values. self.wealth = 1 + @property + def col(self): + """Return the col location of this cell.""" + return self._col + + @property + def row(self): + """Return the row location of this cell.""" + return self._row + def step(self): # Verify agent has some wealth if self.wealth > 0: @@ -40,7 +51,7 @@ class ColorCell(mesa.Agent): Represents a cell's color """ - def __init__(self, pos, model, initial_color: int, num_colors: int): + def __init__(self, pos, model, initial_color: int): """ Create a cell, in the given state, at the given row, col position. 
""" @@ -49,23 +60,36 @@ def __init__(self, pos, model, initial_color: int, num_colors: int): self._col = pos[1] self._color = initial_color self._next_color = None - self.color_ids = list(range(num_colors)) # Colors as integers + self._num_agents_in_cell = 0 @property def col(self): - """Return the col location of this cell.""" + """The col location of this cell.""" return self._col @property def row(self): - """Return the row location of this cell.""" + """The row location of this cell.""" return self._row @property def color(self): - """Return the current color of this cell.""" + """The current color of this cell.""" return self._color + @property + def num_agents_in_cell(self): + """The number of agents in this cell.""" + return self._num_agents_in_cell + + @num_agents_in_cell.setter + def num_agents_in_cell(self, value): + self._num_agents_in_cell = value + + @num_agents_in_cell.deleter + def num_agents_in_cell(self): + del self._num_agents_in_cell + def color_step(self): """ Determines the cells' color for the next step diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 9abd596..023b4ff 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,5 +1,4 @@ import random - import mesa from mesa.time import StagedActivation from participation_agent import VoteAgent, ColorCell @@ -31,7 +30,7 @@ def __init__(self, num_agents, num_colors, width, height): self.height = height self.width = width # Create schedulers and assign it to the model - self.color_scheduler = mesa.time.RandomActivation(self) + self.color_cell_scheduler = mesa.time.RandomActivation(self) self.agent_scheduler = mesa.time.RandomActivation(self) # self.schedule = StagedActivation(self, # stage_list=['color_step', 'step']) @@ -43,18 +42,27 @@ def __init__(self, num_agents, num_colors, width, height): model_reporters={"wealth": "wealth"}, # Model-level count of agents' wealth ) - # Create colors for the cells + # 
Create color ids for the cells for _, (row, col) in self.grid.coord_iter(): color = random.choice(range(num_colors)) - cell = ColorCell((row, col), self, color, num_colors) + cell = ColorCell((row, col), self, color) self.grid.place_agent(cell, (row, col)) # Add the cell color to the scheduler - self.color_scheduler.add(cell) + self.color_cell_scheduler.add(cell) # Create agents for a_id in range(self.num_agents): - a = VoteAgent(a_id, self) + # Get a random position + x = self.random.randrange(self.width) + y = self.random.randrange(self.height) + a = VoteAgent(a_id, (x, y), self) # Add the agent to the scheduler self.agent_scheduler.add(a) + # Place at a random cell + self.grid.place_agent(a, (x, y)) + # Count the agent at the chosen cell + agents = self.grid.get_cell_list_contents([(x, y)]) + cell = [a for a in agents if isinstance(a, ColorCell)][0] + cell.num_agents_in_cell = cell.num_agents_in_cell + 1 def step(self): """Advance the model by one step.""" @@ -62,5 +70,5 @@ def step(self): # The model's step will go here for now # this will call the step method of each agent # and print the agent's unique_id - self.color_scheduler.step() + self.color_cell_scheduler.step() self.agent_scheduler.step() diff --git a/requirements.txt b/requirements.txt index 31b89a5..5118a61 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ Mesa~=2.3.0 numpy~=1.26.4 -solara~=1.32.1 +solara~=1.33 matplotlib~=3.9.0 ipyvuetify~=1.9.4 seaborn~=0.13.2 From 80fe2c0898f3ff1d139d761e3d3913afdcc4e8d9 Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 5 Jul 2024 12:36:36 +0200 Subject: [PATCH 04/38] Areas implemented (adjustable with borders being drawn if needed) - elections not yet implemented --- democracy_sim/app.py | 17 +-- democracy_sim/model_setup.py | 94 +++++++++------ democracy_sim/participation_agent.py | 65 +++++++---- democracy_sim/participation_model.py | 164 +++++++++++++++++++++++---- democracy_sim/run.py | 4 +- 5 files changed, 257 insertions(+), 87 
deletions(-) diff --git a/democracy_sim/app.py b/democracy_sim/app.py index b16989b..b7363da 100644 --- a/democracy_sim/app.py +++ b/democracy_sim/app.py @@ -1,5 +1,6 @@ from mesa.experimental import JupyterViz, make_text, Slider from participation_model import ParticipationModel +from participation_agent import VoteAgent from numpy import arange import solara from model_setup import participation_draw @@ -7,21 +8,21 @@ from matplotlib.figure import Figure -def get_agents_wealth(model: ParticipationModel): +def get_agents_assets(model: ParticipationModel): """ Display a text count of how many happy agents there are. """ - all_wealth = list() + all_assets = list() # Store the results - for agent in model.agent_scheduler.agents: - all_wealth.append(agent.wealth) - return f"Agents wealth: {all_wealth}" + for agent in model.all_agents: + all_assets.append(agent.assets) + return f"Agents wealth: {all_assets}" -def agent_portrayal(agent): +def agent_portrayal(agent: VoteAgent): # Construct and return the portrayal dictionary portrayal = { - "size": agent.wealth, + "size": agent.assets, "color": "tab:orange", } return portrayal @@ -52,7 +53,7 @@ def space_drawer(model, agent_portrayal): page = JupyterViz( ParticipationModel, model_params, - measures=["wealth", make_text(get_agents_wealth),], + measures=["wealth", make_text(get_agents_assets),], # agent_portrayal=agent_portrayal, agent_portrayal=participation_draw, space_drawer=space_drawer, diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 56ee34b..cc231be 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -7,6 +7,22 @@ from mesa.visualization.modules import ChartModule from participation_model import ParticipationModel from participation_agent import ColorCell, VoteAgent +from math import sqrt + +# Model grid parameters +grid_rows = 100 # height +grid_cols = 80 # width +cell_size = 10 +canvas_height = grid_rows * cell_size +canvas_width = grid_cols * cell_size +# 
Colors and agents +num_colors = 4 +num_agents = 800 +# Voting area parameters +num_areas = 4 +area_height = grid_rows // int(sqrt(num_areas)) +area_width = grid_cols // int(sqrt(num_areas)) +area_var = 0.0 _COLORS = [ "White", @@ -16,7 +32,7 @@ "Yellow", "Aqua", "Fuchsia", - "Gray", + "Lavender", "Lime", "Maroon", "Navy", @@ -56,59 +72,71 @@ def participation_draw(cell: ColorCell): raise AssertionError if isinstance(cell, VoteAgent): return None - # # Retrieve the agents of the cell - # agents = cell.model.grid.get_cell_list_contents([cell.pos]) - # # Count the number of ParticipationAgents (subtracting the color cell) - # nr_agents = len(agents) + color = _COLORS[cell.color] portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, "x": cell.row, "y": cell.col, - "Color": _COLORS[cell.color]} + "Color": color} + # If the cell is a border cell, change its appearance + if cell.is_border_cell and cell.model.draw_borders: + portrayal["Shape"] = "circle" + portrayal["r"] = 0.9 # Adjust the radius to fit within the cell + if color == "White": + portrayal["Color"] = "Grey" if cell.num_agents_in_cell > 0: portrayal["text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" return portrayal -grid_rows = 80 -grid_cols = 100 -cell_size = 10 -canvas_width = grid_rows * cell_size -canvas_height = grid_cols * cell_size - canvas_element = mesa.visualization.CanvasGrid( - participation_draw, grid_rows, grid_cols, canvas_width, canvas_height + participation_draw, grid_cols, grid_rows, canvas_width, canvas_height ) -happy_chart = mesa.visualization.ChartModule([{"Label": "happy", - "Color": "Black"}]) - - -def compute_wealth(model: ParticipationModel): - agents_wealth = [agent.wealth for agent in model.agent_scheduler.agents] - return {"wealth": agents_wealth} +a_chart = mesa.visualization.ChartModule([{"Label": "Number of agents", + "Color": "Black"}], + data_collector_name='datacollector') wealth_chart = mesa.visualization.modules.ChartModule( - 
[{"Label": "wealth", "Color": "Black"}], + [{"Label": "Collective assets", "Color": "Black"}], data_collector_name='datacollector' ) model_params = { - "height": mesa.visualization.Slider( - name="World Height", value=grid_cols, min_value=10, max_value=1000, - step=10, description="Select the height of the world" - ), - "width": mesa.visualization.Slider( - name="World Width", value=grid_rows, min_value=10, max_value=1000, - step=10, description="Select the width of the world" - ), + "height": grid_rows, + "width": grid_cols, "num_agents": mesa.visualization.Slider( - name="# Agents", value=800, min_value=10, max_value=99999, step=10 + name="# Agents", value=num_agents, min_value=10, max_value=99999, + step=10 ), "num_colors": mesa.visualization.Slider( - name="# Colors", value=4, min_value=2, max_value=len(_COLORS), step=1 + name="# Colors", value=num_colors, min_value=2, max_value=len(_COLORS), + step=1 + ), + "num_areas": mesa.visualization.Slider( + name=f"# Areas within the {grid_rows}x{grid_cols} world", step=1, + value=num_areas, min_value=4, max_value=min(grid_cols, grid_rows)//2 + ), + "av_area_height": mesa.visualization.Slider( + name="Av. area height", value=area_height, + min_value=2, max_value=grid_rows//2, + step=10, description="Select the average height of an area" ), - # "num_regions": mesa.visualization.Slider( - # name="# Regions", value=4, min_value=4, max_value=500, step=1 + "av_area_width": mesa.visualization.Slider( + name="Av. 
area width", value=area_width, + min_value=2, max_value=grid_cols//2, + step=10, description="Select the average width of an area" + ), + "area_size_variance": mesa.visualization.Slider( + name="Area size variance", value=area_var, min_value=0.0, max_value=1.0, + step=0.1, description="Select the variance of the area sizes" + ), + # "area_overlap": mesa.visualization.Slider( + # name="Area overlap", value=area_overlap, min_value=0, + # max_value=max(area_width, area_height) // 2, + # step=1, description="Select the overlap of the areas" # ), + "draw_borders": mesa.visualization.Checkbox( + name="Draw border cells", value=False + ), } diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index fbb3a23..f0a4f2f 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -7,15 +7,15 @@ class VoteAgent(Agent): - """An agent with fixed initial wealth.""" + """An agent that has limited knowledge and resources and + can decide to use them to participate in elections.""" - def __init__(self, unique_id, pos, model: mesa.Model): + def __init__(self, unique_id, pos, model: mesa.Model, assets=1): # Pass the parameters to the parent class. super().__init__(unique_id, model) self._row = pos[0] self._col = pos[1] - # Create the agent's variable and set the initial values. - self.wealth = 1 + self._assets = assets @property def col(self): @@ -27,23 +27,36 @@ def row(self): """Return the row location of this cell.""" return self._row - def step(self): - # Verify agent has some wealth - if self.wealth > 0: - other_agent = self.random.choice(self.model.agent_scheduler.agents) - if other_agent is not None: - other_agent.wealth += 1 - self.wealth -= 1 - - def move(self): - if TYPE_CHECKING: # Type hint for IDEs - self.model = cast(ParticipationModel, self.model) - possible_steps = self.model.grid.get_neighborhood( - self.pos, - moore=True, # Moore vs. 
von neumann - include_center=False) - new_position = self.random.choice(possible_steps) - self.model.grid.move_agent(self, new_position) + @property + def assets(self): + """Return the assets of this agent.""" + return self._assets + + @assets.setter + def assets(self, value): + self._assets = value + + @assets.deleter + def assets(self): + del self._assets + + # def step(self): + # # Verify agent has some wealth + # if self.wealth > 0: + # other_a = self.random.choice(self.model.agent_scheduler.agents) + # if other_agent is not None: + # other_a.wealth += 1 + # self.wealth -= 1 + + # def move(self): + # if TYPE_CHECKING: # Type hint for IDEs + # self.model = cast(ParticipationModel, self.model) + # possible_steps = self.model.grid.get_neighborhood( + # self.pos, + # moore=True, # Moore vs. von neumann + # include_center=False) + # new_position = self.random.choice(possible_steps) + # self.model.grid.move_agent(self, new_position) class ColorCell(mesa.Agent): @@ -61,6 +74,8 @@ def __init__(self, pos, model, initial_color: int): self._color = initial_color self._next_color = None self._num_agents_in_cell = 0 + self.areas = [] + self.is_border_cell = False @property def col(self): @@ -90,13 +105,17 @@ def num_agents_in_cell(self, value): def num_agents_in_cell(self): del self._num_agents_in_cell + def add_area(self, area): + self.areas.append(area) + def color_step(self): """ Determines the cells' color for the next step """ - # _neighbor_iter = self.model.grid.iter_neighbors((self._row, self._col), True) + # _neighbor_iter = self.model.grid.iter_neighbors( + # (self._row, self._col), True) # neighbors_opinion = Counter(n.get_state() for n in _neighbor_iter) - # # Following is a a tuple (attribute, occurrences) + # # Following is a tuple (attribute, occurrences) # polled_opinions = neighbors_opinion.most_common() # tied_opinions = [] # for neighbor in polled_opinions: diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 
023b4ff..d621a50 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,50 +1,120 @@ import random +from math import sqrt import mesa from mesa.time import StagedActivation from participation_agent import VoteAgent, ColorCell -class Area: - def __init__(self, model, width, height): +class Area: # TODO implement this + def __init__(self, model, height, width, size_variance=0): + if size_variance == 0: + self._width = width + self._height = height + self.width_off, self.height_off = 0, 0 + elif size_variance > 1 or size_variance < 0: + raise ValueError("Size variance must be between 0 and 1") + else: # Apply variance + w_var_factor = random.uniform(1 - size_variance, 1 + size_variance) + h_var_factor = random.uniform(1 - size_variance, 1 + size_variance) + self._width = int(width * w_var_factor) + self.width_off = abs(width - self._width) + self._height = int(height * h_var_factor) + self.height_off = abs(height - self._height) self.model = model - self.width = width - self.height = height - self.schedule = mesa.time.RandomActivation(self.model) self.agents = [] + self.cells = [] + self._idx_field = None + + @property + def idx_field(self): + return self._idx_field + + @idx_field.setter + def idx_field(self, value: tuple): + try: + x_val, y_val = value + except ValueError: + raise ValueError("The idx_field must be a tuple") + if x_val < 0 or x_val >= self.model.width: + raise ValueError(f"The x={x_val} value must be within the grid") + if y_val < 0 or y_val >= self.model.height: + raise ValueError(f"The y={y_val} value must be within the grid") + x_off = self.width_off // 2 + y_off = self.height_off // 2 + # Adjusting indices with offset and ensuring they wrap around the grid + adjusted_x = (x_val + x_off) % self.model.width + adjusted_y = (y_val + y_off) % self.model.height + # Assign the cells to the area + for x_area in range(self._width): + for y_area in range(self._height): + x = (adjusted_x + x_area) % 
self.model.width + y = (adjusted_y + y_area) % self.model.height + local_agents = self.model.grid.get_cell_list_contents([(x, y)]) + for a in local_agents: + if isinstance(a, VoteAgent): + self.add_agent(a) # Add the agent to the area + elif isinstance(a, ColorCell): + a.add_area(self) # Add the area to the cell + # Mark as a border cell if true + if (x_area == 0 or y_area == 0 + or x_area == self._width - 1 + or y_area == self._height - 1): + a.is_border_cell = True + self.add_cell(a) # Add the cell to the area + + self._idx_field = (adjusted_x, adjusted_y) def add_agent(self, agent): - self.schedule.add(agent) self.agents.append(agent) - def step(self): - self.schedule.step() + def add_cell(self, cell): + self.cells.append(cell) + + def conduct_election(self, rule): + # Placeholder for election logic + pass + + +def compute_collective_assets(model): + sum_assets = sum(agent.assets for agent in model.all_agents) + return sum_assets + + +def get_num_agents(model): + return len(model.all_agents) class ParticipationModel(mesa.Model): """A model with some number of agents.""" - def __init__(self, num_agents, num_colors, width, height): + def __init__(self, height, width, num_agents, num_colors, num_areas, + av_area_height, av_area_width, area_size_variance, + draw_borders=False): super().__init__() - self.num_agents = num_agents - self.num_colors = num_colors self.height = height self.width = width + self.num_agents = num_agents + self.all_agents = [] + self.num_colors = num_colors + # Area variables + self.num_areas = num_areas + self.av_area_height = av_area_height + self.av_area_width = av_area_width + self.area_size_variance = area_size_variance # Create schedulers and assign it to the model self.color_cell_scheduler = mesa.time.RandomActivation(self) - self.agent_scheduler = mesa.time.RandomActivation(self) + self.area_scheduler = mesa.time.RandomActivation(self) + # self.agent_scheduler = mesa.time.RandomActivation(self) # self.schedule = StagedActivation(self, 
# stage_list=['color_step', 'step']) # The grid # SingleGrid enforces at most one agent per cell; # MultiGrid allows multiple agents to be in the same cell. - self.grid = mesa.space.MultiGrid(width, height, torus=True) - self.datacollector = mesa.DataCollector( - model_reporters={"wealth": "wealth"}, - # Model-level count of agents' wealth - ) + self.grid = mesa.space.MultiGrid(height=height, width=width, torus=True) + self.draw_borders = draw_borders # Create color ids for the cells for _, (row, col) in self.grid.coord_iter(): - color = random.choice(range(num_colors)) + color = random.choice(range(num_colors)) # TODO improve this cell = ColorCell((row, col), self, color) self.grid.place_agent(cell, (row, col)) # Add the cell color to the scheduler @@ -55,14 +125,50 @@ def __init__(self, num_agents, num_colors, width, height): x = self.random.randrange(self.width) y = self.random.randrange(self.height) a = VoteAgent(a_id, (x, y), self) - # Add the agent to the scheduler - self.agent_scheduler.add(a) + # Add the agent to the models' agents and scheduler + self.all_agents.append(a) # Place at a random cell self.grid.place_agent(a, (x, y)) # Count the agent at the chosen cell agents = self.grid.get_cell_list_contents([(x, y)]) cell = [a for a in agents if isinstance(a, ColorCell)][0] cell.num_agents_in_cell = cell.num_agents_in_cell + 1 + # Create areas spread approximately evenly across the grid + roo_apx = int(sqrt(self.num_areas)) + area_x_dist = self.grid.width // roo_apx + area_y_dist = self.grid.height // roo_apx + x_coords = range(0, self.grid.width, area_x_dist) + y_coords = range(0, self.grid.height, area_y_dist) + for x_coord in x_coords: + for y_coord in y_coords: + area = Area(self, av_area_height, av_area_width, + area_size_variance) + print(f"Area {id(area)} at {x_coord}, {y_coord}") + area.idx_field = (x_coord, y_coord) + # area_height = self.dist_area_height() + # area_width = self.dist_area_width() + # new_area = Area(self, area_height, area_width) + 
# # Add agents to the area + # for agent in self.all_agents: + # if len(new_area.agents) < area_height * area_width: + # new_area.add_agent(agent) + # Add the area to the model + self.area_scheduler.add(area) + # Data collector + self.datacollector = mesa.DataCollector( + model_reporters={ + "Collective assets": compute_collective_assets, + "Number of agents": get_num_agents, + # "Middle Class": get_num_mid_agents, + # "Savings": get_total_savings, + # "Wallets": get_total_wallets, + # "Money": get_total_money, + # "Loans": get_total_loans, + }, + agent_reporters={"Wealth": lambda x: getattr(x, "assets", None)}, + ) + # Collect initial data + self.datacollector.collect(self) def step(self): """Advance the model by one step.""" @@ -71,4 +177,20 @@ def step(self): # this will call the step method of each agent # and print the agent's unique_id self.color_cell_scheduler.step() - self.agent_scheduler.step() + self.datacollector.collect(self) # Collect data at each step + + # def dist_area_height(self): + # av_height = self.av_area_height + # variance = self.area_size_variance + # return self.random.randint( + # av_height - variance * av_height, + # av_height + variance * av_height + # ) + # + # def dist_area_width(self): + # av_width = self.av_area_width + # variance = self.area_size_variance + # return self.random.randint( + # av_width - variance * av_width, + # av_width + variance * av_width + # ) \ No newline at end of file diff --git a/democracy_sim/run.py b/democracy_sim/run.py index c25c3c7..0cfa995 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -1,11 +1,11 @@ # import webbrowser import mesa from participation_model import ParticipationModel -from model_setup import model_params, canvas_element, happy_chart, wealth_chart +from model_setup import model_params, canvas_element, a_chart, wealth_chart server = mesa.visualization.ModularServer( model_cls=ParticipationModel, - visualization_elements=[canvas_element, wealth_chart, happy_chart], + 
visualization_elements=[canvas_element, wealth_chart, a_chart], name="DemocracySim", model_params=model_params, ) From c7cdd93ab0a40545c935bc6a0cfd1e3ffcb9c15e Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 5 Jul 2024 17:18:58 +0200 Subject: [PATCH 05/38] improved the initial color distribution to make it less homogeneous and improved to area distribution --- democracy_sim/app.py | 16 ++-- democracy_sim/model_setup.py | 29 ++++--- democracy_sim/participation_agent.py | 4 + democracy_sim/participation_model.py | 110 +++++++++++++++++++-------- 4 files changed, 110 insertions(+), 49 deletions(-) diff --git a/democracy_sim/app.py b/democracy_sim/app.py index b7363da..ef88be5 100644 --- a/democracy_sim/app.py +++ b/democracy_sim/app.py @@ -1,9 +1,6 @@ from mesa.experimental import JupyterViz, make_text, Slider -from participation_model import ParticipationModel -from participation_agent import VoteAgent -from numpy import arange import solara -from model_setup import participation_draw +from model_setup import * # Data visualization tools. from matplotlib.figure import Figure @@ -44,16 +41,21 @@ def space_drawer(model, agent_portrayal): model_params = { - "height": Slider("World Height", 200, 10, 1000, 10), - "width": Slider("World Width", 160, 10, 1000, 10), + "height": grid_rows, + "width": grid_cols, "num_agents": Slider("# Agents", 200, 10, 9999999, 10), "num_colors": Slider("# Colors", 4, 2, 100, 1), + "num_areas": Slider("# Areas", num_areas, 4, min(grid_cols, grid_rows)//2, 1), + "av_area_height": Slider("Av. Area Height", area_height, 2, grid_rows//2, 1), + "av_area_width": Slider("Av. 
Area Width", area_width, 2, grid_cols//2, 1), + "area_size_variance": Slider("Area Size Variance", area_var, 0.0, 1.0, 0.1), + "draw_borders": False, } page = JupyterViz( ParticipationModel, model_params, - measures=["wealth", make_text(get_agents_assets),], + #measures=["wealth", make_text(get_agents_assets),], # agent_portrayal=agent_portrayal, agent_portrayal=participation_draw, space_drawer=space_drawer, diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index cc231be..2af4b2a 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -2,6 +2,7 @@ handles the definition of the canvas parameters and the drawing of the model representation on the canvas """ +from typing import TYPE_CHECKING, cast # import webbrowser import mesa from mesa.visualization.modules import ChartModule @@ -15,8 +16,11 @@ cell_size = 10 canvas_height = grid_rows * cell_size canvas_width = grid_cols * cell_size +draw_borders = True # Colors and agents num_colors = 4 +color_adj_steps = 3 +color_heterogeneity = 0.3 num_agents = 800 # Voting area parameters num_areas = 4 @@ -64,9 +68,9 @@ def participation_draw(cell: ColorCell): This function is registered with the visualization server to be called each tick to indicate how to draw the cell in its current color. - :param cell: the cell in the simulation + :param cell: The cell in the simulation - :return: the portrayal dictionary. + :return: The portrayal dictionary. 
""" if cell is None: raise AssertionError @@ -77,6 +81,8 @@ def participation_draw(cell: ColorCell): "x": cell.row, "y": cell.col, "Color": color} # If the cell is a border cell, change its appearance + if TYPE_CHECKING: # Type hint for IDEs + cell.model = cast(ParticipationModel, cell.model) if cell.is_border_cell and cell.model.draw_borders: portrayal["Shape"] = "circle" portrayal["r"] = 0.9 # Adjust the radius to fit within the cell @@ -113,6 +119,14 @@ def participation_draw(cell: ColorCell): name="# Colors", value=num_colors, min_value=2, max_value=len(_COLORS), step=1 ), + "color_adj_steps": mesa.visualization.Slider( + name="# Color adjustment steps", value=color_adj_steps, + min_value=0, max_value=9, step=1 + ), + "heterogeneity": mesa.visualization.Slider( + name="Color-heterogeneity factor", value=color_heterogeneity, + min_value=0.0, max_value=0.9, step=0.1 + ), "num_areas": mesa.visualization.Slider( name=f"# Areas within the {grid_rows}x{grid_cols} world", step=1, value=num_areas, min_value=4, max_value=min(grid_cols, grid_rows)//2 @@ -120,23 +134,18 @@ def participation_draw(cell: ColorCell): "av_area_height": mesa.visualization.Slider( name="Av. area height", value=area_height, min_value=2, max_value=grid_rows//2, - step=10, description="Select the average height of an area" + step=1, description="Select the average height of an area" ), "av_area_width": mesa.visualization.Slider( name="Av. 
area width", value=area_width, min_value=2, max_value=grid_cols//2, - step=10, description="Select the average width of an area" + step=1, description="Select the average width of an area" ), "area_size_variance": mesa.visualization.Slider( name="Area size variance", value=area_var, min_value=0.0, max_value=1.0, step=0.1, description="Select the variance of the area sizes" ), - # "area_overlap": mesa.visualization.Slider( - # name="Area overlap", value=area_overlap, min_value=0, - # max_value=max(area_width, area_height) // 2, - # step=1, description="Select the overlap of the areas" - # ), "draw_borders": mesa.visualization.Checkbox( - name="Draw border cells", value=False + name="Draw border cells", value=draw_borders ), } diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index f0a4f2f..a39226c 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -92,6 +92,10 @@ def color(self): """The current color of this cell.""" return self._color + @color.setter + def color(self, value): + self._color = value + @property def num_agents_in_cell(self): """The number of agents in this cell.""" diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index d621a50..ffefedd 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,12 +1,14 @@ +from typing import TYPE_CHECKING, cast import random from math import sqrt import mesa -from mesa.time import StagedActivation +from mesa import Agent from participation_agent import VoteAgent, ColorCell -class Area: # TODO implement this - def __init__(self, model, height, width, size_variance=0): +class Area(Agent): + def __init__(self, unique_id, model, height, width, size_variance=0): + super().__init__(unique_id, model) if size_variance == 0: self._width = width self._height = height @@ -20,7 +22,6 @@ def __init__(self, model, height, width, size_variance=0): self.width_off = 
abs(width - self._width) self._height = int(height * h_var_factor) self.height_off = abs(height - self._height) - self.model = model self.agents = [] self.cells = [] self._idx_field = None @@ -31,6 +32,8 @@ def idx_field(self): @idx_field.setter def idx_field(self, value: tuple): + if TYPE_CHECKING: # Type hint for IDEs + self.model = cast(ParticipationModel, self.model) try: x_val, y_val = value except ValueError: @@ -89,7 +92,7 @@ class ParticipationModel(mesa.Model): def __init__(self, height, width, num_agents, num_colors, num_areas, av_area_height, av_area_width, area_size_variance, - draw_borders=False): + color_adj_steps, draw_borders, heterogeneity): super().__init__() self.height = height self.width = width @@ -111,6 +114,8 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, # SingleGrid enforces at most one agent per cell; # MultiGrid allows multiple agents to be in the same cell. self.grid = mesa.space.MultiGrid(height=height, width=width, torus=True) + self.color_adj_steps = color_adj_steps + self.heterogeneity = heterogeneity self.draw_borders = draw_borders # Create color ids for the cells for _, (row, col) in self.grid.coord_iter(): @@ -134,26 +139,46 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, cell = [a for a in agents if isinstance(a, ColorCell)][0] cell.num_agents_in_cell = cell.num_agents_in_cell + 1 # Create areas spread approximately evenly across the grid - roo_apx = int(sqrt(self.num_areas)) + roo_apx = round(sqrt(self.num_areas)) + nr_areas_x = self.grid.width // av_area_width + nr_areas_y = self.grid.width // av_area_height area_x_dist = self.grid.width // roo_apx area_y_dist = self.grid.height // roo_apx + print(f"roo_apx: {roo_apx}, nr_areas_x: {nr_areas_x}, " + f"nr_areas_y: {nr_areas_y}, area_x_dist: {area_x_dist}, " + f"area_y_dist: {area_y_dist}") + # if (abs(nr_areas_x * nr_areas_y - self.num_areas) < + # abs(roo_apx**2 - self.num_areas)): + # area_x_dist = self.grid.width // nr_areas_x 
+ # area_y_dist = self.grid.height // nr_areas_y + # print(f"## {nr_areas_x * nr_areas_y} vs {roo_apx**2}") + # x_coords = [(0 + i * area_x_dist) % width for i in range(nr_areas_x)] + # y_coords = [(0 + i * area_y_dist) % height for i in range(nr_areas_y)] x_coords = range(0, self.grid.width, area_x_dist) y_coords = range(0, self.grid.height, area_y_dist) + # Add additional areas if necessary (num_areas not a square number) + additional_x, additional_y = [], [] + missing = self.num_areas - len(x_coords) * len(y_coords) + for _ in range(missing): + additional_x.append(self.random.randrange(self.grid.width)) + additional_y.append(self.random.randrange(self.grid.height)) + a_ids = iter(range(1, self.num_areas + 1)) for x_coord in x_coords: for y_coord in y_coords: - area = Area(self, av_area_height, av_area_width, + a_id = next(a_ids, 0) + if a_id == 0: + break + area = Area(a_id, self, av_area_height, av_area_width, area_size_variance) - print(f"Area {id(area)} at {x_coord}, {y_coord}") + print(f"Area {area.unique_id} at {x_coord}, {y_coord}") area.idx_field = (x_coord, y_coord) - # area_height = self.dist_area_height() - # area_width = self.dist_area_width() - # new_area = Area(self, area_height, area_width) - # # Add agents to the area - # for agent in self.all_agents: - # if len(new_area.agents) < area_height * area_width: - # new_area.add_agent(agent) - # Add the area to the model self.area_scheduler.add(area) + for x_coord, y_coord in zip(additional_x, additional_y): + area = Area(next(a_ids), self, av_area_height, av_area_width, + area_size_variance) + print(f"++ Area {area.unique_id} at {x_coord}, {y_coord}") + area.idx_field = (x_coord, y_coord) + self.area_scheduler.add(area) # Data collector self.datacollector = mesa.DataCollector( model_reporters={ @@ -165,8 +190,19 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, # "Money": get_total_money, # "Loans": get_total_loans, }, - agent_reporters={"Wealth": lambda x: getattr(x, "assets", 
None)}, + agent_reporters={"Wealth": lambda ag: getattr(ag, "assets", None)}, ) + # Adjust the color pattern to make it less random (see color patches) + for _ in range(color_adj_steps): + print(f"Color adjustment step {_}") + for cell in self.grid.coord_iter(): + agents = cell[0] + if TYPE_CHECKING: + agents = cast(list, agents) + c = [cell for cell in agents if isinstance(cell, ColorCell)][0] + if isinstance(c, ColorCell): + most_common_color = self.mix_colors(c, self.heterogeneity) + c.color = most_common_color # Collect initial data self.datacollector.collect(self) @@ -179,18 +215,28 @@ def step(self): self.color_cell_scheduler.step() self.datacollector.collect(self) # Collect data at each step - # def dist_area_height(self): - # av_height = self.av_area_height - # variance = self.area_size_variance - # return self.random.randint( - # av_height - variance * av_height, - # av_height + variance * av_height - # ) - # - # def dist_area_width(self): - # av_width = self.av_area_width - # variance = self.area_size_variance - # return self.random.randint( - # av_width - variance * av_width, - # av_width + variance * av_width - # ) \ No newline at end of file + def mix_colors(self, cell, heterogeneity): + """ + This method is used to create a less random initial color distribution + """ + neighbor_cells = self.grid.get_neighbors((cell.row, cell.col), + moore=True, + include_center=False) + color_counts = {} + for neighbor in neighbor_cells: + if random.random() < heterogeneity: # Create heterogeneity + # Introduce a bias based on coordinates + p = (cell.row * cell.col) / (self.grid.width * self.grid.height) + if random.random() < p: + return random.choice(range(self.num_colors)) + if isinstance(neighbor, ColorCell): + color = neighbor.color + color_counts[color] = color_counts.get(color, 0) + 1 + if color_counts: + max_count = max(color_counts.values()) + most_common_colors = [color for color, count in color_counts.items() + if count == max_count] + # if random.random() 
< randomness: + # return random.choice(range(cell.model.num_colors)) # Add some randomness + return self.random.choice(most_common_colors) + return cell.color # Return the cell's own color no consensus From f12c2040153d7299cd18eae7119ac363a28b541b Mon Sep 17 00:00:00 2001 From: jurikane Date: Sat, 6 Jul 2024 16:27:42 +0200 Subject: [PATCH 06/38] improved setup - color distribution is now not uniform and areas calculate there distribution --- democracy_sim/app.py | 31 ++++++++++-- democracy_sim/model_setup.py | 19 +++++-- democracy_sim/participation_agent.py | 7 ++- democracy_sim/participation_model.py | 75 +++++++++++++++++++++++----- 4 files changed, 109 insertions(+), 23 deletions(-) diff --git a/democracy_sim/app.py b/democracy_sim/app.py index ef88be5..35a850a 100644 --- a/democracy_sim/app.py +++ b/democracy_sim/app.py @@ -43,21 +43,44 @@ def space_drawer(model, agent_portrayal): model_params = { "height": grid_rows, "width": grid_cols, + "draw_borders": False, "num_agents": Slider("# Agents", 200, 10, 9999999, 10), "num_colors": Slider("# Colors", 4, 2, 100, 1), + "color_adj_steps": Slider("# Color adjustment steps", 5, 0, 9, 1), + "heterogeneity": Slider("Color-heterogeneity factor", color_heterogeneity, 0.0, 0.9, 0.1), "num_areas": Slider("# Areas", num_areas, 4, min(grid_cols, grid_rows)//2, 1), "av_area_height": Slider("Av. Area Height", area_height, 2, grid_rows//2, 1), "av_area_width": Slider("Av. 
Area Width", area_width, 2, grid_cols//2, 1), "area_size_variance": Slider("Area Size Variance", area_var, 0.0, 1.0, 0.1), - "draw_borders": False, } + +def agent_portrayal(agent): + portrayal = participation_draw(agent) + if portrayal is None: + return {} + else: + return portrayal + +def agent_portrayal(agent): + portrayal = { + "Shape": "circle", + "Color": "red", + "Filled": "true", + "Layer": 0, + "r": 0.5, + } + return portrayal + +grid = mesa.visualization.CanvasGrid(agent_portrayal, 10, 10, 500, 500) + + page = JupyterViz( ParticipationModel, model_params, #measures=["wealth", make_text(get_agents_assets),], - # agent_portrayal=agent_portrayal, - agent_portrayal=participation_draw, - space_drawer=space_drawer, + agent_portrayal=agent_portrayal, + #agent_portrayal=participation_draw, + #space_drawer=space_drawer, ) page # noqa diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 2af4b2a..cf06138 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -27,6 +27,10 @@ area_height = grid_rows // int(sqrt(num_areas)) area_width = grid_cols // int(sqrt(num_areas)) area_var = 0.0 +# Voting rules TODO: Implement voting rules +voting_rules = ["Majority Rule", "Approval Voting", "Kemeny"] +rule_idx = 0 + _COLORS = [ "White", @@ -111,6 +115,18 @@ def participation_draw(cell: ColorCell): model_params = { "height": grid_rows, "width": grid_cols, + # "voting_rule": mesa.visualization.modules.DropDown( + # name="Voting Rule", + # options=voting_rules, + # value=voting_rules[0], # Default value + # ), + "voting_rule": mesa.visualization.Slider( + name=f"Rule index {voting_rules}", value=rule_idx, + min_value=0, max_value=2, + ), + "draw_borders": mesa.visualization.Checkbox( + name="Draw border cells", value=draw_borders + ), "num_agents": mesa.visualization.Slider( name="# Agents", value=num_agents, min_value=10, max_value=99999, step=10 @@ -145,7 +161,4 @@ def participation_draw(cell: ColorCell): name="Area size 
variance", value=area_var, min_value=0.0, max_value=1.0, step=0.1, description="Select the variance of the area sizes" ), - "draw_borders": mesa.visualization.Checkbox( - name="Draw border cells", value=draw_borders - ), } diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index a39226c..8ca7050 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -1,9 +1,8 @@ -from typing import TYPE_CHECKING, cast import mesa from mesa import Agent -if TYPE_CHECKING: - from participation_model import ParticipationModel +# if TYPE_CHECKING: +# from participation_model import ParticipationModel class VoteAgent(Agent): @@ -71,7 +70,7 @@ def __init__(self, pos, model, initial_color: int): super().__init__(pos, model) self._row = pos[0] self._col = pos[1] - self._color = initial_color + self._color = initial_color # The cell's current color (int) self._next_color = None self._num_agents_in_cell = 0 self.areas = [] diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index ffefedd..d90d5e7 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -7,7 +7,9 @@ class Area(Agent): - def __init__(self, unique_id, model, height, width, size_variance=0): + def __init__(self, unique_id, model, height, width, size_variance): + if TYPE_CHECKING: # Type hint for IDEs + model = cast(ParticipationModel, model) super().__init__(unique_id, model) if size_variance == 0: self._width = width @@ -24,7 +26,8 @@ def __init__(self, unique_id, model, height, width, size_variance=0): self.height_off = abs(height - self._height) self.agents = [] self.cells = [] - self._idx_field = None + self._idx_field = None # An indexing position of the area in the grid + self.color_distribution = None @property def idx_field(self): @@ -64,7 +67,7 @@ def idx_field(self, value: tuple): or y_area == self._height - 1): a.is_border_cell = True self.add_cell(a) # Add the cell 
to the area - + self.update_color_distribution() self._idx_field = (adjusted_x, adjusted_y) def add_agent(self, agent): @@ -77,6 +80,24 @@ def conduct_election(self, rule): # Placeholder for election logic pass + def update_color_distribution(self): + if self.color_distribution is None: + return None + color_count = {} + num_cells = len(self.cells) + for cell in self.cells: + color = cell.color + color_count[color] = color_count.get(color, 0) + 1 + for color in range(self.model.num_colors): + dist_val = color_count.get(color, 0) / num_cells # Float division + self.color_distribution[color] = dist_val + print(f"Area {self.unique_id} color " + f"distribution: {self.color_distribution}") + + def step(self) -> None: + self.update_color_distribution() + self.conduct_election(self.model.voting_rule) + def compute_collective_assets(model): sum_assets = sum(agent.assets for agent in model.all_agents) @@ -87,12 +108,27 @@ def get_num_agents(model): return len(model.all_agents) +def color_by_dst(color_distribution) -> int: + """ + This method selects a color (int) of range(len(color_distribution)) + such that, each color is selected with a probability according to the + given color_distribution array. 
+ Example: color_distribution = [0.2, 0.3, 0.5] + Color 1 is selected with a probability of 0.3 + """ + r = random.random() + for color_idx, prob in enumerate(color_distribution): + if r < prob: + return color_idx + r -= prob + + class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_areas, av_area_height, av_area_width, area_size_variance, - color_adj_steps, draw_borders, heterogeneity): + color_adj_steps, draw_borders, heterogeneity, voting_rule): super().__init__() self.height = height self.width = width @@ -117,9 +153,13 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, self.color_adj_steps = color_adj_steps self.heterogeneity = heterogeneity self.draw_borders = draw_borders + # Color distribution + self.color_dst = self.create_color_distribution(heterogeneity) + # Elections + self.voting_rule = voting_rule # Create color ids for the cells for _, (row, col) in self.grid.coord_iter(): - color = random.choice(range(num_colors)) # TODO improve this + color = color_by_dst(self.color_dst) cell = ColorCell((row, col), self, color) self.grid.place_agent(cell, (row, col)) # Add the cell color to the scheduler @@ -209,11 +249,24 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, def step(self): """Advance the model by one step.""" - # The model's step will go here for now - # this will call the step method of each agent - # and print the agent's unique_id + # Conduct elections in the areas + self.area_scheduler.step() + # Mutate the color cells according to election outcomes self.color_cell_scheduler.step() - self.datacollector.collect(self) # Collect data at each step + # Collect data for monitoring and data analysis + self.datacollector.collect(self) + + def create_color_distribution(self, heterogeneity): + """ + This method is used create a color distribution that has a bias + according to the given heterogeneity factor + """ + colors = 
range(self.num_colors) + values = [abs(random.gauss(1, heterogeneity)) for _ in colors] + # Normalize (with float division) + total = sum(values) + dst_array = [value / total for value in values] + return dst_array def mix_colors(self, cell, heterogeneity): """ @@ -228,7 +281,7 @@ def mix_colors(self, cell, heterogeneity): # Introduce a bias based on coordinates p = (cell.row * cell.col) / (self.grid.width * self.grid.height) if random.random() < p: - return random.choice(range(self.num_colors)) + return color_by_dst(self.color_dst) if isinstance(neighbor, ColorCell): color = neighbor.color color_counts[color] = color_counts.get(color, 0) + 1 @@ -236,7 +289,5 @@ def mix_colors(self, cell, heterogeneity): max_count = max(color_counts.values()) most_common_colors = [color for color, count in color_counts.items() if count == max_count] - # if random.random() < randomness: - # return random.choice(range(cell.model.num_colors)) # Add some randomness return self.random.choice(most_common_colors) return cell.color # Return the cell's own color no consensus From 9b20b0a8a659f820d1603b5174d107736d477728 Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 8 Jul 2024 18:42:12 +0200 Subject: [PATCH 07/38] adapted the color distribution again, such that the randomness apears not always toward the top right corner but randomly around a bias coordinate. And introduced a Patch-Power variable that determines how strong the color-patches effect radiates around the bias-coordinate. 
--- democracy_sim/model_setup.py | 44 ++++++++++++---- democracy_sim/participation_model.py | 75 ++++++++++++++++------------ 2 files changed, 77 insertions(+), 42 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index cf06138..461751c 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -8,7 +8,8 @@ from mesa.visualization.modules import ChartModule from participation_model import ParticipationModel from participation_agent import ColorCell, VoteAgent -from math import sqrt +import matplotlib.pyplot as plt + # Model grid parameters grid_rows = 100 # height @@ -23,9 +24,12 @@ color_heterogeneity = 0.3 num_agents = 800 # Voting area parameters -num_areas = 4 -area_height = grid_rows // int(sqrt(num_areas)) -area_width = grid_cols // int(sqrt(num_areas)) +# num_areas = 25 +# area_height = 20 # grid_rows // int(sqrt(num_areas)) +# area_width = 16 # grid_cols // int(sqrt(num_areas)) +num_areas = 16 +area_height = 25 +area_width = 20 area_var = 0.0 # Voting rules TODO: Implement voting rules voting_rules = ["Majority Rule", "Approval Voting", "Kemeny"] @@ -102,6 +106,21 @@ def participation_draw(cell: ColorCell): participation_draw, grid_cols, grid_rows, canvas_width, canvas_height ) + +# Draw bars TODO: Implement to use within the mesa framework.. 
+def draw_color_dist_bars(color_distributions): + # Setup plot + fig, ax = plt.subplots() + for i, dist in enumerate(color_distributions): + bottom = 0 + for j, part in enumerate(color_distributions): + ax.bar(i, part, bottom=bottom, color=_COLORS[j % len(_COLORS)]) + bottom += part + # Set x-ticks to be distribution indices + plt.xticks(range(len(color_distributions))) + plt.show() + + a_chart = mesa.visualization.ChartModule([{"Label": "Number of agents", "Color": "Black"}], data_collector_name='datacollector') @@ -135,13 +154,20 @@ def participation_draw(cell: ColorCell): name="# Colors", value=num_colors, min_value=2, max_value=len(_COLORS), step=1 ), - "color_adj_steps": mesa.visualization.Slider( - name="# Color adjustment steps", value=color_adj_steps, - min_value=0, max_value=9, step=1 + "color_patches_steps": mesa.visualization.Slider( + name="Patches size (# steps)", value=color_adj_steps, + min_value=0, max_value=9, step=1, + description="More steps lead to bigger color patches" + ), + "patch_power": mesa.visualization.Slider( + name="Patches power", value=1.0, min_value=0.0, max_value=3.0, + step=0.2, description="Increases the power/radius of the color patches" ), "heterogeneity": mesa.visualization.Slider( - name="Color-heterogeneity factor", value=color_heterogeneity, - min_value=0.0, max_value=0.9, step=0.1 + name="Global color distribution heterogeneity", + value=color_heterogeneity, min_value=0.0, max_value=0.9, step=0.1, + description="The higher the heterogeneity factor the greater the" + + "difference in how often some colors appear overall" ), "num_areas": mesa.visualization.Slider( name=f"# Areas within the {grid_rows}x{grid_cols} world", step=1, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index d90d5e7..e0ef18e 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -27,7 +27,7 @@ def __init__(self, unique_id, model, height, width, size_variance): 
self.agents = [] self.cells = [] self._idx_field = None # An indexing position of the area in the grid - self.color_distribution = None + self.color_distribution = [0] * model.num_colors # Initialize to 0 @property def idx_field(self): @@ -76,13 +76,15 @@ def add_agent(self, agent): def add_cell(self, cell): self.cells.append(cell) - def conduct_election(self, rule): + def conduct_election(self, voting_rule): # Placeholder for election logic pass def update_color_distribution(self): - if self.color_distribution is None: - return None + """ + This method calculates the current color distribution of the area + and saves it in the color_distribution attribute. + """ color_count = {} num_cells = len(self.cells) for cell in self.cells: @@ -108,6 +110,11 @@ def get_num_agents(model): return len(model.all_agents) +def get_area_color_distributions(model): + return {area.unique_id: area.color_distribution + for area in model.area_scheduler.agents} + + def color_by_dst(color_distribution) -> int: """ This method selects a color (int) of range(len(color_distribution)) @@ -127,8 +134,8 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_areas, - av_area_height, av_area_width, area_size_variance, - color_adj_steps, draw_borders, heterogeneity, voting_rule): + av_area_height, av_area_width, area_size_variance, patch_power, + color_patches_steps, draw_borders, heterogeneity, voting_rule): super().__init__() self.height = height self.width = width @@ -150,8 +157,10 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, # SingleGrid enforces at most one agent per cell; # MultiGrid allows multiple agents to be in the same cell. 
self.grid = mesa.space.MultiGrid(height=height, width=width, torus=True) - self.color_adj_steps = color_adj_steps self.heterogeneity = heterogeneity + # Random bias factors that affect the initial color distribution + self.vertical_bias = random.uniform(0, 1) + self.horizontal_bias = random.uniform(0, 1) self.draw_borders = draw_borders # Color distribution self.color_dst = self.create_color_distribution(heterogeneity) @@ -187,13 +196,6 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, print(f"roo_apx: {roo_apx}, nr_areas_x: {nr_areas_x}, " f"nr_areas_y: {nr_areas_y}, area_x_dist: {area_x_dist}, " f"area_y_dist: {area_y_dist}") - # if (abs(nr_areas_x * nr_areas_y - self.num_areas) < - # abs(roo_apx**2 - self.num_areas)): - # area_x_dist = self.grid.width // nr_areas_x - # area_y_dist = self.grid.height // nr_areas_y - # print(f"## {nr_areas_x * nr_areas_y} vs {roo_apx**2}") - # x_coords = [(0 + i * area_x_dist) % width for i in range(nr_areas_x)] - # y_coords = [(0 + i * area_y_dist) % height for i in range(nr_areas_y)] x_coords = range(0, self.grid.width, area_x_dist) y_coords = range(0, self.grid.height, area_y_dist) # Add additional areas if necessary (num_areas not a square number) @@ -224,25 +226,20 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, model_reporters={ "Collective assets": compute_collective_assets, "Number of agents": get_num_agents, - # "Middle Class": get_num_mid_agents, - # "Savings": get_total_savings, - # "Wallets": get_total_wallets, - # "Money": get_total_money, - # "Loans": get_total_loans, + "Area Color Distributions": get_area_color_distributions, }, agent_reporters={"Wealth": lambda ag: getattr(ag, "assets", None)}, ) # Adjust the color pattern to make it less random (see color patches) - for _ in range(color_adj_steps): + for _ in range(color_patches_steps): print(f"Color adjustment step {_}") for cell in self.grid.coord_iter(): agents = cell[0] if TYPE_CHECKING: agents = cast(list, agents) 
c = [cell for cell in agents if isinstance(cell, ColorCell)][0] - if isinstance(c, ColorCell): - most_common_color = self.mix_colors(c, self.heterogeneity) - c.color = most_common_color + most_common_color = self.color_patches(c, patch_power) + c.color = most_common_color # Collect initial data self.datacollector.collect(self) @@ -258,30 +255,42 @@ def step(self): def create_color_distribution(self, heterogeneity): """ - This method is used create a color distribution that has a bias - according to the given heterogeneity factor + This method is used to create a color distribution that has a bias + according to the given heterogeneity factor. """ colors = range(self.num_colors) values = [abs(random.gauss(1, heterogeneity)) for _ in colors] # Normalize (with float division) total = sum(values) dst_array = [value / total for value in values] + print(f"Color distribution: {dst_array}") return dst_array - def mix_colors(self, cell, heterogeneity): + def color_patches(self, cell, patch_power): """ This method is used to create a less random initial color distribution + using a similar logic to the color patches model. """ + # Introduce a bias based on coordinates + # if random.random() < heterogeneity: # TODO: remove this dep.? 
+ # Calculate the normalized position of the cell + normalized_x = cell.row / self.height + normalized_y = cell.col / self.width + # Calculate bias based on coordinates and models bias directions + bias_factor = (abs(normalized_x - self.horizontal_bias) + + abs(normalized_y - self.vertical_bias)) + #p = (cell.row * cell.col) / (self.grid.width * self.grid.height) + # The closer the cell to the bias-point, the less often it is + # to be replaced by a color chosen from the initial distribution: + if abs(random.gauss(0, patch_power)) < bias_factor: + return color_by_dst(self.color_dst) + + # Otherwise, apply the color patches logic neighbor_cells = self.grid.get_neighbors((cell.row, cell.col), moore=True, include_center=False) - color_counts = {} + color_counts = {} # Count neighbors' colors for neighbor in neighbor_cells: - if random.random() < heterogeneity: # Create heterogeneity - # Introduce a bias based on coordinates - p = (cell.row * cell.col) / (self.grid.width * self.grid.height) - if random.random() < p: - return color_by_dst(self.color_dst) if isinstance(neighbor, ColorCell): color = neighbor.color color_counts[color] = color_counts.get(color, 0) + 1 @@ -290,4 +299,4 @@ def mix_colors(self, cell, heterogeneity): most_common_colors = [color for color, count in color_counts.items() if count == max_count] return self.random.choice(most_common_colors) - return cell.color # Return the cell's own color no consensus + return cell.color # Return the cell's own color if no consensus From fdea74b1b90b49a6873c051cc683ecc0efdf523a Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 19 Jul 2024 17:36:49 +0200 Subject: [PATCH 08/38] implemented distance functions (kendall tau & spearman) with unit testing + started social_welfare_function implementation --- democracy_sim/distance_functions.py | 80 ++++++++++++ democracy_sim/model_setup.py | 2 + democracy_sim/participation_model.py | 69 ++++++++-- democracy_sim/social_welfare_functions.py | 43 ++++++ 
tests/test_distance_functions.py | 151 ++++++++++++++++++++++ 5 files changed, 337 insertions(+), 8 deletions(-) create mode 100644 democracy_sim/distance_functions.py create mode 100644 democracy_sim/social_welfare_functions.py create mode 100644 tests/test_distance_functions.py diff --git a/democracy_sim/distance_functions.py b/democracy_sim/distance_functions.py new file mode 100644 index 0000000..333a92c --- /dev/null +++ b/democracy_sim/distance_functions.py @@ -0,0 +1,80 @@ +import numpy as np + + +def kendall_tau(rank_arr_1, rank_arr_2, search_pairs, color_vec): + """ + This function calculates the kendal tau distance between two rank vektors. + (The Kendall tau rank distance is a metric that counts the number + of pairwise disagreements between two ranking lists. + The larger the distance, the more dissimilar the two lists are. + Kendall tau distance is also called bubble-sort distance). + Rank vectors hold the rank of each option (option = index). + Not to be confused with an ordering (or sequence) where the vector + holds options and the index is the rank. 
+ :param rank_arr_1: First (NumPy) array containing the ranks of each option + :param rank_arr_2: The second rank array + :param search_pairs: The pairs of indices (for efficiency) + :param color_vec: The vector of colors (for efficiency) + :return: The kendall tau distance + """ + # Get the ordering (option names being 0 to length) + ordering_1 = np.argsort(rank_arr_1) + ordering_2 = np.argsort(rank_arr_2) + # print("Ord1:", list(ordering_1), " Ord2:", list(ordering_2)) + # Create the mapping array + mapping_array = np.empty_like(ordering_1) # Empty array with same shape + mapping_array[ordering_1] = color_vec # Fill the mapping + # Use the mapping array to rename elements in ordering_2 + renamed_arr_2 = mapping_array[ordering_2] # Uses NumPys advanced indexing + # print("Ren1:",list(range(len(color_vec))), " Ren2:", list(renamed_arr_2)) + # Count inversions using precomputed pairs + kendall_distance = 0 + # inversions = [] + for i, j in search_pairs: + if renamed_arr_2[i] > renamed_arr_2[j]: + # inversions.append((renamed_arr_2[i], renamed_arr_2[j])) + kendall_distance += 1 + # print("Inversions:\n", inversions) + return kendall_distance + + +def kendall_tau_on_orderings(ordering_1, ordering_2, search_pais): + """ + This function calculates the kendal tau distance on two orderings. + An ordering holds the option names in the order of their rank (rank=index). 
+ :param ordering_1: First (NumPy) array containing ranked options + :param ordering_2: The second ordering array + :param search_pais: The pairs of indices (for efficiency) + :return: The kendall tau distance + """ + # Rename the elements to reduce the problem to counting inversions + mapping = {option: idx for idx, option in enumerate(ordering_1)} + renamed_arr_2 = np.array([mapping[option] for option in ordering_2]) + # Count inversions using precomputed pairs + kendall_distance = 0 + for i, j in search_pais: + if renamed_arr_2[i] > renamed_arr_2[j]: + kendall_distance += 1 + return kendall_distance + + +def spearman_distance(rank_arr_1, rank_arr_2): + """ + This function calculates the Spearman distance between two rank vektors. + Spearman's foot rule is a measure of the distance between ranked lists. + It is given as the sum of the absolute differences between the ranks + of the two lists. + This function is meant to work with numeric values as well. + Hence, we only assume the rank values to be comparable (e.q. normalized). + :param rank_arr_1: First (NumPy) array containing the ranks of each option + :param rank_arr_2: The second rank array + :return: The Spearman distance + """ + # TODO: remove these tests (comment out) on actual simulations + assert rank_arr_1.size == rank_arr_2.size, \ + "Rank arrays must have the same length" + if rank_arr_1.size > 0: + assert (rank_arr_1.min() == rank_arr_2.min() + and rank_arr_1.max() == rank_arr_2.max()), \ + f"Error: Sequences {rank_arr_1}, {rank_arr_2} aren't comparable." 
+ return np.sum(np.abs(rank_arr_1 - rank_arr_2)) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 461751c..39c655d 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -88,6 +88,8 @@ def participation_draw(cell: ColorCell): portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, "x": cell.row, "y": cell.col, "Color": color} + # TODO: add the areas the cell belongs to to the hover-text (the text that is shown when one hovers over the cell in the grid) + # + maybe: draw the agent number in the opposing color, + maybe draw borders nicer # If the cell is a border cell, change its appearance if TYPE_CHECKING: # Type hint for IDEs cell.model = cast(ParticipationModel, cell.model) diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index e0ef18e..ecf8b1e 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -4,6 +4,10 @@ import mesa from mesa import Agent from participation_agent import VoteAgent, ColorCell +import numpy as np +from itertools import permutations, product, combinations + +election_cost = 5 # TODO: integrate properly class Area(Agent): @@ -28,6 +32,7 @@ def __init__(self, unique_id, model, height, width, size_variance): self.cells = [] self._idx_field = None # An indexing position of the area in the grid self.color_distribution = [0] * model.num_colors # Initialize to 0 + self.voted_distribution = [0] * model.num_colors @property def idx_field(self): @@ -76,9 +81,34 @@ def add_agent(self, agent): def add_cell(self, cell): self.cells.append(cell) - def conduct_election(self, voting_rule): - # Placeholder for election logic - pass + def conduct_election(self, voting_rule, distance_func): + """ + This method holds the primary logic of the simulation by simulating + the election in the area as well as handling the payments and rewards. 
+ """ + # Ask agents to participate + participating_agents = [] + preference_profile = [] + for agent in self.agents: + if agent.ask_for_participation(area=self): + participating_agents.append(agent) + # collect the participation fee from the agents + agent.assets = agent.assets - election_cost + # Ask participating agents for their prefs + preference_profile.append(agent.vote(area=self)) # TODO use np! + # TODO: WHERE to discretize if needed? + # accumulate the prefs using the voting rule + aggreg_prefs = voting_rule(preference_profile) + # save the "elected" distribution in self.voted_distribution + winning_option = aggreg_prefs[0] + self.voted_distribution = self.model.options[winning_option] + # calculate the distance to the real distribution using distance_func + distance_factor = distance_func(self.voted_distribution, + self.color_distribution) + # calculate the rewards for the agents + # TODO + # distribute the rewards + # TODO def update_color_distribution(self): """ @@ -130,6 +160,25 @@ def color_by_dst(color_distribution) -> int: r -= prob +def create_all_options(n, include_ties=False): + """ + Creates and returns the list of all possible ranking vectors, + if specified including ties. + Rank values start from 0. + :param n: The number of items to rank (number of colors in our case) + :param include_ties: If True, rankings include ties. + :return r: A NumPy matrix containing all possible rankings of n items + """ + if include_ties: + # Create all possible combinations and sort out invalid rankings + # i.e. [1, 1, 1] or [1, 2, 2] aren't valid as no option is ranked first. 
+ r = np.array([np.array(comb) for comb in product(range(n), repeat=n) + if set(range(max(comb))).issubset(comb)]) + else: + r = np.array([np.array(p) for p in permutations(range(n))]) + return r + + class ParticipationModel(mesa.Model): """A model with some number of agents.""" @@ -166,6 +215,10 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, self.color_dst = self.create_color_distribution(heterogeneity) # Elections self.voting_rule = voting_rule + self.options = create_all_options(num_colors) + # Create search pairs once for faster iterations when comparing rankings + self.search_pairs = combinations(range(0, num_colors), 2) + self.color_vec = np.arange(num_colors) # Also for faster algorithms # Create color ids for the cells for _, (row, col) in self.grid.coord_iter(): color = color_by_dst(self.color_dst) @@ -270,21 +323,21 @@ def color_patches(self, cell, patch_power): """ This method is used to create a less random initial color distribution using a similar logic to the color patches model. + It uses a (normalized) bias coordinate to center the impact of the + color patches structures impact around. + :param cell: The cell that may change its color accordingly + :param patch_power: Like a radius of impact around the bias point. """ - # Introduce a bias based on coordinates - # if random.random() < heterogeneity: # TODO: remove this dep.? 
# Calculate the normalized position of the cell normalized_x = cell.row / self.height normalized_y = cell.col / self.width - # Calculate bias based on coordinates and models bias directions + # Calculate the distance of the cell to the bias point bias_factor = (abs(normalized_x - self.horizontal_bias) + abs(normalized_y - self.vertical_bias)) - #p = (cell.row * cell.col) / (self.grid.width * self.grid.height) # The closer the cell to the bias-point, the less often it is # to be replaced by a color chosen from the initial distribution: if abs(random.gauss(0, patch_power)) < bias_factor: return color_by_dst(self.color_dst) - # Otherwise, apply the color patches logic neighbor_cells = self.grid.get_neighbors((cell.row, cell.col), moore=True, diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py new file mode 100644 index 0000000..7b778af --- /dev/null +++ b/democracy_sim/social_welfare_functions.py @@ -0,0 +1,43 @@ +""" +Here we define the social welfare functions that can be used in the simulation. +Beware: +We assume the preference relation in the following (unconventional) way +on purpose. +pref_table: numpy matrix with one row per agent, column number is option number + and the values (each in [0,1]) are normalized ranking values. +The purpose of this is to allow for non-discrete and non-equidistant rankings. +""" +import numpy as np + + +def majority_rule(pref_table): + """ + This function implements the majority rule social welfare function. + :param pref_table: The agents preferences as np matrix + one row one agent, column number is color, + values are the guessed distribution values (not the ranking!) 
+ :return: The resulting preference ranking + """ + # Count how often each color is guessed as + first_choices = np.argmax(pref_table, axis=0) + first_choice_counts = {} + for choice in first_choices: + first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 + option_count_pairs = list(first_choice_counts.items()) + option_count_pairs.sort(key=lambda x: x[1], reverse=True) + return [pair[0] for pair in option_count_pairs] + + +# Helper functions + +def rank_arr_to_ordering(rank_arr): + """ + This function converts a rank array to an ordering array. + Rank vectors hold the rank of each option (option = index). + Ordering (or sequence) vectors hold options (rank = index). + :param rank_arr: Array of numeric values unambiguously determining a ranking + :return: The ordering determined by the rank array (options from 1 to n) + """ + tuples = enumerate(rank_arr, start=1) # (option, rank) + ordering = sorted(tuples, key=lambda x: x[1]) # Sort by rank + return ordering diff --git a/tests/test_distance_functions.py b/tests/test_distance_functions.py new file mode 100644 index 0000000..52ba71f --- /dev/null +++ b/tests/test_distance_functions.py @@ -0,0 +1,151 @@ +import unittest +from democracy_sim.distance_functions import * +import numpy as np +from itertools import combinations + + +class TestKendallTauDistance(unittest.TestCase): + + def test_kendall_tau(self): + + print("TEST kendall_tau function") + + # Test cases kendall tau (rank-vektors) + sequences = [ + ([1, 2, 3, 4], [1, 2, 3, 4], 0), # Equal sequences + ([1], [1], 0), # Single-element sequences + ([], [], 0), # Empty sequences + ([0, 3, 1, 6, 2, 5, 4], [1, 0, 3, 6, 4, 2, 5], 6), + # Because: + # convert to orderings => + # ['A','C','E','B','G','F','D'], ['B','A','F','C','E','G','D'] + # rename items s.t. 
first vector is sorted int vector => + # ['0','1','2','3','4','5','6'], ['3','0','5','1','2','4','6'] + # count inversions => + # (3, 0), (3, 1), (3, 2), (5, 1), (5, 2), (5, 4) + # => 6 inversions + # If it were an ordering instead of a rank-vektor it'd be: + # => ['A','D','B','G','C','F','E'], ['B','A','D','G','E','C','F'], + # => ['0','1','2','3','4','5','6'], ['2','0','1','3','6','4','5'] + # => 4 inversions: (2,0), (2,1), (6,4), (6,5) (like on wikipedia) + ([0, 5, 2, 3, 1, 4], [5, 0, 3, 2, 4, 1], 15), + # ordering => ['A','E','C','D','F','B'], ['B','F','D','C','E','A'] + # rename => ['0','1','2','3','4','5'], ['5','4','3','2','1','0'] + # count => (5, 4), (5, 3), (5, 2), (5, 1), (5, 0), (4, 3), (4, 2), + # (4, 1), (4, 0), (3, 2), (3, 1), (3, 0), (2, 1), (2, 0), (1, 0) + # => 15 inversions + # ([1, 2, 3], [4, 5, 6], 0), # No common elements + # Again, if it were an ordering instead of a rank-vektor it'd be: + # => ['A','F','C','D','B','E'], ['F','A','D','C','E','B'], + # => ['0','1','2','3','4','5'], ['1','0','3','2','5','4'] + # => 3 inversions: (1,0), (3,2), (5,4) + ([2, 3, 1], [2, 1, 3], 3), + # C, A, B -- B, A, C + # 3, 1, 2 (ordering but named with ints) + # 0, 1, 2 -- 2, 1, 0 + # => inversions: (2,1), (2,0), (1,0) => 3 inversions + ([3, 1, 2], [2, 1, 3], 1), + # B, C, A -- B, A, C + # 0, 1, 2 -- 0, 2, 1 + # => inversions: (2,1) => 1 inversion + ([0.5, 1.0, 0.0], [0.5, 0.0, 1.0], 3), # Using floats + ([0.5, 1.0, 0.0], [0.2, 0.1, 0.8], 3), # Using floats but not equal + # Ties are problematic as they break the metric property here + # see 10.1137/05063088X + ([1, 2, 2, 3], [2, 1, 3, 2], 2), # Testing orderings with *ties* + # 'A'>'B'='C'>'D' - 'B'>'A'='D'>'C' + # Ord1: [0, 1, 2, 3] Ord2: [1, 0, 3, 2] + # Ren1: [0, 1, 2, 3] Ren2: [1, 0, 3, 2] + # 2 inversions: [(1, 0), (3, 2)] + ([2, 1, 1, 1, 3], [2, 2, 3, 3, 1], 7), # more ties + # 'B'='C'='D'>'A'>'E' - 'E'>'A'='B'>'C'='D' + # Ord1: [1, 2, 3, 0, 4] Ord2: [4, 0, 1, 2, 3] + # Ren1: [0, 1, 2, 3, 4] Ren2: [4, 3, 
0, 1, 2] + # 7 Inversions: + # [(4, 3), (4, 0), (4, 1), (4, 2), (3, 0), (3, 1), (3, 2)] + ([3, 1, 1, 2, 2, 1, 3], [2, 2, 1, 3, 1, 1, 1], 10), # more ties + # 'B'='C'='F'>'D'='E'>'A'='G' - 'C'='E'='F'='G'>'A'='B'>'D' + # Ord1: [1, 2, 5, 3, 4, 0, 6] - Ord2: [2, 4, 5, 6, 0, 1, 3] + # Ren1: [0, 1, 2, 3, 4, 5, 6] - Ren2: [1, 4, 2, 6, 5, 0, 3] + ([0.1, 0.2, 0.2, 0.3], [0.2, 0.01, 0.9, 0.2], 2), + # Ties with floats + ] + + for seq1, seq2, expected in sequences: + print(f"# Next #\nSeq1: {seq1}, Seq2: {seq2}") + n = len(seq1) + assert n == len(seq2), \ + "Test failed: sequences must have the same length" + pairs = combinations(range(0, n), 2) + item_vec = np.arange(n) + # assert set(np.unique(seq1)) == set(np.unique(seq2)), \ + # "Test failed: sequences must have the same elements" + d = kendall_tau(np.array(seq1), np.array(seq2), pairs, item_vec) + print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: {expected}, Got: {d}") + assert d == expected, f"Test failed for input {seq1}, {seq2}" + + def test_kendall_tau_on_orderings(self): + + print("\nTEST kendall_tau_on_orderings function\n") + + # Test cases kendall tau (on orderings) + ordering_seqs = [ + ([1, 2, 3, 4], [1, 2, 3, 4], 0), # Equal sequences + ([1], [1], 0), # Single-element sequences + ([], [], 0), # Empty sequences + ([0, 3, 1, 6, 2, 5, 4], [1, 0, 3, 6, 4, 2, 5], 4), + # Because: + # => ['A','D','B','G','C','F','E'], ['B','A','D','G','E','C','F'], + # => ['0','1','2','3','4','5','6'], ['2','0','1','3','6','4','5'] + # => 4 inversions: (2,0), (2,1), (6,4), (6,5) (like on wikipedia) + ([0, 5, 2, 3, 1, 4], [5, 0, 3, 2, 4, 1], 3), + # => ['A','F','C','D','B','E'], ['F','A','D','C','E','B'], + # => ['0','1','2','3','4','5'], ['1','0','3','2','5','4'] + # => 3 inversions: (1,0), (3,2), (5,4) + ([2, 3, 1], [2, 1, 3], 1), + # B, C, A -- B, A, C + # 0, 1, 2 -- 0, 2, 1 + # => inversions: (2,1) => 1 inversion + ([3, 1, 2], [2, 1, 3], 3), + # C, A, B -- B, A, C + # 0, 1, 2 -- 2, 1, 0 + # => inversions: (2,1), (2,0), (1,0) => 3 
inversions + ] + + for seq1, seq2, expected in ordering_seqs: + print(f"# Next #\nSeq1: {seq1}, Seq2: {seq2}") + n = len(seq1) + assert n == len(seq2), \ + "Test failed: sequences must have the same length" + pairs = combinations(range(0, n), 2) + # Test the ordering version + d = kendall_tau_on_orderings(np.array(seq1), np.array(seq2), pairs) + print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: {expected}, Got: {d}") + assert d == expected, f"Test failed for input {seq1}, {seq2}" + + +class TestSpearmanDistance(unittest.TestCase): + + def test_spearman_distance(self): + + print("\nTEST spearman_distance function\n") + + sequences = [ + ([1, 2, 3, 4], [1, 2, 3, 4], 0), # Equal sequences + ([1], [1], 0), # Single-element sequences + ([], [], 0), # Empty sequences + ([1, 2, 3], [3, 2, 1], 4), # Reversed sequences + ([1, 2, 3], [2, 3, 1], 4), # Different ranks + ([1, 1, 1], [1, 1, 1], 0), # Sequences with ties + ([1, 2, 2, 3], [2, 1, 3, 2], 4), # Sequences with ties + ([0.0, 0.2, 1.0], [1.0, 0.2, 0.0], 2), # Reversed (using floats) + ([0.5, 1.0, 0.0], [0.5, 0.0, 1.0], 2.0), # Using floats + ([0.5, 1.0, 0.0], [0.2, 0.0, 1.0], 2.3), # Non-equidistant ranks + # ([0.5, 1.0, 0.0], [0.2, 0.1, 0.8], 2.3), # Non-normalized + # Using floats but not equal + ] + + for seq1, seq2, expected in sequences: + distance = spearman_distance(np.array(seq1), np.array(seq2)) + self.assertEqual(distance, expected, + f"Test failed for input {seq1}, {seq2}") From 0a947ff5274e86685aabad8f56c275a876461521 Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 22 Jul 2024 00:52:06 +0200 Subject: [PATCH 09/38] started to implement the election but many things are needed for it - added (or started to add) many features, some with unit-tests (not all ready) - added many TODOs --- democracy_sim/app.py | 2 +- democracy_sim/model_setup.py | 100 +++++++---- democracy_sim/participation_agent.py | 109 +++++++++++- democracy_sim/participation_model.py | 201 +++++++++++++++------- democracy_sim/run.py | 4 +- 
democracy_sim/social_welfare_functions.py | 46 +++-- tests/test_participation_agent.py | 49 ++++++ tests/test_participation_model.py | 66 +++++++ 8 files changed, 460 insertions(+), 117 deletions(-) create mode 100644 tests/test_participation_agent.py create mode 100644 tests/test_participation_model.py diff --git a/democracy_sim/app.py b/democracy_sim/app.py index 35a850a..f726047 100644 --- a/democracy_sim/app.py +++ b/democracy_sim/app.py @@ -11,7 +11,7 @@ def get_agents_assets(model: ParticipationModel): """ all_assets = list() # Store the results - for agent in model.all_agents: + for agent in model.voting_agents: all_assets.append(agent.assets) return f"Agents wealth: {all_assets}" diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 39c655d..77f470a 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -3,37 +3,53 @@ the drawing of the model representation on the canvas """ from typing import TYPE_CHECKING, cast -# import webbrowser +from math import comb import mesa from mesa.visualization.modules import ChartModule -from participation_model import ParticipationModel -from participation_agent import ColorCell, VoteAgent +from democracy_sim.participation_model import ParticipationModel +from democracy_sim.participation_agent import ColorCell, VoteAgent import matplotlib.pyplot as plt +from democracy_sim.distance_functions import * +from democracy_sim.social_welfare_functions import * +# Parameters -# Model grid parameters +############# +# Elections # +############# +election_costs = 5 +# Voting rules (see social_welfare_functions.py) +voting_rules = [majority_rule, approval_voting] +rule_idx = 0 +# Distance functions (see distance_functions.py) +distance_functions = [spearman_distance, kendall_tau] +distance_idx = 0 +#################### +# Model parameters # +#################### +num_agents = 800 +# Colors +num_colors = 4 +color_patches_steps = 3 +patch_power = 1.0 +color_heterogeneity = 0.3 +# Voting 
Agents +num_personality_colors = 2 +num_personalities = comb(num_colors, num_personality_colors) +# Grid grid_rows = 100 # height grid_cols = 80 # width cell_size = 10 canvas_height = grid_rows * cell_size canvas_width = grid_cols * cell_size draw_borders = True -# Colors and agents -num_colors = 4 -color_adj_steps = 3 -color_heterogeneity = 0.3 -num_agents = 800 -# Voting area parameters -# num_areas = 25 -# area_height = 20 # grid_rows // int(sqrt(num_areas)) -# area_width = 16 # grid_cols // int(sqrt(num_areas)) +# Voting Areas num_areas = 16 -area_height = 25 -area_width = 20 -area_var = 0.0 -# Voting rules TODO: Implement voting rules -voting_rules = ["Majority Rule", "Approval Voting", "Kemeny"] -rule_idx = 0 +av_area_height = 25 +# area_height = grid_rows // int(sqrt(num_areas)) +av_area_width = 20 +# area_width = grid_cols // int(sqrt(num_areas)) +area_size_variance = 0.0 _COLORS = [ @@ -97,7 +113,7 @@ def participation_draw(cell: ColorCell): portrayal["Shape"] = "circle" portrayal["r"] = 0.9 # Adjust the radius to fit within the cell if color == "White": - portrayal["Color"] = "Grey" + portrayal["Color"] = "LightGrey" if cell.num_agents_in_cell > 0: portrayal["text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" @@ -136,18 +152,21 @@ def draw_color_dist_bars(color_distributions): model_params = { "height": grid_rows, "width": grid_cols, - # "voting_rule": mesa.visualization.modules.DropDown( - # name="Voting Rule", - # options=voting_rules, - # value=voting_rules[0], # Default value - # ), - "voting_rule": mesa.visualization.Slider( - name=f"Rule index {voting_rules}", value=rule_idx, - min_value=0, max_value=2, - ), "draw_borders": mesa.visualization.Checkbox( name="Draw border cells", value=draw_borders ), + "voting_rule": mesa.visualization.Slider( + name=f"Rule index {[r.__name__ for r in voting_rules]}", value=rule_idx, + min_value=0, max_value=len(voting_rules)-1, + ), + "distance_func": mesa.visualization.Slider( + name=f"Rule 
index {[f.__name__ for f in distance_functions]}", + value=distance_idx, min_value=0, max_value=len(distance_functions)-1, + ), + "election_costs": mesa.visualization.Slider( + name="Election costs", value=election_costs, min_value=0, max_value=100, + step=1, description="The costs for participating in an election" + ), "num_agents": mesa.visualization.Slider( name="# Agents", value=num_agents, min_value=10, max_value=99999, step=10 @@ -156,13 +175,22 @@ def draw_color_dist_bars(color_distributions): name="# Colors", value=num_colors, min_value=2, max_value=len(_COLORS), step=1 ), + "num_personalities": mesa.visualization.Slider( + name="# of different personalities", value=num_personalities, + min_value=1, max_value=comb(num_colors, num_personality_colors), step=1 + ), + "num_personality_colors": mesa.visualization.Slider( + name="# colors determining the personality", + value=num_personality_colors, + min_value=1, max_value=num_colors-1, step=1 + ), "color_patches_steps": mesa.visualization.Slider( - name="Patches size (# steps)", value=color_adj_steps, + name="Patches size (# steps)", value=color_patches_steps, min_value=0, max_value=9, step=1, description="More steps lead to bigger color patches" ), "patch_power": mesa.visualization.Slider( - name="Patches power", value=1.0, min_value=0.0, max_value=3.0, + name="Patches power", value=patch_power, min_value=0.0, max_value=3.0, step=0.2, description="Increases the power/radius of the color patches" ), "heterogeneity": mesa.visualization.Slider( @@ -176,17 +204,19 @@ def draw_color_dist_bars(color_distributions): value=num_areas, min_value=4, max_value=min(grid_cols, grid_rows)//2 ), "av_area_height": mesa.visualization.Slider( - name="Av. area height", value=area_height, + name="Av. area height", value=av_area_height, min_value=2, max_value=grid_rows//2, step=1, description="Select the average height of an area" ), "av_area_width": mesa.visualization.Slider( - name="Av. area width", value=area_width, + name="Av. 
area width", value=av_area_width, min_value=2, max_value=grid_cols//2, step=1, description="Select the average width of an area" ), "area_size_variance": mesa.visualization.Slider( - name="Area size variance", value=area_var, min_value=0.0, max_value=1.0, - step=0.1, description="Select the variance of the area sizes" + name="Area size variance", value=area_size_variance, + # TODO there is a division by zero error for value=1.0 - check this + min_value=0.0, max_value=0.99, step=0.1, + description="Select the variance of the area sizes" ), } diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 8ca7050..13d9004 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -1,20 +1,48 @@ -import mesa -from mesa import Agent +from typing import TYPE_CHECKING, cast -# if TYPE_CHECKING: -# from participation_model import ParticipationModel +import numpy as np +from mesa import Agent, Model +from numpy import random +if TYPE_CHECKING: # Type hint for IDEs + from democracy_sim.participation_model import ParticipationModel + + +def combine_and_normalize(arr_1, arr_2, factor): + # Ensure f is between 0 and 1 TODO: remove this on simulations to speed up + if not (0 <= factor <= 1): + raise ValueError("Factor f must be between 0 and 1") + + # Linear combination + res = factor * arr_1 + (1 - factor) * arr_2 + print(f"un-normalized result: {res}") # TODO rm + # Normalize the result + res_min = res.min() + return (res - res_min) / (res.max() - res_min + 1e-8) class VoteAgent(Agent): """An agent that has limited knowledge and resources and can decide to use them to participate in elections.""" - def __init__(self, unique_id, pos, model: mesa.Model, assets=1): + def __init__(self, unique_id, pos, model, personality, assets=1): + """ + Create a new agent. + :param unique_id: The unique identifier of the agent. + :param pos: The position of the agent in the models' grid. 
+ :type pos: tuple + :param model: The simulation model of which the agent is part of. + :type model: ParticipationModel + :param personality: Represents the agent's preferences among colors. + :type personality: np.ndarray + :param assets: The wealth/assets/motivation of the agent. + """ # Pass the parameters to the parent class. super().__init__(unique_id, model) self._row = pos[0] self._col = pos[1] self._assets = assets + self.personality = personality + self.known_cells = [] # ColorCell objects the agent knows (knowledge) @property def col(self): @@ -39,6 +67,61 @@ def assets(self, value): def assets(self): del self._assets + def ask_for_participation(self, area): + """ + The agent decides + whether to participate in the upcoming election of a given area. + :param area: The area in which the election takes place. + :return: True if the agent decides to participate, False otherwise + """ + print("Agent", self.unique_id, "decides whether to participate", + "in election of area", area.unique_id) + # TODO Implement this (is to be decided upon a learned decision tree) + return random.choice([True, False]) + + def decide_altruism_factor(self, area): + """ + Uses a trained decision tree to decide on the altruism factor. + """ + # TODO Implement this (is to be decided upon a learned decision tree) + # This part is important - also for monitoring - save/plot a_factors + a_factor = random.uniform(0.0, 1.0) + print(f"{area}:", "Agent", self.unique_id, "altruism factor:", a_factor) + return a_factor + + def compute_assumed_opt_dist(self, area): + """ + Computes a color distribution that the agent assumes to be an optimal + choice in any election (regardless of whether it exists as a real option + to vote for or not). It takes "altruistic" concepts into consideration. 
+ """
+ # Compute the "altruism_factor" via a decision tree
+ a_factor = self.decide_altruism_factor(area)
+ # compute the preference ranking vector as a mix between the agent's
+ # own preferences/personality traits and the estimated real distribution
+ est_dist = self.estimate_real_distribution(area)
+ ass_opt = combine_and_normalize(self.personality, est_dist, a_factor)
+ return ass_opt
+
+ def vote(self, area, voting_rule):
+ """
+ The agent votes in the election of a given area,
+ i.e., she returns a preference ranking vector over all options.
+ The available options are set in the model.
+ :param area: The area in which the election takes place.
+ """
+ # TODO Implement this (is to be decided upon a learned decision tree)
+ # Compute the color distribution that is assumed to be the best choice.
+ est_best_dist = self.compute_assumed_opt_dist(area)
+ # make sure that r is normalized!
+ # (r.min()=0.0 and r.max()=1.0 and all vals x are within [0.0, 1.0]!)
+ ##############
+ if TYPE_CHECKING: # Type hint for IDEs
+ self.model = cast(ParticipationModel, self.model)
+ r = self.model.options[random.choice(self.model.options.shape[0])]
+ print("Agent", self.unique_id, "voted:", r)
+ return r
+
 # def step(self):
 #     # Verify agent has some wealth
 #     if self.wealth > 0:
@@ -56,9 +139,19 @@ def assets(self):
 #                 include_center=False)
 #             new_position = self.random.choice(possible_steps)
 #             self.model.grid.move_agent(self, new_position)
-
-
-class ColorCell(mesa.Agent):
+ def estimate_real_distribution(self, area):
+ """
+ The agent estimates the real color distribution in the area based on
+ her own knowledge (self.known_cells).
+ """ + relevant_cells = area.filter_cells(self.known_cells) + known_colors = np.array([cell.color for cell in relevant_cells]) + unique, counts = np.unique(known_colors, return_counts=True) + distribution = np.zeros(self.model.num_colors) + distribution[unique] = counts / known_colors.size + return distribution + +class ColorCell(Agent): """ Represents a cell's color """ diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index ecf8b1e..bb04a75 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -3,15 +3,21 @@ from math import sqrt import mesa from mesa import Agent -from participation_agent import VoteAgent, ColorCell +from democracy_sim.participation_agent import VoteAgent, ColorCell import numpy as np from itertools import permutations, product, combinations -election_cost = 5 # TODO: integrate properly - class Area(Agent): def __init__(self, unique_id, model, height, width, size_variance): + """ + Create a new area. + :param unique_id: The unique identifier of the area. + :param model: The simulation model of which the area is part of. + :param height: The average height of the area (see size_variance). + :param width: The average width of the area (see size_variance). + :param size_variance: A variance factor applied to height and width. + """ if TYPE_CHECKING: # Type hint for IDEs model = cast(ParticipationModel, model) super().__init__(unique_id, model) @@ -39,11 +45,17 @@ def idx_field(self): return self._idx_field @idx_field.setter - def idx_field(self, value: tuple): + def idx_field(self, pos: tuple): + """ + This method sets the areas indexing-field (top-left cell coordinate) + which determines which cells and agents on the grid belong to the area. + The cells and agents are added to the area's lists of cells and agents. + :param pos: (x, y) representing the areas top-left coordinates. 
+ """ if TYPE_CHECKING: # Type hint for IDEs self.model = cast(ParticipationModel, self.model) try: - x_val, y_val = value + x_val, y_val = pos except ValueError: raise ValueError("The idx_field must be a tuple") if x_val < 0 or x_val >= self.model.width: @@ -87,20 +99,22 @@ def conduct_election(self, voting_rule, distance_func): the election in the area as well as handling the payments and rewards. """ # Ask agents to participate + # TODO: WHERE to discretize if needed? participating_agents = [] preference_profile = [] for agent in self.agents: if agent.ask_for_participation(area=self): participating_agents.append(agent) # collect the participation fee from the agents - agent.assets = agent.assets - election_cost + agent.assets = agent.assets - self.model.election_costs # Ask participating agents for their prefs - preference_profile.append(agent.vote(area=self)) # TODO use np! - # TODO: WHERE to discretize if needed? - # accumulate the prefs using the voting rule - aggreg_prefs = voting_rule(preference_profile) - # save the "elected" distribution in self.voted_distribution - winning_option = aggreg_prefs[0] + preference_profile.append(agent.vote(area=self)) + preference_profile = np.array(preference_profile) + # Aggregate the prefs using the voting rule + # TODO: How to deal with ties?? (Have to be fulfill neutrality!!) 
+ aggregated_prefs = voting_rule(preference_profile) + # Save the "elected" distribution in self.voted_distribution + winning_option = aggregated_prefs[0] self.voted_distribution = self.model.options[winning_option] # calculate the distance to the real distribution using distance_func distance_factor = distance_func(self.voted_distribution, @@ -128,16 +142,16 @@ def update_color_distribution(self): def step(self) -> None: self.update_color_distribution() - self.conduct_election(self.model.voting_rule) + self.conduct_election(self.model.voting_rule, self.model.distance_func) def compute_collective_assets(model): - sum_assets = sum(agent.assets for agent in model.all_agents) + sum_assets = sum(agent.assets for agent in model.voting_agents) return sum_assets def get_num_agents(model): - return len(model.all_agents) + return len(model.voting_agents) def get_area_color_distributions(model): @@ -162,7 +176,7 @@ def color_by_dst(color_distribution) -> int: def create_all_options(n, include_ties=False): """ - Creates and returns the list of all possible ranking vectors, + Creates and returns a matrix (an array of all possible ranking vectors), if specified including ties. Rank values start from 0. :param n: The number of items to rank (number of colors in our case) @@ -179,17 +193,45 @@ def create_all_options(n, include_ties=False): return r +def create_personality(num_colors, num_personality_colors): + """ + Creates and returns a list of 'personalities' that are to be assigned + to agents. Each personality is a NumPy array of length 'num_colors' + but it is not a full ranking vector since the number of colors influencing + the personality is limited. The array is therefore not normalized. + White (color 0) is never part of a personality. + :param num_colors: The number of colors in the simulation. + :param num_personality_colors: Number of colors influencing the personality. 
+ """ + # TODO add unit tests for this function + personality = np.random.randint(0, 100, num_colors) # TODO low=0 or 1? + # Save the sum to "normalize" the values later (no real normalization) + sum_value = sum(personality) + 1e-8 # To avoid division by zero + # Select only as many features as needed (num_personality_colors) + to_del = num_colors - num_personality_colors # How many to be deleted + if to_del > 0: + # The 'replace=False' ensures that indexes aren't chosen twice + indices = np.random.choice(num_colors, to_del, replace=False) + personality[indices] = 0 # 'Delete' the values + personality[0] = 0 # White is never part of the personality + # "Normalize" the rest of the values + personality = personality / sum_value + return personality + + class ParticipationModel(mesa.Model): """A model with some number of agents.""" - def __init__(self, height, width, num_agents, num_colors, num_areas, - av_area_height, av_area_width, area_size_variance, patch_power, - color_patches_steps, draw_borders, heterogeneity, voting_rule): + def __init__(self, height, width, num_agents, num_colors, num_personalities, + num_personality_colors, + num_areas, av_area_height, av_area_width, area_size_variance, + patch_power, color_patches_steps, draw_borders, heterogeneity, + voting_rule, distance_func, election_costs): super().__init__() self.height = height self.width = width self.num_agents = num_agents - self.all_agents = [] + self.voting_agents = [] self.num_colors = num_colors # Area variables self.num_areas = num_areas @@ -211,39 +253,74 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, self.vertical_bias = random.uniform(0, 1) self.horizontal_bias = random.uniform(0, 1) self.draw_borders = draw_borders - # Color distribution + # Color distribution (global) self.color_dst = self.create_color_distribution(heterogeneity) # Elections + self.election_costs = election_costs self.voting_rule = voting_rule + self.distance_func = distance_func self.options = 
create_all_options(num_colors) # Create search pairs once for faster iterations when comparing rankings - self.search_pairs = combinations(range(0, num_colors), 2) - self.color_vec = np.arange(num_colors) # Also for faster algorithms - # Create color ids for the cells + self.search_pairs = combinations(range(0, self.options.size), 2) # TODO check if correct! + self.option_vec = np.arange(self.options.size) # Also to speed up + self.initialize_color_cells() + # Create agents + self.num_personalities = num_personalities + self.num_personality_colors = num_personality_colors + self.personalities = self.create_personalities() + self.initialize_voting_agents() + # Create areas + self.av_area_width = av_area_width + self.av_area_height = av_area_height + self.area_size_variance = area_size_variance + self.initialize_areas() + # Data collector + self.datacollector = mesa.DataCollector( + model_reporters={ + "Collective assets": compute_collective_assets, + "Number of agents": get_num_agents, + "Area Color Distributions": get_area_color_distributions, + }, + agent_reporters={"Wealth": lambda ag: getattr(ag, "assets", None)}, + ) + # Adjust the color pattern to make it less random (see color patches) + self.adjust_color_pattern(color_patches_steps, patch_power) + # Collect initial data + self.datacollector.collect(self) + + def initialize_color_cells(self): + # Create a color cell for each cell in the grid for _, (row, col) in self.grid.coord_iter(): + # The colors are chosen by a predefined color distribution color = color_by_dst(self.color_dst) + # Create the cell cell = ColorCell((row, col), self, color) + # Add it to the grid self.grid.place_agent(cell, (row, col)) - # Add the cell color to the scheduler + # Add the color cell to the scheduler self.color_cell_scheduler.add(cell) - # Create agents + + def initialize_voting_agents(self): for a_id in range(self.num_agents): # Get a random position x = self.random.randrange(self.width) y = self.random.randrange(self.height) 
- a = VoteAgent(a_id, (x, y), self) - # Add the agent to the models' agents and scheduler - self.all_agents.append(a) - # Place at a random cell + personality = random.choice(self.personalities) + a = VoteAgent(a_id, (x, y), self, personality) + # Add the agent to the models' agent list + self.voting_agents.append(a) + # Place the agent on the grid self.grid.place_agent(a, (x, y)) - # Count the agent at the chosen cell + # Count +1 at the cell the agent is placed at TODO improve? agents = self.grid.get_cell_list_contents([(x, y)]) cell = [a for a in agents if isinstance(a, ColorCell)][0] cell.num_agents_in_cell = cell.num_agents_in_cell + 1 + + def initialize_areas(self): # Create areas spread approximately evenly across the grid roo_apx = round(sqrt(self.num_areas)) - nr_areas_x = self.grid.width // av_area_width - nr_areas_y = self.grid.width // av_area_height + nr_areas_x = self.grid.width // self.av_area_width + nr_areas_y = self.grid.width // self.av_area_height area_x_dist = self.grid.width // roo_apx area_y_dist = self.grid.height // roo_apx print(f"roo_apx: {roo_apx}, nr_areas_x: {nr_areas_x}, " @@ -263,38 +340,31 @@ def __init__(self, height, width, num_agents, num_colors, num_areas, a_id = next(a_ids, 0) if a_id == 0: break - area = Area(a_id, self, av_area_height, av_area_width, - area_size_variance) + area = Area(a_id, self, self.av_area_height, + self.av_area_width, self.area_size_variance) print(f"Area {area.unique_id} at {x_coord}, {y_coord}") area.idx_field = (x_coord, y_coord) self.area_scheduler.add(area) for x_coord, y_coord in zip(additional_x, additional_y): - area = Area(next(a_ids), self, av_area_height, av_area_width, - area_size_variance) + area = Area(next(a_ids), self, self.av_area_height, + self.av_area_width, self.area_size_variance) print(f"++ Area {area.unique_id} at {x_coord}, {y_coord}") area.idx_field = (x_coord, y_coord) self.area_scheduler.add(area) - # Data collector - self.datacollector = mesa.DataCollector( - model_reporters={ 
- "Collective assets": compute_collective_assets, - "Number of agents": get_num_agents, - "Area Color Distributions": get_area_color_distributions, - }, - agent_reporters={"Wealth": lambda ag: getattr(ag, "assets", None)}, - ) - # Adjust the color pattern to make it less random (see color patches) - for _ in range(color_patches_steps): - print(f"Color adjustment step {_}") - for cell in self.grid.coord_iter(): - agents = cell[0] - if TYPE_CHECKING: - agents = cast(list, agents) - c = [cell for cell in agents if isinstance(cell, ColorCell)][0] - most_common_color = self.color_patches(c, patch_power) - c.color = most_common_color - # Collect initial data - self.datacollector.collect(self) + + def create_personalities(self, n=None): + """ + TODO ensure that we end up with n personalities (with unique orderings) + maybe have to use orderings and convert them + """ + if n is None: + n = self.num_personalities + personalities = [] + for _ in range(n): + personality = create_personality(self.num_colors, + self.num_personality_colors) + personalities.append(personality) # TODO may not be unique rankings.. + return personalities def step(self): """Advance the model by one step.""" @@ -306,17 +376,30 @@ def step(self): # Collect data for monitoring and data analysis self.datacollector.collect(self) + def adjust_color_pattern(self, color_patches_steps, patch_power): + """Adjusting the color pattern to make it less random/predictable.""" + for _ in range(color_patches_steps): + print(f"Color adjustment step {_}") + for cell in self.grid.coord_iter(): + agents = cell[0] + if TYPE_CHECKING: + agents = cast(list, agents) + c = [cell for cell in agents if isinstance(cell, ColorCell)][0] + most_common_color = self.color_patches(c, patch_power) + c.color = most_common_color + def create_color_distribution(self, heterogeneity): """ This method is used to create a color distribution that has a bias according to the given heterogeneity factor. 
+ :param heterogeneity: Factor used as sigma in 'random.gauss'. """ colors = range(self.num_colors) values = [abs(random.gauss(1, heterogeneity)) for _ in colors] # Normalize (with float division) total = sum(values) dst_array = [value / total for value in values] - print(f"Color distribution: {dst_array}") + print(f"Color distribution: {dst_array}") # TODO rm print return dst_array def color_patches(self, cell, patch_power): diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 0cfa995..6b2bfaf 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -1,7 +1,7 @@ # import webbrowser import mesa -from participation_model import ParticipationModel -from model_setup import model_params, canvas_element, a_chart, wealth_chart +from democracy_sim.participation_model import ParticipationModel +from democracy_sim.model_setup import model_params, canvas_element, a_chart, wealth_chart server = mesa.visualization.ModularServer( model_cls=ParticipationModel, diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index 7b778af..75a6f7c 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -13,7 +13,24 @@ def majority_rule(pref_table): """ This function implements the majority rule social welfare function. - :param pref_table: The agents preferences as np matrix + :param pref_table: The agent's preferences as a NumPy matrix + containing the normalized ranking vectors of all agents. 
+ :return: The resulting preference ranking + """ + # Count how often each ordering appears + first_choices = np.argmax(pref_table, axis=0) + first_choice_counts = {} + for choice in first_choices: + first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 + option_count_pairs = list(first_choice_counts.items()) + option_count_pairs.sort(key=lambda x: x[1], reverse=True) + return [pair[0] for pair in option_count_pairs] + + +def majority_rule_2(pref_table): + """ + This function implements the majority rule social welfare function. + :param pref_table: The agent's preferences as a NumPy matrix one row one agent, column number is color, values are the guessed distribution values (not the ranking!) :return: The resulting preference ranking @@ -28,16 +45,21 @@ def majority_rule(pref_table): return [pair[0] for pair in option_count_pairs] -# Helper functions - -def rank_arr_to_ordering(rank_arr): +def approval_voting(pref_table): + # TODO !!!!!!!!!!!!!!!!! + # => How should pref_tables be like / how do they need to be like (options=colors or options=combinations)??? + # !!!!!!!!!!!!!!! + # ANSWER: + # they have to span the options - not the colors """ - This function converts a rank array to an ordering array. - Rank vectors hold the rank of each option (option = index). - Ordering (or sequence) vectors hold options (rank = index). - :param rank_arr: Array of numeric values unambiguously determining a ranking - :return: The ordering determined by the rank array (options from 1 to n) + This function implements the approval voting social welfare function. + :param pref_table: The agent's preferences as a NumPy matrix + containing the normalized ranking vectors of all agents. 
+ :return: The resulting preference ranking """ - tuples = enumerate(rank_arr, start=1) # (option, rank) - ordering = sorted(tuples, key=lambda x: x[1]) # Sort by rank - return ordering + # # Count how often each option is approved + # approval_counts = np.sum(pref_table, axis=0) + # option_count_pairs = list(enumerate(approval_counts)) + # option_count_pairs.sort(key=lambda x: x[1], reverse=True) + # return [pair[0] for pair in option_count_pairs] + pass diff --git a/tests/test_participation_agent.py b/tests/test_participation_agent.py new file mode 100644 index 0000000..16c6c41 --- /dev/null +++ b/tests/test_participation_agent.py @@ -0,0 +1,49 @@ +import unittest +from test_participation_model import * +from democracy_sim.participation_model import ParticipationModel, Area +from democracy_sim.participation_agent import VoteAgent, combine_and_normalize +import numpy as np +import random + + +class TestVotingAgent(unittest.TestCase): + + def setUp(self): + test_model = TestParticipationModel() + test_model.setUp() + self.model = test_model.model + personality = np.zeros(self.model.num_colors) + personality[0] = 0.3 + personality[1] = 0.7 + self.agent = VoteAgent(num_agents + 1, (0, 0), self.model, + personality=personality, assets=25) + + def test_combine_and_normalize_rank_arrays(self): + # TODO more test-cases and include estimated results + print("Test function combine_and_normalize_estimates") + a = np.array([0.0, 0.2, 0.7, 0.5, 0.1, 0.8, 1.0]) + a_rank = np.argsort(a) + print(f"Ranking of a: {a_rank}") + b = np.array([1.0, 0.2, 0.7, 0.5, 0.1, 0.8, 0.0]) + b_rank = np.argsort(b) + print(f"Ranking of b: {b_rank}") + factors = [0.0, 0.2, 0.5, 1.0] + for f in factors: + result = combine_and_normalize(a, b, f) + result_rank = np.argsort(result) + print(f"Ranking of r with factor {f}: {result_rank}") + + def test_estimate_real_distribution(self): + existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] + test_area = Area(999, model=self.model, 
height=5, width=5, + size_variance=0) + test_area.idx_field((0, 0)) + a = self.agent + assert a in test_area.agents # Test if agent is present + print(f"Areas color-cells: {test_area.cells}") + # Test the estimate_real_distribution method + known_cells = (random.sample(test_area.cells, 4) + + random.sample(existing_area, 4)) + print(f"Cells that agent {a.unique_id} knows of: {a.known_cells}") + # TODO finish this! + diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py new file mode 100644 index 0000000..ca5865c --- /dev/null +++ b/tests/test_participation_model.py @@ -0,0 +1,66 @@ +import unittest +from democracy_sim.participation_model import ParticipationModel, Area +from democracy_sim.model_setup import (grid_rows as height, grid_cols as width, + num_agents, num_colors, num_areas, + draw_borders, rule_idx, voting_rules, + distance_idx, distance_functions, + color_heterogeneity as heterogeneity, + color_patches_steps, av_area_height, + av_area_width, area_size_variance, + patch_power, election_costs) +import mesa + + +class TestParticipationModel(unittest.TestCase): + + def setUp(self): + voting_rule = voting_rules[rule_idx] + distance_func = distance_functions[distance_idx] + self.model = ParticipationModel(height=height, width=width, + num_agents=num_agents, + num_colors=num_colors, + num_areas=num_areas, + draw_borders=draw_borders, + election_costs=election_costs, + voting_rule=voting_rule, + distance_func=distance_func, + heterogeneity=heterogeneity, + color_patches_steps=color_patches_steps, + av_area_height=av_area_height, + av_area_width=av_area_width, + area_size_variance=area_size_variance, + patch_power=patch_power) + + def test_initialization(self): + areas_count = len([area for area in self.model.area_scheduler.agents + if isinstance(area, Area)]) + self.assertEqual(areas_count, self.model.num_areas) + self.assertIsInstance(self.model.datacollector, mesa.DataCollector) + + def test_options(self): + 
self.assertEqual(self.model.av_area_width, av_area_width) + self.assertEqual(self.model.area_size_variance, area_size_variance) + + # def test_data_collection(self): + # self.model.datacollector.collect(self.model) + # data = self.model.datacollector.get_model_vars_dataframe() + # self.assertIn("Collective assets", data.columns) + # self.assertIn("Number of agents", data.columns) + # self.assertIn("Area Color Distributions", data.columns) + # + # def test_color_distribution(self): + # distribution = self.model.create_color_distribution(heterogeneity=0.5) + # self.assertEqual(len(distribution), self.model.num_colors) + # self.assertAlmostEqual(sum(distribution), 1.0, places=5) + # + # def test_color_patches(self): + # from democracy_sim.participation_agent import ColorCell + # cell = ColorCell(1, model=self.model, pos=(0, 0), initial_color=0) + # color = self.model.color_patches(cell, patch_power=1.0) + # self.assertIn(color, range(self.model.num_colors)) + # + def test_step(self): + initial_data = self.model.datacollector.get_model_vars_dataframe().copy() + self.model.step() + new_data = self.model.datacollector.get_model_vars_dataframe() + self.assertNotEqual(initial_data, new_data) From 0a9ca441bc5a65c50cf84e445bb278a56df7ffad Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 26 Jul 2024 14:01:18 +0200 Subject: [PATCH 10/38] continued with adding relevant functionality for the 'conduct_election' method - added more unit tests --- democracy_sim/participation_agent.py | 30 +++++++---- democracy_sim/participation_model.py | 47 ++++++++++------ tests/test_participation_agent.py | 81 +++++++++++++++++++++++----- tests/test_participation_model.py | 71 +++++++++++++++--------- 4 files changed, 163 insertions(+), 66 deletions(-) diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 13d9004..dd98848 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -24,25 +24,32 @@ class 
VoteAgent(Agent): """An agent that has limited knowledge and resources and can decide to use them to participate in elections.""" - def __init__(self, unique_id, pos, model, personality, assets=1): - """ - Create a new agent. + def __init__(self, unique_id, model, pos, personality, assets=1): + """ Create a new agent. :param unique_id: The unique identifier of the agent. - :param pos: The position of the agent in the models' grid. - :type pos: tuple :param model: The simulation model of which the agent is part of. :type model: ParticipationModel + :param pos: The position of the agent in the models' grid. + :type pos: tuple :param personality: Represents the agent's preferences among colors. :type personality: np.ndarray :param assets: The wealth/assets/motivation of the agent. """ # Pass the parameters to the parent class. - super().__init__(unique_id, model) - self._row = pos[0] - self._col = pos[1] + super().__init__(unique_id=unique_id, model=model) + try: + row, col = pos + except ValueError: + raise ValueError("Position must be a tuple of two integers.") + self._row = row + self._col = col self._assets = assets self.personality = personality self.known_cells = [] # ColorCell objects the agent knows (knowledge) + # Add the agent to the models' agent list + model.voting_agents.append(self) + # Place the agent on the grid + model.grid.place_agent(self, pos) @property def col(self): @@ -103,7 +110,7 @@ def compute_assumed_opt_dist(self, area): ass_opt = combine_and_normalize(self.personality, est_dist, a_factor) return ass_opt - def vote(self, area, voting_rule): + def vote(self, area): """ The agent votes in the election of a given area, i.e., she returns a preference ranking vector over all options. 
@@ -151,16 +158,17 @@ def estimate_real_distribution(self, area): distribution[unique] = counts / known_colors.size return distribution + class ColorCell(Agent): """ Represents a cell's color """ - def __init__(self, pos, model, initial_color: int): + def __init__(self, unique_id, model, pos, initial_color: int): """ Create a cell, in the given state, at the given row, col position. """ - super().__init__(pos, model) + super().__init__(unique_id, model) self._row = pos[0] self._col = pos[1] self._color = initial_color # The cell's current color (int) diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index bb04a75..480d534 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -20,7 +20,7 @@ def __init__(self, unique_id, model, height, width, size_variance): """ if TYPE_CHECKING: # Type hint for IDEs model = cast(ParticipationModel, model) - super().__init__(unique_id, model) + super().__init__(unique_id=unique_id, model=model) if size_variance == 0: self._width = width self._height = height @@ -58,6 +58,7 @@ def idx_field(self, pos: tuple): x_val, y_val = pos except ValueError: raise ValueError("The idx_field must be a tuple") + # Check if the values are within the grid if x_val < 0 or x_val >= self.model.width: raise ValueError(f"The x={x_val} value must be within the grid") if y_val < 0 or y_val >= self.model.height: @@ -111,7 +112,7 @@ def conduct_election(self, voting_rule, distance_func): preference_profile.append(agent.vote(area=self)) preference_profile = np.array(preference_profile) # Aggregate the prefs using the voting rule - # TODO: How to deal with ties?? (Have to be fulfill neutrality!!) + # TODO: How to deal with ties (Have to fulfill neutrality!!)?? 
aggregated_prefs = voting_rule(preference_profile) # Save the "elected" distribution in self.voted_distribution winning_option = aggregated_prefs[0] @@ -137,8 +138,16 @@ def update_color_distribution(self): for color in range(self.model.num_colors): dist_val = color_count.get(color, 0) / num_cells # Float division self.color_distribution[color] = dist_val - print(f"Area {self.unique_id} color " - f"distribution: {self.color_distribution}") + + def filter_cells(self, cell_list): + """ + This method is used to filter a given list of cells to return only + those which are within the area. + :param cell_list: A list of ColorCell cells to be filtered. + :return: A list of ColorCell cells that are within the area. + """ + cell_set = set(self.cells) + return [c for c in cell_list if c in cell_set] def step(self) -> None: self.update_color_distribution() @@ -263,6 +272,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, # Create search pairs once for faster iterations when comparing rankings self.search_pairs = combinations(range(0, self.options.size), 2) # TODO check if correct! 
self.option_vec = np.arange(self.options.size) # Also to speed up + # Create color cells self.initialize_color_cells() # Create agents self.num_personalities = num_personalities @@ -290,11 +300,11 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, def initialize_color_cells(self): # Create a color cell for each cell in the grid - for _, (row, col) in self.grid.coord_iter(): + for unique_id, (_, (row, col)) in enumerate(self.grid.coord_iter()): # The colors are chosen by a predefined color distribution color = color_by_dst(self.color_dst) # Create the cell - cell = ColorCell((row, col), self, color) + cell = ColorCell(unique_id, self, (row, col), color) # Add it to the grid self.grid.place_agent(cell, (row, col)) # Add the color cell to the scheduler @@ -306,18 +316,22 @@ def initialize_voting_agents(self): x = self.random.randrange(self.width) y = self.random.randrange(self.height) personality = random.choice(self.personalities) - a = VoteAgent(a_id, (x, y), self, personality) - # Add the agent to the models' agent list - self.voting_agents.append(a) - # Place the agent on the grid - self.grid.place_agent(a, (x, y)) - # Count +1 at the cell the agent is placed at TODO improve? + VoteAgent(a_id, self, (x, y), personality) + # Count +1 at the color cell the agent is placed at TODO improve? agents = self.grid.get_cell_list_contents([(x, y)]) - cell = [a for a in agents if isinstance(a, ColorCell)][0] + color_cells = [a for a in agents if isinstance(a, ColorCell)] + if len(color_cells) > 1: + raise ValueError(f"There are several color cells at {(x, y)}!") + cell = color_cells[0] cell.num_agents_in_cell = cell.num_agents_in_cell + 1 def initialize_areas(self): - # Create areas spread approximately evenly across the grid + """ + This method initializes the areas in the models' grid in such a way + that the areas are spread approximately evenly across the grid. + Depending on grid size, the number of areas and their (average) sizes. 
+ TODO create unit tests for this method (Tested manually so far) + """ roo_apx = round(sqrt(self.num_areas)) nr_areas_x = self.grid.width // self.av_area_width nr_areas_y = self.grid.width // self.av_area_height @@ -343,13 +357,13 @@ def initialize_areas(self): area = Area(a_id, self, self.av_area_height, self.av_area_width, self.area_size_variance) print(f"Area {area.unique_id} at {x_coord}, {y_coord}") - area.idx_field = (x_coord, y_coord) + area.idx_field = (x_coord, y_coord) # TODO integrate this step into the areas __init__ method self.area_scheduler.add(area) for x_coord, y_coord in zip(additional_x, additional_y): area = Area(next(a_ids), self, self.av_area_height, self.av_area_width, self.area_size_variance) print(f"++ Area {area.unique_id} at {x_coord}, {y_coord}") - area.idx_field = (x_coord, y_coord) + area.idx_field = (x_coord, y_coord) # TODO integrate this step into the areas __init__ method self.area_scheduler.add(area) def create_personalities(self, n=None): @@ -399,7 +413,6 @@ def create_color_distribution(self, heterogeneity): # Normalize (with float division) total = sum(values) dst_array = [value / total for value in values] - print(f"Color distribution: {dst_array}") # TODO rm print return dst_array def color_patches(self, cell, patch_power): diff --git a/tests/test_participation_agent.py b/tests/test_participation_agent.py index 16c6c41..c4b0e87 100644 --- a/tests/test_participation_agent.py +++ b/tests/test_participation_agent.py @@ -1,6 +1,5 @@ -import unittest -from test_participation_model import * -from democracy_sim.participation_model import ParticipationModel, Area +from .test_participation_model import * +from democracy_sim.participation_model import Area from democracy_sim.participation_agent import VoteAgent, combine_and_normalize import numpy as np import random @@ -15,7 +14,7 @@ def setUp(self): personality = np.zeros(self.model.num_colors) personality[0] = 0.3 personality[1] = 0.7 - self.agent = VoteAgent(num_agents + 1, (0, 0), 
self.model, + self.agent = VoteAgent(num_agents + 1, self.model, pos=(0, 0), personality=personality, assets=25) def test_combine_and_normalize_rank_arrays(self): @@ -33,17 +32,73 @@ def test_combine_and_normalize_rank_arrays(self): result_rank = np.argsort(result) print(f"Ranking of r with factor {f}: {result_rank}") + def test_update_color_distribution(self): + rand_area = random.sample(self.model.area_scheduler.agents, 1)[0] + init_dst = rand_area.color_distribution.copy() + print(f"Area {rand_area.unique_id}s initial color dist.: {init_dst}") + # Assign new (randomly chosen) cells to the area + all_color_cells = self.model.color_cell_scheduler.agents + rand_area.cells = random.sample(all_color_cells, len(rand_area.cells)) + # Run/test the update_color_distribution method + rand_area.update_color_distribution() + new_dst = rand_area.color_distribution + print(f"Area {rand_area.unique_id}s new color distribution: {new_dst}") + # Check if the distribution has changed + assert init_dst != new_dst + + def test_filter_cells(self): + # Get existing area + existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] + print(f"The areas color-cells: " + f"{[c.unique_id for c in existing_area.cells]}") + area_cell_sample = random.sample(existing_area.cells, 4) + other_cells = random.sample(self.model.color_cell_scheduler.agents, 4) + raw_cell_list = area_cell_sample + other_cells + print(f"Cells to be filtered: {[c.unique_id for c in raw_cell_list]}") + filtered_cells = existing_area.filter_cells(raw_cell_list) + print(f"Filtered cells: {[c.unique_id for c in filtered_cells]}") + # Check if the cells are filtered correctly + add_cells = existing_area.filter_cells(other_cells) + if len(add_cells) > 0: + print(f"Additional cells: {[c.unique_id for c in add_cells]}") + area_cell_sample += add_cells + self.assertEqual(area_cell_sample, filtered_cells) + def test_estimate_real_distribution(self): + # Get any existing area existing_area = 
random.sample(self.model.area_scheduler.agents, 1)[0] - test_area = Area(999, model=self.model, height=5, width=5, - size_variance=0) - test_area.idx_field((0, 0)) + # Create test area + test_area = Area(self.model.num_areas + 1, model=self.model, height=5, + width=5, size_variance=0) + print(f"Test-Area: id={test_area.unique_id}, width={test_area._width}," + f" height={test_area._height}, idx={test_area.idx_field}") a = self.agent + test_area.idx_field = (0, 0) assert a in test_area.agents # Test if agent is present - print(f"Areas color-cells: {test_area.cells}") + print(f"Agent {a.unique_id} is in area {test_area.unique_id}") + print(f"Areas color-cells: {[c.unique_id for c in test_area.cells]}") # Test the estimate_real_distribution method - known_cells = (random.sample(test_area.cells, 4) + - random.sample(existing_area, 4)) - print(f"Cells that agent {a.unique_id} knows of: {a.known_cells}") - # TODO finish this! - + k = random.sample(range(2, len(test_area.cells)), 1)[0] + print(f"Sample size: {k}") + sample_1 = random.sample(test_area.cells, k) + sample_2 = random.sample(existing_area.cells, 3) + a.known_cells = sample_1 + sample_2 + a_colors = [c.color for c in a.known_cells] # To test against + print(f"Cells that agent {a.unique_id} knows of:\n" + f"{[c.unique_id for c in a.known_cells]} with colors: {a_colors}") + print(f"Cells not part of the area: {[c.unique_id for c in sample_2]}") + rel_cells = test_area.filter_cells(a.known_cells) + rel_color_vec = [c.color for c in rel_cells] + print("The relevant cells should be:\n", + [c.unique_id for c in rel_cells], "with colors", rel_color_vec) + est_distribution = a.estimate_real_distribution(test_area) + print(f"Agent {a.unique_id}s' estimated color distribution is: " + f"{est_distribution}") + len_colors = self.model.num_colors + self.assertEqual(len(est_distribution), len_colors) + counts = [rel_color_vec.count(color) for color in range(len_colors)] + print(f"Color counts: {counts}") + s = sum(counts) + 
expected_distribution = [i / s for i in counts] + print(f"Expected distribution: {expected_distribution}") + self.assertEqual(list(est_distribution), expected_distribution) diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index ca5865c..e7338d0 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -2,6 +2,8 @@ from democracy_sim.participation_model import ParticipationModel, Area from democracy_sim.model_setup import (grid_rows as height, grid_cols as width, num_agents, num_colors, num_areas, + num_personalities, + num_personality_colors as npc, draw_borders, rule_idx, voting_rules, distance_idx, distance_functions, color_heterogeneity as heterogeneity, @@ -19,6 +21,8 @@ def setUp(self): self.model = ParticipationModel(height=height, width=width, num_agents=num_agents, num_colors=num_colors, + num_personalities=num_personalities, + num_personality_colors=npc, num_areas=num_areas, draw_borders=draw_borders, election_costs=election_costs, @@ -37,30 +41,47 @@ def test_initialization(self): self.assertEqual(areas_count, self.model.num_areas) self.assertIsInstance(self.model.datacollector, mesa.DataCollector) - def test_options(self): - self.assertEqual(self.model.av_area_width, av_area_width) + def test_model_options(self): + """ + def __init__(self, height, width, num_agents, num_colors, + num_personalities, num_personality_colors, + num_areas, av_area_height, av_area_width, area_size_variance, + patch_power, color_patches_steps, draw_borders, heterogeneity, + voting_rule, distance_func, election_costs): + """ + self.assertEqual(self.model.num_agents, num_agents) + self.assertEqual(self.model.num_colors, num_colors) + self.assertEqual(self.model.num_personalities, num_personalities) + self.assertEqual(self.model.num_personality_colors, npc) + self.assertEqual(self.model.num_areas, num_areas) self.assertEqual(self.model.area_size_variance, area_size_variance) + 
self.assertEqual(self.model.draw_borders, draw_borders) + self.assertEqual(self.model.heterogeneity, heterogeneity) + v_rule = voting_rules[rule_idx] + dist_func = distance_functions[distance_idx] + self.assertEqual(self.model.voting_rule, v_rule) + self.assertEqual(self.model.distance_func, dist_func) + self.assertEqual(self.model.election_costs, election_costs) - # def test_data_collection(self): - # self.model.datacollector.collect(self.model) - # data = self.model.datacollector.get_model_vars_dataframe() - # self.assertIn("Collective assets", data.columns) - # self.assertIn("Number of agents", data.columns) - # self.assertIn("Area Color Distributions", data.columns) - # - # def test_color_distribution(self): - # distribution = self.model.create_color_distribution(heterogeneity=0.5) - # self.assertEqual(len(distribution), self.model.num_colors) - # self.assertAlmostEqual(sum(distribution), 1.0, places=5) - # - # def test_color_patches(self): - # from democracy_sim.participation_agent import ColorCell - # cell = ColorCell(1, model=self.model, pos=(0, 0), initial_color=0) - # color = self.model.color_patches(cell, patch_power=1.0) - # self.assertIn(color, range(self.model.num_colors)) - # - def test_step(self): - initial_data = self.model.datacollector.get_model_vars_dataframe().copy() - self.model.step() - new_data = self.model.datacollector.get_model_vars_dataframe() - self.assertNotEqual(initial_data, new_data) + def test_create_color_distribution(self): + eq_dst = self.model.create_color_distribution(heterogeneity=0) + self.assertEqual([1/num_colors for _ in eq_dst], eq_dst) + print(f"Color distribution with heterogeneity=0: {eq_dst}") + het_dst = self.model.create_color_distribution(heterogeneity=1) + print(f"Color distribution with heterogeneity=1: {het_dst}") + mid_dst = self.model.create_color_distribution(heterogeneity=0.5) + print(f"Color distribution with heterogeneity=0.5: {mid_dst}") + assert het_dst != eq_dst + assert mid_dst != eq_dst + assert 
het_dst != mid_dst + + def test_initialize_areas(self): + # TODO (very non-trivial) - has been tested manually so far. + pass + + # TODO add test_step + # def test_step(self): + # initial_data = self.model.datacollector.get_model_vars_dataframe().copy() + # self.model.step() + # new_data = self.model.datacollector.get_model_vars_dataframe() + # self.assertNotEqual(initial_data, new_data) From 8ba65495c0600485a367de2a7ade2f7215d5a96c Mon Sep 17 00:00:00 2001 From: jurikane Date: Wed, 14 Aug 2024 10:22:15 +0200 Subject: [PATCH 11/38] Added more unit-tests and improved comments and doc-strings --- democracy_sim/participation_agent.py | 29 +++++++++----- democracy_sim/participation_model.py | 10 +++++ tests/test_participation_agent.py | 56 ++++++++++++++++++++++++---- tests/test_participation_model.py | 10 ++--- 4 files changed, 82 insertions(+), 23 deletions(-) diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index dd98848..d72613e 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -7,17 +7,25 @@ from democracy_sim.participation_model import ParticipationModel -def combine_and_normalize(arr_1, arr_2, factor): +def combine_and_normalize(arr_1: np.array, arr_2: np.array, factor: float): + """ + Combine two arrays weighted by a factor favoring arr_1. + The first array is to be the estimated real distribution. + And the other is to be the personality vector of the agent. + :param arr_1: The first array to be combined (real distribution). + :param arr_2: The second array to be combined (personality vector). + :param factor: The factor to weigh the two arrays. + :return: The normalized result of the weighted linear combination. 
+ """ # Ensure f is between 0 and 1 TODO: remove this on simulations to speed up if not (0 <= factor <= 1): raise ValueError("Factor f must be between 0 and 1") - # Linear combination res = factor * arr_1 + (1 - factor) * arr_2 - print(f"un-normalized result: {res}") # TODO rm - # Normalize the result - res_min = res.min() - return (res - res_min) / (res.max() - res_min + 1e-8) + # Normalize/scale result s. t. it resembles a distribution vector (sum=1) + total = sum(res) + # assert total == 1.0, f"Sum of result is {total} and not 1.0" + return res / total class VoteAgent(Agent): @@ -93,7 +101,7 @@ def decide_altruism_factor(self, area): # TODO Implement this (is to be decided upon a learned decision tree) # This part is important - also for monitoring - save/plot a_factors a_factor = random.uniform(0.0, 1.0) - print(f"{area}:", "Agent", self.unique_id, "altruism factor:", a_factor) + print(f"Agent {self.unique_id} has altruism factor: {a_factor}") return a_factor def compute_assumed_opt_dist(self, area): @@ -101,9 +109,12 @@ def compute_assumed_opt_dist(self, area): Computes a color distribution that the agent assumes to be an optimal choice in any election (regardless of whether it exists as a real option to vote for or not). It takes "altruistic" concepts into consideration. + :param area: The area in which the election takes place. + :return: The assumed optimal color distribution (normalized). 
+ TODO add unit test for this method """ # Compute the "altruism_factor" via a decision tree - a_factor = self.decide_altruism_factor(area) + a_factor = self.decide_altruism_factor(area) # TODO: Implement this # compute the preference ranking vector as a mix between the agent's # own preferences/personality traits and the estimated real distribution est_dist = self.estimate_real_distribution(area) @@ -120,7 +131,7 @@ def vote(self, area): # TODO Implement this (is to be decided upon a learned decision tree) # Compute the color distribution that is assumed to be the best choice. est_best_dist = self.compute_assumed_opt_dist(area) - # make sure that r is normalized! + # Make sure that r= is normalized! # (r.min()=0.0 and r.max()=1.0 and all vals x are within [0.0, 1.0]!) ############## if TYPE_CHECKING: # Type hint for IDEs diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 480d534..49af7e6 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -275,6 +275,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, # Create color cells self.initialize_color_cells() # Create agents + # TODO: Where do the agents get there known cells from and how!? self.num_personalities = num_personalities self.num_personality_colors = num_personality_colors self.personalities = self.create_personalities() @@ -299,6 +300,9 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.datacollector.collect(self) def initialize_color_cells(self): + """ + This method initializes a color cells for each cell in the model's grid. 
+ """ # Create a color cell for each cell in the grid for unique_id, (_, (row, col)) in enumerate(self.grid.coord_iter()): # The colors are chosen by a predefined color distribution @@ -311,6 +315,12 @@ def initialize_color_cells(self): self.color_cell_scheduler.add(cell) def initialize_voting_agents(self): + """ + This method initializes as many voting agents as set in the model with + a randomly chosen personality. It places them randomly on the grid. + It also ensures that each agent is assigned to the color cell it is + standing on. + """ for a_id in range(self.num_agents): # Get a random position x = self.random.randrange(self.width) diff --git a/tests/test_participation_agent.py b/tests/test_participation_agent.py index c4b0e87..613b38d 100644 --- a/tests/test_participation_agent.py +++ b/tests/test_participation_agent.py @@ -16,9 +16,12 @@ def setUp(self): personality[1] = 0.7 self.agent = VoteAgent(num_agents + 1, self.model, pos=(0, 0), personality=personality, assets=25) + self.additional_test_area = Area(self.model.num_areas + 1, + model=self.model, height=5, + width=5, size_variance=0) + self.additional_test_area.idx_field = (0, 0) # Place the area at (0, 0) def test_combine_and_normalize_rank_arrays(self): - # TODO more test-cases and include estimated results print("Test function combine_and_normalize_estimates") a = np.array([0.0, 0.2, 0.7, 0.5, 0.1, 0.8, 1.0]) a_rank = np.argsort(a) @@ -32,6 +35,31 @@ def test_combine_and_normalize_rank_arrays(self): result_rank = np.argsort(result) print(f"Ranking of r with factor {f}: {result_rank}") + def test_combine_and_normalize(self): + a = self.agent + test_area = self.additional_test_area + assert a in test_area.agents # Test if agent is present + # Give the agent some cells to know of + k = random.sample(range(2, len(test_area.cells)), 1)[0] + print(f"Sample size: {k}") + a.known_cells = random.sample(test_area.cells, k) + est_dist = a.estimate_real_distribution(test_area) + own_prefs = a.personality + # 
own_prefs = np.array([0.25, 0.5, 0.0, 0.0]) # Should also work.. + print(f"Agent {a.unique_id}s' personality: {own_prefs}" + f" and estimated color distribution: {est_dist}") + for a_factor in [0.0, 0.2, 0.5, 1.0]: + comb = combine_and_normalize(own_prefs, est_dist, a_factor) + print(f"Assumed opt. distribution with factor {a_factor}: \n{comb}") + # Validation + if a_factor == 0.0: + self.assertEqual(list(comb), list(est_dist)) + elif a_factor == 1.0: + if sum(own_prefs) != 1.0: + own_prefs = own_prefs / sum(own_prefs) + self.assertEqual(list(comb), list(own_prefs)) + self.assertTrue(np.isclose(sum(comb), 1.0, atol=1e-8)) + def test_update_color_distribution(self): rand_area = random.sample(self.model.area_scheduler.agents, 1)[0] init_dst = rand_area.color_distribution.copy() @@ -67,13 +95,10 @@ def test_filter_cells(self): def test_estimate_real_distribution(self): # Get any existing area existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] - # Create test area - test_area = Area(self.model.num_areas + 1, model=self.model, height=5, - width=5, size_variance=0) + test_area = self.additional_test_area print(f"Test-Area: id={test_area.unique_id}, width={test_area._width}," f" height={test_area._height}, idx={test_area.idx_field}") a = self.agent - test_area.idx_field = (0, 0) assert a in test_area.agents # Test if agent is present print(f"Agent {a.unique_id} is in area {test_area.unique_id}") print(f"Areas color-cells: {[c.unique_id for c in test_area.cells]}") @@ -92,8 +117,8 @@ def test_estimate_real_distribution(self): print("The relevant cells should be:\n", [c.unique_id for c in rel_cells], "with colors", rel_color_vec) est_distribution = a.estimate_real_distribution(test_area) - print(f"Agent {a.unique_id}s' estimated color distribution is: " - f"{est_distribution}") + print(f"{a.unique_id}s' estimated color dist is: {est_distribution}") + self.assertEqual(sum(est_distribution), 1.0) len_colors = self.model.num_colors 
self.assertEqual(len(est_distribution), len_colors) counts = [rel_color_vec.count(color) for color in range(len_colors)] @@ -102,3 +127,20 @@ def test_estimate_real_distribution(self): expected_distribution = [i / s for i in counts] print(f"Expected distribution: {expected_distribution}") self.assertEqual(list(est_distribution), expected_distribution) + + def test_compute_assumed_opt_dist(self): + a = self.agent + test_area = self.additional_test_area + # Give the agent some cells to know of + max_size = len(test_area.cells) + k = random.sample(range(2, max_size), 1)[0] + a.known_cells = random.sample(test_area.cells, k=k) + est_dist = a.estimate_real_distribution(test_area) + own_prefs = a.personality + print(f"The agents\npersonality: {own_prefs} \nest_dist : {est_dist}") + r = a.compute_assumed_opt_dist(test_area) + print(f"Assumed optimal distribution: {r}") + self.assertTrue(np.isclose(sum(r), 1.0, atol=1e-8)) + + + diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index e7338d0..09237c5 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -40,15 +40,9 @@ def test_initialization(self): if isinstance(area, Area)]) self.assertEqual(areas_count, self.model.num_areas) self.assertIsInstance(self.model.datacollector, mesa.DataCollector) + # TODO ... more tests def test_model_options(self): - """ - def __init__(self, height, width, num_agents, num_colors, - num_personalities, num_personality_colors, - num_areas, av_area_height, av_area_width, area_size_variance, - patch_power, color_patches_steps, draw_borders, heterogeneity, - voting_rule, distance_func, election_costs): - """ self.assertEqual(self.model.num_agents, num_agents) self.assertEqual(self.model.num_colors, num_colors) self.assertEqual(self.model.num_personalities, num_personalities) @@ -79,6 +73,8 @@ def test_initialize_areas(self): # TODO (very non-trivial) - has been tested manually so far. 
pass + def test_step(self): + pass # TODO add test_step # def test_step(self): # initial_data = self.model.datacollector.get_model_vars_dataframe().copy() From 51c03e70b88c1c0bd7e666ce23113b30ccec49da Mon Sep 17 00:00:00 2001 From: jurikane Date: Wed, 21 Aug 2024 19:10:06 +0200 Subject: [PATCH 12/38] added majority_rule but tie-breaking is not satisfactory yet --- democracy_sim/model_setup.py | 32 +++--- democracy_sim/participation_model.py | 30 +++--- democracy_sim/social_welfare_functions.py | 80 +++++++++++---- docs/technical/preference_relations.md | 23 +++++ mkdocs.yml | 3 +- tests/test_social_welfare_functions.py | 118 ++++++++++++++++++++++ 6 files changed, 237 insertions(+), 49 deletions(-) create mode 100644 docs/technical/preference_relations.md create mode 100644 tests/test_social_welfare_functions.py diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 77f470a..ac95228 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -69,22 +69,22 @@ "Purple", "Silver", "Teal", - "Pink", - "Brown", - "Gold", - "Coral", - "Crimson", - "DarkBlue", - "DarkRed", - "DarkGreen", - "DarkKhaki", - "DarkMagenta", - "DarkOliveGreen", - "DarkOrange", - "DarkTurquoise", - "DarkViolet", - "DeepPink", -] # 30 colors + # "Pink", + # "Brown", + # "Gold", + # "Coral", + # "Crimson", + # "DarkBlue", + # "DarkRed", + # "DarkGreen", + # "DarkKhaki", + # "DarkMagenta", + # "DarkOliveGreen", + # "DarkOrange", + # "DarkTurquoise", + # "DarkViolet", + # "DeepPink", +] # 16 colors def participation_draw(cell: ColorCell): diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 49af7e6..e108184 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -335,6 +335,16 @@ def initialize_voting_agents(self): cell = color_cells[0] cell.num_agents_in_cell = cell.num_agents_in_cell + 1 + def initialize_area(self, a_id, x_coord, y_coord): + """ + This method initializes 
one area in the models' grid. + """ + area = Area(a_id, self, self.av_area_height, self.av_area_width, + self.area_size_variance) + # Place the area in the grid using its indexing field + area.idx_field = (x_coord, y_coord) + self.area_scheduler.add(area) + def initialize_areas(self): """ This method initializes the areas in the models' grid in such a way @@ -342,14 +352,16 @@ def initialize_areas(self): Depending on grid size, the number of areas and their (average) sizes. TODO create unit tests for this method (Tested manually so far) """ + # Calculate the number of areas in each direction roo_apx = round(sqrt(self.num_areas)) nr_areas_x = self.grid.width // self.av_area_width nr_areas_y = self.grid.width // self.av_area_height + # Calculate the distance between the areas area_x_dist = self.grid.width // roo_apx area_y_dist = self.grid.height // roo_apx print(f"roo_apx: {roo_apx}, nr_areas_x: {nr_areas_x}, " f"nr_areas_y: {nr_areas_y}, area_x_dist: {area_x_dist}, " - f"area_y_dist: {area_y_dist}") + f"area_y_dist: {area_y_dist}") # TODO rm print x_coords = range(0, self.grid.width, area_x_dist) y_coords = range(0, self.grid.height, area_y_dist) # Add additional areas if necessary (num_areas not a square number) @@ -358,23 +370,17 @@ def initialize_areas(self): for _ in range(missing): additional_x.append(self.random.randrange(self.grid.width)) additional_y.append(self.random.randrange(self.grid.height)) + # Create the area's ids a_ids = iter(range(1, self.num_areas + 1)) + # Initialize all areas for x_coord in x_coords: for y_coord in y_coords: a_id = next(a_ids, 0) if a_id == 0: break - area = Area(a_id, self, self.av_area_height, - self.av_area_width, self.area_size_variance) - print(f"Area {area.unique_id} at {x_coord}, {y_coord}") - area.idx_field = (x_coord, y_coord) # TODO integrate this step into the areas __init__ method - self.area_scheduler.add(area) + self.initialize_area(a_id, x_coord, y_coord) for x_coord, y_coord in zip(additional_x, additional_y): 
- area = Area(next(a_ids), self, self.av_area_height, - self.av_area_width, self.area_size_variance) - print(f"++ Area {area.unique_id} at {x_coord}, {y_coord}") - area.idx_field = (x_coord, y_coord) # TODO integrate this step into the areas __init__ method - self.area_scheduler.add(area) + self.initialize_area(next(a_ids), x_coord, y_coord) def create_personalities(self, n=None): """ @@ -401,7 +407,7 @@ def step(self): self.datacollector.collect(self) def adjust_color_pattern(self, color_patches_steps, patch_power): - """Adjusting the color pattern to make it less random/predictable.""" + """Adjusting the color pattern to make it less predictable.""" for _ in range(color_patches_steps): print(f"Color adjustment step {_}") for cell in self.grid.coord_iter(): diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index 75a6f7c..eb63cd2 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -10,39 +10,79 @@ import numpy as np -def majority_rule(pref_table): +def complete_ranking(ranking: np.array, num_options: int): """ - This function implements the majority rule social welfare function. - :param pref_table: The agent's preferences as a NumPy matrix - containing the normalized ranking vectors of all agents. - :return: The resulting preference ranking + This function adds options that are not in the ranking in a random order. + :param ranking: The ranking to be completed with the missing options. + :param num_options: The total number of options. + ------- + :return: The completed ranking. 
""" - # Count how often each ordering appears - first_choices = np.argmax(pref_table, axis=0) - first_choice_counts = {} - for choice in first_choices: - first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 - option_count_pairs = list(first_choice_counts.items()) - option_count_pairs.sort(key=lambda x: x[1], reverse=True) - return [pair[0] for pair in option_count_pairs] + all_options = np.arange(num_options) + mask = np.isin(all_options, ranking, invert=True) + non_included_options = all_options[mask] + np.random.shuffle(non_included_options) + return np.concatenate((ranking, non_included_options)) -def majority_rule_2(pref_table): +def majority_rule(pref_table, noise_factor=100): """ This function implements the majority rule social welfare function. :param pref_table: The agent's preferences as a NumPy matrix - one row one agent, column number is color, - values are the guessed distribution values (not the ranking!) - :return: The resulting preference ranking + containing the normalized ranking vectors of all agents. + :param noise_factor: Influences the amount of noise to be added + to the preference table to break ties (higher is less noise). + :return: The resulting preference ranking (beware: its not a pref. relation) """ - # Count how often each color is guessed as - first_choices = np.argmax(pref_table, axis=0) + n, m = pref_table.shape # n agents, m options + # Add a tiny amount of random noise to break ties + # Beware: Even with noise added, argmax will have a bias for lower indices + # on arrays with too many ties. So this is just a small tiebreaker. 
+ variances = np.var(pref_table, axis=0) + 1e-10 + # Generate noise based on the variances to sensitively break ties + noise_eps = variances / noise_factor + noise = np.random.uniform(-noise_eps, noise_eps, (n, m)) + pref_table += noise + # Count how often an option is ranked first (indexes of max values) + first_choices = np.argmax(pref_table, axis=1) + # To avoid a bias toward voters with low indices in the counting, we shuffle + np.random.shuffle(first_choices) first_choice_counts = {} for choice in first_choices: first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 + # Get the ranking from the counts option_count_pairs = list(first_choice_counts.items()) option_count_pairs.sort(key=lambda x: x[1], reverse=True) - return [pair[0] for pair in option_count_pairs] + ranking = np.array([pair[0] for pair in option_count_pairs]) + # Faster: + # count_pairs = np.array(option_count_pairs) + # # Sort the array by the second element in descending order + # sorted_indices = np.argsort(count_pairs[:, 1])[::-1] + # count_pairs = count_pairs[sorted_indices] + # ranking = count_pairs[:, 0].astype(int) + # Fill up the ranking with the missing options (if any) + if ranking.shape[0] < m: + ranking = complete_ranking(ranking, m) + return ranking + + +#def majority_rule_2(pref_table): + # """ + # This function implements the majority rule social welfare function. + # :param pref_table: The agent's preferences as a NumPy matrix + # one row one agent, column number is color, + # values are the guessed distribution values (not the ranking!) 
+ # :return: The resulting preference ranking + # """ + + # # Count how often each color is guessed as + # first_choices = np.argmax(pref_table, axis=0) + # first_choice_counts = {} + # for choice in first_choices: + # first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 + # option_count_pairs = list(first_choice_counts.items()) + # option_count_pairs.sort(key=lambda x: x[1], reverse=True) + # return [pair[0] for pair in option_count_pairs] def approval_voting(pref_table): diff --git a/docs/technical/preference_relations.md b/docs/technical/preference_relations.md new file mode 100644 index 0000000..9bb916e --- /dev/null +++ b/docs/technical/preference_relations.md @@ -0,0 +1,23 @@ +# How preference relations are defined and represented in the system + +## Introduction + +... + +## Definition + +A preference relation $\tau\in\mathbb{R}_{\geq 0}^m$ is a numpy vector of length $m$, +where $m$ is the number of options and each element $\tau[i]$ represents the normalized preference for option $i$, +with $\sum_{\tau}=1$. + +### Why using sum normalization? + +In computational social choice, **sum normalization** is more common than magnitude normalization. +This is because sum normalization aligns well with the interpretation of preference vectors as distributions +or weighted votes, which are prevalent in social choice scenarios. + +### Why using non-negative values? + +The preference values $\tau[i]$ are non-negative because they represent the strength of preference for each option. +Equvalently, they can be interpreted as the probability of selecting each option +or the (inverted or negative) distance of an option to the agents' ideal solution. 
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index c1aceb8..3aa4982 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -70,4 +70,5 @@ markdown_extensions: - attr_list - def_list - footnotes - - md_in_html \ No newline at end of file + - md_in_html + - pymdownx.arithmatex \ No newline at end of file diff --git a/tests/test_social_welfare_functions.py b/tests/test_social_welfare_functions.py new file mode 100644 index 0000000..5e47428 --- /dev/null +++ b/tests/test_social_welfare_functions.py @@ -0,0 +1,118 @@ +import numpy as np +from democracy_sim.social_welfare_functions import majority_rule + +simple = np.array([ + [0.5, 0.4, 0.1], + [0.1, 0.5, 0.4], + [0.4, 0.5, 0.1], + [0.1, 0.4, 0.5], + [0.1, 0.4, 0.5], + [0.1, 0.4, 0.5] +]) # => c, b, a ~ 2, 1, 0 + +# Following "paradoxical" example is taken from +# https://pub.dss.in.tum.de/brandt-research/minpara.pdf +# +# 5 4 3 2 +# ------- +# a e d b +# c b c d +# b c b e +# d d e c +# e a a a + +paradoxical = np.array([ + # 5 times a,c,b,d,e --> 0.4, 0.2, 0.3, 0.1, 0. + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + # 4 times e,b,c,d,a + [0. , 0.3, 0.2, 0.1, 0.4], + [0. , 0.3, 0.2, 0.1, 0.4], + [0. , 0.3, 0.2, 0.1, 0.4], + [0. , 0.3, 0.2, 0.1, 0.4], + # 3 times d,c,b,e,a + [0. , 0.2, 0.3, 0.4, 0.1], + [0. , 0.2, 0.3, 0.4, 0.1], + [0. , 0.2, 0.3, 0.4, 0.1], + # 2 times b,d,e,c,a + [0. , 0.4, 0.1, 0.3, 0.2], + [0. 
, 0.4, 0.1, 0.3, 0.2] +]) # Plurality => a, e, d, b, c ~ 0, 4, 3, 1, 2 + +majority_simple_cases = [ + (simple, [2, 1, 0]), + (paradoxical, [0, 4, 3, 1, 2]) +] + +def random_pref_profile(num_agents, num_options): + matrix_rand = matrix = np.random.rand(num_agents, num_options) + # Normalize the matrix + matrix_rand = matrix_rand / matrix_rand.sum(axis=1, keepdims=True) + return matrix_rand + +def test_majority_rule(): + # Test predefined cases + for pref_table, expected in majority_simple_cases: + res_ranking = majority_rule(pref_table) + assert list(res_ranking) == expected + winners_from_ties = {} + + +def test_majority_rule_with_ties_all(): + with_ties_all = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) # all equally possible + winners_from_ties = {} + for _ in range(500): + ranking = majority_rule(with_ties_all) + winner = ranking[0] + winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 + winners = list(winners_from_ties.keys()) + winners.sort() + assert winners == [0, 1, 2, 3] + + +def test_majority_with_ties_all_ab(): + with_ties_all_ab = np.array([ + [0.3, 0.3, 0.2, 0.2], + [0.25, 0.25, 0.25, 0.25] + ]) # all possible (a or b up first is more likely) + + winners_from_ties = {} + for _ in range(100): + ranking = majority_rule(with_ties_all_ab) + winner = ranking[0] + winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 + winners = list(winners_from_ties.keys()) + print(f"Winners from ties (all_ab): {winners_from_ties}") + assert winners == [0, 1, 2, 3] + assert winners_from_ties[0] > winners_from_ties[2] + assert winners_from_ties[1] > winners_from_ties[3] + +def test_majority_with_ties_ab(): + with_ties_ab = np.array([ + [0.3, 0.3, 0.2, 0.2], + [0.3, 0.3, 0.2, 0.2], + [0.25, 0.25, 0.25, 0.25] + ]) # all possible (a or b up first is more likely) + winners_from_ties = {} + for _ in range(100): + ranking = 
majority_rule(with_ties_ab) + winner = ranking[0] + winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 + winners = list(winners_from_ties.keys()) + assert winners == [0, 1] + + # Test with random matrix + matrix_rand = random_pref_profile(5, 4) + print(f"Random matrix: {matrix_rand}") + result = majority_rule(matrix_rand) + print(f"Result: {result}") + #assert all(isinstance(x, int) for x in result) \ No newline at end of file From c84807a2ac020a219cdddadb9a0ae9cc572dd731 Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 26 Aug 2024 17:57:43 +0200 Subject: [PATCH 13/38] Completed implementing majority rule including unit tests --- democracy_sim/social_welfare_functions.py | 73 +++---- tests/test_majority_rule.py | 223 ++++++++++++++++++++++ tests/test_social_welfare_functions.py | 118 ------------ 3 files changed, 264 insertions(+), 150 deletions(-) create mode 100644 tests/test_majority_rule.py delete mode 100644 tests/test_social_welfare_functions.py diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index eb63cd2..386a648 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -24,29 +24,57 @@ def complete_ranking(ranking: np.array, num_options: int): np.random.shuffle(non_included_options) return np.concatenate((ranking, non_included_options)) +def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): + """ + This function prepares the preference table for majority rule such that + it handles ties in the voters' preferences. + Because majority rule cannot usually deal with ties. + The tie breaking is randomized to ensure anonymity and neutrality. + :param pref_table: The agent's preferences. + :param noise_factor: Influences the amount of noise to be added + to the preference table to break ties (higher is less noise). + :return: The preference table without any ties. 
+ """ + # Add some random noise to break ties (based on the variances) + variances = np.var(pref_table, axis=1) + # If variances are zero, all values are equal, then select a random option + mask = (variances == 0) + # Split + pref_tab_var_zero = pref_table[mask] + pref_tab_var_non_zero = pref_table[~mask] + n, m = pref_tab_var_non_zero.shape + + # Set all values in the var_zero_part to zero and then add a random 1 + pref_tab_var_zero.fill(0) + for i in range(pref_tab_var_zero.shape[0]): + rand_option = np.random.randint(0, m) + pref_tab_var_zero[i, rand_option] = 1 + # On the non-zero part, add some noise to the values to break ties + non_zero_variances = variances[~mask] + # Generate noise based on the variances + noise_eps = non_zero_variances / noise_factor + noise = np.random.uniform(-noise_eps[:, np.newaxis], + noise_eps[:, np.newaxis], (n, m)) + # noise_eps[:, np.newaxis] reshapes noise_eps from shape (n,) to (n, 1) + pref_tab_var_non_zero += noise -def majority_rule(pref_table, noise_factor=100): + # Put the parts back together + return np.concatenate((pref_tab_var_non_zero, pref_tab_var_zero)) + +def majority_rule(pref_table): """ This function implements the majority rule social welfare function. :param pref_table: The agent's preferences as a NumPy matrix containing the normalized ranking vectors of all agents. - :param noise_factor: Influences the amount of noise to be added - to the preference table to break ties (higher is less noise). :return: The resulting preference ranking (beware: its not a pref. relation) """ n, m = pref_table.shape # n agents, m options - # Add a tiny amount of random noise to break ties - # Beware: Even with noise added, argmax will have a bias for lower indices - # on arrays with too many ties. So this is just a small tiebreaker. 
- variances = np.var(pref_table, axis=0) + 1e-10 - # Generate noise based on the variances to sensitively break ties - noise_eps = variances / noise_factor - noise = np.random.uniform(-noise_eps, noise_eps, (n, m)) - pref_table += noise + # Break ties if they exist + pref_table = run_tie_breaking_preparation_for_majority(pref_table) # Count how often an option is ranked first (indexes of max values) first_choices = np.argmax(pref_table, axis=1) - # To avoid a bias toward voters with low indices in the counting, we shuffle - np.random.shuffle(first_choices) + # To avoid bias toward voters of low indices in the counting, we shuffle + np.random.shuffle(first_choices) # (crucial when counting shows ties later) first_choice_counts = {} for choice in first_choices: first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 @@ -66,25 +94,6 @@ def majority_rule(pref_table, noise_factor=100): return ranking -#def majority_rule_2(pref_table): - # """ - # This function implements the majority rule social welfare function. - # :param pref_table: The agent's preferences as a NumPy matrix - # one row one agent, column number is color, - # values are the guessed distribution values (not the ranking!) - # :return: The resulting preference ranking - # """ - - # # Count how often each color is guessed as - # first_choices = np.argmax(pref_table, axis=0) - # first_choice_counts = {} - # for choice in first_choices: - # first_choice_counts[choice] = first_choice_counts.get(choice, 0) + 1 - # option_count_pairs = list(first_choice_counts.items()) - # option_count_pairs.sort(key=lambda x: x[1], reverse=True) - # return [pair[0] for pair in option_count_pairs] - - def approval_voting(pref_table): # TODO !!!!!!!!!!!!!!!!! # => How should pref_tables be like / how do they need to be like (options=colors or options=combinations)??? 
diff --git a/tests/test_majority_rule.py b/tests/test_majority_rule.py new file mode 100644 index 0000000..7cbcba3 --- /dev/null +++ b/tests/test_majority_rule.py @@ -0,0 +1,223 @@ +import numpy as np +import time +from democracy_sim.social_welfare_functions import majority_rule + +# Simple and standard cases + +simple = np.array([ + [0.5, 0.4, 0.1], + [0.1, 0.5, 0.4], + [0.4, 0.5, 0.1], + [0.1, 0.4, 0.5], + [0.1, 0.4, 0.5], + [0.1, 0.4, 0.5] +]) # => c, b, a ~ 2, 1, 0 + +# Following "paradoxical" example is taken from +# https://pub.dss.in.tum.de/brandt-research/minpara.pdf +# +# 5 4 3 2 +# ------- +# a e d b +# c b c d +# b c b e +# d d e c +# e a a a + +paradoxical = np.array([ + # 5 times a,c,b,d,e --> 0.4, 0.2, 0.3, 0.1, 0. + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + [0.4, 0.2, 0.3, 0.1, 0. ], + # 4 times e,b,c,d,a + [0. , 0.3, 0.2, 0.1, 0.4], + [0. , 0.3, 0.2, 0.1, 0.4], + [0. , 0.3, 0.2, 0.1, 0.4], + [0. , 0.3, 0.2, 0.1, 0.4], + # 3 times d,c,b,e,a + [0. , 0.2, 0.3, 0.4, 0.1], + [0. , 0.2, 0.3, 0.4, 0.1], + [0. , 0.2, 0.3, 0.4, 0.1], + # 2 times b,d,e,c,a + [0. , 0.4, 0.1, 0.3, 0.2], + [0. , 0.4, 0.1, 0.3, 0.2] +]) # Plurality => a, e, d, b, c ~ 0, 4, 3, 1, 2 + +majority_simple_cases = [ + (simple, [2, 1, 0]), + (paradoxical, [0, 4, 3, 1, 2]) +] + +def test_majority_rule(): + # Test predefined cases + for pref_table, expected in majority_simple_cases: + res_ranking = majority_rule(pref_table) + assert list(res_ranking) == expected + +def majority_rule_with_ties_all(pref_rel, expected_winners, iterations=1000): + """ + Run majority rule with ties multiple times, check winners + and calculate the coefficient of variation (CV) of the winners. + :param pref_rel: Preference relation matrix. + :param expected_winners: An ordered list of expected winners, i.e. [0, 1]. + :param iterations: Number of iterations. + ------- + :return: Coefficient of variation (CV) of the winners. 
+ """ + winners_from_ties = {} + for _ in range(iterations): + ranking = majority_rule(pref_rel) + winner = ranking[0] + winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 + winners = list(winners_from_ties.keys()) + winners.sort() + assert winners == expected_winners + counts = np.array(list(winners_from_ties.values())) + # Calculate the coefficient of variation (CV) + cv = np.std(counts) / np.mean(counts) + return cv + +# Cases with ties - "all equally possible" + +with_ties_all = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) + +with_overall_tie = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.3, 0.2, 0.1, 0.4], +]) + +with_ties_mixed = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.25, 0.25, 0.25, 0.25], + [0.3, 0.2, 0.1, 0.4], +]) + +all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] + +def test_equally_possible(cv_threshold=0.125): + for pref_rel in all_equally_possible: + cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) + print(f"CV: {cv}") + assert cv < cv_threshold + +# Cases with ties - "not all equally possible" +with_ties_unequal = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.4, 0.3, 0.2, 0.1], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) + +with_ties_all_ab = np.array([ + [0.3, 0.3, 0.2, 0.2], + [0.25, 0.25, 0.25, 0.25] + ]) # all possible (a or b up first is more likely) + +with_ties_ab = np.array([ + [0.3, 0.3, 0.2, 0.2], + [0.3, 0.3, 0.2, 0.2], + [0.25, 0.25, 0.25, 0.25] + ]) # all possible (a or b up first is more likely) + +with_ties_unequal = [with_ties_unequal, with_ties_all_ab, with_ties_ab] + +def test_with_ties_unequal(): + for pref_rel in with_ties_unequal: + cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) 
+ print(f"CV: {cv}") + assert cv > 0.125 + +# Random matrix + +def random_pref_profile(num_agents, num_options): + rand_matrix = np.random.rand(num_agents, num_options) + # Normalize the matrix + matrix_rand = rand_matrix / rand_matrix.sum(axis=1, keepdims=True) + return matrix_rand + +def majority_rule_with_rand_matrix(num_agents, num_options, iterations=1000): + """ + Run majority rule with ties multiple times, check winners + and calculate the coefficient of variation (CV) of the winners. + :param num_agents: Number of agents. + :param num_options: Number of options. + :param iterations: Number of iterations. + ------- + :return: Dictionary of winner counts {option: count}. + """ + winner_counts = {} + for _ in range(iterations): + # Create random matrix + matrix_rand = random_pref_profile(num_agents, num_options) + ranking = majority_rule(matrix_rand) + winner = ranking[0] + # Count winners + winner_counts[winner] = winner_counts.get(winner, 0) + 1 + return winner_counts + + +def test_with_random_matrix_small(): + """ + Test majority rule on a small random matrix with many iterations. + """ + num_agents = np.random.randint(2, 200) + # Keep num options small to expect all options to win at least once. + num_options = np.random.randint(2, 90) + iterations = 100*num_options + start_time = time.time() + wc = majority_rule_with_rand_matrix(num_agents, num_options, iterations) + stop_time = time.time() + # Extract winners from winner-counts dictionary and sort them + sorted_winners = list(wc.keys()) + sorted_winners.sort() + assert sorted_winners == list(range(num_options)) + # Extract count values + counts = np.array(list(wc.values())) + # Calculate the coefficient of variation (CV) + cv = np.std(counts) / np.mean(counts) + assert cv < 0.125 + print(f"\nCV: {cv}") + # Print the time taken + elapsed_time = stop_time - start_time + print(f"\nTime taken: {elapsed_time:.2f} sec. On {iterations} iterations." 
+ f"With {num_agents} agents and {num_options} options.") + + +def test_with_random_matrix_large(): + """ + Test majority rule on a large random matrix (many agents, many options). + """ + num_its = 100 + num_agents = np.random.randint(1000, 3000) + num_options = np.random.randint(2000, 3000) + # Run majority rule test with random matrix + start_time = time.time() + wc = majority_rule_with_rand_matrix(num_agents, num_options, num_its) + stop_time = time.time() + # Len of winners should be approximately equal to the number of iterations + # because with a large number of options, winners should be mostly unique. + winners, counts = list(wc.keys()), list(wc.values()) + assert abs(np.mean(counts) - 1) < 0.1 + assert abs((len(winners) / num_its) - 1) < 0.1 + # Calculate the coefficient of variation (CV) + cv = np.std(counts) / np.mean(counts) + assert cv < 0.2 + # Print the time taken + elapsed_time = stop_time - start_time + print(f"\nTime taken: {elapsed_time:.2f} sec. On {num_its} iterations." + f"With {num_agents} agents and {num_options} options.") diff --git a/tests/test_social_welfare_functions.py b/tests/test_social_welfare_functions.py deleted file mode 100644 index 5e47428..0000000 --- a/tests/test_social_welfare_functions.py +++ /dev/null @@ -1,118 +0,0 @@ -import numpy as np -from democracy_sim.social_welfare_functions import majority_rule - -simple = np.array([ - [0.5, 0.4, 0.1], - [0.1, 0.5, 0.4], - [0.4, 0.5, 0.1], - [0.1, 0.4, 0.5], - [0.1, 0.4, 0.5], - [0.1, 0.4, 0.5] -]) # => c, b, a ~ 2, 1, 0 - -# Following "paradoxical" example is taken from -# https://pub.dss.in.tum.de/brandt-research/minpara.pdf -# -# 5 4 3 2 -# ------- -# a e d b -# c b c d -# b c b e -# d d e c -# e a a a - -paradoxical = np.array([ - # 5 times a,c,b,d,e --> 0.4, 0.2, 0.3, 0.1, 0. - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - # 4 times e,b,c,d,a - [0. , 0.3, 0.2, 0.1, 0.4], - [0. 
, 0.3, 0.2, 0.1, 0.4], - [0. , 0.3, 0.2, 0.1, 0.4], - [0. , 0.3, 0.2, 0.1, 0.4], - # 3 times d,c,b,e,a - [0. , 0.2, 0.3, 0.4, 0.1], - [0. , 0.2, 0.3, 0.4, 0.1], - [0. , 0.2, 0.3, 0.4, 0.1], - # 2 times b,d,e,c,a - [0. , 0.4, 0.1, 0.3, 0.2], - [0. , 0.4, 0.1, 0.3, 0.2] -]) # Plurality => a, e, d, b, c ~ 0, 4, 3, 1, 2 - -majority_simple_cases = [ - (simple, [2, 1, 0]), - (paradoxical, [0, 4, 3, 1, 2]) -] - -def random_pref_profile(num_agents, num_options): - matrix_rand = matrix = np.random.rand(num_agents, num_options) - # Normalize the matrix - matrix_rand = matrix_rand / matrix_rand.sum(axis=1, keepdims=True) - return matrix_rand - -def test_majority_rule(): - # Test predefined cases - for pref_table, expected in majority_simple_cases: - res_ranking = majority_rule(pref_table) - assert list(res_ranking) == expected - winners_from_ties = {} - - -def test_majority_rule_with_ties_all(): - with_ties_all = np.array([ - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25] - ]) # all equally possible - winners_from_ties = {} - for _ in range(500): - ranking = majority_rule(with_ties_all) - winner = ranking[0] - winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 - winners = list(winners_from_ties.keys()) - winners.sort() - assert winners == [0, 1, 2, 3] - - -def test_majority_with_ties_all_ab(): - with_ties_all_ab = np.array([ - [0.3, 0.3, 0.2, 0.2], - [0.25, 0.25, 0.25, 0.25] - ]) # all possible (a or b up first is more likely) - - winners_from_ties = {} - for _ in range(100): - ranking = majority_rule(with_ties_all_ab) - winner = ranking[0] - winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 - winners = list(winners_from_ties.keys()) - print(f"Winners from ties (all_ab): {winners_from_ties}") - assert winners == [0, 1, 2, 3] - assert winners_from_ties[0] > winners_from_ties[2] - assert winners_from_ties[1] > winners_from_ties[3] - -def 
test_majority_with_ties_ab(): - with_ties_ab = np.array([ - [0.3, 0.3, 0.2, 0.2], - [0.3, 0.3, 0.2, 0.2], - [0.25, 0.25, 0.25, 0.25] - ]) # all possible (a or b up first is more likely) - winners_from_ties = {} - for _ in range(100): - ranking = majority_rule(with_ties_ab) - winner = ranking[0] - winners_from_ties[winner] = winners_from_ties.get(winner, 0) + 1 - winners = list(winners_from_ties.keys()) - assert winners == [0, 1] - - # Test with random matrix - matrix_rand = random_pref_profile(5, 4) - print(f"Random matrix: {matrix_rand}") - result = majority_rule(matrix_rand) - print(f"Result: {result}") - #assert all(isinstance(x, int) for x in result) \ No newline at end of file From b6094aed75ca9f212810b51913050efeea2c4138 Mon Sep 17 00:00:00 2001 From: jurikane Date: Sun, 15 Sep 2024 14:38:04 +0200 Subject: [PATCH 14/38] Continued implementing 'conduct_election'. Split up unit-testing for Agent classes into Area and Voting Agents. Implemented approval_voting rule but unit-testing is to be done and the implementation is done on the assumption that the model will have to work on general preference arrays that are normalized float arrays - might have to change that to not unjustifiable take away voters freedoms for approval voting --- democracy_sim/participation_agent.py | 8 +- democracy_sim/participation_model.py | 3 +- democracy_sim/social_welfare_functions.py | 40 ++-- docs/technical/approval_voting.md | 56 ++++++ tests/test_approval_voting.py | 172 ++++++++++++++++++ ...nt.py => test_participation_area_agent.py} | 90 ++------- tests/test_participation_voting_agent.py | 78 ++++++++ 7 files changed, 358 insertions(+), 89 deletions(-) create mode 100644 docs/technical/approval_voting.md create mode 100644 tests/test_approval_voting.py rename tests/{test_participation_agent.py => test_participation_area_agent.py} (56%) create mode 100644 tests/test_participation_voting_agent.py diff --git a/democracy_sim/participation_agent.py 
b/democracy_sim/participation_agent.py index d72613e..dcf6f00 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -1,7 +1,7 @@ from typing import TYPE_CHECKING, cast import numpy as np -from mesa import Agent, Model +from mesa import Agent from numpy import random if TYPE_CHECKING: # Type hint for IDEs from democracy_sim.participation_model import ParticipationModel @@ -225,7 +225,8 @@ def add_area(self, area): def color_step(self): """ - Determines the cells' color for the next step + Determines the cells' color for the next step. + TODO """ # _neighbor_iter = self.model.grid.iter_neighbors( # (self._row, self._col), True) @@ -242,7 +243,8 @@ def color_step(self): def advance(self): """ - Set the state of the agent to the next state + Set the state of the agent to the next state. + TODO """ # self._color = self._next_color pass diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index e108184..d801337 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -117,10 +117,11 @@ def conduct_election(self, voting_rule, distance_func): # Save the "elected" distribution in self.voted_distribution winning_option = aggregated_prefs[0] self.voted_distribution = self.model.options[winning_option] - # calculate the distance to the real distribution using distance_func + # Calculate the distance to the real distribution using distance_func distance_factor = distance_func(self.voted_distribution, self.color_distribution) # calculate the rewards for the agents + pass # TODO # distribute the rewards # TODO diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index 386a648..eb43ef4 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -93,22 +93,38 @@ def majority_rule(pref_table): ranking = complete_ranking(ranking, m) return ranking +def 
preprocessing_for_approval(pref_table, threshold=None): + """ + This function prepares the preference table for approval voting + by interpreting evey value above the threshold as an approval. + The standard threshold is 1/m (m = number of options). + The reasoning is that if the preferences are normalized, + 1/m ensures the threshold to be proportionate to the number of options. + It also ensures that, on average, half of the options will be approved. + The actual number of approved options, however, + can still vary depending on the specific values in the preference table. + :param pref_table: The agent's preferences. + :param threshold: The threshold for approval. + :return: The preference table with the options approved or not. + """ + if threshold is None: + threshold = 1 / pref_table.shape[1] + return (pref_table >= threshold).astype(int) + def approval_voting(pref_table): - # TODO !!!!!!!!!!!!!!!!! - # => How should pref_tables be like / how do they need to be like (options=colors or options=combinations)??? - # !!!!!!!!!!!!!!! - # ANSWER: - # they have to span the options - not the colors """ This function implements the approval voting social welfare function. :param pref_table: The agent's preferences as a NumPy matrix containing the normalized ranking vectors of all agents. - :return: The resulting preference ranking + :return: The resulting preference ranking (beware: not a pref. relation). 
""" - # # Count how often each option is approved - # approval_counts = np.sum(pref_table, axis=0) - # option_count_pairs = list(enumerate(approval_counts)) - # option_count_pairs.sort(key=lambda x: x[1], reverse=True) - # return [pair[0] for pair in option_count_pairs] - pass + pref_table = preprocessing_for_approval(pref_table) + # Count how often each option is approved + approval_counts = np.sum(pref_table, axis=0) + # Add noise to break ties TODO check for bias + noise = np.random.uniform(-0.3, 0.3, len(approval_counts)) + #option_count_pairs = list(enumerate(approval_counts + noise)) + #option_count_pairs.sort(key=lambda x: x[1], reverse=True) + #return [pair[0] for pair in option_count_pairs] + return np.argsort(-(approval_counts + noise)) diff --git a/docs/technical/approval_voting.md b/docs/technical/approval_voting.md new file mode 100644 index 0000000..9b5d0ef --- /dev/null +++ b/docs/technical/approval_voting.md @@ -0,0 +1,56 @@ +# Problem of threshold in approval voting + +If we choose an architecture in which voters always provide a sum-normalized preference vector +for all voting rules, then approval voting has to have a threshold value to determine which options are approved. +This may take autonomy away from the voters, but it ensures that every voting rule is based on the same conditions +increasing comparability. It may also help to add more rules later on. + +### Idea + +Setting a fixed threshold of $ \frac{1}{m} $ for approval voting where m is the number of options. + +### Definitions and Setup + +- **Sum-normalized vector**: A preference vector $ \mathbf{p} = (p_1, p_2, \ldots, p_m) $ where each entry $ p_i $ represents the preference score for option $ i $, with the constraint $ \sum_{i=1}^m p_i = 1 $. +- **Threshold**: A fixed threshold of $ \frac{1}{m} $ is used to determine approval. If $ p_i \geq \frac{1}{m} $, the option $ i $ is considered "approved." 
+ +### Average Number of Approved Values + +To find the average number of values approved, let's consider how many entries $ p_i $ would meet the threshold $ p_i \geq \frac{1}{m} $. + +1. **Expectation Calculation**: + - The expected number of approvals can be found by looking at the expected value of each $ p_i $ being greater than or equal to $ \frac{1}{m} $. + - For a sum-normalized vector, the average value of any $ p_i $ is $ \frac{1}{m} $. This is because the sum of all entries equals 1, and there are $ m $ entries. + +2. **Probability of Approval**: + - If the vector entries are randomly distributed, the probability of any given $ p_i $ being above the threshold is approximately 50%. This stems from the fact that the mean is $ \frac{1}{m} $, and assuming a uniform or symmetric distribution around this mean, half the entries would be above, and half below, in expectation. + +3. **Expected Number of Approvals**: + - Since each entry has a 50% chance of being above $ \frac{1}{m} $ in a uniform random distribution, the expected number of approved values is $ \frac{m}{2} $. + +Therefore, **on average, $ \frac{m}{2} $ values will be approved**. + +### Range of the Number of Approved Values + +The number of approved values can vary depending on how the preference scores are distributed. Here's the possible range: + +1. **Minimum Approved Values**: + - If all entries are below $ \frac{1}{m} $, then none would be approved. However, given the constraint that the vector sums to 1, at least one entry must be $ \frac{1}{m} $ or higher. Hence, the minimum number of approved values is **1**. + +2. **Maximum Approved Values**: + - The maximum occurs when as many values as possible are at least $ \frac{1}{m} $. In the extreme case, you could have all $ m $ entries equal $ \frac{1}{m} $ exactly, making them all approved. Thus, the maximum number of approved values is **m**. + +### Conclusion + +- **Average number of approved values**: $ \frac{m}{2} $. 
+- **Range of approved values**: From 1 (minimum) to $ m $ (maximum). + +Hence, in theory, voters can still approve between 1 and $ m $ options, +giving them the whole range of flexibility that approval voting offers. + +### Possibility for improvement + +We should consider implementing rule-specific voting into the agent's decision-making process +instead of leaving all rule-specifics to the aggregation process. +This would allow for a more realistic comparison of the rules. +For some rules, it would also give opportunities to significantly speed up the simulation process. \ No newline at end of file diff --git a/tests/test_approval_voting.py b/tests/test_approval_voting.py new file mode 100644 index 0000000..b260681 --- /dev/null +++ b/tests/test_approval_voting.py @@ -0,0 +1,172 @@ +import numpy as np +import time +from democracy_sim.social_welfare_functions import approval_voting +from tests.test_majority_rule import simple, paradoxical + +# TODO adapt to approval voting (state = merely copied from majority_rule.py) + +# Simple and standard cases +approval_simple_cases = [ + (simple, [2, 1, 0]), # TODO: Whats the expected result? 
+ (paradoxical, [0, 4, 3, 1, 2]) # TODO '' '' +] + +# Following "paradoxical" example is taken from +# https://pub.dss.in.tum.de/brandt-research/minpara.pdf +# +# 5 4 3 2 +# ------- +# a e d b +# c b c d +# b c b e +# d d e c +# e a a a + +def test_approval_voting(): + # Test predefined cases + for pref_table, expected in approval_simple_cases: + res_ranking = approval_voting(pref_table) + assert list(res_ranking) == expected + +# Cases with ties - "all equally possible" + +with_ties_all = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) + +with_overall_tie = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.3, 0.2, 0.1, 0.4], +]) + +with_ties_mixed = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.25, 0.25, 0.25, 0.25], + [0.3, 0.2, 0.1, 0.4], +]) + +all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] + +def test_equally_possible(cv_threshold=0.125): + for pref_rel in all_equally_possible: + cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) + print(f"CV: {cv}") + assert cv < cv_threshold + +# Cases with ties - "not all equally possible" +with_ties_unequal = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.4, 0.3, 0.2, 0.1], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) + +with_ties_all_ab = np.array([ + [0.3, 0.3, 0.2, 0.2], + [0.25, 0.25, 0.25, 0.25] + ]) # all possible (a or b up first is more likely) + +with_ties_ab = np.array([ + [0.3, 0.3, 0.2, 0.2], + [0.3, 0.3, 0.2, 0.2], + [0.25, 0.25, 0.25, 0.25] + ]) # all possible (a or b up first is more likely) + +with_ties_unequal = [with_ties_unequal, with_ties_all_ab, with_ties_ab] + +def test_with_ties_unequal(): + for pref_rel in with_ties_unequal: + cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) + 
print(f"CV: {cv}") + assert cv > 0.125 + +# Random matrix + +def random_pref_profile(num_agents, num_options): + rand_matrix = np.random.rand(num_agents, num_options) + # Normalize the matrix + matrix_rand = rand_matrix / rand_matrix.sum(axis=1, keepdims=True) + return matrix_rand + +def majority_rule_with_rand_matrix(num_agents, num_options, iterations=1000): + """ + Run majority rule with ties multiple times, check winners + and calculate the coefficient of variation (CV) of the winners. + :param num_agents: Number of agents. + :param num_options: Number of options. + :param iterations: Number of iterations. + ------- + :return: Dictionary of winner counts {option: count}. + """ + winner_counts = {} + for _ in range(iterations): + # Create random matrix + matrix_rand = random_pref_profile(num_agents, num_options) + ranking = majority_rule(matrix_rand) + winner = ranking[0] + # Count winners + winner_counts[winner] = winner_counts.get(winner, 0) + 1 + return winner_counts + + +def test_with_random_matrix_small(): + """ + Test majority rule on a small random matrix with many iterations. + """ + num_agents = np.random.randint(2, 200) + # Keep num options small to expect all options to win at least once. + num_options = np.random.randint(2, 90) + iterations = 100*num_options + start_time = time.time() + wc = majority_rule_with_rand_matrix(num_agents, num_options, iterations) + stop_time = time.time() + # Extract winners from winner-counts dictionary and sort them + sorted_winners = list(wc.keys()) + sorted_winners.sort() + assert sorted_winners == list(range(num_options)) + # Extract count values + counts = np.array(list(wc.values())) + # Calculate the coefficient of variation (CV) + cv = np.std(counts) / np.mean(counts) + assert cv < 0.125 + print(f"\nCV: {cv}") + # Print the time taken + elapsed_time = stop_time - start_time + print(f"\nTime taken: {elapsed_time:.2f} sec. On {iterations} iterations." 
+ f"With {num_agents} agents and {num_options} options.") + + +def test_with_random_matrix_large(): + """ + Test majority rule on a large random matrix (many agents, many options). + """ + num_its = 100 + num_agents = np.random.randint(1000, 3000) + num_options = np.random.randint(2000, 3000) + # Run majority rule test with random matrix + start_time = time.time() + wc = majority_rule_with_rand_matrix(num_agents, num_options, num_its) + stop_time = time.time() + # Len of winners should be approximately equal to the number of iterations + # because with a large number of options, winners should be mostly unique. + winners, counts = list(wc.keys()), list(wc.values()) + assert abs(np.mean(counts) - 1) < 0.1 + assert abs((len(winners) / num_its) - 1) < 0.1 + # Calculate the coefficient of variation (CV) + cv = np.std(counts) / np.mean(counts) + assert cv < 0.2 + # Print the time taken + elapsed_time = stop_time - start_time + print(f"\nTime taken: {elapsed_time:.2f} sec. On {num_its} iterations." 
+ f"With {num_agents} agents and {num_options} options.") diff --git a/tests/test_participation_agent.py b/tests/test_participation_area_agent.py similarity index 56% rename from tests/test_participation_agent.py rename to tests/test_participation_area_agent.py index 613b38d..e72a8e9 100644 --- a/tests/test_participation_agent.py +++ b/tests/test_participation_area_agent.py @@ -1,64 +1,16 @@ -from .test_participation_model import * -from democracy_sim.participation_model import Area -from democracy_sim.participation_agent import VoteAgent, combine_and_normalize -import numpy as np +import unittest import random +import numpy as np +from democracy_sim.participation_model import Area +from democracy_sim.participation_agent import VoteAgent +from .test_participation_model import TestParticipationModel, num_agents - -class TestVotingAgent(unittest.TestCase): +class TestArea(unittest.TestCase): def setUp(self): test_model = TestParticipationModel() test_model.setUp() self.model = test_model.model - personality = np.zeros(self.model.num_colors) - personality[0] = 0.3 - personality[1] = 0.7 - self.agent = VoteAgent(num_agents + 1, self.model, pos=(0, 0), - personality=personality, assets=25) - self.additional_test_area = Area(self.model.num_areas + 1, - model=self.model, height=5, - width=5, size_variance=0) - self.additional_test_area.idx_field = (0, 0) # Place the area at (0, 0) - - def test_combine_and_normalize_rank_arrays(self): - print("Test function combine_and_normalize_estimates") - a = np.array([0.0, 0.2, 0.7, 0.5, 0.1, 0.8, 1.0]) - a_rank = np.argsort(a) - print(f"Ranking of a: {a_rank}") - b = np.array([1.0, 0.2, 0.7, 0.5, 0.1, 0.8, 0.0]) - b_rank = np.argsort(b) - print(f"Ranking of b: {b_rank}") - factors = [0.0, 0.2, 0.5, 1.0] - for f in factors: - result = combine_and_normalize(a, b, f) - result_rank = np.argsort(result) - print(f"Ranking of r with factor {f}: {result_rank}") - - def test_combine_and_normalize(self): - a = self.agent - test_area = 
self.additional_test_area - assert a in test_area.agents # Test if agent is present - # Give the agent some cells to know of - k = random.sample(range(2, len(test_area.cells)), 1)[0] - print(f"Sample size: {k}") - a.known_cells = random.sample(test_area.cells, k) - est_dist = a.estimate_real_distribution(test_area) - own_prefs = a.personality - # own_prefs = np.array([0.25, 0.5, 0.0, 0.0]) # Should also work.. - print(f"Agent {a.unique_id}s' personality: {own_prefs}" - f" and estimated color distribution: {est_dist}") - for a_factor in [0.0, 0.2, 0.5, 1.0]: - comb = combine_and_normalize(own_prefs, est_dist, a_factor) - print(f"Assumed opt. distribution with factor {a_factor}: \n{comb}") - # Validation - if a_factor == 0.0: - self.assertEqual(list(comb), list(est_dist)) - elif a_factor == 1.0: - if sum(own_prefs) != 1.0: - own_prefs = own_prefs / sum(own_prefs) - self.assertEqual(list(comb), list(own_prefs)) - self.assertTrue(np.isclose(sum(comb), 1.0, atol=1e-8)) def test_update_color_distribution(self): rand_area = random.sample(self.model.area_scheduler.agents, 1)[0] @@ -95,10 +47,19 @@ def test_filter_cells(self): def test_estimate_real_distribution(self): # Get any existing area existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] - test_area = self.additional_test_area + # Additional area and agent + personality = np.zeros(self.model.num_colors) + personality[0] = 0.3 + personality[1] = 0.7 + a = VoteAgent(num_agents + 1, self.model, pos=(0, 0), + personality=personality, assets=25) + additional_test_area = Area(self.model.num_areas + 1, + model=self.model, height=5, + width=5, size_variance=0) + additional_test_area.idx_field = (0, 0) # Place the area at (0, 0) + test_area = additional_test_area print(f"Test-Area: id={test_area.unique_id}, width={test_area._width}," f" height={test_area._height}, idx={test_area.idx_field}") - a = self.agent assert a in test_area.agents # Test if agent is present print(f"Agent {a.unique_id} is in area 
{test_area.unique_id}") print(f"Areas color-cells: {[c.unique_id for c in test_area.cells]}") @@ -127,20 +88,3 @@ def test_estimate_real_distribution(self): expected_distribution = [i / s for i in counts] print(f"Expected distribution: {expected_distribution}") self.assertEqual(list(est_distribution), expected_distribution) - - def test_compute_assumed_opt_dist(self): - a = self.agent - test_area = self.additional_test_area - # Give the agent some cells to know of - max_size = len(test_area.cells) - k = random.sample(range(2, max_size), 1)[0] - a.known_cells = random.sample(test_area.cells, k=k) - est_dist = a.estimate_real_distribution(test_area) - own_prefs = a.personality - print(f"The agents\npersonality: {own_prefs} \nest_dist : {est_dist}") - r = a.compute_assumed_opt_dist(test_area) - print(f"Assumed optimal distribution: {r}") - self.assertTrue(np.isclose(sum(r), 1.0, atol=1e-8)) - - - diff --git a/tests/test_participation_voting_agent.py b/tests/test_participation_voting_agent.py new file mode 100644 index 0000000..2ab326d --- /dev/null +++ b/tests/test_participation_voting_agent.py @@ -0,0 +1,78 @@ +from .test_participation_model import * +from democracy_sim.participation_model import Area +from democracy_sim.participation_agent import VoteAgent, combine_and_normalize +import numpy as np +import random + + +class TestVotingAgent(unittest.TestCase): + + def setUp(self): + test_model = TestParticipationModel() + test_model.setUp() + self.model = test_model.model + personality = np.zeros(self.model.num_colors) + personality[0] = 0.3 + personality[1] = 0.7 + self.agent = VoteAgent(num_agents + 1, self.model, pos=(0, 0), + personality=personality, assets=25) + self.additional_test_area = Area(self.model.num_areas + 1, + model=self.model, height=5, + width=5, size_variance=0) + self.additional_test_area.idx_field = (0, 0) # Place the area at (0, 0) + + def test_combine_and_normalize_rank_arrays(self): + print("Test function combine_and_normalize_estimates") + a 
= np.array([0.0, 0.2, 0.7, 0.5, 0.1, 0.8, 1.0]) + a_rank = np.argsort(a) + print(f"Ranking of a: {a_rank}") + b = np.array([1.0, 0.2, 0.7, 0.5, 0.1, 0.8, 0.0]) + b_rank = np.argsort(b) + print(f"Ranking of b: {b_rank}") + factors = [0.0, 0.2, 0.5, 1.0] + for f in factors: + result = combine_and_normalize(a, b, f) + result_rank = np.argsort(result) + print(f"Ranking of r with factor {f}: {result_rank}") + + def test_combine_and_normalize(self): + a = self.agent + test_area = self.additional_test_area + assert a in test_area.agents # Test if agent is present + # Give the agent some cells to know of + k = random.sample(range(2, len(test_area.cells)), 1)[0] + print(f"Sample size: {k}") + a.known_cells = random.sample(test_area.cells, k) + est_dist = a.estimate_real_distribution(test_area) + own_prefs = a.personality + # own_prefs = np.array([0.25, 0.5, 0.0, 0.0]) # Should also work.. + print(f"Agent {a.unique_id}s' personality: {own_prefs}" + f" and estimated color distribution: {est_dist}") + for a_factor in [0.0, 0.2, 0.5, 1.0]: + comb = combine_and_normalize(own_prefs, est_dist, a_factor) + print(f"Assumed opt. 
distribution with factor {a_factor}: \n{comb}") + # Validation + if a_factor == 0.0: + self.assertEqual(list(comb), list(est_dist)) + elif a_factor == 1.0: + if sum(own_prefs) != 1.0: + own_prefs = own_prefs / sum(own_prefs) + self.assertEqual(list(comb), list(own_prefs)) + self.assertTrue(np.isclose(sum(comb), 1.0, atol=1e-8)) + + def test_compute_assumed_opt_dist(self): + a = self.agent + test_area = self.additional_test_area + # Give the agent some cells to know of + max_size = len(test_area.cells) + k = random.sample(range(2, max_size), 1)[0] + a.known_cells = random.sample(test_area.cells, k=k) + est_dist = a.estimate_real_distribution(test_area) + own_prefs = a.personality + print(f"The agents\npersonality: {own_prefs} \nest_dist : {est_dist}") + r = a.compute_assumed_opt_dist(test_area) + print(f"Assumed optimal distribution: {r}") + self.assertTrue(np.isclose(sum(r), 1.0, atol=1e-8)) + + + From 86968a9083ce7dfe781eac685ba5086b51b551be Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 23 Sep 2024 17:38:52 +0200 Subject: [PATCH 15/38] conduct_election is implemented - needs to be tested thoroughly, started to implement viewing more statistics but is not working yet --- democracy_sim/distance_functions.py | 74 ++++++++++++++++- democracy_sim/model_setup.py | 41 +++++++--- democracy_sim/participation_agent.py | 24 ++++-- democracy_sim/participation_model.py | 98 +++++++++++++++++------ democracy_sim/run.py | 7 +- democracy_sim/social_welfare_functions.py | 5 ++ tests/test_distance_functions.py | 15 ++-- tests/test_participation_area_agent.py | 11 +++ tests/test_participation_model.py | 16 ++-- 9 files changed, 230 insertions(+), 61 deletions(-) diff --git a/democracy_sim/distance_functions.py b/democracy_sim/distance_functions.py index 333a92c..b71d42b 100644 --- a/democracy_sim/distance_functions.py +++ b/democracy_sim/distance_functions.py @@ -1,8 +1,13 @@ +from math import comb + import numpy as np -def kendall_tau(rank_arr_1, rank_arr_2, search_pairs, 
color_vec): +def kendall_tau_on_ranks(rank_arr_1, rank_arr_2, search_pairs, color_vec): """ + DON'T USE + (don't use for orderings!) + This function calculates the kendal tau distance between two rank vektors. (The Kendall tau rank distance is a metric that counts the number of pairwise disagreements between two ranking lists. @@ -38,13 +43,13 @@ def kendall_tau(rank_arr_1, rank_arr_2, search_pairs, color_vec): return kendall_distance -def kendall_tau_on_orderings(ordering_1, ordering_2, search_pais): +def unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs): """ This function calculates the kendal tau distance on two orderings. An ordering holds the option names in the order of their rank (rank=index). :param ordering_1: First (NumPy) array containing ranked options :param ordering_2: The second ordering array - :param search_pais: The pairs of indices (for efficiency) + :param search_pairs: The pairs of indices (for efficiency) :return: The kendall tau distance """ # Rename the elements to reduce the problem to counting inversions @@ -52,14 +57,48 @@ def kendall_tau_on_orderings(ordering_1, ordering_2, search_pais): renamed_arr_2 = np.array([mapping[option] for option in ordering_2]) # Count inversions using precomputed pairs kendall_distance = 0 - for i, j in search_pais: + for i, j in search_pairs: if renamed_arr_2[i] > renamed_arr_2[j]: kendall_distance += 1 return kendall_distance +def kendall_tau(ordering_1, ordering_2, model): + """ + This calculates the normalized Kendall tau distance of two orderings. + The Kendall tau rank distance is a metric that counts the number + of pairwise disagreements between two ranking lists. + The larger the distance, the more dissimilar the two lists are. + Kendall tau distance is also called bubble-sort distance. + An ordering holds the option names in the order of their rank (rank=index). 
+ :param ordering_1: First (NumPy) array containing ranked options + :param ordering_2: The second ordering array + :param model: Containing the pairs of indices (for efficiency) + :return: The kendall tau distance + """ + # TODO: remove these tests (comment out) on actual simulations to speed up + n = ordering_1.size + if n > 0: + expected_arr = np.arange(n) + assert (np.array_equal(np.sort(ordering_1), expected_arr) + and np.array_equal(np.sort(ordering_2), expected_arr)) , \ + f"Error: Sequences {ordering_1}, {ordering_2} aren't comparable." + + # Get the unnormalized Kendall tau distance + search_pairs = model.search_pairs + dist = unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs) + # Maximum possible Kendall tau distance + max_distance = comb(n, 2) # This is n choose 2, or n(n-1)/2 + # Normalize the distance + normalized_distance = dist / max_distance + + return normalized_distance + + def spearman_distance(rank_arr_1, rank_arr_2): """ + Beware: don't use for orderings! + This function calculates the Spearman distance between two rank vektors. Spearman's foot rule is a measure of the distance between ranked lists. It is given as the sum of the absolute differences between the ranks @@ -78,3 +117,30 @@ def spearman_distance(rank_arr_1, rank_arr_2): and rank_arr_1.max() == rank_arr_2.max()), \ f"Error: Sequences {rank_arr_1}, {rank_arr_2} aren't comparable." return np.sum(np.abs(rank_arr_1 - rank_arr_2)) + + +def spearman(ordering_1, ordering_2, model): + """ + This calculates the normalized Spearman distance between two orderings. + Spearman's foot rule is a measure of the distance between ranked lists. + It is given as the sum of the absolute differences between the ranks + of the two orderings (values from 0 to n-1 in any order). 
+ :param ordering_1: First (NumPy) array containing the ranks of each option + :param ordering_2: The second rank array + :param model: The mesa model + :return: The Spearman distance + """ + # TODO: remove these tests (comment out) on actual simulations to speed up + n = ordering_1.size + if n > 0: + expected_arr = np.arange(n) + assert (np.array_equal(np.sort(ordering_1), expected_arr) + and np.array_equal(np.sort(ordering_2), expected_arr)) , \ + f"Error: Sequences {ordering_1}, {ordering_2} aren't comparable." + distance = np.sum(np.abs(ordering_1 - ordering_2)) + # Normalize + if n % 2 == 0: # Even number of elements + max_dist = n**2 / 2 + else: # Odd number of elements + max_dist = n * (n - 1) / 2 + return distance / max_dist diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index ac95228..c918c5c 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -6,11 +6,11 @@ from math import comb import mesa from mesa.visualization.modules import ChartModule -from democracy_sim.participation_model import ParticipationModel +from democracy_sim.participation_model import (ParticipationModel, + distance_functions, + social_welfare_functions) from democracy_sim.participation_agent import ColorCell, VoteAgent import matplotlib.pyplot as plt -from democracy_sim.distance_functions import * -from democracy_sim.social_welfare_functions import * # Parameters @@ -18,11 +18,10 @@ # Elections # ############# election_costs = 5 +max_reward = 50 # Voting rules (see social_welfare_functions.py) -voting_rules = [majority_rule, approval_voting] rule_idx = 0 # Distance functions (see distance_functions.py) -distance_functions = [spearman_distance, kendall_tau] distance_idx = 0 #################### # Model parameters # @@ -139,9 +138,10 @@ def draw_color_dist_bars(color_distributions): plt.show() -a_chart = mesa.visualization.ChartModule([{"Label": "Number of agents", - "Color": "Black"}], - data_collector_name='datacollector') 
+voter_turnout_example = mesa.visualization.ChartModule( + [{"Label": "Voter turnout in % (first area)", + "Color": "Black"}], + data_collector_name='datacollector') wealth_chart = mesa.visualization.modules.ChartModule( @@ -149,17 +149,29 @@ def draw_color_dist_bars(color_distributions): data_collector_name='datacollector' ) +color_distribution_chart = mesa.visualization.modules.ChartModule( + [{"Label": str(i), "Color": _COLORS[i]} for i in range(num_colors)], + data_collector_name='datacollector' +) + +# Agent charts + +voter_turnout_chart = mesa.visualization.ChartModule( + [{"Label": "Voter Turnout", "Color": "Black"}], + data_collector_name='datacollector' +) + model_params = { "height": grid_rows, "width": grid_cols, "draw_borders": mesa.visualization.Checkbox( name="Draw border cells", value=draw_borders ), - "voting_rule": mesa.visualization.Slider( - name=f"Rule index {[r.__name__ for r in voting_rules]}", value=rule_idx, - min_value=0, max_value=len(voting_rules)-1, + "rule_idx": mesa.visualization.Slider( + name=f"Rule index {[r.__name__ for r in social_welfare_functions]}", + value=rule_idx, min_value=0, max_value=len(social_welfare_functions)-1, ), - "distance_func": mesa.visualization.Slider( + "distance_idx": mesa.visualization.Slider( name=f"Rule index {[f.__name__ for f in distance_functions]}", value=distance_idx, min_value=0, max_value=len(distance_functions)-1, ), @@ -167,6 +179,11 @@ def draw_color_dist_bars(color_distributions): name="Election costs", value=election_costs, min_value=0, max_value=100, step=1, description="The costs for participating in an election" ), + "max_reward": mesa.visualization.Slider( + name="Maximal reward", value=max_reward, min_value=0, + max_value=election_costs*100, + step=1, description="The costs for participating in an election" + ), "num_agents": mesa.visualization.Slider( name="# Agents", value=num_agents, min_value=10, max_value=99999, step=10 diff --git a/democracy_sim/participation_agent.py 
b/democracy_sim/participation_agent.py index dcf6f00..4a1e787 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -59,6 +59,10 @@ def __init__(self, unique_id, model, pos, personality, assets=1): # Place the agent on the grid model.grid.place_agent(self, pos) + def __str__(self): + return (f"Agent(id={self.unique_id}, pos={self.pos}, " + f"personality={self.personality}, assets={self.assets})") + @property def col(self): """Return the col location of this cell.""" @@ -89,8 +93,8 @@ def ask_for_participation(self, area): :param area: The area in which the election takes place. :return: True if the agent decides to participate, False otherwise """ - print("Agent", self.unique_id, "decides whether to participate", - "in election of area", area.unique_id) + #print("Agent", self.unique_id, "decides whether to participate", + # "in election of area", area.unique_id) # TODO Implement this (is to be decided upon a learned decision tree) return random.choice([True, False]) @@ -101,7 +105,7 @@ def decide_altruism_factor(self, area): # TODO Implement this (is to be decided upon a learned decision tree) # This part is important - also for monitoring - save/plot a_factors a_factor = random.uniform(0.0, 1.0) - print(f"Agent {self.unique_id} has altruism factor: {a_factor}") + #print(f"Agent {self.unique_id} has altruism factor: {a_factor}") return a_factor def compute_assumed_opt_dist(self, area): @@ -125,19 +129,27 @@ def vote(self, area): """ The agent votes in the election of a given area, i.e., she returns a preference ranking vector over all options. + (Options are indexes, values are preference values defining the order). The available options are set in the model. :param area: The area in which the election takes place. """ # TODO Implement this (is to be decided upon a learned decision tree) # Compute the color distribution that is assumed to be the best choice. 
- est_best_dist = self.compute_assumed_opt_dist(area) + # TODO est_best_dist = self.compute_assumed_opt_dist(area) # Make sure that r= is normalized! # (r.min()=0.0 and r.max()=1.0 and all vals x are within [0.0, 1.0]!) ############## if TYPE_CHECKING: # Type hint for IDEs self.model = cast(ParticipationModel, self.model) - r = self.model.options[random.choice(self.model.options.shape[0])] - print("Agent", self.unique_id, "voted:", r) + # For TESTING we just shuffle the option vector (ints) then normalize + # and interpret the result as a preference vector (values=prefs) + # (makes no sense, but it'll work for testing) + r = np.arange(self.model.options.shape[0]) + # Shuffle the array in place + np.random.shuffle(r) + r = np.array(r, dtype=float) + r /= r.sum() + #print("Agent", self.unique_id, "voted:", r) return r # def step(self): diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index d801337..96f55bf 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -6,6 +6,13 @@ from democracy_sim.participation_agent import VoteAgent, ColorCell import numpy as np from itertools import permutations, product, combinations +from democracy_sim.social_welfare_functions import majority_rule, approval_voting +from democracy_sim.distance_functions import spearman, kendall_tau + +# Voting rules to be accessible by index +social_welfare_functions = [majority_rule, approval_voting] +# Distance functions +distance_functions = [spearman, kendall_tau] class Area(Agent): @@ -37,8 +44,14 @@ def __init__(self, unique_id, model, height, width, size_variance): self.agents = [] self.cells = [] self._idx_field = None # An indexing position of the area in the grid - self.color_distribution = [0] * model.num_colors # Initialize to 0 - self.voted_distribution = [0] * model.num_colors + self.color_distribution = np.zeros(model.num_colors) # Initialize to 0 + self.voted_distribution = np.zeros(model.num_colors) + 
self.voter_turnout = 0 # In percent + + def __str__(self): + return (f"Area(id={self.unique_id}, size={self._height}x{self._width}, " + f"num_agents={len(self.agents)}, num_cells={len(self.cells)}, " + f"color_distribution={self.color_distribution})") @property def idx_field(self): @@ -94,7 +107,18 @@ def add_agent(self, agent): def add_cell(self, cell): self.cells.append(cell) - def conduct_election(self, voting_rule, distance_func): + def curr_norm_dist(self): + """ + This method calculates the current distance of the area's real color + distribution (as ordering) + to the latest voted distribution ordering. + It uses the models distance function. + """ + real_color_ord = np.argsort(self.color_distribution) + voted_ord = self.voted_distribution + return self.model.distance_func(real_color_ord, voted_ord, self.model) + + def conduct_election(self): """ This method holds the primary logic of the simulation by simulating the election in the area as well as handling the payments and rewards. @@ -105,26 +129,37 @@ def conduct_election(self, voting_rule, distance_func): preference_profile = [] for agent in self.agents: if agent.ask_for_participation(area=self): + # TODO: if agent cant afford she cant participate participating_agents.append(agent) # collect the participation fee from the agents agent.assets = agent.assets - self.model.election_costs # Ask participating agents for their prefs preference_profile.append(agent.vote(area=self)) preference_profile = np.array(preference_profile) - # Aggregate the prefs using the voting rule + # Aggregate the prefs using the v-rule => returns an option ordering # TODO: How to deal with ties (Have to fulfill neutrality!!)?? 
- aggregated_prefs = voting_rule(preference_profile) + aggregated = self.model.voting_rule(preference_profile) # Save the "elected" distribution in self.voted_distribution - winning_option = aggregated_prefs[0] + winning_option = aggregated[0] self.voted_distribution = self.model.options[winning_option] # Calculate the distance to the real distribution using distance_func - distance_factor = distance_func(self.voted_distribution, - self.color_distribution) - # calculate the rewards for the agents - pass - # TODO - # distribute the rewards - # TODO + normalized_dist = self.curr_norm_dist() + # Calculate the rewards per agent + reward_pa = (1 - normalized_dist) * self.model.max_reward + # Distribute the two types of rewards + for agent in self.agents: + # Personality-based reward + # TODO: Calculate value p\in(0,1) based on how well the consensus fits the personality of the agent (should better be fast) + p = random.uniform(0, 1) + # + Common reward (reward_pa) for all agents + agent.assets = agent.assets + p * reward_pa + reward_pa + # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process + # Statistics + self.voter_turnout = int((len(participating_agents) / + len(self.agents)) * 100) # In percent + + + def update_color_distribution(self): """ @@ -152,7 +187,16 @@ def filter_cells(self, cell_list): def step(self) -> None: self.update_color_distribution() - self.conduct_election(self.model.voting_rule, self.model.distance_func) + self.conduct_election() + self.model.datacollector.add_table_row( + "AreaData", + { + "Step": self.model.schedule.time, + "AreaID": self.unique_id, + "ColorDistribution": self.color_distribution.tolist(), + "VoterTurnout": self.voter_turnout + } + ) def compute_collective_assets(model): @@ -160,13 +204,13 @@ def compute_collective_assets(model): return sum_assets -def get_num_agents(model): - return len(model.voting_agents) +def 
get_voter_turnout(model): + return model.area_scheduler.agents[0].voter_turnout # Assuming one area def get_area_color_distributions(model): - return {area.unique_id: area.color_distribution - for area in model.area_scheduler.agents} + data = {f"Share of color {i}": model.area_scheduler.agents[0].color_distribution[i] for i in range(model.num_colors)} + return data def color_by_dst(color_distribution) -> int: @@ -236,7 +280,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, num_personality_colors, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, - voting_rule, distance_func, election_costs): + rule_idx, distance_idx, election_costs, max_reward): super().__init__() self.height = height self.width = width @@ -267,8 +311,9 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.color_dst = self.create_color_distribution(heterogeneity) # Elections self.election_costs = election_costs - self.voting_rule = voting_rule - self.distance_func = distance_func + self.max_reward = max_reward + self.voting_rule = social_welfare_functions[rule_idx] + self.distance_func = distance_functions[distance_idx] self.options = create_all_options(num_colors) # Create search pairs once for faster iterations when comparing rankings self.search_pairs = combinations(range(0, self.options.size), 2) # TODO check if correct! 
@@ -290,10 +335,17 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.datacollector = mesa.DataCollector( model_reporters={ "Collective assets": compute_collective_assets, - "Number of agents": get_num_agents, + "Voter turnout (first area)": get_voter_turnout, "Area Color Distributions": get_area_color_distributions, }, - agent_reporters={"Wealth": lambda ag: getattr(ag, "assets", None)}, + agent_reporters={ + "Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, + "Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else None, + }, + tables={ + "AreaData": ["Step", "AreaID", "ColorDistribution", + "VoterTurnout"] + } ) # Adjust the color pattern to make it less random (see color patches) self.adjust_color_pattern(color_patches_steps, patch_power) diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 6b2bfaf..6e44358 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -1,11 +1,14 @@ # import webbrowser import mesa from democracy_sim.participation_model import ParticipationModel -from democracy_sim.model_setup import model_params, canvas_element, a_chart, wealth_chart +from democracy_sim.model_setup import (model_params, canvas_element, + voter_turnout_example, wealth_chart, + color_distribution_chart) server = mesa.visualization.ModularServer( model_cls=ParticipationModel, - visualization_elements=[canvas_element, wealth_chart, a_chart], + visualization_elements=[canvas_element, wealth_chart, voter_turnout_example, + color_distribution_chart], name="DemocracySim", model_params=model_params, ) diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index eb43ef4..d325922 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -64,6 +64,8 @@ def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): def majority_rule(pref_table): """ This function implements 
the majority rule social welfare function. + Beware: Input is a preference table (values define a ranking, index=option), + but the output is a ranking/an ordering (values represent options). :param pref_table: The agent's preferences as a NumPy matrix containing the normalized ranking vectors of all agents. :return: The resulting preference ranking (beware: its not a pref. relation) @@ -115,6 +117,8 @@ def preprocessing_for_approval(pref_table, threshold=None): def approval_voting(pref_table): """ This function implements the approval voting social welfare function. + Beware: Input is a preference table (values define a ranking, index=option), + but the output is a ranking/an ordering (values represent options). :param pref_table: The agent's preferences as a NumPy matrix containing the normalized ranking vectors of all agents. :return: The resulting preference ranking (beware: not a pref. relation). @@ -128,3 +132,4 @@ def approval_voting(pref_table): #option_count_pairs.sort(key=lambda x: x[1], reverse=True) #return [pair[0] for pair in option_count_pairs] return np.argsort(-(approval_counts + noise)) + diff --git a/tests/test_distance_functions.py b/tests/test_distance_functions.py index 52ba71f..cff76c6 100644 --- a/tests/test_distance_functions.py +++ b/tests/test_distance_functions.py @@ -6,9 +6,11 @@ class TestKendallTauDistance(unittest.TestCase): - def test_kendall_tau(self): + #TODO test normalized version - print("TEST kendall_tau function") + def test_kendall_tau_on_ranks(self): + + print("TEST kendall_tau_on_ranks function") # Test cases kendall tau (rank-vektors) sequences = [ @@ -80,13 +82,14 @@ def test_kendall_tau(self): item_vec = np.arange(n) # assert set(np.unique(seq1)) == set(np.unique(seq2)), \ # "Test failed: sequences must have the same elements" - d = kendall_tau(np.array(seq1), np.array(seq2), pairs, item_vec) + d = kendall_tau_on_ranks(np.array(seq1), np.array(seq2), + pairs, item_vec) print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: 
{expected}, Got: {d}") assert d == expected, f"Test failed for input {seq1}, {seq2}" def test_kendall_tau_on_orderings(self): - print("\nTEST kendall_tau_on_orderings function\n") + print("\nTEST kendall_tau_on_orderings (not normalized) function\n") # Test cases kendall tau (on orderings) ordering_seqs = [ @@ -119,13 +122,15 @@ def test_kendall_tau_on_orderings(self): "Test failed: sequences must have the same length" pairs = combinations(range(0, n), 2) # Test the ordering version - d = kendall_tau_on_orderings(np.array(seq1), np.array(seq2), pairs) + d = unnormalized_kendall_tau(np.array(seq1), np.array(seq2), pairs) print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: {expected}, Got: {d}") assert d == expected, f"Test failed for input {seq1}, {seq2}" class TestSpearmanDistance(unittest.TestCase): + #TODO test normalized version + def test_spearman_distance(self): print("\nTEST spearman_distance function\n") diff --git a/tests/test_participation_area_agent.py b/tests/test_participation_area_agent.py index e72a8e9..8359a9c 100644 --- a/tests/test_participation_area_agent.py +++ b/tests/test_participation_area_agent.py @@ -4,6 +4,9 @@ from democracy_sim.participation_model import Area from democracy_sim.participation_agent import VoteAgent from .test_participation_model import TestParticipationModel, num_agents +from democracy_sim.social_welfare_functions import majority_rule, approval_voting +from democracy_sim.distance_functions import kendall_tau, spearman + class TestArea(unittest.TestCase): @@ -44,6 +47,14 @@ def test_filter_cells(self): area_cell_sample += add_cells self.assertEqual(area_cell_sample, filtered_cells) + def test_conduct_election(self): + area = random.sample(self.model.area_scheduler.agents, 1)[0] + area.conduct_election(majority_rule, spearman) + area.conduct_election(approval_voting, spearman) + area.conduct_election(majority_rule, kendall_tau) + area.conduct_election(approval_voting, kendall_tau) + # TODO + def 
test_estimate_real_distribution(self): # Get any existing area existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index 09237c5..3d05735 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -4,20 +4,17 @@ num_agents, num_colors, num_areas, num_personalities, num_personality_colors as npc, - draw_borders, rule_idx, voting_rules, - distance_idx, distance_functions, + draw_borders, rule_idx, distance_idx, color_heterogeneity as heterogeneity, color_patches_steps, av_area_height, av_area_width, area_size_variance, - patch_power, election_costs) + patch_power, election_costs, max_reward) import mesa class TestParticipationModel(unittest.TestCase): def setUp(self): - voting_rule = voting_rules[rule_idx] - distance_func = distance_functions[distance_idx] self.model = ParticipationModel(height=height, width=width, num_agents=num_agents, num_colors=num_colors, @@ -26,14 +23,15 @@ def setUp(self): num_areas=num_areas, draw_borders=draw_borders, election_costs=election_costs, - voting_rule=voting_rule, - distance_func=distance_func, + rule_idx=rule_idx, + distance_idx=distance_idx, heterogeneity=heterogeneity, color_patches_steps=color_patches_steps, av_area_height=av_area_height, av_area_width=av_area_width, area_size_variance=area_size_variance, - patch_power=patch_power) + patch_power=patch_power, + max_reward=max_reward) def test_initialization(self): areas_count = len([area for area in self.model.area_scheduler.agents @@ -51,7 +49,7 @@ def test_model_options(self): self.assertEqual(self.model.area_size_variance, area_size_variance) self.assertEqual(self.model.draw_borders, draw_borders) self.assertEqual(self.model.heterogeneity, heterogeneity) - v_rule = voting_rules[rule_idx] + v_rule = social_welfare_functions[rule_idx] dist_func = distance_functions[distance_idx] self.assertEqual(self.model.voting_rule, v_rule) 
self.assertEqual(self.model.distance_func, dist_func) From 3d686da9900d6ef43e273c2fb1bbde0406f4b74e Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 26 Sep 2024 14:32:51 +0200 Subject: [PATCH 16/38] color_distribution_chart implemented (works only for four colors so far) --- democracy_sim/model_setup.py | 32 +++++------ democracy_sim/participation_model.py | 79 ++++++++++++++++++++-------- democracy_sim/run.py | 6 +-- 3 files changed, 76 insertions(+), 41 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index c918c5c..2a687cf 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -114,8 +114,10 @@ def participation_draw(cell: ColorCell): if color == "White": portrayal["Color"] = "LightGrey" if cell.num_agents_in_cell > 0: - portrayal["text"] = str(cell.num_agents_in_cell) + portrayal[f"text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" + for area in cell.areas: + portrayal[f"Area {area.unique_id}"] = f"color dist: {area.color_distribution}" return portrayal @@ -137,29 +139,29 @@ def draw_color_dist_bars(color_distributions): plt.xticks(range(len(color_distributions))) plt.show() - -voter_turnout_example = mesa.visualization.ChartModule( - [{"Label": "Voter turnout in % (first area)", - "Color": "Black"}], - data_collector_name='datacollector') - +color_distribution_chart = mesa.visualization.modules.ChartModule( + [{"Label": f"Color {i}", + "Color": "LightGrey" if _COLORS[i] == "White" else _COLORS[i]} for i in + range(num_colors)], data_collector_name='datacollector' +) wealth_chart = mesa.visualization.modules.ChartModule( [{"Label": "Collective assets", "Color": "Black"}], data_collector_name='datacollector' ) -color_distribution_chart = mesa.visualization.modules.ChartModule( - [{"Label": str(i), "Color": _COLORS[i]} for i in range(num_colors)], - data_collector_name='datacollector' -) +voter_turnout = mesa.visualization.ChartModule( + [{"Label": "Voter turnout in percent", + 
"Color": "Black"}], + data_collector_name='datacollector') + # Agent charts -voter_turnout_chart = mesa.visualization.ChartModule( - [{"Label": "Voter Turnout", "Color": "Black"}], - data_collector_name='datacollector' -) +# voter_turnout_chart = mesa.visualization.ChartModule( +# [{"Label": "Voter Turnout", "Color": "Black"}], +# data_collector_name='datacollector' +# ) model_params = { "height": grid_rows, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 96f55bf..3055723 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -136,8 +136,7 @@ def conduct_election(self): # Ask participating agents for their prefs preference_profile.append(agent.vote(area=self)) preference_profile = np.array(preference_profile) - # Aggregate the prefs using the v-rule => returns an option ordering - # TODO: How to deal with ties (Have to fulfill neutrality!!)?? + # Aggregate the prefs using the v-rule ⇒ returns an option ordering aggregated = self.model.voting_rule(preference_profile) # Save the "elected" distribution in self.voted_distribution winning_option = aggregated[0] @@ -188,29 +187,36 @@ def filter_cells(self, cell_list): def step(self) -> None: self.update_color_distribution() self.conduct_election() - self.model.datacollector.add_table_row( - "AreaData", - { - "Step": self.model.schedule.time, - "AreaID": self.unique_id, - "ColorDistribution": self.color_distribution.tolist(), - "VoterTurnout": self.voter_turnout - } - ) + # self.model.datacollector.add_table_row( + # "AreaData", + # { + # "Step": self.model.schedule.time, + # "AreaID": self.unique_id, + # "ColorDistribution": self.color_distribution.tolist(), + # "VoterTurnout": self.voter_turnout + # } + # ) def compute_collective_assets(model): sum_assets = sum(agent.assets for agent in model.voting_agents) return sum_assets +def get_area_by_id(model, agent_id): + """TODO: rm (only for testing)""" + return next(area for area in 
model.area_scheduler.agents + if area.unique_id == agent_id) def get_voter_turnout(model): - return model.area_scheduler.agents[0].voter_turnout # Assuming one area - + area = get_area_by_id(model, 1) # For testing.. + return area.voter_turnout # Assuming one area def get_area_color_distributions(model): - data = {f"Share of color {i}": model.area_scheduler.agents[0].color_distribution[i] for i in range(model.num_colors)} - return data + area = get_area_by_id(model, 1) # For testing.. + # d = {f"Color {i}": area.color_distribution[i] for i in range(model.num_colors)} + # print(f"Area {area} has color distribution:\n{d}") + d = area.color_distribution + return d def color_by_dst(color_distribution) -> int: @@ -309,6 +315,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.draw_borders = draw_borders # Color distribution (global) self.color_dst = self.create_color_distribution(heterogeneity) + self._av_area_color_dst = self.color_dst # Elections self.election_costs = election_costs self.max_reward = max_reward @@ -332,26 +339,37 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.area_size_variance = area_size_variance self.initialize_areas() # Data collector + color_data = { + f"Color {i}": (lambda i=i: lambda m: m.av_area_color_dst[i])() for i + in range(self.num_colors)} self.datacollector = mesa.DataCollector( model_reporters={ "Collective assets": compute_collective_assets, - "Voter turnout (first area)": get_voter_turnout, - "Area Color Distributions": get_area_color_distributions, + "Voter turnout in percent": get_voter_turnout, + **color_data }, agent_reporters={ - "Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, - "Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else None, + #"Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, + #"Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else 
None, }, - tables={ - "AreaData": ["Step", "AreaID", "ColorDistribution", - "VoterTurnout"] - } + #tables={ + # "AreaData": ["Step", "AreaID", "ColorDistribution", + # "VoterTurnout"] + #} ) # Adjust the color pattern to make it less random (see color patches) self.adjust_color_pattern(color_patches_steps, patch_power) # Collect initial data self.datacollector.collect(self) + @property + def av_area_color_dst(self): + return self._av_area_color_dst + + @av_area_color_dst.setter + def av_area_color_dst(self, value): + self._av_area_color_dst = value + def initialize_color_cells(self): """ This method initializes a color cells for each cell in the model's grid. @@ -456,6 +474,8 @@ def step(self): self.area_scheduler.step() # Mutate the color cells according to election outcomes self.color_cell_scheduler.step() + # Update the global color distribution + self.update_av_area_color_dst() # Collect data for monitoring and data analysis self.datacollector.collect(self) @@ -518,3 +538,16 @@ def color_patches(self, cell, patch_power): if count == max_count] return self.random.choice(most_common_colors) return cell.color # Return the cell's own color if no consensus + + def update_av_area_color_dst(self): + """ + This method updates the av_area_color_dst attribute of the model. + Beware: On overlapping areas, cells are counted several times. 
+ """ + sums = np.zeros(self.num_colors) + for area in self.area_scheduler.agents: + if TYPE_CHECKING: + area = cast(Area, area) + sums += area.color_distribution + # Return the average color distributions + self.av_area_color_dst = sums / len(self.area_scheduler.agents) diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 6e44358..5c28871 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -2,13 +2,13 @@ import mesa from democracy_sim.participation_model import ParticipationModel from democracy_sim.model_setup import (model_params, canvas_element, - voter_turnout_example, wealth_chart, + voter_turnout, wealth_chart, color_distribution_chart) server = mesa.visualization.ModularServer( model_cls=ParticipationModel, - visualization_elements=[canvas_element, wealth_chart, voter_turnout_example, - color_distribution_chart], + visualization_elements=[canvas_element, color_distribution_chart, + wealth_chart, voter_turnout], name="DemocracySim", model_params=model_params, ) From 6ef3e95c2fc3cb7ded49180aa5e439c5b6138396 Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 26 Sep 2024 15:56:53 +0200 Subject: [PATCH 17/38] color_distribution_chart works now with all colors - set max colors to 10 globally --- democracy_sim/model_setup.py | 27 ++++++++++--------- democracy_sim/participation_model.py | 39 +++++++++++++++------------- 2 files changed, 36 insertions(+), 30 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 2a687cf..85e4462 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -59,15 +59,15 @@ "Yellow", "Aqua", "Fuchsia", - "Lavender", + #"Lavender", "Lime", "Maroon", - "Navy", - "Olive", + #"Navy", + #"Olive", "Orange", - "Purple", - "Silver", - "Teal", + #"Purple", + #"Silver", + #"Teal", # "Pink", # "Brown", # "Gold", @@ -83,7 +83,7 @@ # "DarkTurquoise", # "DarkViolet", # "DeepPink", -] # 16 colors +] # 10 colors def participation_draw(cell: ColorCell): @@ -139,17 +139,20 @@ 
def draw_color_dist_bars(color_distributions): plt.xticks(range(len(color_distributions))) plt.show() -color_distribution_chart = mesa.visualization.modules.ChartModule( - [{"Label": f"Color {i}", - "Color": "LightGrey" if _COLORS[i] == "White" else _COLORS[i]} for i in - range(num_colors)], data_collector_name='datacollector' -) wealth_chart = mesa.visualization.modules.ChartModule( [{"Label": "Collective assets", "Color": "Black"}], data_collector_name='datacollector' ) + +color_distribution_chart = mesa.visualization.modules.ChartModule( + [{"Label": f"Color {i}", + "Color": "LightGrey" if _COLORS[i] == "White" else _COLORS[i]} + for i in range(len(_COLORS))], + data_collector_name='datacollector' + ) + voter_turnout = mesa.visualization.ChartModule( [{"Label": "Voter turnout in percent", "Color": "Black"}], diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 3055723..f5f372c 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -339,24 +339,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.area_size_variance = area_size_variance self.initialize_areas() # Data collector - color_data = { - f"Color {i}": (lambda i=i: lambda m: m.av_area_color_dst[i])() for i - in range(self.num_colors)} - self.datacollector = mesa.DataCollector( - model_reporters={ - "Collective assets": compute_collective_assets, - "Voter turnout in percent": get_voter_turnout, - **color_data - }, - agent_reporters={ - #"Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, - #"Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else None, - }, - #tables={ - # "AreaData": ["Step", "AreaID", "ColorDistribution", - # "VoterTurnout"] - #} - ) + self.datacollector = self.initialize_datacollector() # Adjust the color pattern to make it less random (see color patches) self.adjust_color_pattern(color_patches_steps, patch_power) # Collect 
initial data @@ -467,6 +450,26 @@ def create_personalities(self, n=None): personalities.append(personality) # TODO may not be unique rankings.. return personalities + def initialize_datacollector(self): + color_data = { + f"Color {i}": (lambda i=i: lambda m: m.av_area_color_dst[i])() + for i in range(self.num_colors)} + return mesa.DataCollector( + model_reporters={ + "Collective assets": compute_collective_assets, + "Voter turnout in percent": get_voter_turnout, + **color_data + }, + agent_reporters={ + # "Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, + # "Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else None, + }, + # tables={ + # "AreaData": ["Step", "AreaID", "ColorDistribution", + # "VoterTurnout"] + # } + ) + def step(self): """Advance the model by one step.""" From a76aa7d108b37a0a9d71bc824488d78d03b735b1 Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 26 Sep 2024 17:12:55 +0200 Subject: [PATCH 18/38] minor code fixes / typos and cleaning up --- democracy_sim/distance_functions.py | 13 +++---- democracy_sim/model_setup.py | 9 ++--- democracy_sim/participation_agent.py | 14 +++---- democracy_sim/participation_model.py | 58 +++++++++++++--------------- democracy_sim/run.py | 1 - 5 files changed, 43 insertions(+), 52 deletions(-) diff --git a/democracy_sim/distance_functions.py b/democracy_sim/distance_functions.py index b71d42b..5f20ae9 100644 --- a/democracy_sim/distance_functions.py +++ b/democracy_sim/distance_functions.py @@ -1,12 +1,11 @@ from math import comb - import numpy as np def kendall_tau_on_ranks(rank_arr_1, rank_arr_2, search_pairs, color_vec): """ DON'T USE - (don't use for orderings!) + (don't use this for orderings!) This function calculates the kendal tau distance between two rank vektors. 
(The Kendall tau rank distance is a metric that counts the number @@ -97,7 +96,7 @@ def kendall_tau(ordering_1, ordering_2, model): def spearman_distance(rank_arr_1, rank_arr_2): """ - Beware: don't use for orderings! + Beware: don't use this for orderings! This function calculates the Spearman distance between two rank vektors. Spearman's foot rule is a measure of the distance between ranked lists. @@ -119,15 +118,15 @@ def spearman_distance(rank_arr_1, rank_arr_2): return np.sum(np.abs(rank_arr_1 - rank_arr_2)) -def spearman(ordering_1, ordering_2, model): +def spearman(ordering_1, ordering_2, _model=None): """ This calculates the normalized Spearman distance between two orderings. Spearman's foot rule is a measure of the distance between ranked lists. It is given as the sum of the absolute differences between the ranks of the two orderings (values from 0 to n-1 in any order). - :param ordering_1: First (NumPy) array containing the ranks of each option - :param ordering_2: The second rank array - :param model: The mesa model + :param ordering_1: The first (NumPy) array containing the option's ranks. + :param ordering_2: The second rank array. + :param _model: This parameter (mesa model) is intentionally unused. :return: The Spearman distance """ # TODO: remove these tests (comment out) on actual simulations to speed up diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 85e4462..2352c93 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -1,16 +1,15 @@ """ -handles the definition of the canvas parameters and -the drawing of the model representation on the canvas +This file handles the definition of the canvas and model parameters. 
""" from typing import TYPE_CHECKING, cast -from math import comb -import mesa from mesa.visualization.modules import ChartModule +from democracy_sim.participation_agent import ColorCell, VoteAgent from democracy_sim.participation_model import (ParticipationModel, distance_functions, social_welfare_functions) -from democracy_sim.participation_agent import ColorCell, VoteAgent import matplotlib.pyplot as plt +from math import comb +import mesa # Parameters diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 4a1e787..727245a 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -1,8 +1,6 @@ from typing import TYPE_CHECKING, cast - import numpy as np from mesa import Agent -from numpy import random if TYPE_CHECKING: # Type hint for IDEs from democracy_sim.participation_model import ParticipationModel @@ -38,9 +36,9 @@ def __init__(self, unique_id, model, pos, personality, assets=1): :param model: The simulation model of which the agent is part of. :type model: ParticipationModel :param pos: The position of the agent in the models' grid. - :type pos: tuple + :type pos: Tuple :param personality: Represents the agent's preferences among colors. - :type personality: np.ndarray + :type personality: Numpy.ndarray :param assets: The wealth/assets/motivation of the agent. """ # Pass the parameters to the parent class. 
@@ -96,7 +94,7 @@ def ask_for_participation(self, area): #print("Agent", self.unique_id, "decides whether to participate", # "in election of area", area.unique_id) # TODO Implement this (is to be decided upon a learned decision tree) - return random.choice([True, False]) + return np.random.choice([True, False]) def decide_altruism_factor(self, area): """ @@ -104,8 +102,8 @@ def decide_altruism_factor(self, area): """ # TODO Implement this (is to be decided upon a learned decision tree) # This part is important - also for monitoring - save/plot a_factors - a_factor = random.uniform(0.0, 1.0) - #print(f"Agent {self.unique_id} has altruism factor: {a_factor}") + a_factor = np.random.uniform(0.0, 1.0) + #print(f"Agent {self.unique_id} has an altruism factor of: {a_factor}") return a_factor def compute_assumed_opt_dist(self, area): @@ -141,7 +139,7 @@ def vote(self, area): ############## if TYPE_CHECKING: # Type hint for IDEs self.model = cast(ParticipationModel, self.model) - # For TESTING we just shuffle the option vector (ints) then normalize + # For TESTING, we just shuffle the option vector (ints) then normalize # and interpret the result as a preference vector (values=prefs) # (makes no sense, but it'll work for testing) r = np.arange(self.model.options.shape[0]) diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index f5f372c..d847999 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,13 +1,11 @@ from typing import TYPE_CHECKING, cast -import random -from math import sqrt import mesa -from mesa import Agent from democracy_sim.participation_agent import VoteAgent, ColorCell -import numpy as np -from itertools import permutations, product, combinations from democracy_sim.social_welfare_functions import majority_rule, approval_voting from democracy_sim.distance_functions import spearman, kendall_tau +from itertools import permutations, product, combinations +from math import sqrt 
+import numpy as np # Voting rules to be accessible by index social_welfare_functions = [majority_rule, approval_voting] @@ -15,7 +13,7 @@ distance_functions = [spearman, kendall_tau] -class Area(Agent): +class Area(mesa.Agent): def __init__(self, unique_id, model, height, width, size_variance): """ Create a new area. @@ -35,8 +33,8 @@ def __init__(self, unique_id, model, height, width, size_variance): elif size_variance > 1 or size_variance < 0: raise ValueError("Size variance must be between 0 and 1") else: # Apply variance - w_var_factor = random.uniform(1 - size_variance, 1 + size_variance) - h_var_factor = random.uniform(1 - size_variance, 1 + size_variance) + w_var_factor = self.random.uniform(1 - size_variance, 1 + size_variance) + h_var_factor = self.random.uniform(1 - size_variance, 1 + size_variance) self._width = int(width * w_var_factor) self.width_off = abs(width - self._width) self._height = int(height * h_var_factor) @@ -149,7 +147,7 @@ def conduct_election(self): for agent in self.agents: # Personality-based reward # TODO: Calculate value p\in(0,1) based on how well the consensus fits the personality of the agent (should better be fast) - p = random.uniform(0, 1) + p = self.random.uniform(0, 1) # + Common reward (reward_pa) for all agents agent.assets = agent.assets + p * reward_pa + reward_pa # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process @@ -202,21 +200,12 @@ def compute_collective_assets(model): sum_assets = sum(agent.assets for agent in model.voting_agents) return sum_assets -def get_area_by_id(model, agent_id): - """TODO: rm (only for testing)""" - return next(area for area in model.area_scheduler.agents - if area.unique_id == agent_id) def get_voter_turnout(model): - area = get_area_by_id(model, 1) # For testing.. 
- return area.voter_turnout # Assuming one area - -def get_area_color_distributions(model): - area = get_area_by_id(model, 1) # For testing.. - # d = {f"Color {i}": area.color_distribution[i] for i in range(model.num_colors)} - # print(f"Area {area} has color distribution:\n{d}") - d = area.color_distribution - return d + voter_turnout_sum = 0 + for area in model.area_scheduler.agents: + voter_turnout_sum += area.voter_turnout + return voter_turnout_sum / len(model.area_scheduler.agents) def color_by_dst(color_distribution) -> int: @@ -227,7 +216,7 @@ def color_by_dst(color_distribution) -> int: Example: color_distribution = [0.2, 0.3, 0.5] Color 1 is selected with a probability of 0.3 """ - r = random.random() + r = np.random.random() for color_idx, prob in enumerate(color_distribution): if r < prob: return color_idx @@ -279,6 +268,14 @@ def create_personality(num_colors, num_personality_colors): return personality +def get_color_distribution_function(color): + """ + This method returns a lambda function for the color distribution chart. + :param color: The color number (used as index). 
+ """ + return lambda m: m.av_area_color_dst[color] + + class ParticipationModel(mesa.Model): """A model with some number of agents.""" @@ -310,8 +307,8 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.grid = mesa.space.MultiGrid(height=height, width=width, torus=True) self.heterogeneity = heterogeneity # Random bias factors that affect the initial color distribution - self.vertical_bias = random.uniform(0, 1) - self.horizontal_bias = random.uniform(0, 1) + self.vertical_bias = self.random.uniform(0, 1) + self.horizontal_bias = self.random.uniform(0, 1) self.draw_borders = draw_borders # Color distribution (global) self.color_dst = self.create_color_distribution(heterogeneity) @@ -379,7 +376,7 @@ def initialize_voting_agents(self): # Get a random position x = self.random.randrange(self.width) y = self.random.randrange(self.height) - personality = random.choice(self.personalities) + personality = self.random.choice(self.personalities) VoteAgent(a_id, self, (x, y), personality) # Count +1 at the color cell the agent is placed at TODO improve? agents = self.grid.get_cell_list_contents([(x, y)]) @@ -451,9 +448,8 @@ def create_personalities(self, n=None): return personalities def initialize_datacollector(self): - color_data = { - f"Color {i}": (lambda i=i: lambda m: m.av_area_color_dst[i])() - for i in range(self.num_colors)} + color_data = {f"Color {i}": get_color_distribution_function(i) for i in + range(self.num_colors)} return mesa.DataCollector( model_reporters={ "Collective assets": compute_collective_assets, @@ -501,7 +497,7 @@ def create_color_distribution(self, heterogeneity): :param heterogeneity: Factor used as sigma in 'random.gauss'. 
""" colors = range(self.num_colors) - values = [abs(random.gauss(1, heterogeneity)) for _ in colors] + values = [abs(self.random.gauss(1, heterogeneity)) for _ in colors] # Normalize (with float division) total = sum(values) dst_array = [value / total for value in values] @@ -524,7 +520,7 @@ def color_patches(self, cell, patch_power): + abs(normalized_y - self.vertical_bias)) # The closer the cell to the bias-point, the less often it is # to be replaced by a color chosen from the initial distribution: - if abs(random.gauss(0, patch_power)) < bias_factor: + if abs(self.random.gauss(0, patch_power)) < bias_factor: return color_by_dst(self.color_dst) # Otherwise, apply the color patches logic neighbor_cells = self.grid.get_neighbors((cell.row, cell.col), diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 5c28871..afe3c39 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -1,4 +1,3 @@ -# import webbrowser import mesa from democracy_sim.participation_model import ParticipationModel from democracy_sim.model_setup import (model_params, canvas_element, From 8a0f6603ef871e1a2a66f74edadfc7d573319301 Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 26 Sep 2024 18:31:22 +0200 Subject: [PATCH 19/38] implemented (global) Gini-Index statistics in charts --- democracy_sim/model_setup.py | 4 ++-- democracy_sim/participation_model.py | 18 +++++++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 2352c93..9f76274 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -153,8 +153,8 @@ def draw_color_dist_bars(color_distributions): ) voter_turnout = mesa.visualization.ChartModule( - [{"Label": "Voter turnout in percent", - "Color": "Black"}], + [{"Label": "Voter turnout globally (in percent)", "Color": "Black"}, + {"Label": "Gini Index", "Color": "Red"}], data_collector_name='datacollector') diff --git a/democracy_sim/participation_model.py 
b/democracy_sim/participation_model.py index d847999..a79cc76 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -201,6 +201,21 @@ def compute_collective_assets(model): return sum_assets +def compute_gini_index(model): + # TODO: seperate to be able to calculate it zone-wise as well as globally + # TODO: Unit-test this function + # Extract the list of assets for all agents + assets = [agent.assets for agent in model.voting_agents] + n = len(assets) + # Sort the assets + sorted_assets = sorted(assets) + # Calculate the Gini Index + cumulative_sum = sum((i + 1) * sorted_assets[i] for i in range(n)) + total_sum = sum(sorted_assets) + gini_index = (2 * cumulative_sum) / (n * total_sum) - (n + 1) / n + return gini_index * 100 # Return in "percent" (0-100) + + def get_voter_turnout(model): voter_turnout_sum = 0 for area in model.area_scheduler.agents: @@ -453,7 +468,8 @@ def initialize_datacollector(self): return mesa.DataCollector( model_reporters={ "Collective assets": compute_collective_assets, - "Voter turnout in percent": get_voter_turnout, + "Gini Index": compute_gini_index, + "Voter turnout globally (in percent)": get_voter_turnout, **color_data }, agent_reporters={ From 9a4667f6f79740cece4c8f2d5fa36eb3448cc7f3 Mon Sep 17 00:00:00 2001 From: jurikane Date: Wed, 9 Oct 2024 19:44:39 +0200 Subject: [PATCH 20/38] implemented more stats, fixed typos, plan to merge the two schedulers into one to avoid issues that having several schedulers causes --- democracy_sim/model_setup.py | 42 +++++--- democracy_sim/participation_model.py | 53 +++++++-- democracy_sim/run.py | 10 +- democracy_sim/visualisation_elements.py | 137 ++++++++++++++++++++++++ docs/mesa_docs.md | 2 +- mkdocs.yml | 9 +- 6 files changed, 228 insertions(+), 25 deletions(-) create mode 100644 democracy_sim/visualisation_elements.py diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 9f76274..3ab0457 100644 --- a/democracy_sim/model_setup.py 
+++ b/democracy_sim/model_setup.py @@ -8,7 +8,9 @@ distance_functions, social_welfare_functions) import matplotlib.pyplot as plt +import seaborn as sns from math import comb +import numpy as np import mesa # Parameters @@ -48,6 +50,10 @@ av_area_width = 20 # area_width = grid_cols // int(sqrt(num_areas)) area_size_variance = 0.0 +######################## +# Statistics and Views # +######################## +show_area_stats = False _COLORS = [ @@ -125,18 +131,25 @@ def participation_draw(cell: ColorCell): ) -# Draw bars TODO: Implement to use within the mesa framework.. -def draw_color_dist_bars(color_distributions): - # Setup plot - fig, ax = plt.subplots() - for i, dist in enumerate(color_distributions): - bottom = 0 - for j, part in enumerate(color_distributions): - ax.bar(i, part, bottom=bottom, color=_COLORS[j % len(_COLORS)]) - bottom += part - # Set x-ticks to be distribution indices - plt.xticks(range(len(color_distributions))) - plt.show() +# # Draw bars (Test) +# def draw_color_dist_bars(color_distributions): +# # Setup plot +# fig, ax = plt.subplots() +# for i, dist in enumerate(color_distributions): +# bottom = 0 +# for j, part in enumerate(color_distributions): +# ax.bar(i, part, bottom=bottom, color=_COLORS[j % len(_COLORS)]) +# bottom += part +# # Set x-ticks to be distribution indices +# plt.xticks(range(len(color_distributions))) +# plt.show() +# +# +# def plot_color_distribution(model, ax): +# agent_df = model.datacollector.get_agent_vars_dataframe() +# color_distributions = agent_df.groupby('Step')['Color Distribution'].apply(list).tolist() +# sns.barplot(data=color_distributions, ax=ax) +# ax.set_title('Color Distribution Over Time') wealth_chart = mesa.visualization.modules.ChartModule( @@ -154,7 +167,7 @@ def draw_color_dist_bars(color_distributions): voter_turnout = mesa.visualization.ChartModule( [{"Label": "Voter turnout globally (in percent)", "Color": "Black"}, - {"Label": "Gini Index", "Color": "Red"}], + {"Label": "Gini Index (0-100)", 
"Color": "Red"}], data_collector_name='datacollector') @@ -240,4 +253,7 @@ def draw_color_dist_bars(color_distributions): min_value=0.0, max_value=0.99, step=0.1, description="Select the variance of the area sizes" ), + "show_area_stats": mesa.visualization.Checkbox( + name="Show all statistics", value=show_area_stats + ), } diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index a79cc76..20d1223 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -45,6 +45,7 @@ def __init__(self, unique_id, model, height, width, size_variance): self.color_distribution = np.zeros(model.num_colors) # Initialize to 0 self.voted_distribution = np.zeros(model.num_colors) self.voter_turnout = 0 # In percent + self.dist_to_reality = None # Elected vs. actual color distribution def __str__(self): return (f"Area(id={self.unique_id}, size={self._height}x{self._width}, " @@ -140,9 +141,9 @@ def conduct_election(self): winning_option = aggregated[0] self.voted_distribution = self.model.options[winning_option] # Calculate the distance to the real distribution using distance_func - normalized_dist = self.curr_norm_dist() + self.dist_to_reality = self.curr_norm_dist() # Calculate the rewards per agent - reward_pa = (1 - normalized_dist) * self.model.max_reward + reward_pa = (1 - self.dist_to_reality) * self.model.max_reward # Distribute the two types of rewards for agent in self.agents: # Personality-based reward @@ -202,7 +203,7 @@ def compute_collective_assets(model): def compute_gini_index(model): - # TODO: seperate to be able to calculate it zone-wise as well as globally + # TODO: separate to be able to calculate it zone-wise as well as globally # TODO: Unit-test this function # Extract the list of assets for all agents assets = [agent.assets for agent in model.voting_agents] @@ -213,7 +214,7 @@ def compute_gini_index(model): cumulative_sum = sum((i + 1) * sorted_assets[i] for i in range(n)) total_sum = 
sum(sorted_assets) gini_index = (2 * cumulative_sum) / (n * total_sum) - (n + 1) / n - return gini_index * 100 # Return in "percent" (0-100) + return int(gini_index * 100) # Return in "percent" (0-100) def get_voter_turnout(model): @@ -291,6 +292,30 @@ def get_color_distribution_function(color): return lambda m: m.av_area_color_dst[color] +def get_area_voter_turnout(area): + if isinstance(area, Area): + return area.voter_turnout + return None + +def get_area_closeness_to_reality(area): + if isinstance(area, Area): + return area.dist_to_reality + return None + +def get_area_color_distribution(area): + if isinstance(area, Area): + return area.color_distribution.tolist() + return None + +# def get_area_personality_based_reward(area): +# # Assuming you have a method to calculate this in the Area class +# return area.calculate_personality_reward() +# +# def get_area_gini_index(area): +# # Assuming you have a method to calculate this in the Area class +# return area.calculate_gini_index() + + class ParticipationModel(mesa.Model): """A model with some number of agents.""" @@ -298,7 +323,8 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, num_personality_colors, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, - rule_idx, distance_idx, election_costs, max_reward): + rule_idx, distance_idx, election_costs, max_reward, + show_area_stats): super().__init__() self.height = height self.width = width @@ -350,12 +376,14 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.av_area_height = av_area_height self.area_size_variance = area_size_variance self.initialize_areas() - # Data collector - self.datacollector = self.initialize_datacollector() # Adjust the color pattern to make it less random (see color patches) self.adjust_color_pattern(color_patches_steps, patch_power) + # Data collector + self.datacollector = self.initialize_datacollector() 
# Collect initial data - self.datacollector.collect(self) + #self.datacollector.collect(self) + # Statistics + self.show_area_stats = show_area_stats @property def av_area_color_dst(self): @@ -468,13 +496,20 @@ def initialize_datacollector(self): return mesa.DataCollector( model_reporters={ "Collective assets": compute_collective_assets, - "Gini Index": compute_gini_index, + "Gini Index (0-100)": compute_gini_index, "Voter turnout globally (in percent)": get_voter_turnout, **color_data }, agent_reporters={ # "Voter Turnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, # "Color Distribution": lambda a: a.color_distribution if isinstance(a, Area) else None, + # + #"VoterTurnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, + "VoterTurnout": get_area_voter_turnout, + "Closeness to Reality": get_area_closeness_to_reality, + "ColorDistribution": get_area_color_distribution, + # "Personality-Based Reward": get_area_personality_based_reward, + # "Gini Index": get_area_gini_index }, # tables={ # "AreaData": ["Step", "AreaID", "ColorDistribution", diff --git a/democracy_sim/run.py b/democracy_sim/run.py index afe3c39..70319e0 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -3,11 +3,19 @@ from democracy_sim.model_setup import (model_params, canvas_element, voter_turnout, wealth_chart, color_distribution_chart) +from democracy_sim.visualisation_elements import * + + +color_distribution_element = ColorDistributionElement() +steps_text = StepsTextElement() +vto_areas = VoterTurnoutElement() + server = mesa.visualization.ModularServer( model_cls=ParticipationModel, visualization_elements=[canvas_element, color_distribution_chart, - wealth_chart, voter_turnout], + wealth_chart, voter_turnout, vto_areas, + color_distribution_element, steps_text], name="DemocracySim", model_params=model_params, ) diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py new file mode 100644 index 0000000..79e99d2 
--- /dev/null +++ b/democracy_sim/visualisation_elements.py @@ -0,0 +1,137 @@ +import matplotlib.pyplot as plt +from mesa.visualization import TextElement +from model_setup import _COLORS +import base64 +import math +import io + +_COLORS[0] = "LightGray" + +def save_plot_to_base64(fig): + buf = io.BytesIO() + plt.savefig(buf, format='png') + plt.close(fig) + buf.seek(0) + image_base64 = base64.b64encode(buf.read()).decode('utf-8') + buf.close() + return f'' + + +class ColorDistributionElement(TextElement): + def render(self, model): + # Only render if show_area_stats is enabled + step = model.color_cell_scheduler.steps + if not model.show_area_stats or step == 0 or step % 10 != 0: + return "" + + # Fetch data from the datacollector + data = model.datacollector.get_agent_vars_dataframe() + color_distribution = data['ColorDistribution'].dropna() + + # Extract unique area IDs + area_ids = color_distribution.index.get_level_values(1).unique() + num_colors = len(color_distribution.iloc[0]) + + # Create subplots within a single figure + num_areas = len(area_ids) + num_cols = math.ceil(math.sqrt(num_areas)) + num_rows = math.ceil(num_areas / num_cols) + fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, + figsize=(20, 20), sharex=True) + + for ax, area_id in zip(axes.flatten(), area_ids): + area_data = color_distribution.xs(area_id, level=1) + for color_idx in range(num_colors): + color_data = area_data.apply(lambda x: x[color_idx]) + ax.plot(color_data.index, color_data.values, + label=f'Color {color_idx}', color=_COLORS[color_idx]) + ax.set_title(f'Area {area_id}') + ax.set_xlabel('Step') + ax.set_ylabel('Color Distribution') + #ax.legend() + + plt.tight_layout() + return save_plot_to_base64(fig) + + +# class VoterTurnoutElement(TextElement): +# def render(self, model): +# # Only render if show_area_stats is enabled +# step = model.color_cell_scheduler.steps +# if not model.show_area_stats or step == 0 or step % 10 != 0: +# return "" +# # Fetch data from the 
datacollector +# data = model.datacollector.get_agent_vars_dataframe() +# voter_turnout = data['VoterTurnout'].dropna() +# +# # Extract unique area IDs +# area_ids = voter_turnout.index.get_level_values(1).unique() +# +# # Create subplots within a single figure +# num_areas = len(area_ids) +# fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(20, 20), sharex=True) +# +# for ax, area_id in zip(axes.flatten(), area_ids): +# area_data = voter_turnout.xs(area_id, level=1) +# ax.plot(area_data.index, area_data.values, label=f'Area {area_id}') +# ax.set_title(f'Area {area_id}') +# ax.set_xlabel('Step') +# ax.set_ylabel('Voter Turnout (%)') +# ax.legend() +# +# plt.tight_layout() +# return save_plot_to_base64(fig) + +class VoterTurnoutElement(TextElement): + def render(self, model): + # Only render if show_area_stats is enabled + step = model.color_cell_scheduler.steps + if not model.show_area_stats or step == 0 or step % 10 != 0: + return "" + # Fetch data from the datacollector + data = model.datacollector.get_agent_vars_dataframe() + voter_turnout = data['VoterTurnout'].dropna() + + # Extract unique area IDs + area_ids = voter_turnout.index.get_level_values(1).unique() + + # Create a single plot + fig, ax = plt.subplots(figsize=(10, 6)) + + for area_id in area_ids: + area_data = voter_turnout.xs(area_id, level=1) + ax.plot(area_data.index, area_data.values, label=f'Area {area_id}') + + ax.set_title('Voter Turnout by Area Over Time') + ax.set_xlabel('Step') + ax.set_ylabel('Voter Turnout (%)') + ax.legend() + + return save_plot_to_base64(fig) + + +class MatplotlibElement(TextElement): + def render(self, model): + # Only render if show_area_stats is enabled + step = model.color_cell_scheduler + if not model.show_area_stats or step == 0 or step % 10 != 0: + return "" + # Fetch data from the datacollector + data = model.datacollector.get_model_vars_dataframe() + collective_assets = data["Collective assets"] + + # Create a plot + fig, ax = plt.subplots() + 
ax.plot(collective_assets, label="Collective assets") + ax.set_title("Collective Assets Over Time") + ax.set_xlabel("Time") + ax.set_ylabel("Collective Assets") + ax.legend() + + return save_plot_to_base64(fig) + +class StepsTextElement(TextElement): + def render(self, model): + color_cell_step = model.color_cell_scheduler.steps + area_step = model.area_scheduler.steps + return f"Steps: {color_cell_step} (Color Cell), {area_step} (Area)" diff --git a/docs/mesa_docs.md b/docs/mesa_docs.md index 03e2ef6..5c2f93b 100644 --- a/docs/mesa_docs.md +++ b/docs/mesa_docs.md @@ -10,7 +10,7 @@ Mesa is highly flexible, allowing to simulate complex systems and observe emerge ### Agent-Based Modeling and Complex Societal Questions -Multi-agent based simulation is a valuable tool to research voting rules and collective decision-making +Multi-agent-based simulation is a valuable tool to research voting rules and collective decision-making as it allows for the modeling of very complex interactions that are challenging to capture with traditional methods[^3]. ABM is mainly used to research and analyze complex relationships. The focus is on understanding how individual behaviors and interactions lead to collective outcomes. 
diff --git a/mkdocs.yml b/mkdocs.yml index 3aa4982..93627d1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,6 +13,8 @@ edit_uri: edit/dev/docs/ nav: - Home: index.md - Teaser: teaser.md + #- Overview: overview.md + #- Code: the_voting_process_step_by_step.md - Mesa: mesa_docs.md theme: @@ -71,4 +73,9 @@ markdown_extensions: - def_list - footnotes - md_in_html - - pymdownx.arithmatex \ No newline at end of file + - pymdownx.arithmatex + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format From 33d2db8c30f1255c5a17ffa29819a502ec97a519 Mon Sep 17 00:00:00 2001 From: jurikane Date: Wed, 16 Oct 2024 17:33:29 +0200 Subject: [PATCH 21/38] started major cleanup - removed schedulers in favor of a single custom scheduler + changed how agents are stored in the system --- democracy_sim/model_setup.py | 8 +- democracy_sim/participation_agent.py | 73 ++++++------ democracy_sim/participation_model.py | 151 +++++++++++++++--------- democracy_sim/run.py | 29 ++++- democracy_sim/visualisation_elements.py | 18 +-- 5 files changed, 170 insertions(+), 109 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 3ab0457..ff41810 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -7,10 +7,7 @@ from democracy_sim.participation_model import (ParticipationModel, distance_functions, social_welfare_functions) -import matplotlib.pyplot as plt -import seaborn as sns from math import comb -import numpy as np import mesa # Parameters @@ -121,8 +118,9 @@ def participation_draw(cell: ColorCell): if cell.num_agents_in_cell > 0: portrayal[f"text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" - for area in cell.areas: - portrayal[f"Area {area.unique_id}"] = f"color dist: {area.color_distribution}" + for a in cell.areas: + text = f"{a.num_agents} agents and color dist: {a.color_distribution}" + portrayal[f"Area {a.unique_id}"] = text 
return portrayal diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 727245a..490d9a3 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -30,46 +30,52 @@ class VoteAgent(Agent): """An agent that has limited knowledge and resources and can decide to use them to participate in elections.""" - def __init__(self, unique_id, model, pos, personality, assets=1): + def __init__(self, unique_id, model, pos, personality, assets=1, + append_to_list=True): """ Create a new agent. :param unique_id: The unique identifier of the agent. :param model: The simulation model of which the agent is part of. :type model: ParticipationModel - :param pos: The position of the agent in the models' grid. + :param pos: The position of the agent in the grid. :type pos: Tuple :param personality: Represents the agent's preferences among colors. :type personality: Numpy.ndarray :param assets: The wealth/assets/motivation of the agent. + :append_to_list: Whether to add the agent to the model's agent list. """ # Pass the parameters to the parent class. 
super().__init__(unique_id=unique_id, model=model) + # The "pos" variable in mesa is special, so I avoid it here try: row, col = pos except ValueError: raise ValueError("Position must be a tuple of two integers.") - self._row = row - self._col = col + self._position = row, col self._assets = assets self.personality = personality self.known_cells = [] # ColorCell objects the agent knows (knowledge) # Add the agent to the models' agent list - model.voting_agents.append(self) - # Place the agent on the grid - model.grid.place_agent(self, pos) + if append_to_list: + model.voting_agents.append(self) def __str__(self): - return (f"Agent(id={self.unique_id}, pos={self.pos}, " + return (f"Agent(id={self.unique_id}, pos={self.position}, " f"personality={self.personality}, assets={self.assets})") @property - def col(self): - """Return the col location of this cell.""" - return self._col + def position(self): + """Return the location of the agent.""" + return self._position @property def row(self): - """Return the row location of this cell.""" - return self._row + """Return the row location of the agent.""" + return self._position[0] + + @property + def col(self): + """Return the col location of the agent.""" + return self._position[1] @property def assets(self): @@ -150,23 +156,6 @@ def vote(self, area): #print("Agent", self.unique_id, "voted:", r) return r - # def step(self): - # # Verify agent has some wealth - # if self.wealth > 0: - # other_a = self.random.choice(self.model.agent_scheduler.agents) - # if other_agent is not None: - # other_a.wealth += 1 - # self.wealth -= 1 - - # def move(self): - # if TYPE_CHECKING: # Type hint for IDEs - # self.model = cast(ParticipationModel, self.model) - # possible_steps = self.model.grid.get_neighborhood( - # self.pos, - # moore=True, # Moore vs. 
von neumann - # include_center=False) - # new_position = self.random.choice(possible_steps) - # self.model.grid.move_agent(self, new_position) def estimate_real_distribution(self, area): """ The agent estimates the real color distribution in the area based on @@ -190,14 +179,19 @@ def __init__(self, unique_id, model, pos, initial_color: int): Create a cell, in the given state, at the given row, col position. """ super().__init__(unique_id, model) + # The "pos" variable in mesa is special, so I avoid it here self._row = pos[0] self._col = pos[1] self._color = initial_color # The cell's current color (int) self._next_color = None - self._num_agents_in_cell = 0 + self.agents = [] self.areas = [] self.is_border_cell = False + def __str__(self): + return (f"Cell ({self.unique_id}, pos={self.position}, " + f"color={self.color}, num_agents={self.num_agents_in_cell})") + @property def col(self): """The col location of this cell.""" @@ -208,6 +202,11 @@ def row(self): """The row location of this cell.""" return self._row + @property + def position(self): # The variable pos is special in mesa! 
+ """The location of this cell.""" + return self._row, self._col + @property def color(self): """The current color of this cell.""" @@ -220,15 +219,13 @@ def color(self, value): @property def num_agents_in_cell(self): """The number of agents in this cell.""" - return self._num_agents_in_cell + return len(self.agents) - @num_agents_in_cell.setter - def num_agents_in_cell(self, value): - self._num_agents_in_cell = value + def add_agent(self, agent): + self.agents.append(agent) - @num_agents_in_cell.deleter - def num_agents_in_cell(self): - del self._num_agents_in_cell + def remove_agent(self, agent): + self.agents.remove(agent) def add_area(self, area): self.areas.append(area) diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 20d1223..554b58b 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, cast, List, Optional import mesa from democracy_sim.participation_agent import VoteAgent, ColorCell from democracy_sim.social_welfare_functions import majority_rule, approval_voting @@ -26,19 +26,7 @@ def __init__(self, unique_id, model, height, width, size_variance): if TYPE_CHECKING: # Type hint for IDEs model = cast(ParticipationModel, model) super().__init__(unique_id=unique_id, model=model) - if size_variance == 0: - self._width = width - self._height = height - self.width_off, self.height_off = 0, 0 - elif size_variance > 1 or size_variance < 0: - raise ValueError("Size variance must be between 0 and 1") - else: # Apply variance - w_var_factor = self.random.uniform(1 - size_variance, 1 + size_variance) - h_var_factor = self.random.uniform(1 - size_variance, 1 + size_variance) - self._width = int(width * w_var_factor) - self.width_off = abs(width - self._width) - self._height = int(height * h_var_factor) - self.height_off = abs(height - self._height) + self._set_dimensions(width, height, 
size_variance) self.agents = [] self.cells = [] self._idx_field = None # An indexing position of the area in the grid @@ -49,9 +37,39 @@ def __init__(self, unique_id, model, height, width, size_variance): def __str__(self): return (f"Area(id={self.unique_id}, size={self._height}x{self._width}, " - f"num_agents={len(self.agents)}, num_cells={len(self.cells)}, " + f"at idx_field={self._idx_field}, " + f"num_agents={self.num_agents}, num_cells={self.num_cells}, " f"color_distribution={self.color_distribution})") + def _set_dimensions(self, width, height, size_var): + """ + Set the dimensions of the area right, based on the size variance. + :param width: The average width of the area. + :param height: The average height of the area. + :param size_var: A variance factor applied to height and width. + """ + if size_var == 0: + self._width = width + self._height = height + self.width_off, self.height_off = 0, 0 + elif size_var > 1 or size_var < 0: + raise ValueError("Size variance must be between 0 and 1") + else: # Apply variance + w_var_factor = self.random.uniform(1 - size_var, 1 + size_var) + h_var_factor = self.random.uniform(1 - size_var, 1 + size_var) + self._width = int(width * w_var_factor) + self.width_off = abs(width - self._width) + self._height = int(height * h_var_factor) + self.height_off = abs(height - self._height) + + @property + def num_agents(self): + return len(self.agents) + + @property + def num_cells(self): + return len(self.cells) + @property def idx_field(self): return self._idx_field @@ -64,6 +82,8 @@ def idx_field(self, pos: tuple): The cells and agents are added to the area's lists of cells and agents. :param pos: (x, y) representing the areas top-left coordinates. """ + # TODO: Check - isn't it better to make sure agents are added to the area when they are created? + # TODO -- There is something wrong here!!! 
(Agents are not added to the areas) if TYPE_CHECKING: # Type hint for IDEs self.model = cast(ParticipationModel, self.model) try: @@ -154,9 +174,7 @@ def conduct_election(self): # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process # Statistics self.voter_turnout = int((len(participating_agents) / - len(self.agents)) * 100) # In percent - - + self.num_agents) * 100) # In percent def update_color_distribution(self): @@ -165,12 +183,11 @@ def update_color_distribution(self): and saves it in the color_distribution attribute. """ color_count = {} - num_cells = len(self.cells) for cell in self.cells: color = cell.color color_count[color] = color_count.get(color, 0) + 1 for color in range(self.model.num_colors): - dist_val = color_count.get(color, 0) / num_cells # Float division + dist_val = color_count.get(color, 0) / self.num_cells # Float self.color_distribution[color] = dist_val def filter_cells(self, cell_list): @@ -219,9 +236,9 @@ def compute_gini_index(model): def get_voter_turnout(model): voter_turnout_sum = 0 - for area in model.area_scheduler.agents: + for area in model.areas: voter_turnout_sum += area.voter_turnout - return voter_turnout_sum / len(model.area_scheduler.agents) + return voter_turnout_sum / model.num_areas def color_by_dst(color_distribution) -> int: @@ -315,6 +332,24 @@ def get_area_color_distribution(area): # # Assuming you have a method to calculate this in the Area class # return area.calculate_gini_index() +class CustomScheduler(mesa.time.BaseScheduler): + def step(self): + """Execute the step function for all area- and cell-agents by type, + first for Areas then for ColorCells.""" + if TYPE_CHECKING: + self.model = cast(ParticipationModel, self.model) + # Step through Area agents first (and in "random" order) + self.model.random.shuffle(self.model.areas) + for area in self.model.areas: + area.step() + # Step through ColorCell 
agents next + self.model.random.shuffle(self.model.color_cells) + for cell in self.model.color_cells: + cell.step() + + self.steps += 1 + self.time += 1 + class ParticipationModel(mesa.Model): """A model with some number of agents.""" @@ -328,23 +363,18 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, super().__init__() self.height = height self.width = width - self.num_agents = num_agents - self.voting_agents = [] self.num_colors = num_colors # Area variables - self.num_areas = num_areas + self.areas: List[Optional[Area]] = [None] * num_areas self.av_area_height = av_area_height self.av_area_width = av_area_width self.area_size_variance = area_size_variance - # Create schedulers and assign it to the model - self.color_cell_scheduler = mesa.time.RandomActivation(self) - self.area_scheduler = mesa.time.RandomActivation(self) - # self.agent_scheduler = mesa.time.RandomActivation(self) - # self.schedule = StagedActivation(self, - # stage_list=['color_step', 'step']) + # Create a scheduler that goes through areas first then color cells + self.scheduler = CustomScheduler(self) # The grid # SingleGrid enforces at most one agent per cell; # MultiGrid allows multiple agents to be in the same cell. + # TODO: use SingleGrid (speed) and use pos for color-cells self.grid = mesa.space.MultiGrid(height=height, width=width, torus=True) self.heterogeneity = heterogeneity # Random bias factors that affect the initial color distribution @@ -364,9 +394,11 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.search_pairs = combinations(range(0, self.options.size), 2) # TODO check if correct! self.option_vec = np.arange(self.options.size) # Also to speed up # Create color cells + self.color_cells: List[Optional[ColorCell]] = [None] * (height * width) self.initialize_color_cells() # Create agents # TODO: Where do the agents get there known cells from and how!? 
+ self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents self.num_personalities = num_personalities self.num_personality_colors = num_personality_colors self.personalities = self.create_personalities() @@ -393,6 +425,14 @@ def av_area_color_dst(self): def av_area_color_dst(self, value): self._av_area_color_dst = value + @property + def num_agents(self): + return len(self.voting_agents) + + @property + def num_areas(self): + return len(self.areas) + def initialize_color_cells(self): """ This method initializes a color cells for each cell in the model's grid. @@ -406,7 +446,9 @@ def initialize_color_cells(self): # Add it to the grid self.grid.place_agent(cell, (row, col)) # Add the color cell to the scheduler - self.color_cell_scheduler.add(cell) + #self.scheduler.add(cell) # TODO: check (was commented out to use list) + # And to the 'model.color_cells' list (for faster access) + self.color_cells[unique_id] = cell # TODO: check if its not better to simply use the grid when finally changing the grid type to SingleGrid def initialize_voting_agents(self): """ @@ -420,24 +462,29 @@ def initialize_voting_agents(self): x = self.random.randrange(self.width) y = self.random.randrange(self.height) personality = self.random.choice(self.personalities) - VoteAgent(a_id, self, (x, y), personality) - # Count +1 at the color cell the agent is placed at TODO improve? 
- agents = self.grid.get_cell_list_contents([(x, y)]) - color_cells = [a for a in agents if isinstance(a, ColorCell)] - if len(color_cells) > 1: - raise ValueError(f"There are several color cells at {(x, y)}!") - cell = color_cells[0] - cell.num_agents_in_cell = cell.num_agents_in_cell + 1 - - def initialize_area(self, a_id, x_coord, y_coord): + # Create agent without appending (add to the pre-defined list) + agent = VoteAgent(a_id, self, (x, y), personality, + assets=1, append_to_list=False) + self.voting_agents[a_id] = agent # Add using the index (faster) + # Add the agent to the grid by placing it on a cell + cell = self.grid.get_cell_list_contents([(x, y)])[0] + if TYPE_CHECKING: + cell = cast(ColorCell, cell) + cell.add_agent(agent) + + + def initialize_area(self, a_id: int, x_coord, y_coord): """ This method initializes one area in the models' grid. """ area = Area(a_id, self, self.av_area_height, self.av_area_width, self.area_size_variance) # Place the area in the grid using its indexing field + # this adds the corresponding color cells and voting agents to the area area.idx_field = (x_coord, y_coord) - self.area_scheduler.add(area) + # Safe in the models' areas-list + self.areas[a_id] = area + def initialize_areas(self): """ @@ -465,12 +512,12 @@ def initialize_areas(self): additional_x.append(self.random.randrange(self.grid.width)) additional_y.append(self.random.randrange(self.grid.height)) # Create the area's ids - a_ids = iter(range(1, self.num_areas + 1)) + a_ids = iter(range(self.num_areas)) # Initialize all areas for x_coord in x_coords: for y_coord in y_coords: - a_id = next(a_ids, 0) - if a_id == 0: + a_id = next(a_ids, -1) + if a_id == -1: break self.initialize_area(a_id, x_coord, y_coord) for x_coord, y_coord in zip(additional_x, additional_y): @@ -521,9 +568,8 @@ def step(self): """Advance the model by one step.""" # Conduct elections in the areas - self.area_scheduler.step() - # Mutate the color cells according to election outcomes - 
self.color_cell_scheduler.step() + # and then mutate the color cells according to election outcomes + self.scheduler.step() # Update the global color distribution self.update_av_area_color_dst() # Collect data for monitoring and data analysis @@ -595,9 +641,8 @@ def update_av_area_color_dst(self): Beware: On overlapping areas, cells are counted several times. """ sums = np.zeros(self.num_colors) - for area in self.area_scheduler.agents: - if TYPE_CHECKING: - area = cast(Area, area) + for area in self.areas: + # TODO: check this! There might be a problem with identifying the areas because of shuffling!!! (see scheduler) sums += area.color_distribution # Return the average color distributions - self.av_area_color_dst = sums / len(self.area_scheduler.agents) + self.av_area_color_dst = sums / self.num_areas diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 70319e0..517d2b4 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -1,23 +1,40 @@ -import mesa +from mesa.visualization.ModularVisualization import ModularServer from democracy_sim.participation_model import ParticipationModel -from democracy_sim.model_setup import (model_params, canvas_element, +from democracy_sim.model_setup import (model_params as params, canvas_element, voter_turnout, wealth_chart, color_distribution_chart) from democracy_sim.visualisation_elements import * +class CustomModularServer(ModularServer): + """ This is to prevent double initialization of the model. + For some reason, the Server resets the model once on initialization + and again on server launch. 
""" + def __init__(self, model_cls, visualization_elements, + name="Mesa Model", model_params=None, port=None): + self.initialized = False + super().__init__(model_cls, visualization_elements, name, model_params, + port) + + def reset_model(self): + if not self.initialized: + self.initialized = True + return # This ensures that the first reset-call is ignored + super().reset_model() + + color_distribution_element = ColorDistributionElement() steps_text = StepsTextElement() vto_areas = VoterTurnoutElement() - -server = mesa.visualization.ModularServer( +server = CustomModularServer( model_cls=ParticipationModel, visualization_elements=[canvas_element, color_distribution_chart, wealth_chart, voter_turnout, vto_areas, color_distribution_element, steps_text], name="DemocracySim", - model_params=model_params, + model_params=params, ) -server.launch(open_browser=True) +if __name__ == "__main__": + server.launch(open_browser=True) diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index 79e99d2..c8d9edc 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -20,7 +20,7 @@ def save_plot_to_base64(fig): class ColorDistributionElement(TextElement): def render(self, model): # Only render if show_area_stats is enabled - step = model.color_cell_scheduler.steps + step = model.scheduler.steps if not model.show_area_stats or step == 0 or step % 10 != 0: return "" @@ -57,7 +57,7 @@ def render(self, model): # class VoterTurnoutElement(TextElement): # def render(self, model): # # Only render if show_area_stats is enabled -# step = model.color_cell_scheduler.steps +# step = model.scheduler.steps # if not model.show_area_stats or step == 0 or step % 10 != 0: # return "" # # Fetch data from the datacollector @@ -85,7 +85,7 @@ def render(self, model): class VoterTurnoutElement(TextElement): def render(self, model): # Only render if show_area_stats is enabled - step = 
model.color_cell_scheduler.steps + step = model.scheduler.steps if not model.show_area_stats or step == 0 or step % 10 != 0: return "" # Fetch data from the datacollector @@ -113,7 +113,7 @@ def render(self, model): class MatplotlibElement(TextElement): def render(self, model): # Only render if show_area_stats is enabled - step = model.color_cell_scheduler + step = model.scheduler.steps if not model.show_area_stats or step == 0 or step % 10 != 0: return "" # Fetch data from the datacollector @@ -132,6 +132,10 @@ def render(self, model): class StepsTextElement(TextElement): def render(self, model): - color_cell_step = model.color_cell_scheduler.steps - area_step = model.area_scheduler.steps - return f"Steps: {color_cell_step} (Color Cell), {area_step} (Area)" + step = model.scheduler.steps + # TODO clean up + first_agents = [str(a) for a in model.voting_agents[:5]] + text = (f"Step: {step} | cells: {len(model.color_cells)} | " + f"areas: {len(model.areas)} | First 5 voters of " + f"{len(model.voting_agents)}: {first_agents}") + return text From 17f7ed2061f60c364e8fe0e2360a2cbbc6be0815 Mon Sep 17 00:00:00 2001 From: jurikane Date: Wed, 16 Oct 2024 18:51:11 +0200 Subject: [PATCH 22/38] fixed the way voting agents are added to the system + streamlined the adjust_color_pattern function --- democracy_sim/model_setup.py | 2 +- democracy_sim/participation_model.py | 37 ++++++++++++++-------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index ff41810..1c2bb06 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -119,7 +119,7 @@ def participation_draw(cell: ColorCell): portrayal[f"text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" for a in cell.areas: - text = f"{a.num_agents} agents and color dist: {a.color_distribution}" + text = f"{a.num_agents} agents, color dist: {a.color_distribution}" portrayal[f"Area {a.unique_id}"] = text return 
portrayal diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 554b58b..c6c5d72 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -105,18 +105,19 @@ def idx_field(self, pos: tuple): for y_area in range(self._height): x = (adjusted_x + x_area) % self.model.width y = (adjusted_y + y_area) % self.model.height - local_agents = self.model.grid.get_cell_list_contents([(x, y)]) - for a in local_agents: - if isinstance(a, VoteAgent): - self.add_agent(a) # Add the agent to the area - elif isinstance(a, ColorCell): - a.add_area(self) # Add the area to the cell - # Mark as a border cell if true - if (x_area == 0 or y_area == 0 - or x_area == self._width - 1 - or y_area == self._height - 1): - a.is_border_cell = True - self.add_cell(a) # Add the cell to the area + cell = self.model.grid.get_cell_list_contents([(x, y)])[0] + if TYPE_CHECKING: + cell = cast(ColorCell, cell) + self.add_cell(cell) # Add the cell to the area + # Add all voting agents to the area + for agent in cell.agents: + self.add_agent(agent) + cell.add_area(self) # Add the area to the color-cell + # Mark as a border cell if true + if (x_area == 0 or y_area == 0 + or x_area == self._width - 1 + or y_area == self._height - 1): + cell.is_border_cell = True self.update_color_distribution() self._idx_field = (adjusted_x, adjusted_y) @@ -579,13 +580,12 @@ def adjust_color_pattern(self, color_patches_steps, patch_power): """Adjusting the color pattern to make it less predictable.""" for _ in range(color_patches_steps): print(f"Color adjustment step {_}") - for cell in self.grid.coord_iter(): - agents = cell[0] + for grid_cell in self.grid.coord_iter(): + cell = grid_cell[0][0] # Get the color-cell if TYPE_CHECKING: - agents = cast(list, agents) - c = [cell for cell in agents if isinstance(cell, ColorCell)][0] - most_common_color = self.color_patches(c, patch_power) - c.color = most_common_color + cell = cast(ColorCell, cell) + 
most_common_color = self.color_patches(cell, patch_power) + cell.color = most_common_color def create_color_distribution(self, heterogeneity): """ @@ -642,7 +642,6 @@ def update_av_area_color_dst(self): """ sums = np.zeros(self.num_colors) for area in self.areas: - # TODO: check this! There might be a problem with identifying the areas because of shuffling!!! (see scheduler) sums += area.color_distribution # Return the average color distributions self.av_area_color_dst = sums / self.num_areas From 7da602a171c38d82a72ce90349a4865bffac49b0 Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 17 Oct 2024 18:43:11 +0200 Subject: [PATCH 23/38] changed grid type to SingleGrid and some minor changes --- democracy_sim/model_setup.py | 10 ++--- democracy_sim/participation_model.py | 61 ++++++++++++---------------- 2 files changed, 30 insertions(+), 41 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 1c2bb06..7eb6a48 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -3,7 +3,7 @@ """ from typing import TYPE_CHECKING, cast from mesa.visualization.modules import ChartModule -from democracy_sim.participation_agent import ColorCell, VoteAgent +from democracy_sim.participation_agent import ColorCell from democracy_sim.participation_model import (ParticipationModel, distance_functions, social_welfare_functions) @@ -99,14 +99,11 @@ def participation_draw(cell: ColorCell): """ if cell is None: raise AssertionError - if isinstance(cell, VoteAgent): - return None color = _COLORS[cell.color] portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, "x": cell.row, "y": cell.col, "Color": color} - # TODO: add the areas the cell belongs to to the hover-text (the text that is shown when one hovers over the cell in the grid) - # + maybe: draw the agent number in the opposing color, + maybe draw borders nicer + # TODO: maybe: draw the agent number in the opposing color, + maybe draw borders nicer # If the 
cell is a border cell, change its appearance if TYPE_CHECKING: # Type hint for IDEs cell.model = cast(ParticipationModel, cell.model) @@ -115,6 +112,9 @@ def participation_draw(cell: ColorCell): portrayal["r"] = 0.9 # Adjust the radius to fit within the cell if color == "White": portrayal["Color"] = "LightGrey" + # Add position (x, y) to the hover-text + portrayal["Position"] = f"{cell.position}" + portrayal["Color - text"] = _COLORS[cell.color] if cell.num_agents_in_cell > 0: portrayal[f"text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index c6c5d72..53bd412 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -118,8 +118,8 @@ def idx_field(self, pos: tuple): or x_area == self._width - 1 or y_area == self._height - 1): cell.is_border_cell = True - self.update_color_distribution() self._idx_field = (adjusted_x, adjusted_y) + self.update_color_distribution() def add_agent(self, agent): self.agents.append(agent) @@ -147,9 +147,10 @@ def conduct_election(self): # TODO: WHERE to discretize if needed? participating_agents = [] preference_profile = [] + # TODO: change the color pattern to an even less guessable form(?) 
for agent in self.agents: - if agent.ask_for_participation(area=self): - # TODO: if agent cant afford she cant participate + if (agent.assets >= self.model.election_costs + and agent.ask_for_participation(area=self)): participating_agents.append(agent) # collect the participation fee from the agents agent.assets = agent.assets - self.model.election_costs @@ -204,15 +205,6 @@ def filter_cells(self, cell_list): def step(self) -> None: self.update_color_distribution() self.conduct_election() - # self.model.datacollector.add_table_row( - # "AreaData", - # { - # "Step": self.model.schedule.time, - # "AreaID": self.unique_id, - # "ColorDistribution": self.color_distribution.tolist(), - # "VoterTurnout": self.voter_turnout - # } - # ) def compute_collective_assets(model): @@ -337,15 +329,17 @@ class CustomScheduler(mesa.time.BaseScheduler): def step(self): """Execute the step function for all area- and cell-agents by type, first for Areas then for ColorCells.""" + model = self.model if TYPE_CHECKING: - self.model = cast(ParticipationModel, self.model) + model = cast(ParticipationModel, model) # Step through Area agents first (and in "random" order) - self.model.random.shuffle(self.model.areas) - for area in self.model.areas: + # TODO think about using a different way of randomization that keeps the order of the two arrays stable + model.random.shuffle(model.areas) + for area in model.areas: area.step() # Step through ColorCell agents next - self.model.random.shuffle(self.model.color_cells) - for cell in self.model.color_cells: + model.random.shuffle(model.color_cells) + for cell in model.color_cells: cell.step() self.steps += 1 @@ -365,24 +359,18 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.height = height self.width = width self.num_colors = num_colors - # Area variables - self.areas: List[Optional[Area]] = [None] * num_areas - self.av_area_height = av_area_height - self.av_area_width = av_area_width - self.area_size_variance = 
area_size_variance # Create a scheduler that goes through areas first then color cells self.scheduler = CustomScheduler(self) # The grid # SingleGrid enforces at most one agent per cell; # MultiGrid allows multiple agents to be in the same cell. - # TODO: use SingleGrid (speed) and use pos for color-cells - self.grid = mesa.space.MultiGrid(height=height, width=width, torus=True) - self.heterogeneity = heterogeneity + self.grid = mesa.space.SingleGrid(height=height, width=width, torus=True) # Random bias factors that affect the initial color distribution self.vertical_bias = self.random.uniform(0, 1) self.horizontal_bias = self.random.uniform(0, 1) self.draw_borders = draw_borders # Color distribution (global) + #self.heterogeneity = heterogeneity self.color_dst = self.create_color_distribution(heterogeneity) self._av_area_color_dst = self.color_dst # Elections @@ -404,13 +392,15 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.num_personality_colors = num_personality_colors self.personalities = self.create_personalities() self.initialize_voting_agents() - # Create areas - self.av_area_width = av_area_width + # Area variables + self.areas: List[Optional[Area]] = [None] * num_areas self.av_area_height = av_area_height + self.av_area_width = av_area_width self.area_size_variance = area_size_variance - self.initialize_areas() # Adjust the color pattern to make it less random (see color patches) self.adjust_color_pattern(color_patches_steps, patch_power) + # Create areas + self.initialize_all_areas() # Data collector self.datacollector = self.initialize_datacollector() # Collect initial data @@ -447,7 +437,7 @@ def initialize_color_cells(self): # Add it to the grid self.grid.place_agent(cell, (row, col)) # Add the color cell to the scheduler - #self.scheduler.add(cell) # TODO: check (was commented out to use list) + #self.scheduler.add(cell) # TODO: check speed diffs using this.. 
# And to the 'model.color_cells' list (for faster access) self.color_cells[unique_id] = cell # TODO: check if its not better to simply use the grid when finally changing the grid type to SingleGrid @@ -465,7 +455,7 @@ def initialize_voting_agents(self): personality = self.random.choice(self.personalities) # Create agent without appending (add to the pre-defined list) agent = VoteAgent(a_id, self, (x, y), personality, - assets=1, append_to_list=False) + assets=5, append_to_list=False) # TODO: initial assets?! self.voting_agents[a_id] = agent # Add using the index (faster) # Add the agent to the grid by placing it on a cell cell = self.grid.get_cell_list_contents([(x, y)])[0] @@ -483,11 +473,11 @@ def initialize_area(self, a_id: int, x_coord, y_coord): # Place the area in the grid using its indexing field # this adds the corresponding color cells and voting agents to the area area.idx_field = (x_coord, y_coord) - # Safe in the models' areas-list + # Save in the models' areas-list self.areas[a_id] = area - def initialize_areas(self): + def initialize_all_areas(self): """ This method initializes the areas in the models' grid in such a way that the areas are spread approximately evenly across the grid. 
@@ -578,12 +568,11 @@ def step(self): def adjust_color_pattern(self, color_patches_steps, patch_power): """Adjusting the color pattern to make it less predictable.""" + cells = self.color_cells for _ in range(color_patches_steps): print(f"Color adjustment step {_}") - for grid_cell in self.grid.coord_iter(): - cell = grid_cell[0][0] # Get the color-cell - if TYPE_CHECKING: - cell = cast(ColorCell, cell) + self.random.shuffle(cells) + for cell in cells: most_common_color = self.color_patches(cell, patch_power) cell.color = most_common_color From 700fc71e38a4a93627761d65d4ad43401e90add2 Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 21 Oct 2024 16:35:55 +0200 Subject: [PATCH 24/38] mutation of color cells according to election results implemented --- democracy_sim/participation_agent.py | 9 +++++ democracy_sim/participation_model.py | 51 ++++++++++++++++++++-------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 490d9a3..a0ad7bb 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -52,6 +52,7 @@ def __init__(self, unique_id, model, pos, personality, assets=1, raise ValueError("Position must be a tuple of two integers.") self._position = row, col self._assets = assets + self._num_elections_participated = 0 self.personality = personality self.known_cells = [] # ColorCell objects the agent knows (knowledge) # Add the agent to the models' agent list @@ -90,6 +91,14 @@ def assets(self, value): def assets(self): del self._assets + @property + def num_elections_participated(self): + return self._num_elections_participated + + @num_elections_participated.setter + def num_elections_participated(self, value): + self._num_elections_participated = value + def ask_for_participation(self, area): """ The agent decides diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 53bd412..47ae949 100644 --- 
a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -145,14 +145,13 @@ def conduct_election(self): """ # Ask agents to participate # TODO: WHERE to discretize if needed? - participating_agents = [] preference_profile = [] # TODO: change the color pattern to an even less guessable form(?) for agent in self.agents: if (agent.assets >= self.model.election_costs and agent.ask_for_participation(area=self)): - participating_agents.append(agent) - # collect the participation fee from the agents + agent.num_elections_participated += 1 + # Collect the participation fee from the agents agent.assets = agent.assets - self.model.election_costs # Ask participating agents for their prefs preference_profile.append(agent.vote(area=self)) @@ -175,8 +174,8 @@ def conduct_election(self): agent.assets = agent.assets + p * reward_pa + reward_pa # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process # Statistics - self.voter_turnout = int((len(participating_agents) / - self.num_agents) * 100) # In percent + n = preference_profile.shape[0] # Number agents participated + self.voter_turnout = int((n / self.num_agents) * 100) # In percent def update_color_distribution(self): @@ -203,8 +202,32 @@ def filter_cells(self, cell_list): return [c for c in cell_list if c in cell_set] def step(self) -> None: - self.update_color_distribution() + """ + Conduct an election in the area, + mutate the cells' colors according to the election outcome + and update the color distribution of the area. + """ self.conduct_election() + # TODO: STRATEGY - decide: + # should the cells mutate on area-level and right after elections? + # Or globally - and then when? + # Mutate colors in cells + # Take some number of cells to mutate (i.e. 
5 %) + n_to_mutate = int(0.05 * self.num_cells) # TODO create a self.model.mu variable as mutation rate + # randomly select x cells + cells_to_mutate = self.random.sample(self.cells, n_to_mutate) + # Use the normalized voted distribution as probabilities for the colors + probs = self.voted_distribution / self.voted_distribution.sum() + # Pre-select colors for all cells to mutate + # TODO: Think about this: should we take local color-structure + # into account - like in color patches - to avoid colors mutating into + # very random structures? + colors = np.random.choice(self.model.colors, size=n_to_mutate, p=probs) + # Assign the newly selected colors to the cells + for cell, color in zip(cells_to_mutate, colors): + cell.color = color + # Important: Update the color distribution (because colors changed) + self.update_color_distribution() def compute_collective_assets(model): @@ -333,15 +356,11 @@ def step(self): if TYPE_CHECKING: model = cast(ParticipationModel, model) # Step through Area agents first (and in "random" order) - # TODO think about using a different way of randomization that keeps the order of the two arrays stable + # TODO think about randomization process model.random.shuffle(model.areas) for area in model.areas: area.step() - # Step through ColorCell agents next - model.random.shuffle(model.color_cells) - for cell in model.color_cells: - cell.step() - + # TODO: add global election? 
self.steps += 1 self.time += 1 @@ -358,7 +377,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, super().__init__() self.height = height self.width = width - self.num_colors = num_colors + self.colors = np.arange(num_colors) # Create a scheduler that goes through areas first then color cells self.scheduler = CustomScheduler(self) # The grid @@ -404,10 +423,14 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, # Data collector self.datacollector = self.initialize_datacollector() # Collect initial data - #self.datacollector.collect(self) + self.datacollector.collect(self) # Statistics self.show_area_stats = show_area_stats + @property + def num_colors(self): + return len(self.colors) + @property def av_area_color_dst(self): return self._av_area_color_dst From da4d5747efa668c53a53694c32434a2e809cdb36 Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 28 Oct 2024 20:28:36 +0100 Subject: [PATCH 25/38] Fixed unit tests, added gloval area, added feedback of election to cell-color mutation, changed personality-creation --- democracy_sim/distance_functions.py | 11 +- democracy_sim/model_setup.py | 8 +- democracy_sim/participation_agent.py | 46 ++-- democracy_sim/participation_model.py | 91 ++++++-- democracy_sim/visualisation_elements.py | 6 +- tests/test_approval_voting.py | 296 ++++++++++++------------ tests/test_distance_functions.py | 4 +- tests/test_participation_area_agent.py | 34 ++- tests/test_participation_model.py | 16 +- 9 files changed, 294 insertions(+), 218 deletions(-) diff --git a/democracy_sim/distance_functions.py b/democracy_sim/distance_functions.py index 5f20ae9..73f0f1b 100644 --- a/democracy_sim/distance_functions.py +++ b/democracy_sim/distance_functions.py @@ -48,7 +48,7 @@ def unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs): An ordering holds the option names in the order of their rank (rank=index). 
:param ordering_1: First (NumPy) array containing ranked options :param ordering_2: The second ordering array - :param search_pairs: The pairs of indices (for efficiency) + :param search_pairs: Containing search pairs of indices (for efficiency) :return: The kendall tau distance """ # Rename the elements to reduce the problem to counting inversions @@ -62,7 +62,7 @@ def unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs): return kendall_distance -def kendall_tau(ordering_1, ordering_2, model): +def kendall_tau(ordering_1, ordering_2, search_pairs): """ This calculates the normalized Kendall tau distance of two orderings. The Kendall tau rank distance is a metric that counts the number @@ -72,7 +72,7 @@ def kendall_tau(ordering_1, ordering_2, model): An ordering holds the option names in the order of their rank (rank=index). :param ordering_1: First (NumPy) array containing ranked options :param ordering_2: The second ordering array - :param model: Containing the pairs of indices (for efficiency) + :param search_pairs: Containing the pairs of indices (for efficiency) :return: The kendall tau distance """ # TODO: remove these tests (comment out) on actual simulations to speed up @@ -84,7 +84,6 @@ def kendall_tau(ordering_1, ordering_2, model): f"Error: Sequences {ordering_1}, {ordering_2} aren't comparable." # Get the unnormalized Kendall tau distance - search_pairs = model.search_pairs dist = unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs) # Maximum possible Kendall tau distance max_distance = comb(n, 2) # This is n choose 2, or n(n-1)/2 @@ -118,7 +117,7 @@ def spearman_distance(rank_arr_1, rank_arr_2): return np.sum(np.abs(rank_arr_1 - rank_arr_2)) -def spearman(ordering_1, ordering_2, _model=None): +def spearman(ordering_1, ordering_2, _search_pairs=None): """ This calculates the normalized Spearman distance between two orderings. Spearman's foot rule is a measure of the distance between ranked lists. 
@@ -126,7 +125,7 @@ def spearman(ordering_1, ordering_2, _model=None): of the two orderings (values from 0 to n-1 in any order). :param ordering_1: The first (NumPy) array containing the option's ranks. :param ordering_2: The second rank array. - :param _model: This parameter (mesa model) is intentionally unused. + :param _search_pairs: This parameter is intentionally unused. :return: The Spearman distance """ # TODO: remove these tests (comment out) on actual simulations to speed up diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 7eb6a48..4d2d6a7 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -119,8 +119,14 @@ def participation_draw(cell: ColorCell): portrayal[f"text"] = str(cell.num_agents_in_cell) portrayal["text_color"] = "Black" for a in cell.areas: + unique_id = a.unique_id + if unique_id == -1: + unique_id = "global" text = f"{a.num_agents} agents, color dist: {a.color_distribution}" - portrayal[f"Area {a.unique_id}"] = text + portrayal[f"Area {unique_id}"] = text + for voter in cell.agents: + text = f"personality: {voter.personality}" + portrayal[f"Agent {voter.unique_id}"] = text return portrayal diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index a0ad7bb..05797f1 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -30,8 +30,7 @@ class VoteAgent(Agent): """An agent that has limited knowledge and resources and can decide to use them to participate in elections.""" - def __init__(self, unique_id, model, pos, personality, assets=1, - append_to_list=True): + def __init__(self, unique_id, model, pos, personality, assets=1, add=True): """ Create a new agent. :param unique_id: The unique identifier of the agent. :param model: The simulation model of which the agent is part of. 
@@ -41,7 +40,8 @@ def __init__(self, unique_id, model, pos, personality, assets=1, :param personality: Represents the agent's preferences among colors. :type personality: Numpy.ndarray :param assets: The wealth/assets/motivation of the agent. - :append_to_list: Whether to add the agent to the model's agent list. + :add: Whether to add the agent to the model's agent list and color cell. + The 'add' variable is set to false on initialization of the model. """ # Pass the parameters to the parent class. super().__init__(unique_id=unique_id, model=model) @@ -55,9 +55,11 @@ def __init__(self, unique_id, model, pos, personality, assets=1, self._num_elections_participated = 0 self.personality = personality self.known_cells = [] # ColorCell objects the agent knows (knowledge) - # Add the agent to the models' agent list - if append_to_list: + # Add the agent to the models' agent list and the cell + if add: model.voting_agents.append(self) + cell = model.grid.get_cell_list_contents([(row, col)])[0] + cell.add_agent(self) def __str__(self): return (f"Agent(id={self.unique_id}, pos={self.position}, " @@ -142,9 +144,10 @@ def vote(self, area): """ The agent votes in the election of a given area, i.e., she returns a preference ranking vector over all options. - (Options are indexes, values are preference values defining the order). + (Ranking: `index = option`, `value proportional to rank`) The available options are set in the model. :param area: The area in which the election takes place. + :return ranking: A normalized preference-ranking (sum-normalization) """ # TODO Implement this (is to be decided upon a learned decision tree) # Compute the color distribution that is assumed to be the best choice. 
@@ -154,16 +157,27 @@ def vote(self, area): ############## if TYPE_CHECKING: # Type hint for IDEs self.model = cast(ParticipationModel, self.model) - # For TESTING, we just shuffle the option vector (ints) then normalize - # and interpret the result as a preference vector (values=prefs) - # (makes no sense, but it'll work for testing) - r = np.arange(self.model.options.shape[0]) - # Shuffle the array in place - np.random.shuffle(r) - r = np.array(r, dtype=float) - r /= r.sum() - #print("Agent", self.unique_id, "voted:", r) - return r + # # For TESTING + # # we just shuffle the option vector (ints) then normalize + # # and interpret the result as a preference vector (values=prefs) + # # (makes no sense, but it'll work for testing) + # r = np.arange(self.model.options.shape[0]) + # # Shuffle the array in place + # np.random.shuffle(r) + # r = np.array(r, dtype=float) + # r /= r.sum() + # return r + ############## + options = self.model.options + dist_func = self.model.distance_func + ranking = np.zeros(options.shape[0]) + color_search_pairs = self.model.color_search_pairs + print(color_search_pairs, "dist_func", dist_func, "voting_rule", self.model.voting_rule) + for i, option in enumerate(options): + # TODO: is it possible to leave out white? + ranking[i] = dist_func(self.personality, option, color_search_pairs) + ranking /= ranking.sum() # Normalize the preference vector + return ranking def estimate_real_distribution(self, area): """ diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 47ae949..3dadd64 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,3 +1,4 @@ +import itertools from typing import TYPE_CHECKING, cast, List, Optional import mesa from democracy_sim.participation_agent import VoteAgent, ColorCell @@ -135,13 +136,17 @@ def curr_norm_dist(self): It uses the models distance function. 
""" real_color_ord = np.argsort(self.color_distribution) - voted_ord = self.voted_distribution - return self.model.distance_func(real_color_ord, voted_ord, self.model) + voted_color_ord = self.voted_distribution + dist_to_reality = self.model.distance_func(real_color_ord, + voted_color_ord, + self.model.color_search_pairs) + return dist_to_reality def conduct_election(self): """ This method holds the primary logic of the simulation by simulating the election in the area as well as handling the payments and rewards. + :return voter_turnout: The percentage of agents that participated. """ # Ask agents to participate # TODO: WHERE to discretize if needed? @@ -156,7 +161,12 @@ def conduct_election(self): # Ask participating agents for their prefs preference_profile.append(agent.vote(area=self)) preference_profile = np.array(preference_profile) + # Check for the case that no agent participated + if preference_profile.ndim != 2: + print("Area", self.unique_id, "no one participated in the election") + return 0 # TODO: What to do in this case? Cease the simulation? 
# Aggregate the prefs using the v-rule ⇒ returns an option ordering + print("### Area", self, "\nv-rule:", self.model.voting_rule, "dist-func:", self.model.distance_func) aggregated = self.model.voting_rule(preference_profile) # Save the "elected" distribution in self.voted_distribution winning_option = aggregated[0] @@ -168,14 +178,14 @@ def conduct_election(self): # Distribute the two types of rewards for agent in self.agents: # Personality-based reward - # TODO: Calculate value p\in(0,1) based on how well the consensus fits the personality of the agent (should better be fast) + # TODO: # NOW Calculate value p\in(0,1) based on how well the consensus fits the personality of the agent (should better be fast) p = self.random.uniform(0, 1) # + Common reward (reward_pa) for all agents agent.assets = agent.assets + p * reward_pa + reward_pa # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process # Statistics n = preference_profile.shape[0] # Number agents participated - self.voter_turnout = int((n / self.num_agents) * 100) # In percent + return int((n / self.num_agents) * 100) # Voter turnout in percent def update_color_distribution(self): @@ -207,13 +217,13 @@ def step(self) -> None: mutate the cells' colors according to the election outcome and update the color distribution of the area. """ - self.conduct_election() - # TODO: STRATEGY - decide: - # should the cells mutate on area-level and right after elections? - # Or globally - and then when? + self.voter_turnout = self.conduct_election() + if self.voter_turnout == 0: + return # TODO: What to do if no agent participated..? # Mutate colors in cells # Take some number of cells to mutate (i.e. 5 %) n_to_mutate = int(0.05 * self.num_cells) # TODO create a self.model.mu variable as mutation rate + # TODO/Idea: What if the voter_turnout determines the mutation rate? 
# randomly select x cells cells_to_mutate = self.random.sample(self.cells, n_to_mutate) # Use the normalized voted distribution as probabilities for the colors @@ -221,7 +231,7 @@ def step(self) -> None: # Pre-select colors for all cells to mutate # TODO: Think about this: should we take local color-structure # into account - like in color patches - to avoid colors mutating into - # very random structures? + # very random structures? # Middendorf colors = np.random.choice(self.model.colors, size=n_to_mutate, p=probs) # Assign the newly selected colors to the cells for cell, color in zip(cells_to_mutate, colors): @@ -241,6 +251,8 @@ def compute_gini_index(model): # Extract the list of assets for all agents assets = [agent.assets for agent in model.voting_agents] n = len(assets) + if n == 0: + return 0 # No agents, no inequality # Sort the assets sorted_assets = sorted(assets) # Calculate the Gini Index @@ -252,9 +264,16 @@ def compute_gini_index(model): def get_voter_turnout(model): voter_turnout_sum = 0 + num_areas = model.num_areas for area in model.areas: voter_turnout_sum += area.voter_turnout - return voter_turnout_sum / model.num_areas + if not model.global_area is None: + # TODO: Check the correctness and whether it makes sense to include the global area here + voter_turnout_sum += model.global_area.voter_turnout + num_areas += 1 + elif num_areas == 0: + return 0 + return voter_turnout_sum / num_areas def color_by_dst(color_distribution) -> int: @@ -389,7 +408,6 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.horizontal_bias = self.random.uniform(0, 1) self.draw_borders = draw_borders # Color distribution (global) - #self.heterogeneity = heterogeneity self.color_dst = self.create_color_distribution(heterogeneity) self._av_area_color_dst = self.color_dst # Elections @@ -398,20 +416,23 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.voting_rule = social_welfare_functions[rule_idx] 
self.distance_func = distance_functions[distance_idx] self.options = create_all_options(num_colors) + # Simulation variables + # TODO self.mu # Mutation rate for the color cells # Create search pairs once for faster iterations when comparing rankings - self.search_pairs = combinations(range(0, self.options.size), 2) # TODO check if correct! + self.search_pairs = list(combinations(range(0, self.options.size), 2)) # TODO check if correct! self.option_vec = np.arange(self.options.size) # Also to speed up + self.color_search_pairs = list(combinations(range(0, num_colors), 2)) # Create color cells self.color_cells: List[Optional[ColorCell]] = [None] * (height * width) self.initialize_color_cells() # Create agents # TODO: Where do the agents get there known cells from and how!? self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents - self.num_personalities = num_personalities self.num_personality_colors = num_personality_colors - self.personalities = self.create_personalities() + self.personalities = self.create_personalities(num_personalities) self.initialize_voting_agents() # Area variables + self.global_area = self.initialize_global_area() # TODO create bool variable to make this optional self.areas: List[Optional[Area]] = [None] * num_areas self.av_area_height = av_area_height self.av_area_width = av_area_width @@ -478,7 +499,7 @@ def initialize_voting_agents(self): personality = self.random.choice(self.personalities) # Create agent without appending (add to the pre-defined list) agent = VoteAgent(a_id, self, (x, y), personality, - assets=5, append_to_list=False) # TODO: initial assets?! + assets=5, add=False) # TODO: initial assets?! self.voting_agents[a_id] = agent # Add using the index (faster) # Add the agent to the grid by placing it on a cell cell = self.grid.get_cell_list_contents([(x, y)])[0] @@ -507,6 +528,8 @@ def initialize_all_areas(self): Depending on grid size, the number of areas and their (average) sizes. 
TODO create unit tests for this method (Tested manually so far) """ + if self.num_areas == 0: + return # Calculate the number of areas in each direction roo_apx = round(sqrt(self.num_areas)) nr_areas_x = self.grid.width // self.av_area_width @@ -537,19 +560,34 @@ def initialize_all_areas(self): for x_coord, y_coord in zip(additional_x, additional_y): self.initialize_area(next(a_ids), x_coord, y_coord) - def create_personalities(self, n=None): + def initialize_global_area(self): + """ + This method initializes the global area spanning the whole grid. + """ + global_area = Area(-1, self, self.height, self.width, 0) + # Place the area in the grid using its indexing field + # this adds the corresponding color cells and voting agents to the area + global_area.idx_field = (0, 0) + return global_area + + + def create_personalities(self, n): """ TODO ensure that we end up with n personalities (with unique orderings) - maybe have to use orderings and convert them + maybe have to use orderings and convert them """ - if n is None: - n = self.num_personalities - personalities = [] - for _ in range(n): - personality = create_personality(self.num_colors, - self.num_personality_colors) - personalities.append(personality) # TODO may not be unique rankings.. - return personalities + #p_colors = range(1, self.num_colors) # Personalities exclude white + # TODO is it possible to leave out white (--> dist-func)? 
+ p_colors = range(self.num_colors) + personality_options = np.array(list(itertools.permutations(p_colors))) + + if len(personality_options) < n: + raise ValueError("Not enough unique personality options available.") + + indices = np.random.choice(len(personality_options), n, replace=False) + selected_personalities = personality_options[indices] + return selected_personalities + def initialize_datacollector(self): color_data = {f"Color {i}": get_color_distribution_function(i) for i in @@ -578,6 +616,7 @@ def initialize_datacollector(self): # } ) + def step(self): """Advance the model by one step.""" @@ -589,6 +628,7 @@ def step(self): # Collect data for monitoring and data analysis self.datacollector.collect(self) + def adjust_color_pattern(self, color_patches_steps, patch_power): """Adjusting the color pattern to make it less predictable.""" cells = self.color_cells @@ -612,6 +652,7 @@ def create_color_distribution(self, heterogeneity): dst_array = [value / total for value in values] return dst_array + def color_patches(self, cell, patch_power): """ This method is used to create a less random initial color distribution diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index c8d9edc..810332a 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -21,7 +21,7 @@ class ColorDistributionElement(TextElement): def render(self, model): # Only render if show_area_stats is enabled step = model.scheduler.steps - if not model.show_area_stats or step == 0 or step % 10 != 0: + if not model.show_area_stats or step == 0: return "" # Fetch data from the datacollector @@ -86,7 +86,7 @@ class VoterTurnoutElement(TextElement): def render(self, model): # Only render if show_area_stats is enabled step = model.scheduler.steps - if not model.show_area_stats or step == 0 or step % 10 != 0: + if not model.show_area_stats or step == 0: return "" # Fetch data from the datacollector data = 
model.datacollector.get_agent_vars_dataframe() @@ -114,7 +114,7 @@ class MatplotlibElement(TextElement): def render(self, model): # Only render if show_area_stats is enabled step = model.scheduler.steps - if not model.show_area_stats or step == 0 or step % 10 != 0: + if not model.show_area_stats or step == 0: return "" # Fetch data from the datacollector data = model.datacollector.get_model_vars_dataframe() diff --git a/tests/test_approval_voting.py b/tests/test_approval_voting.py index b260681..c5e78b9 100644 --- a/tests/test_approval_voting.py +++ b/tests/test_approval_voting.py @@ -1,5 +1,3 @@ -import numpy as np -import time from democracy_sim.social_welfare_functions import approval_voting from tests.test_majority_rule import simple, paradoxical @@ -22,151 +20,151 @@ # d d e c # e a a a -def test_approval_voting(): - # Test predefined cases - for pref_table, expected in approval_simple_cases: - res_ranking = approval_voting(pref_table) - assert list(res_ranking) == expected +# def test_approval_voting(): +# # Test predefined cases +# for pref_table, expected in approval_simple_cases: +# res_ranking = approval_voting(pref_table) +# assert list(res_ranking) == expected -# Cases with ties - "all equally possible" - -with_ties_all = np.array([ - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25] - ]) - -with_overall_tie = np.array([ - [0.4, 0.3, 0.2, 0.1], - [0.1, 0.4, 0.3, 0.2], - [0.2, 0.1, 0.4, 0.3], - [0.3, 0.2, 0.1, 0.4], -]) - -with_ties_mixed = np.array([ - [0.4, 0.3, 0.2, 0.1], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.1, 0.4, 0.3, 0.2], - [0.2, 0.1, 0.4, 0.3], - [0.25, 0.25, 0.25, 0.25], - [0.3, 0.2, 0.1, 0.4], -]) - -all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] - -def test_equally_possible(cv_threshold=0.125): - for pref_rel in all_equally_possible: - cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) - print(f"CV: 
{cv}") - assert cv < cv_threshold - -# Cases with ties - "not all equally possible" -with_ties_unequal = np.array([ - [0.25, 0.25, 0.25, 0.25], - [0.4, 0.3, 0.2, 0.1], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25], - [0.25, 0.25, 0.25, 0.25] - ]) - -with_ties_all_ab = np.array([ - [0.3, 0.3, 0.2, 0.2], - [0.25, 0.25, 0.25, 0.25] - ]) # all possible (a or b up first is more likely) - -with_ties_ab = np.array([ - [0.3, 0.3, 0.2, 0.2], - [0.3, 0.3, 0.2, 0.2], - [0.25, 0.25, 0.25, 0.25] - ]) # all possible (a or b up first is more likely) - -with_ties_unequal = [with_ties_unequal, with_ties_all_ab, with_ties_ab] - -def test_with_ties_unequal(): - for pref_rel in with_ties_unequal: - cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) - print(f"CV: {cv}") - assert cv > 0.125 - -# Random matrix - -def random_pref_profile(num_agents, num_options): - rand_matrix = np.random.rand(num_agents, num_options) - # Normalize the matrix - matrix_rand = rand_matrix / rand_matrix.sum(axis=1, keepdims=True) - return matrix_rand - -def majority_rule_with_rand_matrix(num_agents, num_options, iterations=1000): - """ - Run majority rule with ties multiple times, check winners - and calculate the coefficient of variation (CV) of the winners. - :param num_agents: Number of agents. - :param num_options: Number of options. - :param iterations: Number of iterations. - ------- - :return: Dictionary of winner counts {option: count}. - """ - winner_counts = {} - for _ in range(iterations): - # Create random matrix - matrix_rand = random_pref_profile(num_agents, num_options) - ranking = majority_rule(matrix_rand) - winner = ranking[0] - # Count winners - winner_counts[winner] = winner_counts.get(winner, 0) + 1 - return winner_counts - - -def test_with_random_matrix_small(): - """ - Test majority rule on a small random matrix with many iterations. - """ - num_agents = np.random.randint(2, 200) - # Keep num options small to expect all options to win at least once. 
- num_options = np.random.randint(2, 90) - iterations = 100*num_options - start_time = time.time() - wc = majority_rule_with_rand_matrix(num_agents, num_options, iterations) - stop_time = time.time() - # Extract winners from winner-counts dictionary and sort them - sorted_winners = list(wc.keys()) - sorted_winners.sort() - assert sorted_winners == list(range(num_options)) - # Extract count values - counts = np.array(list(wc.values())) - # Calculate the coefficient of variation (CV) - cv = np.std(counts) / np.mean(counts) - assert cv < 0.125 - print(f"\nCV: {cv}") - # Print the time taken - elapsed_time = stop_time - start_time - print(f"\nTime taken: {elapsed_time:.2f} sec. On {iterations} iterations." - f"With {num_agents} agents and {num_options} options.") - - -def test_with_random_matrix_large(): - """ - Test majority rule on a large random matrix (many agents, many options). - """ - num_its = 100 - num_agents = np.random.randint(1000, 3000) - num_options = np.random.randint(2000, 3000) - # Run majority rule test with random matrix - start_time = time.time() - wc = majority_rule_with_rand_matrix(num_agents, num_options, num_its) - stop_time = time.time() - # Len of winners should be approximately equal to the number of iterations - # because with a large number of options, winners should be mostly unique. - winners, counts = list(wc.keys()), list(wc.values()) - assert abs(np.mean(counts) - 1) < 0.1 - assert abs((len(winners) / num_its) - 1) < 0.1 - # Calculate the coefficient of variation (CV) - cv = np.std(counts) / np.mean(counts) - assert cv < 0.2 - # Print the time taken - elapsed_time = stop_time - start_time - print(f"\nTime taken: {elapsed_time:.2f} sec. On {num_its} iterations." 
- f"With {num_agents} agents and {num_options} options.") +# # Cases with ties - "all equally possible" +# +# with_ties_all = np.array([ +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25] +# ]) +# +# with_overall_tie = np.array([ +# [0.4, 0.3, 0.2, 0.1], +# [0.1, 0.4, 0.3, 0.2], +# [0.2, 0.1, 0.4, 0.3], +# [0.3, 0.2, 0.1, 0.4], +# ]) +# +# with_ties_mixed = np.array([ +# [0.4, 0.3, 0.2, 0.1], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25], +# [0.1, 0.4, 0.3, 0.2], +# [0.2, 0.1, 0.4, 0.3], +# [0.25, 0.25, 0.25, 0.25], +# [0.3, 0.2, 0.1, 0.4], +# ]) +# +# all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] +# +# def test_equally_possible(cv_threshold=0.125): +# for pref_rel in all_equally_possible: +# cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) +# print(f"CV: {cv}") +# assert cv < cv_threshold +# +# # Cases with ties - "not all equally possible" +# with_ties_unequal = np.array([ +# [0.25, 0.25, 0.25, 0.25], +# [0.4, 0.3, 0.2, 0.1], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25], +# [0.25, 0.25, 0.25, 0.25] +# ]) +# +# with_ties_all_ab = np.array([ +# [0.3, 0.3, 0.2, 0.2], +# [0.25, 0.25, 0.25, 0.25] +# ]) # all possible (a or b up first is more likely) +# +# with_ties_ab = np.array([ +# [0.3, 0.3, 0.2, 0.2], +# [0.3, 0.3, 0.2, 0.2], +# [0.25, 0.25, 0.25, 0.25] +# ]) # all possible (a or b up first is more likely) +# +# with_ties_unequal = [with_ties_unequal, with_ties_all_ab, with_ties_ab] +# +# def test_with_ties_unequal(): +# for pref_rel in with_ties_unequal: +# cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) +# print(f"CV: {cv}") +# assert cv > 0.125 +# +# # Random matrix +# +# def random_pref_profile(num_agents, num_options): +# rand_matrix = np.random.rand(num_agents, num_options) +# # Normalize the matrix +# matrix_rand = rand_matrix / rand_matrix.sum(axis=1, keepdims=True) +# return matrix_rand +# +# def 
majority_rule_with_rand_matrix(num_agents, num_options, iterations=1000): +# """ +# Run majority rule with ties multiple times, check winners +# and calculate the coefficient of variation (CV) of the winners. +# :param num_agents: Number of agents. +# :param num_options: Number of options. +# :param iterations: Number of iterations. +# ------- +# :return: Dictionary of winner counts {option: count}. +# """ +# winner_counts = {} +# for _ in range(iterations): +# # Create random matrix +# matrix_rand = random_pref_profile(num_agents, num_options) +# ranking = majority_rule(matrix_rand) +# winner = ranking[0] +# # Count winners +# winner_counts[winner] = winner_counts.get(winner, 0) + 1 +# return winner_counts +# +# +# def test_with_random_matrix_small(): +# """ +# Test majority rule on a small random matrix with many iterations. +# """ +# num_agents = np.random.randint(2, 200) +# # Keep num options small to expect all options to win at least once. +# num_options = np.random.randint(2, 90) +# iterations = 100*num_options +# start_time = time.time() +# wc = majority_rule_with_rand_matrix(num_agents, num_options, iterations) +# stop_time = time.time() +# # Extract winners from winner-counts dictionary and sort them +# sorted_winners = list(wc.keys()) +# sorted_winners.sort() +# assert sorted_winners == list(range(num_options)) +# # Extract count values +# counts = np.array(list(wc.values())) +# # Calculate the coefficient of variation (CV) +# cv = np.std(counts) / np.mean(counts) +# assert cv < 0.125 +# print(f"\nCV: {cv}") +# # Print the time taken +# elapsed_time = stop_time - start_time +# print(f"\nTime taken: {elapsed_time:.2f} sec. On {iterations} iterations." +# f"With {num_agents} agents and {num_options} options.") +# +# +# def test_with_random_matrix_large(): +# """ +# Test majority rule on a large random matrix (many agents, many options). 
+# """ +# num_its = 100 +# num_agents = np.random.randint(1000, 3000) +# num_options = np.random.randint(2000, 3000) +# # Run majority rule test with random matrix +# start_time = time.time() +# wc = majority_rule_with_rand_matrix(num_agents, num_options, num_its) +# stop_time = time.time() +# # Len of winners should be approximately equal to the number of iterations +# # because with a large number of options, winners should be mostly unique. +# winners, counts = list(wc.keys()), list(wc.values()) +# assert abs(np.mean(counts) - 1) < 0.1 +# assert abs((len(winners) / num_its) - 1) < 0.1 +# # Calculate the coefficient of variation (CV) +# cv = np.std(counts) / np.mean(counts) +# assert cv < 0.2 +# # Print the time taken +# elapsed_time = stop_time - start_time +# print(f"\nTime taken: {elapsed_time:.2f} sec. On {num_its} iterations." +# f"With {num_agents} agents and {num_options} options.") diff --git a/tests/test_distance_functions.py b/tests/test_distance_functions.py index cff76c6..412fcee 100644 --- a/tests/test_distance_functions.py +++ b/tests/test_distance_functions.py @@ -3,6 +3,8 @@ import numpy as np from itertools import combinations +from democracy_sim.participation_model import ParticipationModel + class TestKendallTauDistance(unittest.TestCase): @@ -120,7 +122,7 @@ def test_kendall_tau_on_orderings(self): n = len(seq1) assert n == len(seq2), \ "Test failed: sequences must have the same length" - pairs = combinations(range(0, n), 2) + pairs = list(combinations(range(0, n), 2)) # Test the ordering version d = unnormalized_kendall_tau(np.array(seq1), np.array(seq2), pairs) print(f"Seq1: {seq1}, Seq2: {seq2}, Expected: {expected}, Got: {d}") diff --git a/tests/test_participation_area_agent.py b/tests/test_participation_area_agent.py index 8359a9c..d7c6e3f 100644 --- a/tests/test_participation_area_agent.py +++ b/tests/test_participation_area_agent.py @@ -16,26 +16,27 @@ def setUp(self): self.model = test_model.model def 
test_update_color_distribution(self): - rand_area = random.sample(self.model.area_scheduler.agents, 1)[0] + rand_area = random.sample(self.model.areas, 1)[0] init_dst = rand_area.color_distribution.copy() print(f"Area {rand_area.unique_id}s initial color dist.: {init_dst}") # Assign new (randomly chosen) cells to the area - all_color_cells = self.model.color_cell_scheduler.agents + all_color_cells = self.model.color_cells rand_area.cells = random.sample(all_color_cells, len(rand_area.cells)) # Run/test the update_color_distribution method rand_area.update_color_distribution() new_dst = rand_area.color_distribution print(f"Area {rand_area.unique_id}s new color distribution: {new_dst}") # Check if the distribution has changed - assert init_dst != new_dst + assert not np.array_equal(init_dst, new_dst), \ + "Error: The color distribution did not change" def test_filter_cells(self): # Get existing area - existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] + existing_area = random.sample(self.model.areas, 1)[0] print(f"The areas color-cells: " f"{[c.unique_id for c in existing_area.cells]}") area_cell_sample = random.sample(existing_area.cells, 4) - other_cells = random.sample(self.model.color_cell_scheduler.agents, 4) + other_cells = random.sample(self.model.color_cells, 4) raw_cell_list = area_cell_sample + other_cells print(f"Cells to be filtered: {[c.unique_id for c in raw_cell_list]}") filtered_cells = existing_area.filter_cells(raw_cell_list) @@ -48,16 +49,25 @@ def test_filter_cells(self): self.assertEqual(area_cell_sample, filtered_cells) def test_conduct_election(self): - area = random.sample(self.model.area_scheduler.agents, 1)[0] - area.conduct_election(majority_rule, spearman) - area.conduct_election(approval_voting, spearman) - area.conduct_election(majority_rule, kendall_tau) - area.conduct_election(approval_voting, kendall_tau) + area = random.sample(self.model.areas, 1)[0] + # Test with majority_rule and spearman + self.model.voting_rule 
= majority_rule + self.model.distance_func = spearman + area.conduct_election() + # Test with approval_voting and spearman + self.model.voting_rule = approval_voting + area.conduct_election() + # Test with approval_voting and kendall_tau + self.model.distance_func = kendall_tau + area.conduct_election() + # Test with majority_rule and kendall_tau + self.model.voting_rule = majority_rule + area.conduct_election() # TODO def test_estimate_real_distribution(self): # Get any existing area - existing_area = random.sample(self.model.area_scheduler.agents, 1)[0] + existing_area = random.sample(self.model.areas, 1)[0] # Additional area and agent personality = np.zeros(self.model.num_colors) personality[0] = 0.3 @@ -90,7 +100,7 @@ def test_estimate_real_distribution(self): [c.unique_id for c in rel_cells], "with colors", rel_color_vec) est_distribution = a.estimate_real_distribution(test_area) print(f"{a.unique_id}s' estimated color dist is: {est_distribution}") - self.assertEqual(sum(est_distribution), 1.0) + self.assertAlmostEqual(sum(est_distribution), 1.0, places=7) len_colors = self.model.num_colors self.assertEqual(len(est_distribution), len_colors) counts = [rel_color_vec.count(color) for color in range(len_colors)] diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index 3d05735..5aa5723 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -1,5 +1,7 @@ import unittest -from democracy_sim.participation_model import ParticipationModel, Area +from democracy_sim.participation_model import (ParticipationModel, Area, + distance_functions, + social_welfare_functions) from democracy_sim.model_setup import (grid_rows as height, grid_cols as width, num_agents, num_colors, num_areas, num_personalities, @@ -31,10 +33,16 @@ def setUp(self): av_area_width=av_area_width, area_size_variance=area_size_variance, patch_power=patch_power, - max_reward=max_reward) + max_reward=max_reward, + show_area_stats=False) + + # 
def test_empty_model(self): + # # TODO: Test empty model + # model = ParticipationModel(10, 10, 0, 1, 0, 1, 0, 1, 1, 0.1, 1, 0, False, 1, 1, 1, 1, 1, False) + # self.assertEqual(model.num_agents, 0) def test_initialization(self): - areas_count = len([area for area in self.model.area_scheduler.agents + areas_count = len([area for area in self.model.areas if isinstance(area, Area)]) self.assertEqual(areas_count, self.model.num_areas) self.assertIsInstance(self.model.datacollector, mesa.DataCollector) @@ -43,12 +51,10 @@ def test_initialization(self): def test_model_options(self): self.assertEqual(self.model.num_agents, num_agents) self.assertEqual(self.model.num_colors, num_colors) - self.assertEqual(self.model.num_personalities, num_personalities) self.assertEqual(self.model.num_personality_colors, npc) self.assertEqual(self.model.num_areas, num_areas) self.assertEqual(self.model.area_size_variance, area_size_variance) self.assertEqual(self.model.draw_borders, draw_borders) - self.assertEqual(self.model.heterogeneity, heterogeneity) v_rule = social_welfare_functions[rule_idx] dist_func = distance_functions[distance_idx] self.assertEqual(self.model.voting_rule, v_rule) From 76b7114cae86232a7ad89a133ba9f1ecbdb6863c Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 31 Oct 2024 21:43:57 +0100 Subject: [PATCH 26/38] introduced an election_impact_on_mutation factor that steers the impact the elected ordering has on each color mutation --- democracy_sim/model_setup.py | 6 +++ democracy_sim/participation_model.py | 66 ++++++++++++++-------------- tests/test_participation_model.py | 2 + 3 files changed, 42 insertions(+), 32 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 4d2d6a7..2013473 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -17,6 +17,7 @@ ############# election_costs = 5 max_reward = 50 +election_impact_on_mutation = 1.0 # 0.1-5.0 # Voting rules (see social_welfare_functions.py) rule_idx 
= 0 # Distance functions (see distance_functions.py) @@ -205,6 +206,11 @@ def participation_draw(cell: ColorCell): max_value=election_costs*100, step=1, description="The costs for participating in an election" ), + "election_impact_on_mutation": mesa.visualization.Slider( + name="Election impact on mutation", value=election_impact_on_mutation, + min_value=0.1, max_value=5.0, step=0.1, + description="Factor determining how strong mutation accords to election" + ), "num_agents": mesa.visualization.Slider( name="# Agents", value=num_agents, min_value=10, max_value=99999, step=10 diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 3dadd64..32b2f25 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -30,9 +30,10 @@ def __init__(self, unique_id, model, height, width, size_variance): self._set_dimensions(width, height, size_variance) self.agents = [] self.cells = [] + self.color_probs = model.color_probs # For efficiency self._idx_field = None # An indexing position of the area in the grid self.color_distribution = np.zeros(model.num_colors) # Initialize to 0 - self.voted_distribution = np.zeros(model.num_colors) + self.voted_ordering = np.zeros(model.num_colors) self.voter_turnout = 0 # In percent self.dist_to_reality = None # Elected vs. actual color distribution @@ -128,19 +129,6 @@ def add_agent(self, agent): def add_cell(self, cell): self.cells.append(cell) - def curr_norm_dist(self): - """ - This method calculates the current distance of the area's real color - distribution (as ordering) - to the latest voted distribution ordering. - It uses the models distance function. 
- """ - real_color_ord = np.argsort(self.color_distribution) - voted_color_ord = self.voted_distribution - dist_to_reality = self.model.distance_func(real_color_ord, - voted_color_ord, - self.model.color_search_pairs) - return dist_to_reality def conduct_election(self): """ @@ -148,16 +136,17 @@ def conduct_election(self): the election in the area as well as handling the payments and rewards. :return voter_turnout: The percentage of agents that participated. """ + model = self.model + el_costs = model.election_costs + dist_func = model.distance_func # Ask agents to participate - # TODO: WHERE to discretize if needed? preference_profile = [] - # TODO: change the color pattern to an even less guessable form(?) for agent in self.agents: - if (agent.assets >= self.model.election_costs + if (agent.assets >= el_costs and agent.ask_for_participation(area=self)): agent.num_elections_participated += 1 # Collect the participation fee from the agents - agent.assets = agent.assets - self.model.election_costs + agent.assets = agent.assets - el_costs # Ask participating agents for their prefs preference_profile.append(agent.vote(area=self)) preference_profile = np.array(preference_profile) @@ -166,20 +155,21 @@ def conduct_election(self): print("Area", self.unique_id, "no one participated in the election") return 0 # TODO: What to do in this case? Cease the simulation? 
# Aggregate the prefs using the v-rule ⇒ returns an option ordering - print("### Area", self, "\nv-rule:", self.model.voting_rule, "dist-func:", self.model.distance_func) - aggregated = self.model.voting_rule(preference_profile) - # Save the "elected" distribution in self.voted_distribution + aggregated = model.voting_rule(preference_profile) + # Save the "elected" ordering in self.voted_ordering winning_option = aggregated[0] - self.voted_distribution = self.model.options[winning_option] + self.voted_ordering = model.options[winning_option] # Calculate the distance to the real distribution using distance_func - self.dist_to_reality = self.curr_norm_dist() + real_color_ord = np.argsort(self.color_distribution) + self.dist_to_reality = dist_func(real_color_ord, self.voted_ordering, + model.color_search_pairs) # Calculate the rewards per agent - reward_pa = (1 - self.dist_to_reality) * self.model.max_reward + reward_pa = (1 - self.dist_to_reality) * model.max_reward # Distribute the two types of rewards + color_search_pairs = model.color_search_pairs for agent in self.agents: - # Personality-based reward - # TODO: # NOW Calculate value p\in(0,1) based on how well the consensus fits the personality of the agent (should better be fast) - p = self.random.uniform(0, 1) + # Personality-based reward factor + p = dist_func(agent.personality, real_color_ord, color_search_pairs) # + Common reward (reward_pa) for all agents agent.assets = agent.assets + p * reward_pa + reward_pa # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process @@ -226,13 +216,13 @@ def step(self) -> None: # TODO/Idea: What if the voter_turnout determines the mutation rate? 
# randomly select x cells cells_to_mutate = self.random.sample(self.cells, n_to_mutate) - # Use the normalized voted distribution as probabilities for the colors - probs = self.voted_distribution / self.voted_distribution.sum() - # Pre-select colors for all cells to mutate + # Use voted ordering to pick colors in descending order + # To pre-select colors for all cells to mutate # TODO: Think about this: should we take local color-structure # into account - like in color patches - to avoid colors mutating into # very random structures? # Middendorf - colors = np.random.choice(self.model.colors, size=n_to_mutate, p=probs) + colors = np.random.choice(self.voted_ordering, size=n_to_mutate, + p=self.color_probs) # Assign the newly selected colors to the cells for cell, color in zip(cells_to_mutate, colors): cell.color = color @@ -388,7 +378,7 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - num_personality_colors, + num_personality_colors, election_impact_on_mutation, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -418,6 +408,8 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.options = create_all_options(num_colors) # Simulation variables # TODO self.mu # Mutation rate for the color cells + # Election impact factor on color mutation through a probability array + self.color_probs = self.init_color_probs(election_impact_on_mutation) # Create search pairs once for faster iterations when comparing rankings self.search_pairs = list(combinations(range(0, self.options.size), 2)) # TODO check if correct! 
self.option_vec = np.arange(self.options.size) # Also to speed up @@ -507,6 +499,16 @@ def initialize_voting_agents(self): cell = cast(ColorCell, cell) cell.add_agent(agent) + def init_color_probs(self, election_impact): + """ + This method initializes a probability array for the mutation of colors. + The probabilities reflect the election outcome with some impact factor. + :param election_impact: The impact the election has on the mutation. + """ + p = (np.arange(self.num_colors, 0, -1)) ** election_impact + # Normalize + p = p / sum(p) + return p def initialize_area(self, a_id: int, x_coord, y_coord): """ diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index 5aa5723..6ff15b3 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -6,6 +6,7 @@ num_agents, num_colors, num_areas, num_personalities, num_personality_colors as npc, + election_impact_on_mutation as e_impact, draw_borders, rule_idx, distance_idx, color_heterogeneity as heterogeneity, color_patches_steps, av_area_height, @@ -22,6 +23,7 @@ def setUp(self): num_colors=num_colors, num_personalities=num_personalities, num_personality_colors=npc, + election_impact_on_mutation=e_impact, num_areas=num_areas, draw_borders=draw_borders, election_costs=election_costs, From 87383b055e9b27c8cb38c29aa183ea03e6824d59 Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 1 Nov 2024 17:12:05 +0100 Subject: [PATCH 27/38] introduced a mutation rate variable mu to the system --- democracy_sim/model_setup.py | 5 +++++ democracy_sim/participation_agent.py | 1 - democracy_sim/participation_model.py | 9 ++++----- tests/test_participation_model.py | 4 ++-- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 2013473..dfb7e1c 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -18,6 +18,7 @@ election_costs = 5 max_reward = 50 election_impact_on_mutation = 1.0 # 
0.1-5.0 +mu = 0.01 # 0.001-0.5 # Voting rules (see social_welfare_functions.py) rule_idx = 0 # Distance functions (see distance_functions.py) @@ -206,6 +207,10 @@ def participation_draw(cell: ColorCell): max_value=election_costs*100, step=1, description="The costs for participating in an election" ), + "mu": mesa.visualization.Slider( + name="Mutation rate", value=mu, min_value=0.001, max_value=0.5, + step=0.001, description="Probability of a color cell to mutate" + ), "election_impact_on_mutation": mesa.visualization.Slider( name="Election impact on mutation", value=election_impact_on_mutation, min_value=0.1, max_value=5.0, step=0.1, diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 05797f1..1336f7e 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -172,7 +172,6 @@ def vote(self, area): dist_func = self.model.distance_func ranking = np.zeros(options.shape[0]) color_search_pairs = self.model.color_search_pairs - print(color_search_pairs, "dist_func", dist_func, "voting_rule", self.model.voting_rule) for i, option in enumerate(options): # TODO: is it possible to leave out white? ranking[i] = dist_func(self.personality, option, color_search_pairs) diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 32b2f25..56bbd9f 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -30,7 +30,6 @@ def __init__(self, unique_id, model, height, width, size_variance): self._set_dimensions(width, height, size_variance) self.agents = [] self.cells = [] - self.color_probs = model.color_probs # For efficiency self._idx_field = None # An indexing position of the area in the grid self.color_distribution = np.zeros(model.num_colors) # Initialize to 0 self.voted_ordering = np.zeros(model.num_colors) @@ -212,7 +211,7 @@ def step(self) -> None: return # TODO: What to do if no agent participated..? 
# Mutate colors in cells # Take some number of cells to mutate (i.e. 5 %) - n_to_mutate = int(0.05 * self.num_cells) # TODO create a self.model.mu variable as mutation rate + n_to_mutate = int(self.model.mu * self.num_cells) # TODO/Idea: What if the voter_turnout determines the mutation rate? # randomly select x cells cells_to_mutate = self.random.sample(self.cells, n_to_mutate) @@ -222,7 +221,7 @@ def step(self) -> None: # into account - like in color patches - to avoid colors mutating into # very random structures? # Middendorf colors = np.random.choice(self.voted_ordering, size=n_to_mutate, - p=self.color_probs) + p=self.model.color_probs) # Assign the newly selected colors to the cells for cell, color in zip(cells_to_mutate, colors): cell.color = color @@ -378,7 +377,7 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - num_personality_colors, election_impact_on_mutation, + num_personality_colors, mu, election_impact_on_mutation, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -407,7 +406,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.distance_func = distance_functions[distance_idx] self.options = create_all_options(num_colors) # Simulation variables - # TODO self.mu # Mutation rate for the color cells + self.mu = mu # Mutation rate for the color cells (0.1 = 10 % mutate) # Election impact factor on color mutation through a probability array self.color_probs = self.init_color_probs(election_impact_on_mutation) # Create search pairs once for faster iterations when comparing rankings diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index 6ff15b3..6f857b4 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -5,7 +5,7 
@@ from democracy_sim.model_setup import (grid_rows as height, grid_cols as width, num_agents, num_colors, num_areas, num_personalities, - num_personality_colors as npc, + num_personality_colors as npc, mu, election_impact_on_mutation as e_impact, draw_borders, rule_idx, distance_idx, color_heterogeneity as heterogeneity, @@ -22,7 +22,7 @@ def setUp(self): num_agents=num_agents, num_colors=num_colors, num_personalities=num_personalities, - num_personality_colors=npc, + num_personality_colors=npc, mu=mu, election_impact_on_mutation=e_impact, num_areas=num_areas, draw_borders=draw_borders, From 75c1ea0fdd3dfc2aede4c0fd836324b47dec99b6 Mon Sep 17 00:00:00 2001 From: jurikane Date: Wed, 6 Nov 2024 13:10:29 +0100 Subject: [PATCH 28/38] started to implement a normal-distribution among agent personalities --- democracy_sim/model_setup.py | 9 +++++++++ democracy_sim/participation_model.py | 25 ++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index dfb7e1c..3bde8c6 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -233,6 +233,15 @@ def participation_draw(cell: ColorCell): value=num_personality_colors, min_value=1, max_value=num_colors-1, step=1 ), + "pers_mean": mesa.visualization.Slider( + name="Personality mean", value=0.5, min_value=0.0, max_value=1.0, + step=0.1, description="The mean of the personality distribution" + ), + "pers_std_dev": mesa.visualization.Slider( + name="Personality standard deviation", value=0.1, min_value=0.0, + max_value=1.0, step=0.1, + description="The standard deviation of the personality distribution" + ), "color_patches_steps": mesa.visualization.Slider( name="Patches size (# steps)", value=color_patches_steps, min_value=0, max_value=9, step=1, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 56bbd9f..fafc566 100644 --- a/democracy_sim/participation_model.py +++ 
b/democracy_sim/participation_model.py @@ -377,7 +377,8 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - num_personality_colors, mu, election_impact_on_mutation, + num_personality_colors, pers_mean, pers_std_dev, + mu, election_impact_on_mutation, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -421,6 +422,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents self.num_personality_colors = num_personality_colors self.personalities = self.create_personalities(num_personalities) + self.personality_distribution = self.pers_dist(pers_mean, pers_std_dev) self.initialize_voting_agents() # Area variables self.global_area = self.initialize_global_area() # TODO create bool variable to make this optional @@ -483,11 +485,12 @@ def initialize_voting_agents(self): It also ensures that each agent is assigned to the color cell it is standing on. """ + dist = self.personality_distribution for a_id in range(self.num_agents): # Get a random position x = self.random.randrange(self.width) y = self.random.randrange(self.height) - personality = self.random.choice(self.personalities) + personality = self.random.choice(self.personalities, p=dist) # Create agent without appending (add to the pre-defined list) agent = VoteAgent(a_id, self, (x, y), personality, assets=5, add=False) # TODO: initial assets?! @@ -579,7 +582,9 @@ def create_personalities(self, n): """ #p_colors = range(1, self.num_colors) # Personalities exclude white # TODO is it possible to leave out white (--> dist-func)? 
- p_colors = range(self.num_colors) + p_colors = list(range(self.num_colors)) + # Make sure the personalities aren't always in the same order + self.random.shuffle(p_colors) # Not actually needed but just in case personality_options = np.array(list(itertools.permutations(p_colors))) if len(personality_options) < n: @@ -589,6 +594,20 @@ def create_personalities(self, n): selected_personalities = personality_options[indices] return selected_personalities + def pers_dist(self, mean, std_dev): + """ + This method creates a normalized normal distribution array for picking + and depicting the distribution of personalities in the model. + :param mean: The mean value of the normal distribution. + :param std_dev: The standard deviation of the normal distribution. + :return: A normalized normal distribution array. + """ + # Generate a normal distribution + dist = np.random.normal(mean, std_dev, len(self.personalities)) + dist = np.abs(dist) # Ensure non-negative values + dist /= dist.sum() # Normalize + return dist + def initialize_datacollector(self): color_data = {f"Color {i}": get_color_distribution_function(i) for i in From caf72a6a7231c04215056fa8676a15f850e7617c Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 28 Nov 2024 11:22:44 +0100 Subject: [PATCH 29/38] Fix major confusion in ranking logic in social welfare functions AND added better and more statistical overviews --- democracy_sim/model_setup.py | 57 +++------- democracy_sim/participation_model.py | 51 +++++---- democracy_sim/run.py | 6 +- democracy_sim/social_welfare_functions.py | 66 +++++++++--- democracy_sim/visualisation_elements.py | 124 ++++++++++++++++------ tests/test_majority_rule.py | 41 +++---- tests/test_participation_model.py | 22 ++++ tests/test_pers_dist.py | 39 +++++++ 8 files changed, 278 insertions(+), 128 deletions(-) create mode 100644 tests/test_pers_dist.py diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 3bde8c6..95f5ecd 100644 --- 
a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -7,7 +7,7 @@ from democracy_sim.participation_model import (ParticipationModel, distance_functions, social_welfare_functions) -from math import comb +from math import factorial import mesa # Parameters @@ -22,7 +22,7 @@ # Voting rules (see social_welfare_functions.py) rule_idx = 0 # Distance functions (see distance_functions.py) -distance_idx = 0 +distance_idx = 1 #################### # Model parameters # #################### @@ -33,8 +33,8 @@ patch_power = 1.0 color_heterogeneity = 0.3 # Voting Agents -num_personality_colors = 2 -num_personalities = comb(num_colors, num_personality_colors) +num_personality_colors = 4 # TODO: does this make sense when we have to use orderings anyways? +num_personalities = 10 # Grid grid_rows = 100 # height grid_cols = 80 # width @@ -43,16 +43,19 @@ canvas_width = grid_cols * cell_size draw_borders = True # Voting Areas -num_areas = 16 -av_area_height = 25 -# area_height = grid_rows // int(sqrt(num_areas)) -av_area_width = 20 -# area_width = grid_cols // int(sqrt(num_areas)) +# num_areas = 16 +# av_area_height = 25 +# # area_height = grid_rows // int(sqrt(num_areas)) +# av_area_width = 20 +# # area_width = grid_cols // int(sqrt(num_areas)) +num_areas = 4 +av_area_height = 50 +av_area_width = 40 area_size_variance = 0.0 ######################## # Statistics and Views # ######################## -show_area_stats = False +show_area_stats = True _COLORS = [ @@ -137,27 +140,6 @@ def participation_draw(cell: ColorCell): ) -# # Draw bars (Test) -# def draw_color_dist_bars(color_distributions): -# # Setup plot -# fig, ax = plt.subplots() -# for i, dist in enumerate(color_distributions): -# bottom = 0 -# for j, part in enumerate(color_distributions): -# ax.bar(i, part, bottom=bottom, color=_COLORS[j % len(_COLORS)]) -# bottom += part -# # Set x-ticks to be distribution indices -# plt.xticks(range(len(color_distributions))) -# plt.show() -# -# -# def 
plot_color_distribution(model, ax): -# agent_df = model.datacollector.get_agent_vars_dataframe() -# color_distributions = agent_df.groupby('Step')['Color Distribution'].apply(list).tolist() -# sns.barplot(data=color_distributions, ax=ax) -# ax.set_title('Color Distribution Over Time') - - wealth_chart = mesa.visualization.modules.ChartModule( [{"Label": "Collective assets", "Color": "Black"}], data_collector_name='datacollector' @@ -195,7 +177,7 @@ def participation_draw(cell: ColorCell): value=rule_idx, min_value=0, max_value=len(social_welfare_functions)-1, ), "distance_idx": mesa.visualization.Slider( - name=f"Rule index {[f.__name__ for f in distance_functions]}", + name=f"Dist-Function index {[f.__name__ for f in distance_functions]}", value=distance_idx, min_value=0, max_value=len(distance_functions)-1, ), "election_costs": mesa.visualization.Slider( @@ -226,22 +208,13 @@ def participation_draw(cell: ColorCell): ), "num_personalities": mesa.visualization.Slider( name="# of different personalities", value=num_personalities, - min_value=1, max_value=comb(num_colors, num_personality_colors), step=1 + min_value=1, max_value=factorial(num_personality_colors), step=1 ), "num_personality_colors": mesa.visualization.Slider( name="# colors determining the personality", value=num_personality_colors, min_value=1, max_value=num_colors-1, step=1 ), - "pers_mean": mesa.visualization.Slider( - name="Personality mean", value=0.5, min_value=0.0, max_value=1.0, - step=0.1, description="The mean of the personality distribution" - ), - "pers_std_dev": mesa.visualization.Slider( - name="Personality standard deviation", value=0.1, min_value=0.0, - max_value=1.0, step=0.1, - description="The standard deviation of the personality distribution" - ), "color_patches_steps": mesa.visualization.Slider( name="Patches size (# steps)", value=color_patches_steps, min_value=0, max_value=9, step=1, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 
fafc566..8d9ede6 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -138,7 +138,7 @@ def conduct_election(self): model = self.model el_costs = model.election_costs dist_func = model.distance_func - # Ask agents to participate + # Ask agents for participation preference_profile = [] for agent in self.agents: if (agent.assets >= el_costs @@ -148,6 +148,8 @@ def conduct_election(self): agent.assets = agent.assets - el_costs # Ask participating agents for their prefs preference_profile.append(agent.vote(area=self)) + # agent.vote returns an array containing dissatisfaction values + # between 0 and 1 for each option, interpretable as rank values. preference_profile = np.array(preference_profile) # Check for the case that no agent participated if preference_profile.ndim != 2: @@ -159,7 +161,7 @@ def conduct_election(self): winning_option = aggregated[0] self.voted_ordering = model.options[winning_option] # Calculate the distance to the real distribution using distance_func - real_color_ord = np.argsort(self.color_distribution) + real_color_ord = np.argsort(self.color_distribution)[::-1] # Descending self.dist_to_reality = dist_func(real_color_ord, self.voted_ordering, model.color_search_pairs) # Calculate the rewards per agent @@ -206,9 +208,10 @@ def step(self) -> None: mutate the cells' colors according to the election outcome and update the color distribution of the area. """ - self.voter_turnout = self.conduct_election() + self.voter_turnout = self.conduct_election() # The main election logic! if self.voter_turnout == 0: return # TODO: What to do if no agent participated..? + # Mutate colors in cells # Take some number of cells to mutate (i.e. 
5 %) n_to_mutate = int(self.model.mu * self.num_cells) @@ -247,6 +250,8 @@ def compute_gini_index(model): # Calculate the Gini Index cumulative_sum = sum((i + 1) * sorted_assets[i] for i in range(n)) total_sum = sum(sorted_assets) + if total_sum == 0: + return 0 # No agent has any assets => view as total equality gini_index = (2 * cumulative_sum) / (n * total_sum) - (n + 1) / n return int(gini_index * 100) # Return in "percent" (0-100) @@ -300,7 +305,7 @@ def create_all_options(n, include_ties=False): def create_personality(num_colors, num_personality_colors): - """ + """ NOT USED Creates and returns a list of 'personalities' that are to be assigned to agents. Each personality is a NumPy array of length 'num_colors' but it is not a full ranking vector since the number of colors influencing @@ -338,7 +343,7 @@ def get_area_voter_turnout(area): return area.voter_turnout return None -def get_area_closeness_to_reality(area): +def get_area_dist_to_reality(area): if isinstance(area, Area): return area.dist_to_reality return None @@ -348,6 +353,11 @@ def get_area_color_distribution(area): return area.color_distribution.tolist() return None +def get_election_results(area): + if isinstance(area, Area): + return area.voted_ordering.tolist() + return None + # def get_area_personality_based_reward(area): # # Assuming you have a method to calculate this in the Area class # return area.calculate_personality_reward() @@ -377,8 +387,7 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - num_personality_colors, pers_mean, pers_std_dev, - mu, election_impact_on_mutation, + num_personality_colors, mu, election_impact_on_mutation, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -422,7 +431,7 @@ def __init__(self, height, width, num_agents, 
num_colors, num_personalities, self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents self.num_personality_colors = num_personality_colors self.personalities = self.create_personalities(num_personalities) - self.personality_distribution = self.pers_dist(pers_mean, pers_std_dev) + self.personality_distribution = self.pers_dist(num_personalities) self.initialize_voting_agents() # Area variables self.global_area = self.initialize_global_area() # TODO create bool variable to make this optional @@ -486,11 +495,12 @@ def initialize_voting_agents(self): standing on. """ dist = self.personality_distribution + rng = np.random.default_rng() for a_id in range(self.num_agents): # Get a random position x = self.random.randrange(self.width) y = self.random.randrange(self.height) - personality = self.random.choice(self.personalities, p=dist) + personality = rng.choice(self.personalities, p=dist) # Create agent without appending (add to the pre-defined list) agent = VoteAgent(a_id, self, (x, y), personality, assets=5, add=False) # TODO: initial assets?! @@ -591,21 +601,25 @@ def create_personalities(self, n): raise ValueError("Not enough unique personality options available.") indices = np.random.choice(len(personality_options), n, replace=False) - selected_personalities = personality_options[indices] + selected_personalities = personality_options[indices].copy() + + del personality_options # Free up memory (variable may be very large) return selected_personalities - def pers_dist(self, mean, std_dev): + def pers_dist(self, size): """ This method creates a normalized normal distribution array for picking and depicting the distribution of personalities in the model. - :param mean: The mean value of the normal distribution. - :param std_dev: The standard deviation of the normal distribution. - :return: A normalized normal distribution array. + :param size: The mean value of the normal distribution. + :return: A normalized (sum is one) array mimicking a gaussian curve. 
""" # Generate a normal distribution - dist = np.random.normal(mean, std_dev, len(self.personalities)) - dist = np.abs(dist) # Ensure non-negative values - dist /= dist.sum() # Normalize + rng = np.random.default_rng() + dist = rng.normal(0, 1, size) + dist.sort() # To create a gaussian curve like array + dist = np.abs(dist) # Flip negative values "up" + # Normalize the distribution to sum to one + dist /= dist.sum() return dist @@ -625,8 +639,9 @@ def initialize_datacollector(self): # #"VoterTurnout": lambda a: a.voter_turnout if isinstance(a, Area) else None, "VoterTurnout": get_area_voter_turnout, - "Closeness to Reality": get_area_closeness_to_reality, + "DistToReality": get_area_dist_to_reality, "ColorDistribution": get_area_color_distribution, + "ElectionResults": get_election_results, # "Personality-Based Reward": get_area_personality_based_reward, # "Gini Index": get_area_gini_index }, diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 517d2b4..91b8546 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -24,14 +24,18 @@ def reset_model(self): color_distribution_element = ColorDistributionElement() +personality_distribution = PersonalityDistribution() +election_results = ElectionResultsElement() steps_text = StepsTextElement() vto_areas = VoterTurnoutElement() server = CustomModularServer( model_cls=ParticipationModel, visualization_elements=[canvas_element, color_distribution_chart, + personality_distribution, wealth_chart, voter_turnout, vto_areas, - color_distribution_element, steps_text], + color_distribution_element, election_results, + steps_text], name="DemocracySim", model_params=params, ) diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index d325922..9dc5112 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -33,7 +33,7 @@ def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): :param pref_table: The 
agent's preferences. :param noise_factor: Influences the amount of noise to be added to the preference table to break ties (higher is less noise). - :return: The preference table without any ties. + :return: The preference table without ties for first choices. """ # Add some random noise to break ties (based on the variances) variances = np.var(pref_table, axis=1) @@ -44,18 +44,18 @@ def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): pref_tab_var_non_zero = pref_table[~mask] n, m = pref_tab_var_non_zero.shape - # Set all values in the var_zero_part to zero and then add a random 1 - pref_tab_var_zero.fill(0) + # Set exactly one option to 0 (the first choice) and the rest to 1/(m-1) + pref_tab_var_zero.fill(1 / (m-1)) for i in range(pref_tab_var_zero.shape[0]): rand_option = np.random.randint(0, m) - pref_tab_var_zero[i, rand_option] = 1 + pref_tab_var_zero[i, rand_option] = 0 # On the non-zero part, add some noise to the values to break ties non_zero_variances = variances[~mask] # Generate noise based on the variances noise_eps = non_zero_variances / noise_factor noise = np.random.uniform(-noise_eps[:, np.newaxis], noise_eps[:, np.newaxis], (n, m)) - # noise_eps[:, np.newaxis] reshapes noise_eps from shape (n,) to (n, 1) + # The noise_eps[:, np.newaxis] reshapes noise_eps from shape (n,) to (n, 1) pref_tab_var_non_zero += noise # Put the parts back together @@ -66,15 +66,16 @@ def majority_rule(pref_table): This function implements the majority rule social welfare function. Beware: Input is a preference table (values define a ranking, index=option), but the output is a ranking/an ordering (values represent options). - :param pref_table: The agent's preferences as a NumPy matrix + :param pref_table: The agent's preferences (disagreement) as a NumPy matrix containing the normalized ranking vectors of all agents. + The logic: lower rank = lower disagreement :return: The resulting preference ranking (beware: its not a pref. 
relation) """ n, m = pref_table.shape # n agents, m options # Break ties if they exist pref_table = run_tie_breaking_preparation_for_majority(pref_table) - # Count how often an option is ranked first (indexes of max values) - first_choices = np.argmax(pref_table, axis=1) + # Count how often an option is ranked first (indexes of the min values) + first_choices = np.argmin(pref_table, axis=1) # To avoid bias toward voters of low indices in the counting, we shuffle np.random.shuffle(first_choices) # (crucial when counting shows ties later) first_choice_counts = {} @@ -98,7 +99,8 @@ def majority_rule(pref_table): def preprocessing_for_approval(pref_table, threshold=None): """ This function prepares the preference table for approval voting - by interpreting evey value above the threshold as an approval. + by interpreting every value below a threshold as an approval. + Beware: the values are distance/disagreement => smaller = less disagreement The standard threshold is 1/m (m = number of options). The reasoning is that if the preferences are normalized, 1/m ensures the threshold to be proportionate to the number of options. @@ -111,25 +113,59 @@ def preprocessing_for_approval(pref_table, threshold=None): """ if threshold is None: threshold = 1 / pref_table.shape[1] - return (pref_table >= threshold).astype(int) + return (pref_table < threshold).astype(int) -def approval_voting(pref_table): +def imp_prepr_for_approval(pref_table, threshold=None): + """ + This is just like preprocessing_for_approval, but more intelligent. + It sets the threshold depending on the variances. + :param pref_table: The agent's preferences. + Beware: the values are disagreements => smaller = less disagreement + :param threshold: Will be overwritten (will be set according to variances). + :return: The preference table with the options approved or not. 
""" + threshold = np.var(pref_table, axis=0) + return (pref_table <= threshold).astype(int) + + +def approval_voting(pref_table): + """ TODO: does this take the meaning of the values into account? value = dist. = disagreement ! This function implements the approval voting social welfare function. Beware: Input is a preference table (values define a ranking, index=option), but the output is a ranking/an ordering (values represent options). - :param pref_table: The agent's preferences as a NumPy matrix + :param pref_table: The agent's preferences (disagreement) as a NumPy matrix containing the normalized ranking vectors of all agents. + The logic: lower rank = lower disagreement :return: The resulting preference ranking (beware: not a pref. relation). """ - pref_table = preprocessing_for_approval(pref_table) + pref_table = imp_prepr_for_approval(pref_table) # Count how often each option is approved approval_counts = np.sum(pref_table, axis=0) # Add noise to break ties TODO check for bias - noise = np.random.uniform(-0.3, 0.3, len(approval_counts)) + eps = 1e-4 + noise = np.random.uniform(-eps, eps, len(approval_counts)) #option_count_pairs = list(enumerate(approval_counts + noise)) #option_count_pairs.sort(key=lambda x: x[1], reverse=True) #return [pair[0] for pair in option_count_pairs] - return np.argsort(-(approval_counts + noise)) + return np.argsort(-(approval_counts + noise)) # TODO: check order (ascending/descending) - np.argsort sorts ascending + +def continuous_score_voting(pref_table): + """ + TODO: integrade and test + This function implements a continuous score voting based on disagreement. + Beware: Input is a preference table (values define a ranking, index=option), + but the output is a ranking/an ordering (values represent options). + :param pref_table: The agent's preferences (disagreement) as a NumPy matrix + containing the normalized ranking vectors of all agents. 
+ The logic: lower rank = lower disagreement + :return: The resulting preference ranking (beware: not a pref. relation). + """ + # Sum up the disagreement for each option + scores = np.sum(pref_table, axis=0) + # Add noise to break ties + eps = 1e-8 + noise = np.random.uniform(-eps, eps, len(scores)) + ranking = np.argsort(-(scores + noise)) + return ranking diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index 810332a..a48c27e 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -1,5 +1,7 @@ import matplotlib.pyplot as plt +from typing import TYPE_CHECKING, cast from mesa.visualization import TextElement +import matplotlib.patches as patches from model_setup import _COLORS import base64 import math @@ -27,17 +29,18 @@ def render(self, model): # Fetch data from the datacollector data = model.datacollector.get_agent_vars_dataframe() color_distribution = data['ColorDistribution'].dropna() + dist_to_reality = data['DistToReality'].dropna() - # Extract unique area IDs - area_ids = color_distribution.index.get_level_values(1).unique() + # Extract unique area IDs (excluding the global area) + area_ids = color_distribution.index.get_level_values(1).unique()[1:] num_colors = len(color_distribution.iloc[0]) # Create subplots within a single figure - num_areas = len(area_ids) + num_areas = len(area_ids) - 1 # Exclude the global area num_cols = math.ceil(math.sqrt(num_areas)) num_rows = math.ceil(num_areas / num_cols) fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, - figsize=(20, 20), sharex=True) + figsize=(8, 8), sharex=True) for ax, area_id in zip(axes.flatten(), area_ids): area_data = color_distribution.xs(area_id, level=1) @@ -45,6 +48,9 @@ def render(self, model): color_data = area_data.apply(lambda x: x[color_idx]) ax.plot(color_data.index, color_data.values, label=f'Color {color_idx}', color=_COLORS[color_idx]) + area_data = dist_to_reality.xs(area_id, level=1) + 
ax.plot(area_data.index, area_data.values, + label=f'Distance to reality', color='Black') ax.set_title(f'Area {area_id}') ax.set_xlabel('Step') ax.set_ylabel('Color Distribution') @@ -54,33 +60,87 @@ def render(self, model): return save_plot_to_base64(fig) -# class VoterTurnoutElement(TextElement): -# def render(self, model): -# # Only render if show_area_stats is enabled -# step = model.scheduler.steps -# if not model.show_area_stats or step == 0 or step % 10 != 0: -# return "" -# # Fetch data from the datacollector -# data = model.datacollector.get_agent_vars_dataframe() -# voter_turnout = data['VoterTurnout'].dropna() -# -# # Extract unique area IDs -# area_ids = voter_turnout.index.get_level_values(1).unique() -# -# # Create subplots within a single figure -# num_areas = len(area_ids) -# fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(20, 20), sharex=True) -# -# for ax, area_id in zip(axes.flatten(), area_ids): -# area_data = voter_turnout.xs(area_id, level=1) -# ax.plot(area_data.index, area_data.values, label=f'Area {area_id}') -# ax.set_title(f'Area {area_id}') -# ax.set_xlabel('Step') -# ax.set_ylabel('Voter Turnout (%)') -# ax.legend() -# -# plt.tight_layout() -# return save_plot_to_base64(fig) +class ElectionResultsElement(TextElement): + def render(self, model): + # TODO: put together with ColorDistributionElement to create both at once + # Only render if show_area_stats is enabled + step = model.scheduler.steps + if not model.show_area_stats or step == 0: + return "" + + # Fetch data from the datacollector + data = model.datacollector.get_agent_vars_dataframe() + election_results = data['ElectionResults'].dropna() + + # Extract unique area IDs + area_ids = election_results.index.get_level_values(1).unique()[1:] + num_colors = len(election_results.iloc[0]) + + # Create subplots within a single figure + num_areas = len(area_ids) - 1 # Exclude the global area + num_cols = math.ceil(math.sqrt(num_areas)) + num_rows = math.ceil(num_areas / num_cols) + 
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, + figsize=(8, 8), sharex=True) + + for ax, area_id in zip(axes.flatten(), area_ids): + area_data = election_results.xs(area_id, level=1) + for color_idx in range(num_colors): + color_data = area_data.apply(lambda x: x[color_idx]) + ax.plot(color_data.index, color_data.values, + label=f'Color {color_idx}', color=_COLORS[color_idx]) + ax.set_title(f'Area {area_id}') + ax.set_xlabel('Step') + ax.set_ylabel('Election Results') + + plt.tight_layout() + return save_plot_to_base64(fig) + + +class PersonalityDistribution(TextElement): + + def __init__(self): + super().__init__() + self.personality_distribution = None + self.pers_dist_plot = None + + def create_once(self, model): + if TYPE_CHECKING: + model = cast('ParticipationModel', model) + # Fetch data + dists = model.personality_distribution + personalities = model.personalities + num_personalities = personalities.shape[0] + num_agents = model.num_agents + colors = _COLORS[:model.num_colors] + + + fig, ax = plt.subplots(figsize=(6, 4)) + heights = dists * num_agents + bars = ax.bar(range(num_personalities), heights, color='skyblue') + + for bar, personality in zip(bars, personalities): + height = bar.get_height() + width = bar.get_width() + + for i, c in enumerate(personality): + coords = (bar.get_x() + width / 2 - 0.4 + i * 0.2, height) + rect = patches.Rectangle(coords, 0.2, 5, color=colors[c]) + ax.add_patch(rect) + + ax.set_xlabel('Personality ID') + ax.set_ylabel('Number of Agents') + ax.set_title('Distribution of Personalities among Agents') + + plt.tight_layout() + self.pers_dist_plot = save_plot_to_base64(fig) + + def render(self, model): + # Only create a new plot at the start of a simulation + if model.scheduler.steps == 0: + self.create_once(model) + return self.pers_dist_plot + class VoterTurnoutElement(TextElement): def render(self, model): @@ -96,7 +156,7 @@ def render(self, model): area_ids = voter_turnout.index.get_level_values(1).unique() # 
Create a single plot - fig, ax = plt.subplots(figsize=(10, 6)) + fig, ax = plt.subplots(figsize=(8, 6)) for area_id in area_ids: area_data = voter_turnout.xs(area_id, level=1) diff --git a/tests/test_majority_rule.py b/tests/test_majority_rule.py index 7cbcba3..5a3d31d 100644 --- a/tests/test_majority_rule.py +++ b/tests/test_majority_rule.py @@ -2,15 +2,15 @@ import time from democracy_sim.social_welfare_functions import majority_rule -# Simple and standard cases +# Simple and standard cases (lower values = higher rank) simple = np.array([ - [0.5, 0.4, 0.1], - [0.1, 0.5, 0.4], - [0.4, 0.5, 0.1], - [0.1, 0.4, 0.5], [0.1, 0.4, 0.5], - [0.1, 0.4, 0.5] + [0.5, 0.1, 0.4], + [0.4, 0.1, 0.5], + [0.5, 0.4, 0.1], + [0.5, 0.4, 0.1], + [0.5, 0.4, 0.1] ]) # => c, b, a ~ 2, 1, 0 # Following "paradoxical" example is taken from @@ -26,23 +26,24 @@ paradoxical = np.array([ # 5 times a,c,b,d,e --> 0.4, 0.2, 0.3, 0.1, 0. - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], - [0.4, 0.2, 0.3, 0.1, 0. ], + # 5 times a,c,b,d,e --> 0., 0.2, 0.1, 0.3, 0.4 + [0. , 0.2, 0.1, 0.3, 0.4], + [0. , 0.2, 0.1, 0.3, 0.4], + [0. , 0.2, 0.1, 0.3, 0.4], + [0. , 0.2, 0.1, 0.3, 0.4], + [0. , 0.2, 0.1, 0.3, 0.4], # 4 times e,b,c,d,a - [0. , 0.3, 0.2, 0.1, 0.4], - [0. , 0.3, 0.2, 0.1, 0.4], - [0. , 0.3, 0.2, 0.1, 0.4], - [0. , 0.3, 0.2, 0.1, 0.4], + [0.4, 0.1, 0.2, 0.3, 0. ], + [0.4, 0.1, 0.2, 0.3, 0. ], + [0.4, 0.1, 0.2, 0.3, 0. ], + [0.4, 0.1, 0.2, 0.3, 0. ], # 3 times d,c,b,e,a - [0. , 0.2, 0.3, 0.4, 0.1], - [0. , 0.2, 0.3, 0.4, 0.1], - [0. , 0.2, 0.3, 0.4, 0.1], + [0.4, 0.2, 0.1, 0. , 0.3], + [0.4, 0.2, 0.1, 0. , 0.3], + [0.4, 0.2, 0.1, 0. , 0.3], # 2 times b,d,e,c,a - [0. , 0.4, 0.1, 0.3, 0.2], - [0. , 0.4, 0.1, 0.3, 0.2] + [0.4, 0. , 0.3, 0.1, 0.2], + [0.4, 0. 
, 0.3, 0.1, 0.2] ]) # Plurality => a, e, d, b, c ~ 0, 4, 3, 1, 2 majority_simple_cases = [ diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index 6f857b4..1535c22 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -75,6 +75,28 @@ def test_create_color_distribution(self): assert mid_dst != eq_dst assert het_dst != mid_dst + def test_distribution_of_personalities(self): + p_dist = self.model.personality_distribution + self.assertAlmostEqual(sum(p_dist), 1.0) + self.assertEqual(len(p_dist), num_personalities) + voting_agents = self.model.voting_agents + num_agnts = self.model.num_agents + personalities = list(self.model.personalities) + p_counts = {str(i): 0 for i in personalities} + # Count the occurrence of each personality + for agent in voting_agents: + p_counts[str(agent.personality)] += 1 + # Normalize the counts to get the real personality distribution + real_dist = [p_counts[str(p)] / num_agnts for p in personalities] + # Simple tests + self.assertEqual(len(real_dist), len(p_dist)) + self.assertAlmostEqual(float(sum(real_dist)), 1.0) + # Compare each value + my_delta = 0.4 / num_personalities # The more personalities, the smaller the delta + for p_dist_val, real_p_dist_val in zip(p_dist, real_dist): + self.assertAlmostEqual(p_dist_val, real_p_dist_val, delta=my_delta) + + def test_initialize_areas(self): # TODO (very non-trivial) - has been tested manually so far. 
pass diff --git a/tests/test_pers_dist.py b/tests/test_pers_dist.py new file mode 100644 index 0000000..1142ce8 --- /dev/null +++ b/tests/test_pers_dist.py @@ -0,0 +1,39 @@ +import numpy as np +import matplotlib.pyplot as plt + +def create_gaussian_distribution(size): + # Generate a normal distribution + rng = np.random.default_rng() + dist = rng.normal(0, 1, size) + dist.sort() # To create a gaussian curve like array + dist = np.abs(dist) # Flip negative values "up" + # Normalize the distribution to sum to one + dist /= dist.sum() + # Ensure the sum is exactly one + # sm = dist.sum() + # if sm != 1.0: + # idx = rng.choice(size) # Choose a random index + # dist[idx] += 1 - sm + return dist + +# Example usage +nr_options = 20 +gaussian_dist = create_gaussian_distribution(nr_options) +s = gaussian_dist.sum() + +nr_zeroes = gaussian_dist.size - np.count_nonzero(gaussian_dist) +print("There are", nr_zeroes, "zero values in the distribution") + +# Plot the distribution +plt.plot(gaussian_dist) +plt.title("Normalized Gaussian Distribution") +plt.show() + +sample_size = 800 +pool = np.arange(nr_options) +rng = np.random.default_rng() +print(pool.shape) +chosen = rng.choice(pool, sample_size, p=gaussian_dist) + +plt.hist(chosen) +plt.show() \ No newline at end of file From b4e092dd2586e8ffde60b64dfe726c8b97ff2dd3 Mon Sep 17 00:00:00 2001 From: jurikane Date: Sat, 30 Nov 2024 11:09:45 +0100 Subject: [PATCH 30/38] combined the creation of the two area-stats overviews into one render function and FIXed a depiction error (wrong ranking) in the ElectionResults depiction --- democracy_sim/model_setup.py | 9 +-- democracy_sim/participation_model.py | 9 ++- democracy_sim/run.py | 6 +- democracy_sim/visualisation_elements.py | 74 +++++++++++-------------- 4 files changed, 41 insertions(+), 57 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 95f5ecd..bd1faf8 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ 
-108,7 +108,7 @@ def participation_draw(cell: ColorCell): portrayal = {"Shape": "rect", "w": 1, "h": 1, "Filled": "true", "Layer": 0, "x": cell.row, "y": cell.col, "Color": color} - # TODO: maybe: draw the agent number in the opposing color, + maybe draw borders nicer + # TODO: maybe: draw the agent number in the opposing color # If the cell is a border cell, change its appearance if TYPE_CHECKING: # Type hint for IDEs cell.model = cast(ParticipationModel, cell.model) @@ -159,13 +159,6 @@ def participation_draw(cell: ColorCell): data_collector_name='datacollector') -# Agent charts - -# voter_turnout_chart = mesa.visualization.ChartModule( -# [{"Label": "Voter Turnout", "Color": "Black"}], -# data_collector_name='datacollector' -# ) - model_params = { "height": grid_rows, "width": grid_cols, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 8d9ede6..372aa78 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -32,7 +32,7 @@ def __init__(self, unique_id, model, height, width, size_variance): self.cells = [] self._idx_field = None # An indexing position of the area in the grid self.color_distribution = np.zeros(model.num_colors) # Initialize to 0 - self.voted_ordering = np.zeros(model.num_colors) + self.voted_ordering = None self.voter_turnout = 0 # In percent self.dist_to_reality = None # Elected vs. actual color distribution @@ -354,7 +354,12 @@ def get_area_color_distribution(area): return None def get_election_results(area): - if isinstance(area, Area): + """ + Returns the voted ordering as a list or None if not available. + + :return: List of voted ordering or None. 
+ """ + if isinstance(area, Area) and area.voted_ordering is not None: return area.voted_ordering.tolist() return None diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 91b8546..8d2438b 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -23,9 +23,8 @@ def reset_model(self): super().reset_model() -color_distribution_element = ColorDistributionElement() personality_distribution = PersonalityDistribution() -election_results = ElectionResultsElement() +area_stats = AreaStats() steps_text = StepsTextElement() vto_areas = VoterTurnoutElement() @@ -34,8 +33,7 @@ def reset_model(self): visualization_elements=[canvas_element, color_distribution_chart, personality_distribution, wealth_chart, voter_turnout, vto_areas, - color_distribution_element, election_results, - steps_text], + area_stats, steps_text], name="DemocracySim", model_params=params, ) diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index a48c27e..1a5b7fc 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -19,7 +19,7 @@ def save_plot_to_base64(fig): return f'' -class ColorDistributionElement(TextElement): +class AreaStats(TextElement): def render(self, model): # Only render if show_area_stats is enabled step = model.scheduler.steps @@ -30,71 +30,58 @@ def render(self, model): data = model.datacollector.get_agent_vars_dataframe() color_distribution = data['ColorDistribution'].dropna() dist_to_reality = data['DistToReality'].dropna() + election_results = data['ElectionResults'].dropna() # Extract unique area IDs (excluding the global area) area_ids = color_distribution.index.get_level_values(1).unique()[1:] num_colors = len(color_distribution.iloc[0]) - # Create subplots within a single figure - num_areas = len(area_ids) - 1 # Exclude the global area + # Create subplots within a single figure for color distribution + num_areas = len(area_ids) num_cols = math.ceil(math.sqrt(num_areas)) 
num_rows = math.ceil(num_areas / num_cols) - fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, - figsize=(8, 8), sharex=True) - - for ax, area_id in zip(axes.flatten(), area_ids): + fig1, axes1 = plt.subplots(nrows=num_rows, ncols=num_cols, + figsize=(8, 8), sharex=True) + handles, labels = [], [] + for ax, area_id in zip(axes1.flatten(), area_ids): area_data = color_distribution.xs(area_id, level=1) for color_idx in range(num_colors): color_data = area_data.apply(lambda x: x[color_idx]) ax.plot(color_data.index, color_data.values, - label=f'Color {color_idx}', color=_COLORS[color_idx]) + color=_COLORS[color_idx]) area_data = dist_to_reality.xs(area_id, level=1) ax.plot(area_data.index, area_data.values, - label=f'Distance to reality', color='Black') + color='Black', linestyle='--', + label='Distance of the election result\n' + 'to the actual color distribution\nValues 0-1',) + handles, labels = ax.get_legend_handles_labels() ax.set_title(f'Area {area_id}') ax.set_xlabel('Step') ax.set_ylabel('Color Distribution') - #ax.legend() + fig1.legend(handles, labels, loc='upper center', ncol=3) plt.tight_layout() - return save_plot_to_base64(fig) - - -class ElectionResultsElement(TextElement): - def render(self, model): - # TODO: put together with ColorDistributionElement to create both at once - # Only render if show_area_stats is enabled - step = model.scheduler.steps - if not model.show_area_stats or step == 0: - return "" - - # Fetch data from the datacollector - data = model.datacollector.get_agent_vars_dataframe() - election_results = data['ElectionResults'].dropna() + color_dist_plot = save_plot_to_base64(fig1) - # Extract unique area IDs - area_ids = election_results.index.get_level_values(1).unique()[1:] - num_colors = len(election_results.iloc[0]) + # Create subplots within a single figure for election results + fig2, axes2 = plt.subplots(nrows=num_rows, ncols=num_cols, + figsize=(8, 8), sharex=True) - # Create subplots within a single figure - num_areas = 
len(area_ids) - 1 # Exclude the global area - num_cols = math.ceil(math.sqrt(num_areas)) - num_rows = math.ceil(num_areas / num_cols) - fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, - figsize=(8, 8), sharex=True) - - for ax, area_id in zip(axes.flatten(), area_ids): + for ax, area_id in zip(axes2.flatten(), area_ids): area_data = election_results.xs(area_id, level=1) - for color_idx in range(num_colors): - color_data = area_data.apply(lambda x: x[color_idx]) + for color_id in range(num_colors): + color_data = area_data.apply(lambda x: x.index(color_id)) ax.plot(color_data.index, color_data.values, - label=f'Color {color_idx}', color=_COLORS[color_idx]) + label=f'Color {color_id}', color=_COLORS[color_id]) ax.set_title(f'Area {area_id}') ax.set_xlabel('Step') - ax.set_ylabel('Election Results') + ax.set_ylabel('Election Results (rank value)') + ax.invert_yaxis() plt.tight_layout() - return save_plot_to_base64(fig) + election_results_plot = save_plot_to_base64(fig2) + + return color_dist_plot + election_results_plot class PersonalityDistribution(TextElement): @@ -123,12 +110,13 @@ def create_once(self, model): height = bar.get_height() width = bar.get_width() - for i, c in enumerate(personality): + for i, color_idx in enumerate(personality): coords = (bar.get_x() + width / 2 - 0.4 + i * 0.2, height) - rect = patches.Rectangle(coords, 0.2, 5, color=colors[c]) + rect = patches.Rectangle(coords, 0.2, 5, + color=colors[color_idx]) ax.add_patch(rect) - ax.set_xlabel('Personality ID') + ax.set_xlabel('"Personality" ID') ax.set_ylabel('Number of Agents') ax.set_title('Distribution of Personalities among Agents') From 546f83ed114730f1ca47323b79246e3022b0a08a Mon Sep 17 00:00:00 2001 From: jurikane Date: Sat, 30 Nov 2024 12:12:25 +0100 Subject: [PATCH 31/38] changed the area stats to be depicted side by side for each area for better overview independent of the number of areas --- democracy_sim/visualisation_elements.py | 68 +++++++++++-------------- 1 file changed, 
30 insertions(+), 38 deletions(-) diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index 1a5b7fc..5e42a26 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -35,53 +35,45 @@ def render(self, model): # Extract unique area IDs (excluding the global area) area_ids = color_distribution.index.get_level_values(1).unique()[1:] num_colors = len(color_distribution.iloc[0]) - - # Create subplots within a single figure for color distribution num_areas = len(area_ids) - num_cols = math.ceil(math.sqrt(num_areas)) - num_rows = math.ceil(num_areas / num_cols) - fig1, axes1 = plt.subplots(nrows=num_rows, ncols=num_cols, - figsize=(8, 8), sharex=True) - handles, labels = [], [] - for ax, area_id in zip(axes1.flatten(), area_ids): + + # Create subplots with two columns (two plots per area). + fig, axes = plt.subplots(nrows=num_areas, ncols=2, + figsize=(8, 4 * num_areas), sharex=True) + + for area_id in area_ids: + row = area_id + # Left plot: color distribution and distance to reality value + ax1 = axes[row, 0] area_data = color_distribution.xs(area_id, level=1) for color_idx in range(num_colors): color_data = area_data.apply(lambda x: x[color_idx]) - ax.plot(color_data.index, color_data.values, - color=_COLORS[color_idx]) - area_data = dist_to_reality.xs(area_id, level=1) - ax.plot(area_data.index, area_data.values, - color='Black', linestyle='--', - label='Distance of the election result\n' - 'to the actual color distribution\nValues 0-1',) - handles, labels = ax.get_legend_handles_labels() - ax.set_title(f'Area {area_id}') - ax.set_xlabel('Step') - ax.set_ylabel('Color Distribution') - - fig1.legend(handles, labels, loc='upper center', ncol=3) - plt.tight_layout() - color_dist_plot = save_plot_to_base64(fig1) - - # Create subplots within a single figure for election results - fig2, axes2 = plt.subplots(nrows=num_rows, ncols=num_cols, - figsize=(8, 8), sharex=True) - - for ax, area_id 
in zip(axes2.flatten(), area_ids): + ax1.plot(color_data.index, color_data.values, + color=_COLORS[color_idx]) + a_data = dist_to_reality.xs(area_id, level=1) + ax1.plot(a_data.index, a_data.values, color='Black', linestyle='--') + ax1.set_title(f'Area {area_id} \n' + f'--- deviation from voted distribution') + ax1.set_xlabel('Step') + ax1.set_ylabel('Color Distribution') + + # Right plot: election results + ax2 = axes[row, 1] area_data = election_results.xs(area_id, level=1) for color_id in range(num_colors): - color_data = area_data.apply(lambda x: x.index(color_id)) - ax.plot(color_data.index, color_data.values, - label=f'Color {color_id}', color=_COLORS[color_id]) - ax.set_title(f'Area {area_id}') - ax.set_xlabel('Step') - ax.set_ylabel('Election Results (rank value)') - ax.invert_yaxis() + color_data = area_data.apply(lambda x: list(x).index( + color_id) if color_id in x else None) + ax2.plot(color_data.index, color_data.values, + label=f'Color {color_id}', color=_COLORS[color_id]) + ax2.set_title(f'Area {area_id} \n') + ax2.set_xlabel('Step') + ax2.set_ylabel('Election Results (rank values)') + ax2.invert_yaxis() plt.tight_layout() - election_results_plot = save_plot_to_base64(fig2) + combined_plot = save_plot_to_base64(fig) - return color_dist_plot + election_results_plot + return combined_plot class PersonalityDistribution(TextElement): From 7786c9a3af5e86a267a2797193a3db624ac1d5ea Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 2 Dec 2024 15:31:13 +0100 Subject: [PATCH 32/38] added a plot to view personality-distributions per area, removed num_personality_colors as a variable in the model --- democracy_sim/model_setup.py | 39 +++++------ democracy_sim/participation_model.py | 32 ++++++--- democracy_sim/run.py | 6 +- democracy_sim/visualisation_elements.py | 93 +++++++++++++++++++++---- 4 files changed, 122 insertions(+), 48 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index bd1faf8..fc9fbb7 100644 --- 
a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -17,10 +17,10 @@ ############# election_costs = 5 max_reward = 50 -election_impact_on_mutation = 1.0 # 0.1-5.0 -mu = 0.01 # 0.001-0.5 +election_impact_on_mutation = 1.8 # 0.1-5.0 +mu = 0.05 # 0.001-0.5 # Voting rules (see social_welfare_functions.py) -rule_idx = 0 +rule_idx = 1 # Distance functions (see distance_functions.py) distance_idx = 1 #################### @@ -28,13 +28,12 @@ #################### num_agents = 800 # Colors -num_colors = 4 +num_colors = 3 color_patches_steps = 3 patch_power = 1.0 color_heterogeneity = 0.3 # Voting Agents -num_personality_colors = 4 # TODO: does this make sense when we have to use orderings anyways? -num_personalities = 10 +num_personalities = 4 # Grid grid_rows = 100 # height grid_cols = 80 # width @@ -43,14 +42,14 @@ canvas_width = grid_cols * cell_size draw_borders = True # Voting Areas -# num_areas = 16 -# av_area_height = 25 -# # area_height = grid_rows // int(sqrt(num_areas)) -# av_area_width = 20 -# # area_width = grid_cols // int(sqrt(num_areas)) -num_areas = 4 -av_area_height = 50 -av_area_width = 40 +num_areas = 16 +av_area_height = 25 +# area_height = grid_rows // int(sqrt(num_areas)) +av_area_width = 20 +# area_width = grid_cols // int(sqrt(num_areas)) +# num_areas = 4 +# av_area_height = 50 +# av_area_width = 40 area_size_variance = 0.0 ######################## # Statistics and Views # @@ -201,13 +200,13 @@ def participation_draw(cell: ColorCell): ), "num_personalities": mesa.visualization.Slider( name="# of different personalities", value=num_personalities, - min_value=1, max_value=factorial(num_personality_colors), step=1 - ), - "num_personality_colors": mesa.visualization.Slider( - name="# colors determining the personality", - value=num_personality_colors, - min_value=1, max_value=num_colors-1, step=1 + min_value=1, max_value=factorial(num_colors), step=1 ), + # "num_personality_colors": mesa.visualization.Slider( + # name="# colors 
determining the personality", + # value=num_personality_colors, + # min_value=1, max_value=num_colors-1, step=1 + # ), "color_patches_steps": mesa.visualization.Slider( name="Patches size (# steps)", value=color_patches_steps, min_value=0, max_value=9, step=1, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 372aa78..72a826d 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -35,6 +35,7 @@ def __init__(self, unique_id, model, height, width, size_variance): self.voted_ordering = None self.voter_turnout = 0 # In percent self.dist_to_reality = None # Elected vs. actual color distribution + self.personality_dist = None def __str__(self): return (f"Area(id={self.unique_id}, size={self._height}x{self._width}, " @@ -121,6 +122,7 @@ def idx_field(self, pos: tuple): cell.is_border_cell = True self._idx_field = (adjusted_x, adjusted_y) self.update_color_distribution() + self.update_personality_dist() def add_agent(self, agent): self.agents.append(agent) @@ -178,7 +180,6 @@ def conduct_election(self): n = preference_profile.shape[0] # Number agents participated return int((n / self.num_agents) * 100) # Voter turnout in percent - def update_color_distribution(self): """ This method calculates the current color distribution of the area @@ -192,6 +193,19 @@ def update_color_distribution(self): dist_val = color_count.get(color, 0) / self.num_cells # Float self.color_distribution[color] = dist_val + def update_personality_dist(self): + """ + This method calculates the areas current distribution of personalities. 
+ """ + personalities = list(self.model.personalities) + p_counts = {str(i): 0 for i in personalities} + # Count the occurrence of each personality + for agent in self.agents: + p_counts[str(agent.personality)] += 1 + # Normalize the counts + self.personality_dist = [p_counts[str(p)] / self.num_agents + for p in personalities] + def filter_cells(self, cell_list): """ This method is used to filter a given list of cells to return only @@ -304,7 +318,7 @@ def create_all_options(n, include_ties=False): return r -def create_personality(num_colors, num_personality_colors): +def create_personality(num_colors): """ NOT USED Creates and returns a list of 'personalities' that are to be assigned to agents. Each personality is a NumPy array of length 'num_colors' @@ -312,18 +326,17 @@ def create_personality(num_colors, num_personality_colors): the personality is limited. The array is therefore not normalized. White (color 0) is never part of a personality. :param num_colors: The number of colors in the simulation. - :param num_personality_colors: Number of colors influencing the personality. """ # TODO add unit tests for this function personality = np.random.randint(0, 100, num_colors) # TODO low=0 or 1? 
# Save the sum to "normalize" the values later (no real normalization) sum_value = sum(personality) + 1e-8 # To avoid division by zero # Select only as many features as needed (num_personality_colors) - to_del = num_colors - num_personality_colors # How many to be deleted - if to_del > 0: - # The 'replace=False' ensures that indexes aren't chosen twice - indices = np.random.choice(num_colors, to_del, replace=False) - personality[indices] = 0 # 'Delete' the values + # to_del = num_colors - num_personality_colors # How many to be deleted + # if to_del > 0: + # # The 'replace=False' ensures that indexes aren't chosen twice + # indices = np.random.choice(num_colors, to_del, replace=False) + # personality[indices] = 0 # 'Delete' the values personality[0] = 0 # White is never part of the personality # "Normalize" the rest of the values personality = personality / sum_value @@ -392,7 +405,7 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - num_personality_colors, mu, election_impact_on_mutation, + mu, election_impact_on_mutation, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -434,7 +447,6 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, # Create agents # TODO: Where do the agents get there known cells from and how!? 
self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents - self.num_personality_colors = num_personality_colors self.personalities = self.create_personalities(num_personalities) self.personality_distribution = self.pers_dist(num_personalities) self.initialize_voting_agents() diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 8d2438b..0f14c16 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -25,15 +25,15 @@ def reset_model(self): personality_distribution = PersonalityDistribution() area_stats = AreaStats() -steps_text = StepsTextElement() vto_areas = VoterTurnoutElement() +area_personality_dists = AreaPersonalityDists() server = CustomModularServer( model_cls=ParticipationModel, visualization_elements=[canvas_element, color_distribution_chart, - personality_distribution, wealth_chart, voter_turnout, vto_areas, - area_stats, steps_text], + personality_distribution, area_stats, + area_personality_dists], name="DemocracySim", model_params=params, ) diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index 5e42a26..b0fa9f6 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -43,31 +43,32 @@ def render(self, model): for area_id in area_ids: row = area_id - # Left plot: color distribution and distance to reality value + # Left plot: distance to reality value and color distribution ax1 = axes[row, 0] area_data = color_distribution.xs(area_id, level=1) + a_data = dist_to_reality.xs(area_id, level=1) + ax1.plot(a_data.index, a_data.values, color='Black', linestyle='--') for color_idx in range(num_colors): color_data = area_data.apply(lambda x: x[color_idx]) ax1.plot(color_data.index, color_data.values, color=_COLORS[color_idx]) - a_data = dist_to_reality.xs(area_id, level=1) - ax1.plot(a_data.index, a_data.values, color='Black', linestyle='--') ax1.set_title(f'Area {area_id} \n' f'--- deviation from voted distribution') ax1.set_xlabel('Step') 
ax1.set_ylabel('Color Distribution') - # Right plot: election results + # Right plot: election result ax2 = axes[row, 1] area_data = election_results.xs(area_id, level=1) for color_id in range(num_colors): color_data = area_data.apply(lambda x: list(x).index( color_id) if color_id in x else None) - ax2.plot(color_data.index, color_data.values, - label=f'Color {color_id}', color=_COLORS[color_id]) + ax2.plot(color_data.index, color_data.values, marker='o', + label=f'Color {color_id}', color=_COLORS[color_id], + linewidth=0.2) ax2.set_title(f'Area {area_id} \n') ax2.set_xlabel('Step') - ax2.set_ylabel('Election Results (rank values)') + ax2.set_ylabel('Elected ranking (rank values)') ax2.invert_yaxis() plt.tight_layout() @@ -92,25 +93,26 @@ def create_once(self, model): num_personalities = personalities.shape[0] num_agents = model.num_agents colors = _COLORS[:model.num_colors] - + num_colors = len(personalities[0]) fig, ax = plt.subplots(figsize=(6, 4)) heights = dists * num_agents - bars = ax.bar(range(num_personalities), heights, color='skyblue') + bars = ax.bar(range(num_personalities), heights, width=0.6) for bar, personality in zip(bars, personalities): height = bar.get_height() width = bar.get_width() for i, color_idx in enumerate(personality): - coords = (bar.get_x() + width / 2 - 0.4 + i * 0.2, height) - rect = patches.Rectangle(coords, 0.2, 5, + rect_width = width / num_colors + coords = (bar.get_x() + i * rect_width, 0) + rect = patches.Rectangle(coords, rect_width, height, color=colors[color_idx]) ax.add_patch(rect) ax.set_xlabel('"Personality" ID') ax.set_ylabel('Number of Agents') - ax.set_title('Distribution of Personalities among Agents') + ax.set_title('Global distribution of personalities among agents') plt.tight_layout() self.pers_dist_plot = save_plot_to_base64(fig) @@ -138,10 +140,16 @@ def render(self, model): # Create a single plot fig, ax = plt.subplots(figsize=(8, 6)) - for area_id in area_ids: + for i, area_id in enumerate(area_ids): 
area_data = voter_turnout.xs(area_id, level=1) - ax.plot(area_data.index, area_data.values, label=f'Area {area_id}') - + if i < 10: + line_style = '-' + elif i < 20: + line_style = ':' + else: + line_style = '--' + ax.plot(area_data.index, area_data.values, label=f'Area {area_id}', + linestyle=line_style) ax.set_title('Voter Turnout by Area Over Time') ax.set_xlabel('Step') ax.set_ylabel('Voter Turnout (%)') @@ -179,3 +187,58 @@ def render(self, model): f"areas: {len(model.areas)} | First 5 voters of " f"{len(model.voting_agents)}: {first_agents}") return text + + +class AreaPersonalityDists(TextElement): + + def __init__(self): + super().__init__() + self.personality_distributions = None + self.areas_pers_dist_plot = None + + def create_once(self, model): + if TYPE_CHECKING: + model = cast('ParticipationModel', model) + + colors = _COLORS[:model.num_colors] + personalities = model.personalities + num_colors = len(personalities[0]) + num_personalities = personalities.shape[0] + + # Create subplots within a single figure + num_areas = len(model.areas) + num_cols = math.ceil(math.sqrt(num_areas)) + num_rows = math.ceil(num_areas / num_cols) + fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, + figsize=(8, 8), sharex=True) + for ax, area in zip(axes.flatten(), model.areas): + # Fetch data + p_dist = area.personality_dist + num_agents = area.num_agents + # Subplot + heights = [int(val * num_agents) for val in p_dist] + bars = ax.bar(range(num_personalities), heights, color='skyblue') + + for bar, personality in zip(bars, personalities): + height = bar.get_height() + width = bar.get_width() + + for i, color_idx in enumerate(personality): + rect_width = width / num_colors + coords = (bar.get_x() + i * rect_width, height) + rect = patches.Rectangle(coords, rect_width, 2, + color=colors[color_idx]) + ax.add_patch(rect) + + ax.set_xlabel('"Personality" ID') + ax.set_ylabel('Number of Agents') + ax.set_title(f'Area {area.unique_id}') + + plt.tight_layout() + 
self.areas_pers_dist_plot = save_plot_to_base64(fig) + + def render(self, model): + # Only create a new plot at the start of a simulation + if model.scheduler.steps == 0: + self.create_once(model) + return self.areas_pers_dist_plot From 471be43aa5098e6c213e4ffd0151207ac964ecde Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 9 Dec 2024 16:16:50 +0100 Subject: [PATCH 33/38] changed the approval logic to get from std pref-rel to approval_voting-pref-rel, FIXed pytests, added approval voting pytest --- democracy_sim/model_setup.py | 14 ++-- democracy_sim/participation_model.py | 12 +-- democracy_sim/social_welfare_functions.py | 4 +- tests/test_approval_voting.py | 91 ++++++++++++----------- tests/test_participation_area_agent.py | 4 +- tests/test_participation_model.py | 7 +- tests/test_participation_voting_agent.py | 4 +- 7 files changed, 70 insertions(+), 66 deletions(-) diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index fc9fbb7..d4cb3cc 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -15,7 +15,7 @@ ############# # Elections # ############# -election_costs = 5 +election_costs = 1 max_reward = 50 election_impact_on_mutation = 1.8 # 0.1-5.0 mu = 0.05 # 0.001-0.5 @@ -27,6 +27,7 @@ # Model parameters # #################### num_agents = 800 +common_assets = 40000 # Colors num_colors = 3 color_patches_steps = 3 @@ -129,7 +130,7 @@ def participation_draw(cell: ColorCell): text = f"{a.num_agents} agents, color dist: {a.color_distribution}" portrayal[f"Area {unique_id}"] = text for voter in cell.agents: - text = f"personality: {voter.personality}" + text = f"personality: {voter.personality}, assets: {voter.assets}" portrayal[f"Agent {voter.unique_id}"] = text return portrayal @@ -202,11 +203,10 @@ def participation_draw(cell: ColorCell): name="# of different personalities", value=num_personalities, min_value=1, max_value=factorial(num_colors), step=1 ), - # "num_personality_colors": mesa.visualization.Slider( - # 
name="# colors determining the personality", - # value=num_personality_colors, - # min_value=1, max_value=num_colors-1, step=1 - # ), + "common_assets": mesa.visualization.Slider( + name="Initial common assets", value=common_assets, + min_value=num_agents, max_value=1000*num_agents, step=10 + ), "color_patches_steps": mesa.visualization.Slider( name="Patches size (# steps)", value=color_patches_steps, min_value=0, max_value=9, step=1, diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 72a826d..a53a56f 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -166,15 +166,15 @@ def conduct_election(self): real_color_ord = np.argsort(self.color_distribution)[::-1] # Descending self.dist_to_reality = dist_func(real_color_ord, self.voted_ordering, model.color_search_pairs) - # Calculate the rewards per agent - reward_pa = (1 - self.dist_to_reality) * model.max_reward + # Calculate the rpa - rewards per agent (can be negative) + rpa = (0.5 - self.dist_to_reality) * model.max_reward # TODO: change this (?) 
# Distribute the two types of rewards color_search_pairs = model.color_search_pairs for agent in self.agents: # Personality-based reward factor p = dist_func(agent.personality, real_color_ord, color_search_pairs) # + Common reward (reward_pa) for all agents - agent.assets = agent.assets + p * reward_pa + reward_pa + agent.assets = int(agent.assets + (0.5-p) * model.max_reward + rpa) # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process # Statistics n = preference_profile.shape[0] # Number agents participated @@ -405,7 +405,7 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - mu, election_impact_on_mutation, + mu, election_impact_on_mutation, common_assets, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -435,6 +435,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.options = create_all_options(num_colors) # Simulation variables self.mu = mu # Mutation rate for the color cells (0.1 = 10 % mutate) + self.common_assets = common_assets # Election impact factor on color mutation through a probability array self.color_probs = self.init_color_probs(election_impact_on_mutation) # Create search pairs once for faster iterations when comparing rankings @@ -513,6 +514,7 @@ def initialize_voting_agents(self): """ dist = self.personality_distribution rng = np.random.default_rng() + assets = self.common_assets // self.num_agents for a_id in range(self.num_agents): # Get a random position x = self.random.randrange(self.width) @@ -520,7 +522,7 @@ def initialize_voting_agents(self): personality = rng.choice(self.personalities, p=dist) # Create agent without appending (add to the pre-defined 
list) agent = VoteAgent(a_id, self, (x, y), personality, - assets=5, add=False) # TODO: initial assets?! + assets=assets, add=False) # TODO: initial assets?! self.voting_agents[a_id] = agent # Add using the index (faster) # Add the agent to the grid by placing it on a cell cell = self.grid.get_cell_list_contents([(x, y)])[0] diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index 9dc5112..6fc3e8c 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -125,8 +125,8 @@ def imp_prepr_for_approval(pref_table, threshold=None): :param threshold: Will be overwritten (will be set according to variances). :return: The preference table with the options approved or not. """ - threshold = np.var(pref_table, axis=0) - return (pref_table <= threshold).astype(int) + threshold = np.mean(pref_table, axis=1) - np.var(pref_table, axis=1) + return (pref_table < threshold.reshape(-1, 1)).astype(int) def approval_voting(pref_table): diff --git a/tests/test_approval_voting.py b/tests/test_approval_voting.py index c5e78b9..60ec936 100644 --- a/tests/test_approval_voting.py +++ b/tests/test_approval_voting.py @@ -1,12 +1,13 @@ from democracy_sim.social_welfare_functions import approval_voting from tests.test_majority_rule import simple, paradoxical +import numpy as np # TODO adapt to approval voting (state = merely copied from majority_rule.py) # Simple and standard cases approval_simple_cases = [ - (simple, [2, 1, 0]), # TODO: Whats the expected result? - (paradoxical, [0, 4, 3, 1, 2]) # TODO '' '' + (simple, [[2, 1, 0]]), # TODO: Whats the expected result? 
+ (paradoxical, [[2, 1, 0, 3, 4], [2, 1, 3, 0, 4]]) # TODO '' '' ] # Following "paradoxical" example is taken from @@ -20,47 +21,53 @@ # d d e c # e a a a -# def test_approval_voting(): -# # Test predefined cases -# for pref_table, expected in approval_simple_cases: -# res_ranking = approval_voting(pref_table) -# assert list(res_ranking) == expected +def test_approval_voting(): + # Test predefined cases + for pref_table, expected in approval_simple_cases: + res_ranking = approval_voting(pref_table) + is_correct = False + for exp in expected: + if list(res_ranking) == exp: + is_correct = True + assert is_correct + +# Cases with ties - "all equally possible" + +with_ties_all = np.array([ + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25] + ]) + +with_overall_tie = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.3, 0.2, 0.1, 0.4], +]) + +with_ties_mixed = np.array([ + [0.4, 0.3, 0.2, 0.1], + [0.25, 0.25, 0.25, 0.25], + [0.25, 0.25, 0.25, 0.25], + [0.1, 0.4, 0.3, 0.2], + [0.2, 0.1, 0.4, 0.3], + [0.25, 0.25, 0.25, 0.25], + [0.3, 0.2, 0.1, 0.4], +]) + +all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] + +def test_equally_possible(): + for pref_rel in all_equally_possible: + winners = set() + for _ in range(500): + winner = approval_voting(pref_rel) + winners.add(winner[0]) + assert set(winners) == {0, 1, 2, 3} -# # Cases with ties - "all equally possible" -# -# with_ties_all = np.array([ -# [0.25, 0.25, 0.25, 0.25], -# [0.25, 0.25, 0.25, 0.25], -# [0.25, 0.25, 0.25, 0.25], -# [0.25, 0.25, 0.25, 0.25], -# [0.25, 0.25, 0.25, 0.25] -# ]) -# -# with_overall_tie = np.array([ -# [0.4, 0.3, 0.2, 0.1], -# [0.1, 0.4, 0.3, 0.2], -# [0.2, 0.1, 0.4, 0.3], -# [0.3, 0.2, 0.1, 0.4], -# ]) -# -# with_ties_mixed = np.array([ -# [0.4, 0.3, 0.2, 0.1], -# [0.25, 0.25, 0.25, 0.25], -# [0.25, 0.25, 0.25, 0.25], -# [0.1, 0.4, 0.3, 0.2], -# 
[0.2, 0.1, 0.4, 0.3], -# [0.25, 0.25, 0.25, 0.25], -# [0.3, 0.2, 0.1, 0.4], -# ]) -# -# all_equally_possible = [with_ties_all, with_overall_tie, with_ties_mixed] -# -# def test_equally_possible(cv_threshold=0.125): -# for pref_rel in all_equally_possible: -# cv = majority_rule_with_ties_all(pref_rel, [0, 1, 2, 3]) -# print(f"CV: {cv}") -# assert cv < cv_threshold -# # # Cases with ties - "not all equally possible" # with_ties_unequal = np.array([ # [0.25, 0.25, 0.25, 0.25], diff --git a/tests/test_participation_area_agent.py b/tests/test_participation_area_agent.py index d7c6e3f..2749a40 100644 --- a/tests/test_participation_area_agent.py +++ b/tests/test_participation_area_agent.py @@ -69,9 +69,7 @@ def test_estimate_real_distribution(self): # Get any existing area existing_area = random.sample(self.model.areas, 1)[0] # Additional area and agent - personality = np.zeros(self.model.num_colors) - personality[0] = 0.3 - personality[1] = 0.7 + personality = random.choice(self.model.personalities) a = VoteAgent(num_agents + 1, self.model, pos=(0, 0), personality=personality, assets=25) additional_test_area = Area(self.model.num_areas + 1, diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index 1535c22..eff898e 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -4,8 +4,7 @@ social_welfare_functions) from democracy_sim.model_setup import (grid_rows as height, grid_cols as width, num_agents, num_colors, num_areas, - num_personalities, - num_personality_colors as npc, mu, + num_personalities, common_assets, mu, election_impact_on_mutation as e_impact, draw_borders, rule_idx, distance_idx, color_heterogeneity as heterogeneity, @@ -22,7 +21,7 @@ def setUp(self): num_agents=num_agents, num_colors=num_colors, num_personalities=num_personalities, - num_personality_colors=npc, mu=mu, + common_assets=common_assets, mu=mu, election_impact_on_mutation=e_impact, num_areas=num_areas, draw_borders=draw_borders, @@ 
-53,12 +52,12 @@ def test_initialization(self): def test_model_options(self): self.assertEqual(self.model.num_agents, num_agents) self.assertEqual(self.model.num_colors, num_colors) - self.assertEqual(self.model.num_personality_colors, npc) self.assertEqual(self.model.num_areas, num_areas) self.assertEqual(self.model.area_size_variance, area_size_variance) self.assertEqual(self.model.draw_borders, draw_borders) v_rule = social_welfare_functions[rule_idx] dist_func = distance_functions[distance_idx] + self.assertEqual(self.model.common_assets, common_assets) self.assertEqual(self.model.voting_rule, v_rule) self.assertEqual(self.model.distance_func, dist_func) self.assertEqual(self.model.election_costs, election_costs) diff --git a/tests/test_participation_voting_agent.py b/tests/test_participation_voting_agent.py index 2ab326d..a790a1f 100644 --- a/tests/test_participation_voting_agent.py +++ b/tests/test_participation_voting_agent.py @@ -11,9 +11,7 @@ def setUp(self): test_model = TestParticipationModel() test_model.setUp() self.model = test_model.model - personality = np.zeros(self.model.num_colors) - personality[0] = 0.3 - personality[1] = 0.7 + personality = random.choice(self.model.personalities) self.agent = VoteAgent(num_agents + 1, self.model, pos=(0, 0), personality=personality, assets=25) self.additional_test_area = Area(self.model.num_areas + 1, From dd0c79d06eda64fa056b601d9b82cbce6798f783 Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 10 Jan 2025 19:29:15 +0100 Subject: [PATCH 34/38] added known_cells system var, implemented a randomized update of known cells per area and agent and tested --- .gitignore | 20 ++++++++++++++ democracy_sim/model_setup.py | 7 ++++- democracy_sim/participation_agent.py | 30 ++++++++++++++++----- democracy_sim/participation_model.py | 12 ++++++--- democracy_sim/social_welfare_functions.py | 12 ++++++--- tests/test_participation_area_agent.py | 33 ++++++++++++----------- tests/test_participation_model.py | 6 +++-- 
tests/test_participation_voting_agent.py | 9 ++++--- 8 files changed, 93 insertions(+), 36 deletions(-) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..10a07f7 --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +/.idea +.DS_Store +__pycache__/ +/examples +/starter_model +/mesa +site/ +sorted-out-tests +/benchmarks +/notes +# short term: +Dockerfile +docker-compose.yml +Singularity.def +/app.py +/main.py +templates +/docs/images/CI-images +.coverage* +*.cache diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index d4cb3cc..76c37a6 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -33,6 +33,7 @@ color_patches_steps = 3 patch_power = 1.0 color_heterogeneity = 0.3 +known_cells = 10 # Voting Agents num_personalities = 4 # Grid @@ -200,13 +201,17 @@ def participation_draw(cell: ColorCell): step=1 ), "num_personalities": mesa.visualization.Slider( - name="# of different personalities", value=num_personalities, + name="# different personalities", value=num_personalities, min_value=1, max_value=factorial(num_colors), step=1 ), "common_assets": mesa.visualization.Slider( name="Initial common assets", value=common_assets, min_value=num_agents, max_value=1000*num_agents, step=10 ), + "known_cells": mesa.visualization.Slider( + name="# known fields", value=known_cells, + min_value=1, max_value=100, step=1 + ), "color_patches_steps": mesa.visualization.Slider( name="Patches size (# steps)", value=color_patches_steps, min_value=0, max_value=9, step=1, diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 1336f7e..9433ee4 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, cast, List, Optional import numpy as np from mesa import Agent if TYPE_CHECKING: # Type hint for IDEs @@ -54,7 +54,9 @@ def 
__init__(self, unique_id, model, pos, personality, assets=1, add=True): self._assets = assets self._num_elections_participated = 0 self.personality = personality - self.known_cells = [] # ColorCell objects the agent knows (knowledge) + self.cell = model.grid.get_cell_list_contents([(row, col)])[0] + # ColorCell objects the agent knows (knowledge) + self.known_cells: List[Optional[ColorCell]] = [None] * model.known_cells # Add the agent to the models' agent list and the cell if add: model.voting_agents.append(self) @@ -101,6 +103,19 @@ def num_elections_participated(self): def num_elections_participated(self, value): self._num_elections_participated = value + def update_known_cells(self, area): + """ + This method is to update the list of known cells before casting a vote. + :param area: The area that holds the pool of cells in question + """ + n_cells = len(area.cells) + k = len(self.known_cells) + self.known_cells = ( + self.random.sample(area.cells, k) + if n_cells >= k + else area.cells + ) + def ask_for_participation(self, area): """ The agent decides @@ -136,7 +151,7 @@ def compute_assumed_opt_dist(self, area): a_factor = self.decide_altruism_factor(area) # TODO: Implement this # compute the preference ranking vector as a mix between the agent's # own preferences/personality traits and the estimated real distribution - est_dist = self.estimate_real_distribution(area) + est_dist, conf = self.estimate_real_distribution(area) ass_opt = combine_and_normalize(self.personality, est_dist, a_factor) return ass_opt @@ -181,14 +196,15 @@ def vote(self, area): def estimate_real_distribution(self, area): """ The agent estimates the real color distribution in the area based on - her own knowledge (self.known_fields). + her own knowledge (self.known_cells). 
""" - relevant_cells = area.filter_cells(self.known_cells) - known_colors = np.array([cell.color for cell in relevant_cells]) + # relevant_cells = area.filter_cells(self.known_cells) + known_colors = np.array([cell.color for cell in self.known_cells]) unique, counts = np.unique(known_colors, return_counts=True) distribution = np.zeros(self.model.num_colors) distribution[unique] = counts / known_colors.size - return distribution + confidence = len(self.known_cells) / area.num_cells + return distribution, confidence class ColorCell(Agent): diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index a53a56f..74685b8 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -70,7 +70,7 @@ def num_agents(self): @property def num_cells(self): - return len(self.cells) + return self._width * self._height @property def idx_field(self): @@ -143,6 +143,8 @@ def conduct_election(self): # Ask agents for participation preference_profile = [] for agent in self.agents: + # Give agents their (new) known fields + agent.update_known_cells(area=self) if (agent.assets >= el_costs and agent.ask_for_participation(area=self)): agent.num_elections_participated += 1 @@ -175,6 +177,9 @@ def conduct_election(self): p = dist_func(agent.personality, real_color_ord, color_search_pairs) # + Common reward (reward_pa) for all agents agent.assets = int(agent.assets + (0.5-p) * model.max_reward + rpa) + # Correct wealth if below zero + if agent.assets < 0: + agent.assets = 0 # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process # Statistics n = preference_profile.shape[0] # Number agents participated @@ -227,7 +232,7 @@ def step(self) -> None: return # TODO: What to do if no agent participated..? # Mutate colors in cells - # Take some number of cells to mutate (i.e. 
5 %) + # Take some number of cells to mutate (i.e., 5 %) n_to_mutate = int(self.model.mu * self.num_cells) # TODO/Idea: What if the voter_turnout determines the mutation rate? # randomly select x cells @@ -405,7 +410,7 @@ class ParticipationModel(mesa.Model): """A model with some number of agents.""" def __init__(self, height, width, num_agents, num_colors, num_personalities, - mu, election_impact_on_mutation, common_assets, + mu, election_impact_on_mutation, common_assets, known_cells, num_areas, av_area_height, av_area_width, area_size_variance, patch_power, color_patches_steps, draw_borders, heterogeneity, rule_idx, distance_idx, election_costs, max_reward, @@ -430,6 +435,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, # Elections self.election_costs = election_costs self.max_reward = max_reward + self.known_cells = known_cells self.voting_rule = social_welfare_functions[rule_idx] self.distance_func = distance_functions[distance_idx] self.options = create_all_options(num_colors) diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index 6fc3e8c..5712cf5 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -7,6 +7,8 @@ and the values (each in [0,1]) are normalized ranking values. The purpose of this is to allow for non-discrete and non-equidistant rankings. 
""" +from typing import TYPE_CHECKING + import numpy as np @@ -55,7 +57,7 @@ def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): noise_eps = non_zero_variances / noise_factor noise = np.random.uniform(-noise_eps[:, np.newaxis], noise_eps[:, np.newaxis], (n, m)) - # The noise_eps[:, np.newaxis] reshapes noise_eps from shape (n,) to (n, 1) + # `noise_eps[:, np.newaxis]` reshapes noise_eps from shape `(n,)` to (n, 1) pref_tab_var_non_zero += noise # Put the parts back together @@ -116,16 +118,18 @@ def preprocessing_for_approval(pref_table, threshold=None): return (pref_table < threshold).astype(int) -def imp_prepr_for_approval(pref_table, threshold=None): +def imp_prepr_for_approval(pref_table): """ This is just like preprocessing_for_approval, but more intelligent. It sets the threshold depending on the variances. :param pref_table: The agent's preferences. Beware: the values are disagreements => smaller = less disagreement - :param threshold: Will be overwritten (will be set according to variances). :return: The preference table with the options approved or not. """ + # The threshold is set according to the variances threshold = np.mean(pref_table, axis=1) - np.var(pref_table, axis=1) + if TYPE_CHECKING: + assert isinstance(threshold, np.ndarray) return (pref_table < threshold.reshape(-1, 1)).astype(int) @@ -153,7 +157,7 @@ def approval_voting(pref_table): def continuous_score_voting(pref_table): """ - TODO: integrade and test + TODO: integrate and test This function implements a continuous score voting based on disagreement. Beware: Input is a preference table (values define a ranking, index=option), but the output is a ranking/an ordering (values represent options). 
diff --git a/tests/test_participation_area_agent.py b/tests/test_participation_area_agent.py index 2749a40..7bb9fb3 100644 --- a/tests/test_participation_area_agent.py +++ b/tests/test_participation_area_agent.py @@ -65,9 +65,7 @@ def test_conduct_election(self): area.conduct_election() # TODO - def test_estimate_real_distribution(self): - # Get any existing area - existing_area = random.sample(self.model.areas, 1)[0] + def test_adding_new_area_and_agent_within_it(self): # Additional area and agent personality = random.choice(self.model.personalities) a = VoteAgent(num_agents + 1, self.model, pos=(0, 0), @@ -82,26 +80,31 @@ def test_estimate_real_distribution(self): assert a in test_area.agents # Test if agent is present print(f"Agent {a.unique_id} is in area {test_area.unique_id}") print(f"Areas color-cells: {[c.unique_id for c in test_area.cells]}") + + def test_estimate_real_distribution(self): + # Get any existing area + rnd_area = random.sample(self.model.areas, 1)[0] + a = random.sample(rnd_area.agents, 1)[0] # Test the estimate_real_distribution method - k = random.sample(range(2, len(test_area.cells)), 1)[0] + a.update_known_cells(area=rnd_area) + k = len(a.known_cells) print(f"Sample size: {k}") - sample_1 = random.sample(test_area.cells, k) - sample_2 = random.sample(existing_area.cells, 3) - a.known_cells = sample_1 + sample_2 a_colors = [c.color for c in a.known_cells] # To test against print(f"Cells that agent {a.unique_id} knows of:\n" f"{[c.unique_id for c in a.known_cells]} with colors: {a_colors}") - print(f"Cells not part of the area: {[c.unique_id for c in sample_2]}") - rel_cells = test_area.filter_cells(a.known_cells) - rel_color_vec = [c.color for c in rel_cells] - print("The relevant cells should be:\n", - [c.unique_id for c in rel_cells], "with colors", rel_color_vec) - est_distribution = a.estimate_real_distribution(test_area) - print(f"{a.unique_id}s' estimated color dist is: {est_distribution}") + filtered = 
rnd_area.filter_cells(a.known_cells) + select_wrong = [c not in filtered for c in a.known_cells] + wrong = [c.unique_id for i, c in enumerate(a.known_cells) + if select_wrong[i]] + assert not any(wrong), f"Error: Cells {wrong} are not part of the area!" + est_distribution, conf = a.estimate_real_distribution(rnd_area) + assert 0.0 < conf < 1.0, "Error: Confidence out of range [0, 1]!" + print(f"{a.unique_id}s' estimated color dist is: {est_distribution}", + f"with confidence: {conf}") self.assertAlmostEqual(sum(est_distribution), 1.0, places=7) len_colors = self.model.num_colors self.assertEqual(len(est_distribution), len_colors) - counts = [rel_color_vec.count(color) for color in range(len_colors)] + counts = [a_colors.count(color) for color in range(len_colors)] print(f"Color counts: {counts}") s = sum(counts) expected_distribution = [i / s for i in counts] diff --git a/tests/test_participation_model.py b/tests/test_participation_model.py index eff898e..31cfaa6 100644 --- a/tests/test_participation_model.py +++ b/tests/test_participation_model.py @@ -5,6 +5,7 @@ from democracy_sim.model_setup import (grid_rows as height, grid_cols as width, num_agents, num_colors, num_areas, num_personalities, common_assets, mu, + known_cells, election_impact_on_mutation as e_impact, draw_borders, rule_idx, distance_idx, color_heterogeneity as heterogeneity, @@ -21,6 +22,7 @@ def setUp(self): num_agents=num_agents, num_colors=num_colors, num_personalities=num_personalities, + known_cells=known_cells, common_assets=common_assets, mu=mu, election_impact_on_mutation=e_impact, num_areas=num_areas, @@ -79,14 +81,14 @@ def test_distribution_of_personalities(self): self.assertAlmostEqual(sum(p_dist), 1.0) self.assertEqual(len(p_dist), num_personalities) voting_agents = self.model.voting_agents - num_agnts = self.model.num_agents + nr_agents = self.model.num_agents personalities = list(self.model.personalities) p_counts = {str(i): 0 for i in personalities} # Count the occurrence of each 
personality for agent in voting_agents: p_counts[str(agent.personality)] += 1 # Normalize the counts to get the real personality distribution - real_dist = [p_counts[str(p)] / num_agnts for p in personalities] + real_dist = [p_counts[str(p)] / nr_agents for p in personalities] # Simple tests self.assertEqual(len(real_dist), len(p_dist)) self.assertAlmostEqual(float(sum(real_dist)), 1.0) diff --git a/tests/test_participation_voting_agent.py b/tests/test_participation_voting_agent.py index a790a1f..4f49a6a 100644 --- a/tests/test_participation_voting_agent.py +++ b/tests/test_participation_voting_agent.py @@ -41,11 +41,11 @@ def test_combine_and_normalize(self): k = random.sample(range(2, len(test_area.cells)), 1)[0] print(f"Sample size: {k}") a.known_cells = random.sample(test_area.cells, k) - est_dist = a.estimate_real_distribution(test_area) + est_dist, conf = a.estimate_real_distribution(test_area) own_prefs = a.personality # own_prefs = np.array([0.25, 0.5, 0.0, 0.0]) # Should also work.. print(f"Agent {a.unique_id}s' personality: {own_prefs}" - f" and estimated color distribution: {est_dist}") + f" estimated color dist: {est_dist} with confidences: {conf}") for a_factor in [0.0, 0.2, 0.5, 1.0]: comb = combine_and_normalize(own_prefs, est_dist, a_factor) print(f"Assumed opt. 
distribution with factor {a_factor}: \n{comb}") @@ -65,9 +65,10 @@ def test_compute_assumed_opt_dist(self): max_size = len(test_area.cells) k = random.sample(range(2, max_size), 1)[0] a.known_cells = random.sample(test_area.cells, k=k) - est_dist = a.estimate_real_distribution(test_area) + est_dist, conf = a.estimate_real_distribution(test_area) own_prefs = a.personality - print(f"The agents\npersonality: {own_prefs} \nest_dist : {est_dist}") + print(f"The agents\npersonality: {own_prefs} \n" + f"est_dist : {est_dist} and confidences: {conf}") r = a.compute_assumed_opt_dist(test_area) print(f"Assumed optimal distribution: {r}") self.assertTrue(np.isclose(sum(r), 1.0, atol=1e-8)) From e9bf681cb35d528e70f956d3fa0b86b80c963a6c Mon Sep 17 00:00:00 2001 From: jurikane Date: Thu, 23 Jan 2025 18:19:07 +0100 Subject: [PATCH 35/38] added concept description to docs --- docs/concept.md | 93 +++++++++++++++++++++++++++++++++++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 94 insertions(+) create mode 100644 docs/concept.md diff --git a/docs/concept.md b/docs/concept.md new file mode 100644 index 0000000..9bfc546 --- /dev/null +++ b/docs/concept.md @@ -0,0 +1,93 @@ +# Concept + +**DemocracySim** is a multi-agent simulation framework designed to explore the effects of different voting rules on democratic participation and welfare. Developed as part of a master's thesis at Leipzig University, the project investigates how collective decision-making processes shape individual participation, resource distribution, and long-term system dynamics. With a focus on agent-based modeling, the simulation ties together elements of participatory dynamics, resource allocation, and group decision effects in a controlled, evolving system. + +--- + +## Project Summary + +DemocracySim is set in a grid-based environment where agents interact with their surroundings and participate in group decision-making through elections. 
The system explores various scenarios and voting rules to understand key dynamics and challenges in democratic participation. + +### Key Features + +#### Simulated Environment: +- The grid is designed without boundaries, and each unit (field) within it adopts one of **x** colors. Fields change color based on election results, with a mutation rate affected by prior outcomes. +- Groups of fields form **territories**, which serve as the basis for elections and influence grid evolution. + +#### Agents: +- Agents are equipped with a basic artificial intelligence system and operate under a **"top-down" model**, learning decision-making strategies via training. +- Each agent has a **limited budget** and must decide whether to participate in elections. +- Agents have individual **preferences** over colors (called *personalities*) and are divided into **y** randomly distributed personality types. + *(The distribution of types forms majority-minority situations.)* + +#### Elections and Rewards (Two Dilemmas): +1. **Elections:** + - Elections concern the frequency distribution of field colors in a given territory, representing an "objective truth" aimed at emulating wise group decisions. + - For an intuitive understanding, the election addresses the question: + *"What is — or should be — the current color distribution within your territory?"* + +2. **Rewards:** + - Rewards are distributed to all agents in the territory, regardless of participation (*participation dilemma*). + These rewards consist of: + - **Base reward:** Distributed equally based on how well agents guess the true color distribution. 
+ - **Personal reward:** Allocated based on the alignment between election results and agent preferences, introducing a second dilemma: + - *Should agents vote selfishly (favoring their preferences) or vote with a focus on the group's accuracy (collective good)?* + +--- + +## Simulation Metrics / Indicators + +### **Participation Rate** *(Aggregate Behavioral Variable)* +- Measures the percentage of agents actively participating in elections at a given time. +- Helps evaluate the *participation dilemma* by analyzing participation across the group and comparing rates for majority vs. minority groups. + +### **Altruism Factor** *(Individual Behavioral Variable)* +- Quantifies the extent to which agents prioritize the **collective good** (e.g., the group's accuracy in guessing) over **individual preferences**, including cases of non-cooperation with a majority they belong to when it conflicts with the (expected) collective good. +- Additionally, tracking the average altruism factor of personality groups can provide insights, though this may be misleading if agents/groups do not participate. + +### **Gini Index** *(Inequality Metric)* +- Measures the inequality in asset distribution among agents within the system. +- Ranges from **0** (perfect equality) to **1** (maximum inequality, where one agent holds all assets). +- Offers insights into how electoral decisions impact wealth/resource distribution over time. + +### **Collective Accuracy** +- Measures how accurately the group, as a collective, estimates the actual color distribution. +- This directly influences rewards and serves as a metric for evaluating group performance against a ground truth. + +### **Diversity of Shared Opinions** +- Evaluates the variation in agents' expressed preferences. +- To track whether participating agents provide diverse input or converge on overly similar opinions (e.g., due to majority influence). 
+ +### **Distance to Optimum** +In principle, the optimal decision can be determined based on a predefined goal, allowing the distance between this optimum and the group's actual decision to be measured. + +**Possible predefined goals include:** + +1. **Utilitarian**: + - *Maximize the total sum of distributed rewards.* + - Focus on the *total reward*, regardless of how it is distributed. + +2. **Egalitarian**: + - *Minimize the overall inequality in individual rewards.* + - Focus on **fairness**, aiming for a more just distribution of rewards among members. + +3. **Rawlsian**: + - *Maximize the rewards for the poorest (personality-based) group.* + - Inspired by **John Rawls' Difference Principle**, the focus is on improving the well-being of the least advantaged group while tolerating inequalities elsewhere. + +--- + +## Research Questions + +DemocracySim seeks to answer several critical questions: + +- Do different voting procedures produce varying dynamics, and if so, how? +- How do minority and majority agent types behave in collective decision-making? +- What are the long-term effects of (non-)participation on the system? +- How does wealth distribution impact participation and welfare in the simulation? + +--- + +## Broader Implications + +This project offers a controlled testbed for understanding the complex interplay of individual and collective interest in democratic systems. DemocracySim has the potential to reveal valuable insights into real-world voting dynamics. 
diff --git a/mkdocs.yml b/mkdocs.yml index 93627d1..0341c9e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,6 +13,7 @@ edit_uri: edit/dev/docs/ nav: - Home: index.md - Teaser: teaser.md + - Concept: concept.md #- Overview: overview.md #- Code: the_voting_process_step_by_step.md - Mesa: mesa_docs.md From c633a62c96f1897515e8b8decdffa5a6a353acf6 Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 14 Mar 2025 15:28:17 +0800 Subject: [PATCH 36/38] update docs further --- .gitignore | 2 + democracy_sim/participation_agent.py | 58 ++-- democracy_sim/participation_model.py | 341 ++++++++++++++++-------- democracy_sim/visualisation_elements.py | 2 +- docs/concept.md | 93 ------- docs/research/goals.md | 20 ++ docs/technical/api/VoteAgent.md | 3 + mkdocs.yml | 22 +- requirements.txt | 6 +- tests/test_distance_functions.py | 2 - tests/test_majority_rule.py | 2 +- tests/test_participation_area_agent.py | 16 +- 12 files changed, 309 insertions(+), 258 deletions(-) delete mode 100644 docs/concept.md create mode 100644 docs/research/goals.md create mode 100644 docs/technical/api/VoteAgent.md diff --git a/.gitignore b/.gitignore index 10a07f7..5a65933 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,5 @@ templates /docs/images/CI-images .coverage* *.cache +ai_info.txt +convert_docstrings.py diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 9433ee4..5b8c65e 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -43,7 +43,6 @@ def __init__(self, unique_id, model, pos, personality, assets=1, add=True): :add: Whether to add the agent to the model's agent list and color cell. The 'add' variable is set to false on initialization of the model. """ - # Pass the parameters to the parent class. 
super().__init__(unique_id=unique_id, model=model) # The "pos" variable in mesa is special, so I avoid it here try: @@ -62,6 +61,9 @@ def __init__(self, unique_id, model, pos, personality, assets=1, add=True): model.voting_agents.append(self) cell = model.grid.get_cell_list_contents([(row, col)])[0] cell.add_agent(self) + # Election relevant variables + self.est_real_dist = np.zeros(self.model.num_colors) + self.confidence = 0.0 def __str__(self): return (f"Agent(id={self.unique_id}, pos={self.position}, " @@ -149,10 +151,10 @@ def compute_assumed_opt_dist(self, area): """ # Compute the "altruism_factor" via a decision tree a_factor = self.decide_altruism_factor(area) # TODO: Implement this - # compute the preference ranking vector as a mix between the agent's - # own preferences/personality traits and the estimated real distribution + # Compute the preference ranking vector as a mix between the agent's own + # preferences/personality traits and the estimated real distribution. est_dist, conf = self.estimate_real_distribution(area) - ass_opt = combine_and_normalize(self.personality, est_dist, a_factor) + ass_opt = combine_and_normalize(est_dist, self.personality, a_factor) return ass_opt def vote(self, area): @@ -166,23 +168,13 @@ def vote(self, area): """ # TODO Implement this (is to be decided upon a learned decision tree) # Compute the color distribution that is assumed to be the best choice. - # TODO est_best_dist = self.compute_assumed_opt_dist(area) + est_best_dist = self.compute_assumed_opt_dist(area) # Make sure that r= is normalized! # (r.min()=0.0 and r.max()=1.0 and all vals x are within [0.0, 1.0]!) 
############## if TYPE_CHECKING: # Type hint for IDEs self.model = cast(ParticipationModel, self.model) - # # For TESTING - # # we just shuffle the option vector (ints) then normalize - # # and interpret the result as a preference vector (values=prefs) - # # (makes no sense, but it'll work for testing) - # r = np.arange(self.model.options.shape[0]) - # # Shuffle the array in place - # np.random.shuffle(r) - # r = np.array(r, dtype=float) - # r /= r.sum() - # return r - ############## + options = self.model.options dist_func = self.model.distance_func ranking = np.zeros(options.shape[0]) @@ -200,27 +192,38 @@ def estimate_real_distribution(self, area): """ # relevant_cells = area.filter_cells(self.known_cells) known_colors = np.array([cell.color for cell in self.known_cells]) + # Get the unique color ids present and count their occurrence unique, counts = np.unique(known_colors, return_counts=True) - distribution = np.zeros(self.model.num_colors) - distribution[unique] = counts / known_colors.size - confidence = len(self.known_cells) / area.num_cells - return distribution, confidence + # Update the est_real_dist and confidence values of the agent + self.est_real_dist.fill(0) # To ensure the ones not in unique are 0 + self.est_real_dist[unique] = counts / known_colors.size + self.confidence = len(self.known_cells) / area.num_cells + return self.est_real_dist, self.confidence class ColorCell(Agent): """ - Represents a cell's color + Represents a single cell (a field in the grid) with a specific color. + + Attributes: + color (int): The color of the cell. """ def __init__(self, unique_id, model, pos, initial_color: int): """ - Create a cell, in the given state, at the given row, col position. + Initializes a ColorCell, at the given row, col position. + + Args: + unique_id (int): The unique identifier of the cell. + model (mesa.Model): The mesa model of which the cell is part of. + pos (Tuple[int, int]): The position of the cell in the grid. 
+ initial_color (int): The initial color of the cell. """ super().__init__(unique_id, model) # The "pos" variable in mesa is special, so I avoid it here self._row = pos[0] self._col = pos[1] - self._color = initial_color # The cell's current color (int) + self.color = initial_color # The cell's current color (int) self._next_color = None self.agents = [] self.areas = [] @@ -245,15 +248,6 @@ def position(self): # The variable pos is special in mesa! """The location of this cell.""" return self._row, self._col - @property - def color(self): - """The current color of this cell.""" - return self._color - - @color.setter - def color(self, value): - self._color = value - @property def num_agents_in_cell(self): """The number of agents in this cell.""" diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index 74685b8..ec14660 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -29,13 +29,13 @@ def __init__(self, unique_id, model, height, width, size_variance): super().__init__(unique_id=unique_id, model=model) self._set_dimensions(width, height, size_variance) self.agents = [] + self._personality_distribution = None self.cells = [] self._idx_field = None # An indexing position of the area in the grid - self.color_distribution = np.zeros(model.num_colors) # Initialize to 0 - self.voted_ordering = None - self.voter_turnout = 0 # In percent - self.dist_to_reality = None # Elected vs. actual color distribution - self.personality_dist = None + self._color_distribution = np.zeros(model.num_colors) # Initialize to 0 + self._voted_ordering = None + self._voter_turnout = 0 # In percent + self._dist_to_reality = None # Elected vs. 
actual color distribution def __str__(self): return (f"Area(id={self.unique_id}, size={self._height}x{self._width}, " @@ -72,6 +72,26 @@ def num_agents(self): def num_cells(self): return self._width * self._height + @property + def personality_distribution(self): + return self._personality_distribution + + @property + def color_distribution(self): + return self._color_distribution + + @property + def voted_ordering(self): + return self._voted_ordering + + @property + def voter_turnout(self): + return self._voter_turnout + + @property + def dist_to_reality(self): + return self._dist_to_reality + @property def idx_field(self): return self._idx_field @@ -121,8 +141,21 @@ def idx_field(self, pos: tuple): or y_area == self._height - 1): cell.is_border_cell = True self._idx_field = (adjusted_x, adjusted_y) - self.update_color_distribution() - self.update_personality_dist() + self._update_color_distribution() + self._update_personality_distribution() + + def _update_personality_distribution(self): + """ + This method calculates the areas current distribution of personalities. + """ + personalities = list(self.model.personalities) + p_counts = {str(i): 0 for i in personalities} + # Count the occurrence of each personality + for agent in self.agents: + p_counts[str(agent.personality)] += 1 + # Normalize the counts + self._personality_distribution = [p_counts[str(p)] / self.num_agents + for p in personalities] def add_agent(self, agent): self.agents.append(agent) @@ -131,61 +164,74 @@ def add_cell(self, cell): self.cells.append(cell) - def conduct_election(self): + def _conduct_election(self): """ This method holds the primary logic of the simulation by simulating the election in the area as well as handling the payments and rewards. :return voter_turnout: The percentage of agents that participated. 
""" - model = self.model - el_costs = model.election_costs - dist_func = model.distance_func - # Ask agents for participation + # Ask agents for participation and their votes + preference_profile = self._tally_votes() + # Check for the case that no agent participated + if preference_profile.ndim != 2: + print("Area", self.unique_id, "no one participated in the election") + return 0 # TODO: What to do in this case? Cease the simulation? + # Aggregate the preferences ⇒ returns an option ordering + aggregated = self.model.voting_rule(preference_profile) + # Save the "elected" ordering in self._voted_ordering + winning_option = aggregated[0] + self._voted_ordering = self.model.options[winning_option] + # Calculate and distribute rewards + self._distribute_rewards() + # TODO check whether the current color dist and the mutation of the + # colors is calculated and applied correctly and does not interfere + # in any way with the election process + # Statistics + n = preference_profile.shape[0] # Number agents participated + return int((n / self.num_agents) * 100) # Voter turnout in percent + + def _tally_votes(self): + """ + Tally the votes of agents in the area if they want to participate. + :return preference_profile: A NumPy array containing preferences (votes) + """ preference_profile = [] for agent in self.agents: + model = self.model + el_costs = model.election_costs # Give agents their (new) known fields agent.update_known_cells(area=self) if (agent.assets >= el_costs and agent.ask_for_participation(area=self)): agent.num_elections_participated += 1 - # Collect the participation fee from the agents + # Collect the participation fee agent.assets = agent.assets - el_costs - # Ask participating agents for their prefs + # Ask the agent for her preference preference_profile.append(agent.vote(area=self)) # agent.vote returns an array containing dissatisfaction values # between 0 and 1 for each option, interpretable as rank values. 
- preference_profile = np.array(preference_profile) - # Check for the case that no agent participated - if preference_profile.ndim != 2: - print("Area", self.unique_id, "no one participated in the election") - return 0 # TODO: What to do in this case? Cease the simulation? - # Aggregate the prefs using the v-rule ⇒ returns an option ordering - aggregated = model.voting_rule(preference_profile) - # Save the "elected" ordering in self.voted_ordering - winning_option = aggregated[0] - self.voted_ordering = model.options[winning_option] + return np.array(preference_profile) + + def _distribute_rewards(self): + model = self.model # Calculate the distance to the real distribution using distance_func real_color_ord = np.argsort(self.color_distribution)[::-1] # Descending - self.dist_to_reality = dist_func(real_color_ord, self.voted_ordering, - model.color_search_pairs) + dist_func = model.distance_func + self._dist_to_reality = dist_func(real_color_ord, self.voted_ordering, + model.color_search_pairs) # Calculate the rpa - rewards per agent (can be negative) rpa = (0.5 - self.dist_to_reality) * model.max_reward # TODO: change this (?) 
# Distribute the two types of rewards color_search_pairs = model.color_search_pairs - for agent in self.agents: + for a in self.agents: # Personality-based reward factor - p = dist_func(agent.personality, real_color_ord, color_search_pairs) - # + Common reward (reward_pa) for all agents - agent.assets = int(agent.assets + (0.5-p) * model.max_reward + rpa) - # Correct wealth if below zero - if agent.assets < 0: - agent.assets = 0 - # TODO check whether the current color dist and the mutation of the colors is calculated and applied correctly and does not interfere in any way with the election process - # Statistics - n = preference_profile.shape[0] # Number agents participated - return int((n / self.num_agents) * 100) # Voter turnout in percent + p = dist_func(a.personality, real_color_ord, color_search_pairs) + # + common reward (reward_pa) for all agents + a.assets = int(a.assets + (0.5 - p) * model.max_reward + rpa) + if a.assets < 0: # Correct wealth if it fell below zero + a.assets = 0 - def update_color_distribution(self): + def _update_color_distribution(self): """ This method calculates the current color distribution of the area and saves it in the color_distribution attribute. @@ -196,22 +242,9 @@ def update_color_distribution(self): color_count[color] = color_count.get(color, 0) + 1 for color in range(self.model.num_colors): dist_val = color_count.get(color, 0) / self.num_cells # Float - self.color_distribution[color] = dist_val - - def update_personality_dist(self): - """ - This method calculates the areas current distribution of personalities. 
- """ - personalities = list(self.model.personalities) - p_counts = {str(i): 0 for i in personalities} - # Count the occurrence of each personality - for agent in self.agents: - p_counts[str(agent.personality)] += 1 - # Normalize the counts - self.personality_dist = [p_counts[str(p)] / self.num_agents - for p in personalities] + self._color_distribution[color] = dist_val - def filter_cells(self, cell_list): + def _filter_cells(self, cell_list): """ This method is used to filter a given list of cells to return only those which are within the area. @@ -227,7 +260,7 @@ def step(self) -> None: mutate the cells' colors according to the election outcome and update the color distribution of the area. """ - self.voter_turnout = self.conduct_election() # The main election logic! + self._voter_turnout = self._conduct_election() # The main election logic! if self.voter_turnout == 0: return # TODO: What to do if no agent participated..? @@ -248,7 +281,7 @@ def step(self) -> None: for cell, color in zip(cells_to_mutate, colors): cell.color = color # Important: Update the color distribution (because colors changed) - self.update_color_distribution() + self._update_color_distribution() def compute_collective_assets(model): @@ -289,40 +322,6 @@ def get_voter_turnout(model): return voter_turnout_sum / num_areas -def color_by_dst(color_distribution) -> int: - """ - This method selects a color (int) of range(len(color_distribution)) - such that, each color is selected with a probability according to the - given color_distribution array. - Example: color_distribution = [0.2, 0.3, 0.5] - Color 1 is selected with a probability of 0.3 - """ - r = np.random.random() - for color_idx, prob in enumerate(color_distribution): - if r < prob: - return color_idx - r -= prob - - -def create_all_options(n, include_ties=False): - """ - Creates and returns a matrix (an array of all possible ranking vectors), - if specified including ties. - Rank values start from 0. 
- :param n: The number of items to rank (number of colors in our case) - :param include_ties: If True, rankings include ties. - :return r: A NumPy matrix containing all possible rankings of n items - """ - if include_ties: - # Create all possible combinations and sort out invalid rankings - # i.e. [1, 1, 1] or [1, 2, 2] aren't valid as no option is ranked first. - r = np.array([np.array(comb) for comb in product(range(n), repeat=n) - if set(range(max(comb))).issubset(comb)]) - else: - r = np.array([np.array(p) for p in permutations(range(n))]) - return r - - def create_personality(num_colors): """ NOT USED Creates and returns a list of 'personalities' that are to be assigned @@ -407,7 +406,67 @@ def step(self): class ParticipationModel(mesa.Model): - """A model with some number of agents.""" + """ + The ParticipationModel class provides a base environment for + multi-agent simulations within a grid-based world (split into territories) + that reacts dynamically to frequently held collective decision-making + processes ("elections"). It incorporates voting agents with personalities, + color cells (grid fields), and areas (election territories). This model is + designed to analyze different voting rules and their impact. + + This class provides mechanisms for creating and managing cells, agents, + and areas, along with data collection for analysis. Colors in the model + mutate depending on a predefined mutation rate and are influenced by + elections. Agents interact based on their personalities, knowledge, and + past experience. + + Attributes: + grid (mesa.space.SingleGrid): Grid representing the environment + with a single occupancy per cell (the color). + height (int): The height of the grid. + width (int): The width of the grid. + colors (ndarray): Array containing the unique color identifiers. + voting_rule (Callable): A function defining the social welfare + function to aggregate agent preferences. 
This callable typically + takes agent rankings as input and returns a single aggregate result. + distance_func (Callable): A function used to calculate a + distance metric when comparing rankings. It takes two rankings + and returns a numeric distance score. + mu (float): Mutation rate; the probability of each color cell to mutate + after an elections. + color_probs (ndarray): + Probabilities used to determine individual color mutation outcomes. + options (ndarray): Matrix (array of arrays) where each subarray + represents an option (color-ranking) available to agents. + option_vec (ndarray): Array holding the indices of the available options + for computational efficiency. + color_cells (list[ColorCell]): List of all color cells. + Initialized during the model setup. + voting_agents (list[VoteAgent]): List of all voting agents. + Initialized during the model setup. + personalities (list): List of unique personalities available for agents. + personality_distribution (ndarray): The (global) probability + distribution of personalities among all agents. + areas (list[Area]): List of areas (regions or territories within the + grid) in which elections take place. Initialized during model setup. + global_area (Area): The area encompassing the entire grid. + av_area_height (int): Average height of areas in the simulation. + av_area_width (int): Average width of areas created in the simulation. + area_size_variance (float): Variance in area sizes to introduce + non-uniformity among election territories. + common_assets (int): Total resources to be distributed among all agents. + av_area_color_dst (ndarray): Current (area)-average color distribution. + election_costs (float): Cost associated with participating in elections. + max_reward (float): Maximum reward possible for an agent each election. + known_cells (int): Number of cells each agent knows the color of. 
+ datacollector (mesa.DataCollector): A tool for collecting data + (metrics and statistics) at each simulation step. + scheduler (CustomScheduler): The scheduler responsible for executing the + step function. + draw_borders (bool): Only for visualization (no effect on simulation). + _preset_color_dst (ndarray): A predefined global color distribution + (set randomly) that affects cell initialization globally. + """ def __init__(self, height, width, num_agents, num_colors, num_personalities, mu, election_impact_on_mutation, common_assets, known_cells, @@ -416,6 +475,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, rule_idx, distance_idx, election_costs, max_reward, show_area_stats): super().__init__() + # TODO clean up class (public/private variables) self.height = height self.width = width self.colors = np.arange(num_colors) @@ -426,19 +486,19 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, # MultiGrid allows multiple agents to be in the same cell. 
self.grid = mesa.space.SingleGrid(height=height, width=width, torus=True) # Random bias factors that affect the initial color distribution - self.vertical_bias = self.random.uniform(0, 1) - self.horizontal_bias = self.random.uniform(0, 1) + self._vertical_bias = self.random.uniform(0, 1) + self._horizontal_bias = self.random.uniform(0, 1) self.draw_borders = draw_borders # Color distribution (global) - self.color_dst = self.create_color_distribution(heterogeneity) - self._av_area_color_dst = self.color_dst + self._preset_color_dst = self.create_color_distribution(heterogeneity) + self._av_area_color_dst = self._preset_color_dst # Elections self.election_costs = election_costs self.max_reward = max_reward - self.known_cells = known_cells + self.known_cells = known_cells # Integer self.voting_rule = social_welfare_functions[rule_idx] self.distance_func = distance_functions[distance_idx] - self.options = create_all_options(num_colors) + self.options = self.create_all_options(num_colors) # Simulation variables self.mu = mu # Mutation rate for the color cells (0.1 = 10 % mutate) self.common_assets = common_assets @@ -450,7 +510,7 @@ def __init__(self, height, width, num_agents, num_colors, num_personalities, self.color_search_pairs = list(combinations(range(0, num_colors), 2)) # Create color cells self.color_cells: List[Optional[ColorCell]] = [None] * (height * width) - self.initialize_color_cells() + self._initialize_color_cells() # Create agents # TODO: Where do the agents get there known cells from and how!? self.voting_agents: List[Optional[VoteAgent]] = [None] * num_agents @@ -494,14 +554,18 @@ def num_agents(self): def num_areas(self): return len(self.areas) - def initialize_color_cells(self): + @property + def preset_color_dst(self): + return len(self._preset_color_dst) + + def _initialize_color_cells(self): """ This method initializes a color cells for each cell in the model's grid. 
""" # Create a color cell for each cell in the grid for unique_id, (_, (row, col)) in enumerate(self.grid.coord_iter()): # The colors are chosen by a predefined color distribution - color = color_by_dst(self.color_dst) + color = self.color_by_dst(self._preset_color_dst) # Create the cell cell = ColorCell(unique_id, self, (row, col), color) # Add it to the grid @@ -599,6 +663,7 @@ def initialize_all_areas(self): for x_coord, y_coord in zip(additional_x, additional_y): self.initialize_area(next(a_ids), x_coord, y_coord) + def initialize_global_area(self): """ This method initializes the global area spanning the whole grid. @@ -631,22 +696,6 @@ def create_personalities(self, n): del personality_options # Free up memory (variable may be very large) return selected_personalities - def pers_dist(self, size): - """ - This method creates a normalized normal distribution array for picking - and depicting the distribution of personalities in the model. - :param size: The mean value of the normal distribution. - :return: A normalized (sum is one) array mimicking a gaussian curve. - """ - # Generate a normal distribution - rng = np.random.default_rng() - dist = rng.normal(0, 1, size) - dist.sort() # To create a gaussian curve like array - dist = np.abs(dist) # Flip negative values "up" - # Normalize the distribution to sum to one - dist /= dist.sum() - return dist - def initialize_datacollector(self): color_data = {f"Color {i}": get_color_distribution_function(i) for i in @@ -699,6 +748,7 @@ def adjust_color_pattern(self, color_patches_steps, patch_power): most_common_color = self.color_patches(cell, patch_power) cell.color = most_common_color + def create_color_distribution(self, heterogeneity): """ This method is used to create a color distribution that has a bias @@ -719,6 +769,7 @@ def color_patches(self, cell, patch_power): using a similar logic to the color patches model. 
It uses a (normalized) bias coordinate to center the impact of the color patches structures impact around. + :param cell: The cell that may change its color accordingly :param patch_power: Like a radius of impact around the bias point. """ @@ -726,12 +777,12 @@ def color_patches(self, cell, patch_power): normalized_x = cell.row / self.height normalized_y = cell.col / self.width # Calculate the distance of the cell to the bias point - bias_factor = (abs(normalized_x - self.horizontal_bias) - + abs(normalized_y - self.vertical_bias)) + bias_factor = (abs(normalized_x - self._horizontal_bias) + + abs(normalized_y - self._vertical_bias)) # The closer the cell to the bias-point, the less often it is # to be replaced by a color chosen from the initial distribution: if abs(self.random.gauss(0, patch_power)) < bias_factor: - return color_by_dst(self.color_dst) + return self.color_by_dst(self._preset_color_dst) # Otherwise, apply the color patches logic neighbor_cells = self.grid.get_neighbors((cell.row, cell.col), moore=True, @@ -748,6 +799,7 @@ def color_patches(self, cell, patch_power): return self.random.choice(most_common_colors) return cell.color # Return the cell's own color if no consensus + def update_av_area_color_dst(self): """ This method updates the av_area_color_dst attribute of the model. @@ -758,3 +810,56 @@ def update_av_area_color_dst(self): sums += area.color_distribution # Return the average color distributions self.av_area_color_dst = sums / self.num_areas + + + @staticmethod + def pers_dist(size): + """ + This method creates a normalized normal distribution array for picking + and depicting the distribution of personalities in the model. + :param size: The mean value of the normal distribution. + :return: A normalized (sum is one) array mimicking a gaussian curve. 
+ """ + # Generate a normal distribution + rng = np.random.default_rng() + dist = rng.normal(0, 1, size) + dist.sort() # To create a gaussian curve like array + dist = np.abs(dist) # Flip negative values "up" + # Normalize the distribution to sum to one + dist /= dist.sum() + return dist + + + @staticmethod + def create_all_options(n, include_ties=False): + """ + Creates and returns a matrix (an array of all possible ranking vectors), + if specified including ties. + Rank values start from 0. + :param n: The number of items to rank (number of colors in our case) + :param include_ties: If True, rankings include ties. + :return r: A NumPy matrix containing all possible rankings of n items + """ + if include_ties: + # Create all possible combinations and sort out invalid rankings + # i.e. [1, 1, 1] or [1, 2, 2] aren't valid as no option is ranked first. + r = np.array([np.array(comb) for comb in product(range(n), repeat=n) + if set(range(max(comb))).issubset(comb)]) + else: + r = np.array([np.array(p) for p in permutations(range(n))]) + return r + + @staticmethod + def color_by_dst(color_distribution) -> int: + """ + This method selects a color (int) of range(len(color_distribution)) + such that, each color is selected with a probability according to the + given color_distribution array. 
+ Example: color_distribution = [0.2, 0.3, 0.5] + Color 1 is selected with a probability of 0.3 + """ + r = np.random.random() + for color_idx, prob in enumerate(color_distribution): + if r < prob: + return color_idx + r -= prob diff --git a/democracy_sim/visualisation_elements.py b/democracy_sim/visualisation_elements.py index b0fa9f6..bbc4bdf 100644 --- a/democracy_sim/visualisation_elements.py +++ b/democracy_sim/visualisation_elements.py @@ -213,7 +213,7 @@ def create_once(self, model): figsize=(8, 8), sharex=True) for ax, area in zip(axes.flatten(), model.areas): # Fetch data - p_dist = area.personality_dist + p_dist = area.personality_distribution num_agents = area.num_agents # Subplot heights = [int(val * num_agents) for val in p_dist] diff --git a/docs/concept.md b/docs/concept.md deleted file mode 100644 index 9bfc546..0000000 --- a/docs/concept.md +++ /dev/null @@ -1,93 +0,0 @@ -# Concept - -**DemocracySim** is a multi-agent simulation framework designed to explore the effects of different voting rules on democratic participation and welfare. Developed as part of a master's thesis at Leipzig University, the project investigates how collective decision-making processes shape individual participation, resource distribution, and long-term system dynamics. With a focus on agent-based modeling, the simulation ties together elements of participatory dynamics, resource allocation, and group decision effects in a controlled, evolving system. - ---- - -## Project Summary - -DemocracySim is set in a grid-based environment where agents interact with their surroundings and participate in group decision-making through elections. The system explores various scenarios and voting rules to understand key dynamics and challenges in democratic participation. - -### Key Features - -#### Simulated Environment: -- The grid is designed without boundaries, and each unit (field) within it adopts one of **x** colors. 
Fields change color based on election results, with a mutation rate affected by prior outcomes. -- Groups of fields form **territories**, which serve as the basis for elections and influence grid evolution. - -#### Agents: -- Agents are equipped with a basic artificial intelligence system and operate under a **"top-down" model**, learning decision-making strategies via training. -- Each agent has a **limited budget** and must decide whether to participate in elections. -- Agents have individual **preferences** over colors (called *personalities*) and are divided into **y** randomly distributed personality types. - *(The distribution of types forms majority-minority situations.)* - -#### Elections and Rewards (Two Dilemmas): -1. **Elections:** - - Elections concern the frequency distribution of field colors in a given territory, representing an "objective truth" aimed at emulating wise group decisions. - - For an intuitive understanding, the election addresses the question: - *"What is — or should be — the current color distribution within your territory?"* - -2. **Rewards:** - - Rewards are distributed to all agents in the territory, regardless of participation (*participation dilemma*). - These rewards consist of: - - **Base reward:** Distributed equally based on how well agents guess the true color distribution. - - **Personal reward:** Allocated based on the alignment between election results and agent preferences, introducing a second dilemma: - - *Should agents vote selfishly (favoring their preferences) or vote with a focus on the group's accuracy (collective good)?* - ---- - -## Simulation Metrics / Indicators - -### **Participation Rate** *(Aggregate Behavioral Variable)* -- Measures the percentage of agents actively participating in elections at a given time. -- Helps evaluate the *participation dilemma* by analyzing participation across the group and comparing rates for majority vs. minority groups. 
- -### **Altruism Factor** *(Individual Behavioral Variable)* -- Quantifies the extent to which agents prioritize the **collective good** (e.g., the group's accuracy in guessing) over **individual preferences**, including cases of non-cooperation with a majority they belong to when it conflicts with the (expected) collective good. -- Additionally, tracking the average altruism factor of personality groups can provide insights, though this may be misleading if agents/groups do not participate. - -### **Gini Index** *(Inequality Metric)* -- Measures the inequality in asset distribution among agents within the system. -- Ranges from **0** (perfect equality) to **1** (maximum inequality, where one agent holds all assets). -- Offers insights into how electoral decisions impact wealth/resource distribution over time. - -### **Collective Accuracy** -- Measures how accurately the group, as a collective, estimates the actual color distribution. -- This directly influences rewards and serves as a metric for evaluating group performance against a ground truth. - -### **Diversity of Shared Opinions** -- Evaluates the variation in agents' expressed preferences. -- To track whether participating agents provide diverse input or converge on overly similar opinions (e.g., due to majority influence). - -### **Distance to Optimum** -In principle, the optimal decision can be determined based on a predefined goal, allowing the distance between this optimum and the group's actual decision to be measured. - -**Possible predefined goals include:** - -1. **Utilitarian**: - - *Maximize the total sum of distributed rewards.* - - Focus on the *total reward*, regardless of how it is distributed. - -2. **Egalitarian**: - - *Minimize the overall inequality in individual rewards.* - - Focus on **fairness**, aiming for a more just distribution of rewards among members. - -3. 
**Rawlsian**: - - *Maximize the rewards for the poorest (personality-based) group.* - - Inspired by **John Rawls' Difference Principle**, the focus is on improving the well-being of the least advantaged group while tolerating inequalities elsewhere. - ---- - -## Research Questions - -DemocracySim seeks to answer several critical questions: - -- Do different voting procedures produce varying dynamics, and if so, how? -- How do minority and majority agent types behave in collective decision-making? -- What are the long-term effects of (non-)participation on the system? -- How does wealth distribution impact participation and welfare in the simulation? - ---- - -## Broader Implications - -This project offers a controlled testbed for understanding the complex interplay of individual and collective interest in democratic systems. DemocracySim has the potential to reveal valuable insights into real-world voting dynamics. diff --git a/docs/research/goals.md b/docs/research/goals.md new file mode 100644 index 0000000..70caa24 --- /dev/null +++ b/docs/research/goals.md @@ -0,0 +1,20 @@ +**DemocracySim** is a multi-agent simulation framework designed to explore the effects of different voting rules on democratic participation and welfare. +Its broader aim is to be able to include complex and diverse factors via simulation to investigate how collective decision-making processes can be improved. +With a focus on agent-based modeling, the simulation ties together elements of participatory dynamics, resource allocation, and group decision effects in a controlled, evolving system. + +--- + +## Research Questions + +DemocracySim seeks to answer several critical questions: + +- Do different voting procedures produce varying dynamics, and if so, how? +- How do minority and majority agent types behave in collective decision-making? +- What are the long-term effects of (non-)participation on the system? 
+- How does wealth distribution impact participation and welfare in the simulation? + +--- + +## Broader Implications + +This project offers a controlled testbed for understanding the complex interplay of individual and collective interest in democratic systems. DemocracySim has the potential to reveal valuable insights into real-world voting dynamics. diff --git a/docs/technical/api/VoteAgent.md b/docs/technical/api/VoteAgent.md new file mode 100644 index 0000000..c4c8375 --- /dev/null +++ b/docs/technical/api/VoteAgent.md @@ -0,0 +1,3 @@ +# Class `VoteAgent` + +::: democracy_sim.participation_agent.VoteAgent diff --git a/mkdocs.yml b/mkdocs.yml index 0341c9e..a86537d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,7 +13,23 @@ edit_uri: edit/dev/docs/ nav: - Home: index.md - Teaser: teaser.md - - Concept: concept.md + - Research: + - Concept: research/research_concept.md + - Metrics: research/metrics.md + - Goals: research/goals.md + - Technical: + - Overview: technical/technical_overview.md + - Installation Instructions: technical/installation_instructions.md + - API Reference: + - Model (Environment): technical/api/Model.md + - Area (Territory): technical/api/Area.md + - Grid Cell: technical/api/ColorCell.md + - Voting Agent: technical/api/VoteAgent.md + - Inherited Classes: technical/api/inherited.md + #- User Guide: technical/user_guide.md #1. Provide step-by-step guides for common project usage. + #- Examples: technical/examples.md #1. Show key use cases via practical code examples or interactive demos. + #- Developer Docs: technical/dev_docs.md #Offer guidelines for contributing or extending the project (e.g., folder structure, conventions, CI/CD pipelines). 
+ - Architecture Overview: technical/architecture_overview.md #- Overview: overview.md #- Code: the_voting_process_step_by_step.md - Mesa: mesa_docs.md @@ -51,6 +67,8 @@ theme: - toc.follow - toc.integrate - search.share + - search.highlight + - search.suggest - content.action.edit # Plugins plugins: @@ -65,6 +83,8 @@ plugins: name: Deutsch build: true - search + - mkdocstrings: + default_handler: python # Extensions markdown_extensions: diff --git a/requirements.txt b/requirements.txt index 5118a61..cdd98e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ Mesa~=2.3.0 numpy~=1.26.4 -solara~=1.33 +solara~=1.35.1 matplotlib~=3.9.0 ipyvuetify~=1.9.4 seaborn~=0.13.2 @@ -12,8 +12,10 @@ pytest-cov~=5.0.0 toml~=0.10.2 Flask~=3.0.3 altair~=5.3.0 -streamlit~=1.34.0 +streamlit~=1.37.0 mkdocs-git-revision-date-localized-plugin~=0.9.0 mkdocs-static-i18n mkdocs-static-i18n[material] +mkdocstrings +mkdocstrings[python] git+https://github.com/Logende/mesa-replay@main#egg=Mesa-Replay \ No newline at end of file diff --git a/tests/test_distance_functions.py b/tests/test_distance_functions.py index 412fcee..2bdae8b 100644 --- a/tests/test_distance_functions.py +++ b/tests/test_distance_functions.py @@ -3,8 +3,6 @@ import numpy as np from itertools import combinations -from democracy_sim.participation_model import ParticipationModel - class TestKendallTauDistance(unittest.TestCase): diff --git a/tests/test_majority_rule.py b/tests/test_majority_rule.py index 5a3d31d..be68c3f 100644 --- a/tests/test_majority_rule.py +++ b/tests/test_majority_rule.py @@ -191,7 +191,7 @@ def test_with_random_matrix_small(): counts = np.array(list(wc.values())) # Calculate the coefficient of variation (CV) cv = np.std(counts) / np.mean(counts) - assert cv < 0.125 + assert cv < 0.15 print(f"\nCV: {cv}") # Print the time taken elapsed_time = stop_time - start_time diff --git a/tests/test_participation_area_agent.py b/tests/test_participation_area_agent.py index 7bb9fb3..dc84c86 100644 
--- a/tests/test_participation_area_agent.py +++ b/tests/test_participation_area_agent.py @@ -23,7 +23,7 @@ def test_update_color_distribution(self): all_color_cells = self.model.color_cells rand_area.cells = random.sample(all_color_cells, len(rand_area.cells)) # Run/test the update_color_distribution method - rand_area.update_color_distribution() + rand_area._update_color_distribution() new_dst = rand_area.color_distribution print(f"Area {rand_area.unique_id}s new color distribution: {new_dst}") # Check if the distribution has changed @@ -39,10 +39,10 @@ def test_filter_cells(self): other_cells = random.sample(self.model.color_cells, 4) raw_cell_list = area_cell_sample + other_cells print(f"Cells to be filtered: {[c.unique_id for c in raw_cell_list]}") - filtered_cells = existing_area.filter_cells(raw_cell_list) + filtered_cells = existing_area._filter_cells(raw_cell_list) print(f"Filtered cells: {[c.unique_id for c in filtered_cells]}") # Check if the cells are filtered correctly - add_cells = existing_area.filter_cells(other_cells) + add_cells = existing_area._filter_cells(other_cells) if len(add_cells) > 0: print(f"Additional cells: {[c.unique_id for c in add_cells]}") area_cell_sample += add_cells @@ -53,16 +53,16 @@ def test_conduct_election(self): # Test with majority_rule and spearman self.model.voting_rule = majority_rule self.model.distance_func = spearman - area.conduct_election() + area._conduct_election() # Test with approval_voting and spearman self.model.voting_rule = approval_voting - area.conduct_election() + area._conduct_election() # Test with approval_voting and kendall_tau self.model.distance_func = kendall_tau - area.conduct_election() + area._conduct_election() # Test with majority_rule and kendall_tau self.model.voting_rule = majority_rule - area.conduct_election() + area._conduct_election() # TODO def test_adding_new_area_and_agent_within_it(self): @@ -92,7 +92,7 @@ def test_estimate_real_distribution(self): a_colors = [c.color for c in 
a.known_cells] # To test against print(f"Cells that agent {a.unique_id} knows of:\n" f"{[c.unique_id for c in a.known_cells]} with colors: {a_colors}") - filtered = rnd_area.filter_cells(a.known_cells) + filtered = rnd_area._filter_cells(a.known_cells) select_wrong = [c not in filtered for c in a.known_cells] wrong = [c.unique_id for i, c in enumerate(a.known_cells) if select_wrong[i]] From 276fda615ed7fc670a9082ec84b2d50df9475884 Mon Sep 17 00:00:00 2001 From: jurikane Date: Fri, 21 Mar 2025 19:02:24 +0800 Subject: [PATCH 37/38] worked on docs - major improve - not entirely finished --- .gitignore | 2 + democracy_sim/distance_functions.py | 64 +++-- democracy_sim/model_setup.py | 6 +- democracy_sim/participation_agent.py | 67 +++-- democracy_sim/participation_model.py | 307 ++++++++++++++++------ democracy_sim/run.py | 3 +- democracy_sim/social_welfare_functions.py | 79 +++--- docs/de/QA.md | 91 +++++++ docs/research/metrics.md | 39 +++ docs/research/research_concept.md | 27 ++ docs/technical/api/Area.md | 7 + docs/technical/api/ColorCell.md | 3 + docs/technical/api/Model.md | 3 + docs/technical/api/Utility_functions.md | 3 + docs/technical/api/inherited.md | 11 + docs/technical/technical_overview.md | 31 +++ mkdocs.yml | 5 +- tests/factory.py | 31 +++ tests/test_color_by_dst.py | 65 +++++ tests/test_conduct_election.py | 22 ++ tests/test_create_personalities.py | 58 ++++ tests/test_distribute_rewards.py | 15 ++ tests/test_initialize_all_areas.py | 97 +++++++ tests/test_set_dimensions.py | 28 ++ tests/test_tally_votes.py | 21 ++ tests/test_update_color_distribution.py | 23 ++ 26 files changed, 941 insertions(+), 167 deletions(-) create mode 100644 docs/de/QA.md create mode 100644 docs/research/metrics.md create mode 100644 docs/research/research_concept.md create mode 100644 docs/technical/api/Area.md create mode 100644 docs/technical/api/ColorCell.md create mode 100644 docs/technical/api/Model.md create mode 100644 docs/technical/api/Utility_functions.md create 
mode 100644 docs/technical/api/inherited.md create mode 100644 docs/technical/technical_overview.md create mode 100644 tests/factory.py create mode 100644 tests/test_color_by_dst.py create mode 100644 tests/test_conduct_election.py create mode 100644 tests/test_create_personalities.py create mode 100644 tests/test_distribute_rewards.py create mode 100644 tests/test_initialize_all_areas.py create mode 100644 tests/test_set_dimensions.py create mode 100644 tests/test_tally_votes.py create mode 100644 tests/test_update_color_distribution.py diff --git a/.gitignore b/.gitignore index 5a65933..3b9f4f3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ /.idea .DS_Store __pycache__/ +*.ipynb /examples /starter_model /mesa @@ -8,6 +9,7 @@ site/ sorted-out-tests /benchmarks /notes +/docs/work_in_progress_exclude # short term: Dockerfile docker-compose.yml diff --git a/democracy_sim/distance_functions.py b/democracy_sim/distance_functions.py index 73f0f1b..90af2fa 100644 --- a/democracy_sim/distance_functions.py +++ b/democracy_sim/distance_functions.py @@ -1,5 +1,9 @@ from math import comb import numpy as np +from numpy.typing import NDArray +from typing import TypeAlias + +FloatArray: TypeAlias = NDArray[np.float64] def kendall_tau_on_ranks(rank_arr_1, rank_arr_2, search_pairs, color_vec): @@ -15,11 +19,15 @@ def kendall_tau_on_ranks(rank_arr_1, rank_arr_2, search_pairs, color_vec): Rank vectors hold the rank of each option (option = index). Not to be confused with an ordering (or sequence) where the vector holds options and the index is the rank. 
- :param rank_arr_1: First (NumPy) array containing the ranks of each option - :param rank_arr_2: The second rank array - :param search_pairs: The pairs of indices (for efficiency) - :param color_vec: The vector of colors (for efficiency) - :return: The kendall tau distance + + Args: + rank_arr_1: First (NumPy) array containing the ranks of each option + rank_arr_2: The second rank array + search_pairs: The pairs of indices (for efficiency) + color_vec: The vector of colors (for efficiency) + + Returns: + The kendall tau distance """ # Get the ordering (option names being 0 to length) ordering_1 = np.argsort(rank_arr_1) @@ -46,10 +54,14 @@ def unnormalized_kendall_tau(ordering_1, ordering_2, search_pairs): """ This function calculates the kendal tau distance on two orderings. An ordering holds the option names in the order of their rank (rank=index). - :param ordering_1: First (NumPy) array containing ranked options - :param ordering_2: The second ordering array - :param search_pairs: Containing search pairs of indices (for efficiency) - :return: The kendall tau distance + + Args: + ordering_1: First (NumPy) array containing ranked options + ordering_2: The second ordering array + search_pairs: Containing search pairs of indices (for efficiency) + + Returns: + The kendall tau distance """ # Rename the elements to reduce the problem to counting inversions mapping = {option: idx for idx, option in enumerate(ordering_1)} @@ -70,10 +82,14 @@ def kendall_tau(ordering_1, ordering_2, search_pairs): The larger the distance, the more dissimilar the two lists are. Kendall tau distance is also called bubble-sort distance. An ordering holds the option names in the order of their rank (rank=index). 
- :param ordering_1: First (NumPy) array containing ranked options - :param ordering_2: The second ordering array - :param search_pairs: Containing the pairs of indices (for efficiency) - :return: The kendall tau distance + + Args: + ordering_1: First (NumPy) array containing ranked options + ordering_2: The second ordering array + search_pairs: Containing the pairs of indices (for efficiency) + + Returns: + The kendall tau distance """ # TODO: remove these tests (comment out) on actual simulations to speed up n = ordering_1.size @@ -103,9 +119,13 @@ def spearman_distance(rank_arr_1, rank_arr_2): of the two lists. This function is meant to work with numeric values as well. Hence, we only assume the rank values to be comparable (e.q. normalized). - :param rank_arr_1: First (NumPy) array containing the ranks of each option - :param rank_arr_2: The second rank array - :return: The Spearman distance + + Args: + rank_arr_1: First (NumPy) array containing the ranks of each option + rank_arr_2: The second rank array + + Returns: + The Spearman distance """ # TODO: remove these tests (comment out) on actual simulations assert rank_arr_1.size == rank_arr_2.size, \ @@ -123,10 +143,14 @@ def spearman(ordering_1, ordering_2, _search_pairs=None): Spearman's foot rule is a measure of the distance between ranked lists. It is given as the sum of the absolute differences between the ranks of the two orderings (values from 0 to n-1 in any order). - :param ordering_1: The first (NumPy) array containing the option's ranks. - :param ordering_2: The second rank array. - :param _search_pairs: This parameter is intentionally unused. - :return: The Spearman distance + + Args: + ordering_1: The first (NumPy) array containing the option's ranks. + ordering_2: The second rank array. + _search_pairs: This parameter is intentionally unused. 
+ + Returns: + The Spearman distance """ # TODO: remove these tests (comment out) on actual simulations to speed up n = ordering_1.size diff --git a/democracy_sim/model_setup.py b/democracy_sim/model_setup.py index 76c37a6..29348d1 100644 --- a/democracy_sim/model_setup.py +++ b/democracy_sim/model_setup.py @@ -99,9 +99,11 @@ def participation_draw(cell: ColorCell): This function is registered with the visualization server to be called each tick to indicate how to draw the cell in its current color. - :param cell: The cell in the simulation + Args: + cell: The cell in the simulation - :return: The portrayal dictionary. + Returns: + The portrayal dictionary. """ if cell is None: raise AssertionError diff --git a/democracy_sim/participation_agent.py b/democracy_sim/participation_agent.py index 5b8c65e..adae33c 100644 --- a/democracy_sim/participation_agent.py +++ b/democracy_sim/participation_agent.py @@ -10,10 +10,17 @@ def combine_and_normalize(arr_1: np.array, arr_2: np.array, factor: float): Combine two arrays weighted by a factor favoring arr_1. The first array is to be the estimated real distribution. And the other is to be the personality vector of the agent. - :param arr_1: The first array to be combined (real distribution). - :param arr_2: The second array to be combined (personality vector). - :param factor: The factor to weigh the two arrays. - :return: The normalized result of the weighted linear combination. + + Args: + arr_1: The first array to be combined (real distribution). + arr_2: The second array to be combined (personality vector). + factor: The factor to weigh the two arrays. + + Returns: + result (np.array): The normalized weighted linear combination. 
+ + Example: + TODO """ # Ensure f is between 0 and 1 TODO: remove this on simulations to speed up if not (0 <= factor <= 1): @@ -28,20 +35,18 @@ def combine_and_normalize(arr_1: np.array, arr_2: np.array, factor: float): class VoteAgent(Agent): """An agent that has limited knowledge and resources and - can decide to use them to participate in elections.""" + can decide to use them to participate in elections. + """ def __init__(self, unique_id, model, pos, personality, assets=1, add=True): """ Create a new agent. - :param unique_id: The unique identifier of the agent. - :param model: The simulation model of which the agent is part of. - :type model: ParticipationModel - :param pos: The position of the agent in the grid. - :type pos: Tuple - :param personality: Represents the agent's preferences among colors. - :type personality: Numpy.ndarray - :param assets: The wealth/assets/motivation of the agent. - :add: Whether to add the agent to the model's agent list and color cell. - The 'add' variable is set to false on initialization of the model. + + Attributes: + unique_id: The unique identifier of the agent. + model: The simulation model of which the agent is part of. + pos: The position of the agent in the grid. + personality: Represents the agent's preferences among colors. + assets: The wealth/assets/motivation of the agent. """ super().__init__(unique_id=unique_id, model=model) # The "pos" variable in mesa is special, so I avoid it here @@ -108,7 +113,9 @@ def num_elections_participated(self, value): def update_known_cells(self, area): """ This method is to update the list of known cells before casting a vote. - :param area: The area that holds the pool of cells in question + + Args: + area: The area that holds the pool of cells in question """ n_cells = len(area.cells) k = len(self.known_cells) @@ -122,8 +129,12 @@ def ask_for_participation(self, area): """ The agent decides whether to participate in the upcoming election of a given area. 
- :param area: The area in which the election takes place. - :return: True if the agent decides to participate, False otherwise + + Args: + area: The area in which the election takes place. + + Returns: + True if the agent decides to participate, False otherwise """ #print("Agent", self.unique_id, "decides whether to participate", # "in election of area", area.unique_id) @@ -145,9 +156,12 @@ def compute_assumed_opt_dist(self, area): Computes a color distribution that the agent assumes to be an optimal choice in any election (regardless of whether it exists as a real option to vote for or not). It takes "altruistic" concepts into consideration. - :param area: The area in which the election takes place. - :return: The assumed optimal color distribution (normalized). - TODO add unit test for this method + + Args: + area (Area): The area in which the election takes place. + + Returns: + ass_opt: The assumed optimal color distribution (normalized). """ # Compute the "altruism_factor" via a decision tree a_factor = self.decide_altruism_factor(area) # TODO: Implement this @@ -163,8 +177,9 @@ def vote(self, area): i.e., she returns a preference ranking vector over all options. (Ranking: `index = option`, `value proportional to rank`) The available options are set in the model. - :param area: The area in which the election takes place. - :return ranking: A normalized preference-ranking (sum-normalization) + + Args: + area (Area): The area in which the election takes place. """ # TODO Implement this (is to be decided upon a learned decision tree) # Compute the color distribution that is assumed to be the best choice. @@ -189,8 +204,10 @@ def estimate_real_distribution(self, area): """ The agent estimates the real color distribution in the area based on her own knowledge (self.known_cells). + + Args: + area (Area): The area the agent uses to estimate. 
""" - # relevant_cells = area.filter_cells(self.known_cells) known_colors = np.array([cell.color for cell in self.known_cells]) # Get the unique color ids present and count their occurrence unique, counts = np.unique(known_colors, return_counts=True) @@ -209,7 +226,7 @@ class ColorCell(Agent): color (int): The color of the cell. """ - def __init__(self, unique_id, model, pos, initial_color: int): + def __init__(self, unique_id, model, pos: tuple, initial_color: int): """ Initializes a ColorCell, at the given row, col position. diff --git a/democracy_sim/participation_model.py b/democracy_sim/participation_model.py index ec14660..8555dc5 100644 --- a/democracy_sim/participation_model.py +++ b/democracy_sim/participation_model.py @@ -1,4 +1,3 @@ -import itertools from typing import TYPE_CHECKING, cast, List, Optional import mesa from democracy_sim.participation_agent import VoteAgent, ColorCell @@ -18,11 +17,13 @@ class Area(mesa.Agent): def __init__(self, unique_id, model, height, width, size_variance): """ Create a new area. - :param unique_id: The unique identifier of the area. - :param model: The simulation model of which the area is part of. - :param height: The average height of the area (see size_variance). - :param width: The average width of the area (see size_variance). - :param size_variance: A variance factor applied to height and width. + + Attributes: + unique_id (int): The unique identifier of the area. + model (ParticipationModel): The simulation model of which the area is part of. + height (int): The average height of the area (see size_variance). + width (int): The average width of the area (see size_variance). + size_variance (float): A variance factor applied to height and width. """ if TYPE_CHECKING: # Type hint for IDEs model = cast(ParticipationModel, model) @@ -45,10 +46,20 @@ def __str__(self): def _set_dimensions(self, width, height, size_var): """ - Set the dimensions of the area right, based on the size variance. 
- :param width: The average width of the area. - :param height: The average height of the area. - :param size_var: A variance factor applied to height and width. + Sets the area's dimensions based on the provided width, height, and variance factor. + + This function adjusts the width and height by a random factor drawn from + the range [1 - size_var, 1 + size_var]. If size_var is zero, no variance + is applied. + + Args: + width (int): The average width of the area. + height (int): The average height of the area. + size_var (float): A variance factor applied to width and height. + Must be in [0, 1]. + + Raises: + ValueError: If size_var is not between 0 and 1. """ if size_var == 0: self._width = width @@ -99,10 +110,14 @@ def idx_field(self): @idx_field.setter def idx_field(self, pos: tuple): """ + Sets the indexing field (cell coordinate in the grid) of the area. + This method sets the areas indexing-field (top-left cell coordinate) which determines which cells and agents on the grid belong to the area. The cells and agents are added to the area's lists of cells and agents. - :param pos: (x, y) representing the areas top-left coordinates. + + Args: + pos: (x, y) representing the areas top-left coordinates. """ # TODO: Check - isn't it better to make sure agents are added to the area when they are created? # TODO -- There is something wrong here!!! (Agents are not added to the areas) @@ -144,7 +159,7 @@ def idx_field(self, pos: tuple): self._update_color_distribution() self._update_personality_distribution() - def _update_personality_distribution(self): + def _update_personality_distribution(self) -> None: """ This method calculates the areas current distribution of personalities. 
""" @@ -157,18 +172,37 @@ def _update_personality_distribution(self): self._personality_distribution = [p_counts[str(p)] / self.num_agents for p in personalities] - def add_agent(self, agent): + def add_agent(self, agent: VoteAgent) -> None: + """ + Appends an agent to the areas agents list. + + Args: + agent (VoteAgent): The agent to be added to the area. + """ self.agents.append(agent) - def add_cell(self, cell): + def add_cell(self, cell: ColorCell) -> None: + """ + Appends a cell to the areas cells list. + + Args: + cell (ColorCell): The agent to be added to the area. + """ self.cells.append(cell) - def _conduct_election(self): + def _conduct_election(self) -> int: """ - This method holds the primary logic of the simulation by simulating - the election in the area as well as handling the payments and rewards. - :return voter_turnout: The percentage of agents that participated. + Simulates the election within the area and manages rewards. + + The election process asks agents to participate, collects votes, + aggregates preferences using the model's voting rule, + and saves the elected option as the latest winning option. + Agents incur costs for participation + and may receive rewards based on the outcome. + + Returns: + int: The voter turnout in percent. Returns 0 if no agent participates. """ # Ask agents for participation and their votes preference_profile = self._tally_votes() @@ -192,8 +226,14 @@ def _conduct_election(self): def _tally_votes(self): """ - Tally the votes of agents in the area if they want to participate. - :return preference_profile: A NumPy array containing preferences (votes) + Gathers votes from agents who choose to (and can afford to) participate. + + Each participating agent contributes a vector of dissatisfaction values with + respect to the available options. These values are combined into a NumPy array. + + Returns: + np.ndarray: A 2D array representing the preference profiles of all + participating agents. 
Each row corresponds to an agent's vote. """ preference_profile = [] for agent in self.agents: @@ -212,7 +252,14 @@ def _tally_votes(self): # between 0 and 1 for each option, interpretable as rank values. return np.array(preference_profile) - def _distribute_rewards(self): + def _distribute_rewards(self) -> None: + """ + Calculates and distributes rewards (or penalties) to agents based on outcomes. + + The function measures the difference between the actual color distribution + and the elected outcome using a distance metric. It then increments or reduces + agent assets accordingly, ensuring assets do not fall below zero. + """ model = self.model # Calculate the distance to the real distribution using distance_func real_color_ord = np.argsort(self.color_distribution)[::-1] # Descending @@ -231,10 +278,12 @@ def _distribute_rewards(self): if a.assets < 0: # Correct wealth if it fell below zero a.assets = 0 - def _update_color_distribution(self): + def _update_color_distribution(self) -> None: """ - This method calculates the current color distribution of the area - and saves it in the color_distribution attribute. + Recalculates the area's color distribution and updates the _color_distribution attribute. + + This method counts how many cells of each color belong to the area, normalizes + the counts by the total number of cells, and stores the result internally. """ color_count = {} for cell in self.cells: @@ -248,14 +297,20 @@ def _filter_cells(self, cell_list): """ This method is used to filter a given list of cells to return only those which are within the area. - :param cell_list: A list of ColorCell cells to be filtered. - :return: A list of ColorCell cells that are within the area. + + Args: + cell_list: A list of ColorCell cells to be filtered. + + Returns: + A list of ColorCell cells that are within the area. """ cell_set = set(self.cells) return [c for c in cell_list if c in cell_set] def step(self) -> None: """ + Run one step of the simulation. 
+ Conduct an election in the area, mutate the cells' colors according to the election outcome and update the color distribution of the area. @@ -329,7 +384,9 @@ def create_personality(num_colors): but it is not a full ranking vector since the number of colors influencing the personality is limited. The array is therefore not normalized. White (color 0) is never part of a personality. - :param num_colors: The number of colors in the simulation. + + Args: + num_colors: The number of colors in the simulation. """ # TODO add unit tests for this function personality = np.random.randint(0, 100, num_colors) # TODO low=0 or 1? @@ -350,7 +407,9 @@ def create_personality(num_colors): def get_color_distribution_function(color): """ This method returns a lambda function for the color distribution chart. - :param color: The color number (used as index). + + Args: + color: The color number (used as index). """ return lambda m: m.av_area_color_dst[color] @@ -374,24 +433,20 @@ def get_election_results(area): """ Returns the voted ordering as a list or None if not available. - :return: List of voted ordering or None. + Returns: + List of voted ordering or None. """ if isinstance(area, Area) and area.voted_ordering is not None: return area.voted_ordering.tolist() return None -# def get_area_personality_based_reward(area): -# # Assuming you have a method to calculate this in the Area class -# return area.calculate_personality_reward() -# -# def get_area_gini_index(area): -# # Assuming you have a method to calculate this in the Area class -# return area.calculate_gini_index() class CustomScheduler(mesa.time.BaseScheduler): def step(self): - """Execute the step function for all area- and cell-agents by type, - first for Areas then for ColorCells.""" + """ + Execute the step function for all area- and cell-agents by type, + first for Areas then for ColorCells. 
+ """ model = self.model if TYPE_CHECKING: model = cast(ParticipationModel, model) @@ -418,7 +473,7 @@ class ParticipationModel(mesa.Model): and areas, along with data collection for analysis. Colors in the model mutate depending on a predefined mutation rate and are influenced by elections. Agents interact based on their personalities, knowledge, and - past experience. + experiences. Attributes: grid (mesa.space.SingleGrid): Grid representing the environment @@ -466,7 +521,7 @@ class ParticipationModel(mesa.Model): draw_borders (bool): Only for visualization (no effect on simulation). _preset_color_dst (ndarray): A predefined global color distribution (set randomly) that affects cell initialization globally. - """ + """ def __init__(self, height, width, num_agents, num_colors, num_personalities, mu, election_impact_on_mutation, common_assets, known_cells, @@ -604,7 +659,9 @@ def init_color_probs(self, election_impact): """ This method initializes a probability array for the mutation of colors. The probabilities reflect the election outcome with some impact factor. - :param election_impact: The impact the election has on the mutation. + + Args: + election_impact (float): The impact the election has on the mutation. """ p = (np.arange(self.num_colors, 0, -1)) ** election_impact # Normalize @@ -623,13 +680,36 @@ def initialize_area(self, a_id: int, x_coord, y_coord): # Save in the models' areas-list self.areas[a_id] = area - - def initialize_all_areas(self): + def initialize_all_areas(self) -> None: """ - This method initializes the areas in the models' grid in such a way - that the areas are spread approximately evenly across the grid. - Depending on grid size, the number of areas and their (average) sizes. - TODO create unit tests for this method (Tested manually so far) + Initializes all areas on the grid in the model. 
+ + This method divides the grid into approximately evenly distributed areas, + ensuring that the areas are spaced as uniformly as possible based + on the grid dimensions and the average area size specified by + `av_area_width` and `av_area_height`. + + The grid may contain more or fewer areas than an exact square + grid arrangement due to `num_areas` not always being a perfect square. + If the number of areas is not a perfect square, the remaining areas + are placed randomly on the grid to ensure that `num_areas` + areas are initialized. + + Args: + None. + + Returns: + None. initializes `num_areas` and places them directly on the grid. + + Raises: + None, but if `self.num_areas == 0`, the method exits early. + + Example: + - Given `num_areas = 4` and `grid.width = grid.height = 10`, + this method might initialize areas with approximate distances + to maximize uniform distribution (like a 2x2 grid). + - For `num_areas = 5`, four areas will be initialized evenly, and + the fifth will be placed randomly due to the uneven distribution. """ if self.num_areas == 0: return @@ -667,6 +747,9 @@ def initialize_all_areas(self): def initialize_global_area(self): """ This method initializes the global area spanning the whole grid. + + Returns: + Area: The global area (with unique_id set to -1 and idx to (0, 0)). """ global_area = Area(-1, self, self.height, self.width, 0) # Place the area in the grid using its indexing field @@ -675,26 +758,39 @@ def initialize_global_area(self): return global_area - def create_personalities(self, n): + def create_personalities(self, n: int): """ - TODO ensure that we end up with n personalities (with unique orderings) - maybe have to use orderings and convert them - """ - #p_colors = range(1, self.num_colors) # Personalities exclude white - # TODO is it possible to leave out white (--> dist-func)? 
- p_colors = list(range(self.num_colors)) - # Make sure the personalities aren't always in the same order - self.random.shuffle(p_colors) # Not actually needed but just in case - personality_options = np.array(list(itertools.permutations(p_colors))) + Creates n unique "personalities," where a "personality" is a specific + permutation of self.num_colors color indices. + + Args: + n (int): Number of unique personalities to generate. - if len(personality_options) < n: - raise ValueError("Not enough unique personality options available.") + Returns: + np.ndarray: Array of shape `(n, num_colors)`. - indices = np.random.choice(len(personality_options), n, replace=False) - selected_personalities = personality_options[indices].copy() + Raises: + ValueError: If `n` exceeds the possible unique permutations. + + Example: + for n=2 and self.num_colors=3, the function could return: + + [[1, 0, 2], + [2, 1, 0]] + """ + # p_colors = range(1, self.num_colors) # Personalities exclude white + max_permutations = np.math.factorial(self.num_colors) + if n > max_permutations or n < 1: + raise ValueError(f"Cannot generate {n} unique personalities: " + f"only {max_permutations} unique ones exist.") + selected_permutations = set() + while len(selected_permutations) < n: + # Sample a permutation lazily and add it to the set + perm = tuple(self.random.sample(range(self.num_colors), + self.num_colors)) + selected_permutations.add(perm) - del personality_options # Free up memory (variable may be very large) - return selected_personalities + return np.array(list(selected_permutations)) def initialize_datacollector(self): @@ -727,7 +823,9 @@ def initialize_datacollector(self): def step(self): - """Advance the model by one step.""" + """ + Advance the model by one step. 
+ """ # Conduct elections in the areas # and then mutate the color cells according to election outcomes @@ -738,8 +836,13 @@ def step(self): self.datacollector.collect(self) - def adjust_color_pattern(self, color_patches_steps, patch_power): - """Adjusting the color pattern to make it less predictable.""" + def adjust_color_pattern(self, color_patches_steps: int, patch_power: float): + """Adjusting the color pattern to make it less predictable. + + Args: + color_patches_steps: How often to run the color-patches step. + patch_power: The power of the patching (like a radius of impact). + """ cells = self.color_cells for _ in range(color_patches_steps): print(f"Color adjustment step {_}") @@ -749,11 +852,13 @@ def adjust_color_pattern(self, color_patches_steps, patch_power): cell.color = most_common_color - def create_color_distribution(self, heterogeneity): + def create_color_distribution(self, heterogeneity: float): """ This method is used to create a color distribution that has a bias according to the given heterogeneity factor. - :param heterogeneity: Factor used as sigma in 'random.gauss'. + + Args: + heterogeneity (float): Factor used as sigma in 'random.gauss'. """ colors = range(self.num_colors) values = [abs(self.random.gauss(1, heterogeneity)) for _ in colors] @@ -763,15 +868,19 @@ def create_color_distribution(self, heterogeneity): return dst_array - def color_patches(self, cell, patch_power): + def color_patches(self, cell: ColorCell, patch_power: float): """ This method is used to create a less random initial color distribution using a similar logic to the color patches model. It uses a (normalized) bias coordinate to center the impact of the color patches structures impact around. - :param cell: The cell that may change its color accordingly - :param patch_power: Like a radius of impact around the bias point. + Args: + cell: The cell that may change its color accordingly + patch_power: Like a radius of impact around the bias point. 
+ + Returns: + int: The consensus color or the cell's own color if no consensus. """ # Calculate the normalized position of the cell normalized_x = cell.row / self.height @@ -817,8 +926,12 @@ def pers_dist(size): """ This method creates a normalized normal distribution array for picking and depicting the distribution of personalities in the model. - :param size: The mean value of the normal distribution. - :return: A normalized (sum is one) array mimicking a gaussian curve. + + Args: + size: The mean value of the normal distribution. + + Returns: + np.array: Normalized (sum is one) array mimicking a gaussian curve. """ # Generate a normal distribution rng = np.random.default_rng() @@ -831,14 +944,18 @@ def pers_dist(size): @staticmethod - def create_all_options(n, include_ties=False): + def create_all_options(n: int, include_ties=False): """ - Creates and returns a matrix (an array of all possible ranking vectors), + Creates a matrix (an array of all possible ranking vectors), if specified including ties. Rank values start from 0. - :param n: The number of items to rank (number of colors in our case) - :param include_ties: If True, rankings include ties. - :return r: A NumPy matrix containing all possible rankings of n items + + Args: + n (int): The number of items to rank (number of colors in our case) + include_ties (bool): If True, rankings include ties. + + Returns: + np.array: A matrix containing all possible ranking vectors. """ if include_ties: # Create all possible combinations and sort out invalid rankings @@ -850,16 +967,32 @@ def create_all_options(n, include_ties=False): return r @staticmethod - def color_by_dst(color_distribution) -> int: + def color_by_dst(color_distribution: np.array) -> int: """ - This method selects a color (int) of range(len(color_distribution)) - such that, each color is selected with a probability according to the - given color_distribution array. 
- Example: color_distribution = [0.2, 0.3, 0.5] - Color 1 is selected with a probability of 0.3 + Selects a color (int) from range(len(color_distribution)) + based on the given color_distribution array, where each entry represents + the probability of selecting that index. + + Args: + color_distribution: Array determining the selection probabilities. + + Returns: + int: The selected index based on the given probabilities. + + Example: + color_distribution = [0.2, 0.3, 0.5] + Color 1 will be selected with a probability of 0.3. """ - r = np.random.random() + if abs(sum(color_distribution) -1) > 1e-8: + raise ValueError("The color_distribution array must sum to 1.") + r = np.random.random() # Random float between 0 and 1 + cumulative_sum = 0.0 for color_idx, prob in enumerate(color_distribution): - if r < prob: + if prob < 0: + raise ValueError("color_distribution contains negative value.") + cumulative_sum += prob + if r < cumulative_sum: # Compare r against the cumulative probability return color_idx - r -= prob + + # This point should never be reached. + raise ValueError("Unexpected error in color_distribution.") diff --git a/democracy_sim/run.py b/democracy_sim/run.py index 0f14c16..2e6f0fa 100644 --- a/democracy_sim/run.py +++ b/democracy_sim/run.py @@ -9,7 +9,8 @@ class CustomModularServer(ModularServer): """ This is to prevent double initialization of the model. For some reason, the Server resets the model once on initialization - and again on server launch. """ + and again on server launch. 
+ """ def __init__(self, model_cls, visualization_elements, name="Mesa Model", model_params=None, port=None): self.initialized = False diff --git a/democracy_sim/social_welfare_functions.py b/democracy_sim/social_welfare_functions.py index 5712cf5..ff933f1 100644 --- a/democracy_sim/social_welfare_functions.py +++ b/democracy_sim/social_welfare_functions.py @@ -15,10 +15,13 @@ def complete_ranking(ranking: np.array, num_options: int): """ This function adds options that are not in the ranking in a random order. - :param ranking: The ranking to be completed with the missing options. - :param num_options: The total number of options. - ------- - :return: The completed ranking. + + Args: + ranking: The ranking to be completed with the missing options. + num_options: The total number of options. + + Returns: + The completed ranking. """ all_options = np.arange(num_options) mask = np.isin(all_options, ranking, invert=True) @@ -28,14 +31,17 @@ def complete_ranking(ranking: np.array, num_options: int): def run_tie_breaking_preparation_for_majority(pref_table, noise_factor=100): """ - This function prepares the preference table for majority rule such that - it handles ties in the voters' preferences. - Because majority rule cannot usually deal with ties. - The tie breaking is randomized to ensure anonymity and neutrality. - :param pref_table: The agent's preferences. - :param noise_factor: Influences the amount of noise to be added - to the preference table to break ties (higher is less noise). - :return: The preference table without ties for first choices. + This function prepares the preference table for majority rule such that + it handles ties in the voters' preferences. + Because majority rule cannot usually deal with ties. + The tie breaking is randomized to ensure anonymity and neutrality. + + Args: + pref_table: The agent's preferences. + noise_factor: Influences the amount of noise to be added + + Returns: + The preference table without ties for first choices. 
""" # Add some random noise to break ties (based on the variances) variances = np.var(pref_table, axis=1) @@ -68,10 +74,12 @@ def majority_rule(pref_table): This function implements the majority rule social welfare function. Beware: Input is a preference table (values define a ranking, index=option), but the output is a ranking/an ordering (values represent options). - :param pref_table: The agent's preferences (disagreement) as a NumPy matrix - containing the normalized ranking vectors of all agents. - The logic: lower rank = lower disagreement - :return: The resulting preference ranking (beware: its not a pref. relation) + + Args: + pref_table: The agent's preferences (disagreement) as a NumPy matrix + + Returns: + The resulting preference ranking (beware: its not a pref. relation) """ n, m = pref_table.shape # n agents, m options # Break ties if they exist @@ -109,9 +117,13 @@ def preprocessing_for_approval(pref_table, threshold=None): It also ensures that, on average, half of the options will be approved. The actual number of approved options, however, can still vary depending on the specific values in the preference table. - :param pref_table: The agent's preferences. - :param threshold: The threshold for approval. - :return: The preference table with the options approved or not. + + Args: + pref_table: The agent's preferences. + threshold: The threshold for approval. + + Returns: + The preference table with the options approved or not. """ if threshold is None: threshold = 1 / pref_table.shape[1] @@ -122,9 +134,12 @@ def imp_prepr_for_approval(pref_table): """ This is just like preprocessing_for_approval, but more intelligent. It sets the threshold depending on the variances. - :param pref_table: The agent's preferences. - Beware: the values are disagreements => smaller = less disagreement - :return: The preference table with the options approved or not. + + Args: + pref_table: The agent's preferences. 
+ + Returns: + The preference table with the options approved or not. """ # The threshold is set according to the variances threshold = np.mean(pref_table, axis=1) - np.var(pref_table, axis=1) @@ -138,10 +153,12 @@ def approval_voting(pref_table): This function implements the approval voting social welfare function. Beware: Input is a preference table (values define a ranking, index=option), but the output is a ranking/an ordering (values represent options). - :param pref_table: The agent's preferences (disagreement) as a NumPy matrix - containing the normalized ranking vectors of all agents. - The logic: lower rank = lower disagreement - :return: The resulting preference ranking (beware: not a pref. relation). + + Args: + pref_table: The agent's preferences (disagreement) as a NumPy matrix + + Returns: + The resulting preference ranking (beware: not a pref. relation). """ pref_table = imp_prepr_for_approval(pref_table) # Count how often each option is approved @@ -161,10 +178,12 @@ def continuous_score_voting(pref_table): This function implements a continuous score voting based on disagreement. Beware: Input is a preference table (values define a ranking, index=option), but the output is a ranking/an ordering (values represent options). - :param pref_table: The agent's preferences (disagreement) as a NumPy matrix - containing the normalized ranking vectors of all agents. - The logic: lower rank = lower disagreement - :return: The resulting preference ranking (beware: not a pref. relation). + + Args: + pref_table: The agent's preferences (disagreement) as a NumPy matrix + + Returns: + The resulting preference ranking (beware: not a pref. relation). """ # Sum up the disagreement for each option scores = np.sum(pref_table, axis=0) diff --git a/docs/de/QA.md b/docs/de/QA.md new file mode 100644 index 0000000..f3cbc1e --- /dev/null +++ b/docs/de/QA.md @@ -0,0 +1,91 @@ +### Warum ist die Granularität so grob, bzw. die Komplexität so niedrig? 
+
+Die Granularität ist weit weg von der Realität.
+Das ist Absicht und hat vor allem zwei Gründe.
+Zum einen ist die Arbeit noch sehr grundlegend,
+weil es in der Literatur zu simulativen Vergleichen von Wahlverfahren noch nicht viel gibt,
+auf dem eine höhere Komplexität oder Granularität aufgebaut werden kann.
+Zum zweiten schwindet mit einer höheren Komplexität/Granularität sehr schnell die Interpretierbarkeit
+(und möglicherweise auch Reproduzierbarkeit).
+In Simulationen können aber nicht selten schon anhand sehr einfacher Modelle unerwartete Effekte
+und Mechanismen auftauchen.
+Das ist auch hier die Hoffnung.
+
+
+### Was ist der Hauptkonflikt, den die Simulation untersucht?
+Der Hauptkonflikt ist die Teilnahme an der Wahl.
+Da die einzelne Agentin zunächst (auf kurze Sicht) i.d.R. nicht erwarten kann
+ihre assets durch die Teilnahme zu steigern (außer alle Agenten denken so
+und die Wahlbeteiligung ist entsprechend niedrig),
+hat sie einen Anreiz sich die Kosten zu sparen und auf die Teilnahme zu verzichten
+(nach dem Motto "meine Stimme macht eh keinen Unterschied, dann muss ich auch nicht abstimmen").
+Auf lange Sicht würde sich eine dauerhafte Nichtteilnahme aber vermutlich negativ für die Agentin auswirken,
+da zu erwarten ist, dass sich die Umgebung entgegen ihrer Interessen entwickelt.
+
+
+Ein weiterer Konflikt ist die Abstimmung der Agentin selbst,
+also ob sie vorrangig ihr Wissen für die tatsächliche Verteilung (der Farben) in die Abstimmung einbringt
+(und damit allen hilft), oder eher ihren Interessen (eigenen Präferenzen) nach abstimmt
+(um einerseits selbst einen höheren Anteil an der Belohnung zu bekommen
+und andererseits die Umgebung zu ihren Gunsten zu beeinflussen).
+Ob sie also eher "egoistisch" oder eher "altruistisch" bzw. "Gemeinwohl-orientiert" abstimmt.
+
+
+### Wonach wird optimiert? 
+Für die Partizipation gibt es hoffentlich kein leicht zu berechnendes Optimum,
+da eine Simulation sonst überflüssig wäre, also das müssen wir meinem Verständnis nach verhindern
+(in dem Fall müssten wir das Modell komplexer machen).
+Die Optimierungsfunktion für das Training der Agenten ist eine nicht ganz leicht zu lösende Aufgabe.
+Gut wäre, wenn es ausreichte, die eigene Belohnung zu maximieren,
+weil das i.d.R. die Standardannahme ist.
+Ob das ausreicht oder die Agenten-Modelle dann zu simpel werden, ist noch nicht klar.
+Auf jeden Fall dürfen die Agenten weder zu intelligent, noch zu simpel sein.
+Vor allem weder zu kurz, noch zu weitsichtig.
+Das dürfte aber nicht nur eine Frage der Optimierungsfunktion sein,
+sondern auch der genauen Ausgestaltung des Trainings und der Input-Variablen.
+Auf jeden Fall ist das Training sehr wahrscheinlich der heikelste Part.
+
+### Wie funktioniert die Ausschüttung der Belohnung(en)?
+1. Nähe des Konsenses an der "Realität":
+   Jede Agentin (nicht nur die Teilnehmenden) erhält eine Belohnung $b_1$,
+   welche von dem Ergebnis der Wahl abhängt.
+   Je näher das Ergebnis der Wahl (die durch die Wahl geschätzte Häufigkeitsreihenfolge der Feldfarben)
+   an der tatsächlichen Häufigkeitsreihenfolge der Feldfarben ist,
+   desto größer $b_1$.
+2. Nähe des Ergebnisses zur (fixen) persönlichen Präferenz (Persönlichkeit):
+   Jede Agentin bekommt eine Belohnung $b_2$
+   (wahrscheinlich mit $0 ≤ b_2 ≤ b_1$ oder sogar $-b_1 ≤ b_2 ≤ b_1$),
+   je nachdem wie gut das Ergebnis mit ihrer fixen persönlichen Präferenz
+   (also ihrer "Persönlichkeit", nicht der von ihr abgegebenen Präferenz) übereinstimmt.
+
+Dabei soll $b_1$ den Umstand abbilden, dass die Beteiligung an einer Wahl einen
+(zwar eigentlich in seiner Höhe sehr subjektiven, aber dennoch vorhandenen) Aufwand bedeutet.
+Und dass das Ergebnis bzw. die Folgen des Wahlausganges für alle Personen gleichermaßen gültig sind,
+egal ob diese an der Wahl teilgenommen haben oder nicht. 
+
+Durch $b_2$ soll die Tatsache abgebildet werden, dass die Agenten auch eigene Vorlieben oder Bedürfnisse haben,
+dass also das Ergebnis für sie persönlich lebensqualitätsbeeinflussend sein kann.
+Außerdem ermöglicht $b_2$ die konfliktive Situation,
+dass die Wählenden eine Abwägung zwischen einer eher nach persönlicher Präferenz geprägten Stimmabgabe
+und einer eher nach eigenem Wissen geprägten (tendenziell eher dem Gemeinwohl dienenden) Stimmabgabe treffen müssen.
+
+### Welche Wahlverfahren werden untersucht?
+Die Wahl (und Anzahl) der Wahlverfahren steht noch nicht ganz fest.
+Im Moment ist geplant die folgenden Wahlverfahren zu untersuchen:
+- "Plurality" als Standardverfahren
+- "Approval-Voting" da weitläufig als bestes Verfahren unter ComSoc-WissenschaftlerInnen angesehen
+- "Kemeny" (Ebenfalls oft als bestes Verfahren angesehen, allerdings NP-Schwer).
+
+Und möglicherweise noch einige Standardverfahren.
+Interessant wären auch "exotischere" (weniger gut mathematisch untersuchte oder verbreitete) Verfahren
+wie "Systemisches-Konsensieren", "liquid-democracy"
+oder repräsentative Wahlverfahren (Wahl eines Gremiums) zu untersuchen.
+
+### Weitere bzw. weiter führende Forschungsfragen
+Ebenfalls interessant wäre am Ende der Vergleiche zu untersuchen,
+wie sich die Simulation verändert, wenn stets ein fixer Anteil an Agenten zufällig bestimmt wird,
+um (kostenlos oder sogar mit Aufwandsentschädigung) an der Wahl teilzunehmen
+(anstelle einer Freiwilligkeit, welche mit Kosten verbunden ist).
+
+Des Weiteren könnte untersucht werden, was passiert, wenn Agenten zusätzliches "Wissen" (über Feldfarben) kaufen
+oder durch "laufen" bzw. springen "erkunden" können. 
diff --git a/docs/research/metrics.md b/docs/research/metrics.md new file mode 100644 index 0000000..7def853 --- /dev/null +++ b/docs/research/metrics.md @@ -0,0 +1,39 @@ +## Simulation Metrics / Indicators + +### **Participation Rate** *(Aggregate Behavioral Variable)* +- Measures the percentage of agents actively participating in elections at a given time. +- Helps evaluate the *participation dilemma* by analyzing participation across the group and comparing rates for majority vs. minority groups. + +### **Altruism Factor** *(Individual Behavioral Variable)* +- Quantifies the extent to which agents prioritize the **collective good** (e.g., the group's accuracy in guessing) over **individual preferences**, including cases of non-cooperation with a majority they belong to when it conflicts with the (expected) collective good. +- Additionally, tracking the average altruism factor of personality groups can provide insights, though this may be misleading if agents/groups do not participate. + +### **Gini Index** *(Inequality Metric)* +- Measures the inequality in asset distribution among agents within the system. +- Ranges from **0** (perfect equality) to **1** (maximum inequality, where one agent holds all assets). +- Offers insights into how electoral decisions impact wealth/resource distribution over time. + +### **Collective Accuracy** +- Measures how accurately the group, as a collective, estimates the actual color distribution. +- This directly influences rewards and serves as a metric for evaluating group performance against a ground truth. + +### **Diversity of Shared Opinions** +- Evaluates the variation in agents' expressed preferences. +- To track whether participating agents provide diverse input or converge on overly similar opinions (e.g., due to majority influence). 
+ +### **Distance to Optimum** +In principle, the optimal decision can be determined based on a predefined goal, allowing the distance between this optimum and the group's actual decision to be measured. + +**Possible predefined goals include:** + +1. **Utilitarian**: + - *Maximize the total sum of distributed rewards.* + - Focus on the *total reward*, regardless of how it is distributed. + +2. **Egalitarian**: + - *Minimize the overall inequality in individual rewards.* + - Focus on **fairness**, aiming for a more just distribution of rewards among members. + +3. **Rawlsian**: + - *Maximize the rewards for the poorest (personality-based) group.* + - Inspired by **John Rawls' Difference Principle**, the focus is on improving the well-being of the least advantaged group while tolerating inequalities elsewhere. diff --git a/docs/research/research_concept.md b/docs/research/research_concept.md new file mode 100644 index 0000000..0f78663 --- /dev/null +++ b/docs/research/research_concept.md @@ -0,0 +1,27 @@ +DemocracySim is set in a grid-based environment where agents interact with their surroundings and participate in group decision-making through elections. The system explores various scenarios and voting rules to understand key dynamics and challenges in democratic participation. + +## Key Features + +### Simulated Environment: +- The grid is designed without boundaries, and each unit (field) within it adopts one of **x** colors. Fields change color based on election results, with a mutation rate affected by prior outcomes. +- Groups of fields form **territories**, which serve as the basis for elections and influence grid evolution. + +### Agents: +- Agents are equipped with a basic artificial intelligence system and operate under a **"top-down" model**, learning decision-making strategies via training. +- Each agent has a **limited budget** and must decide whether to participate in elections. 
+- Agents have individual **preferences** over colors (called *personalities*) and are divided into **y** randomly distributed personality types. + *(The distribution of types forms majority-minority situations.)* + +### Elections and Rewards (Two Dilemmas): +1. **Elections:** + - Elections concern the frequency distribution of field colors in a given territory, representing an "objective truth" aimed at emulating wise group decisions. + - For an intuitive understanding, the election addresses the question: + *"What is — or should be — the current color distribution within your territory?"* + +2. **Rewards:** + - Rewards are distributed to all agents in the territory, regardless of participation (*participation dilemma*). + These rewards consist of: + - **Base reward:** Distributed equally based on how well agents guess the true color distribution. + - **Personal reward:** Allocated based on the alignment between election results and agent preferences, introducing a second dilemma: + - *Should agents vote selfishly (favoring their preferences) or vote with a focus on the group's accuracy (collective good)?* + diff --git a/docs/technical/api/Area.md b/docs/technical/api/Area.md new file mode 100644 index 0000000..e43d86c --- /dev/null +++ b/docs/technical/api/Area.md @@ -0,0 +1,7 @@ +# Class `Area` + +::: democracy_sim.participation_model.Area + +## Private Method + +::: democracy_sim.participation_model.Area._conduct_election diff --git a/docs/technical/api/ColorCell.md b/docs/technical/api/ColorCell.md new file mode 100644 index 0000000..7152413 --- /dev/null +++ b/docs/technical/api/ColorCell.md @@ -0,0 +1,3 @@ +# Class `ColorCell` + +::: democracy_sim.participation_agent.ColorCell \ No newline at end of file diff --git a/docs/technical/api/Model.md b/docs/technical/api/Model.md new file mode 100644 index 0000000..c8cb990 --- /dev/null +++ b/docs/technical/api/Model.md @@ -0,0 +1,3 @@ +# Class `ParticipationModel` + +::: 
democracy_sim.participation_model.ParticipationModel diff --git a/docs/technical/api/Utility_functions.md b/docs/technical/api/Utility_functions.md new file mode 100644 index 0000000..0f333ad --- /dev/null +++ b/docs/technical/api/Utility_functions.md @@ -0,0 +1,3 @@ +# Utility functions + +::: democracy_sim.participation_agent.combine_and_normalize \ No newline at end of file diff --git a/docs/technical/api/inherited.md b/docs/technical/api/inherited.md new file mode 100644 index 0000000..007aeff --- /dev/null +++ b/docs/technical/api/inherited.md @@ -0,0 +1,11 @@ +## Mesa Base Model Class + +:::mesa.Model + +--- +--- + +## Mesa Base Agent Class + +:::mesa.Agent + diff --git a/docs/technical/technical_overview.md b/docs/technical/technical_overview.md new file mode 100644 index 0000000..5d0fb3f --- /dev/null +++ b/docs/technical/technical_overview.md @@ -0,0 +1,31 @@ +# Technical overview + +**DemocracySim** is a multi-agent simulation framework designed to examine democratic participation. +This project models agents (with personal interests forming majority-minority groups), environments +(evolving under the influence of the collective behavior of the agents), +and elections to analyze how voting rules influence participation, +welfare, system dynamics and overall collective outcomes. + +Key features: + +- Multi-agent system simulation using **Mesa framework**. +- **Grid-based environment** with wrap-around support (toroidal topology). +- Explore societal outcomes under different voting rules. + +--- + +### Features +- **Agents**: + - Independently acting entities modeled with preferences, budgets, and decision-making strategies. + - Can participate in elections, have personal preferences and limited information about surroundings. + - Trained with decision-tree methods to simulate behavior. + +- **Environment**: + - Structured as a grid divided into "territories" or "areas." + - A single unit of the grid is a "cell" or "field." 
+ - Each cell has a specific "color" representing a state. Elections influence these states, and areas mutate over time. + +- **Metrics**: + - Participation rates, altruism factors, and metrics such as the Gini Index to analyze inequalities and long-term trends. + +Learn more in the following sections. \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index a86537d..6523426 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -19,17 +19,18 @@ nav: - Goals: research/goals.md - Technical: - Overview: technical/technical_overview.md - - Installation Instructions: technical/installation_instructions.md + #- Installation Instructions: technical/installation_instructions.md - API Reference: - Model (Environment): technical/api/Model.md - Area (Territory): technical/api/Area.md - Grid Cell: technical/api/ColorCell.md - Voting Agent: technical/api/VoteAgent.md - Inherited Classes: technical/api/inherited.md + - Utility Functions: technical/api/Utility_functions.md #- User Guide: technical/user_guide.md #1. Provide step-by-step guides for common project usage. #- Examples: technical/examples.md #1. Show key use cases via practical code examples or interactive demos. #- Developer Docs: technical/dev_docs.md #Offer guidelines for contributing or extending the project (e.g., folder structure, conventions, CI/CD pipelines). 
- - Architecture Overview: technical/architecture_overview.md + #- Architecture Overview: technical/architecture_overview.md #- Overview: overview.md #- Code: the_voting_process_step_by_step.md - Mesa: mesa_docs.md diff --git a/tests/factory.py b/tests/factory.py new file mode 100644 index 0000000..3379e66 --- /dev/null +++ b/tests/factory.py @@ -0,0 +1,31 @@ +from democracy_sim.participation_model import ParticipationModel + + +def create_default_model(**overrides): + """Create a ParticipationModel instance, with optional parameter overrides.""" + params = { + "height": 100, + "width": 80, + "num_agents": 800, + "num_colors": 3, + "num_personalities": 4, + "mu": 0.05, + "election_impact_on_mutation": 1.8, + "common_assets": 40000, + "known_cells": 10, + "num_areas": 16, + "av_area_height": 25, + "av_area_width": 20, + "area_size_variance": 0.0, + "patch_power": 1.0, + "color_patches_steps": 3, + "draw_borders": True, + "heterogeneity": 0.3, + "rule_idx": 1, + "distance_idx": 1, + "election_costs": 1, + "max_reward": 50, + "show_area_stats": False + } + params.update(overrides) + return ParticipationModel(**params) diff --git a/tests/test_color_by_dst.py b/tests/test_color_by_dst.py new file mode 100644 index 0000000..ce80cc0 --- /dev/null +++ b/tests/test_color_by_dst.py @@ -0,0 +1,65 @@ +import unittest +import numpy as np +from democracy_sim.participation_model import ParticipationModel + +class TestColorByDst(unittest.TestCase): + + def test_valid_output(self): + """Test that the function always returns a valid index.""" + color_distribution = np.array([0.2, 0.3, 0.5]) + for _ in range(1000): + result = ParticipationModel.color_by_dst(color_distribution) + self.assertIn(result, range(len(color_distribution)), + "Output index is out of range") + + def test_sum_to_one(self): + """Test that it correctly handles distributions summing to one.""" + color_distribution = np.array([0.1, 0.2, 0.1, 0.4, 0.2]) + for _ in range(50): + result = 
ParticipationModel.color_by_dst(color_distribution) + self.assertIn(result, range(len(color_distribution))) + + def test_single_color(self): + """Test that a single-color distribution always returns index 0.""" + color_distribution = np.array([1.0]) + for _ in range(10): + self.assertEqual( + ParticipationModel.color_by_dst(color_distribution), 0) + + def test_edge_cases(self): + """Test edge cases like a uniform distribution.""" + color_distribution = np.array([0.5, 0.5]) + results = [ParticipationModel.color_by_dst( + color_distribution) for _ in range(1000)] + unique, counts = np.unique(results, return_counts=True) + self.assertEqual(set(unique), {0, 1}, + "Function should only return 0 or 1") + self.assertGreater(int(counts[0]), 400, "Dst not uniform") + self.assertGreater(int(counts[1]), 400, "Dst not uniform") + + def test_invalid_distribution(self): + """Test that an invalid distribution raises an error.""" + with self.assertRaises(ValueError): # Negative probability + ParticipationModel.color_by_dst(np.array([-0.1, 0.3, 0.8])) + + with self.assertRaises(ValueError): # Doesn't sum to 1 + ParticipationModel.color_by_dst(np.array([0.2, 0.3])) + + with self.assertRaises(ValueError): # All zeros + ParticipationModel.color_by_dst(np.array([0.0, 0.0, 0.0])) + + def test_probability_distribution(self): + """Test if the function follows the given probability distribution.""" + color_distribution = np.array([0.2, 0.3, 0.5]) + num_samples = 10000 + results = [ParticipationModel.color_by_dst( + color_distribution) for _ in range(num_samples)] + + counts = np.bincount(results, + minlength=len(color_distribution)) / num_samples + err_message = "Generated samples do not match expected distribution" + np.testing.assert_almost_equal(counts, color_distribution, decimal=1, + err_msg=err_message) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_conduct_election.py b/tests/test_conduct_election.py new file mode 100644 index 0000000..38a883a --- /dev/null 
+++ b/tests/test_conduct_election.py @@ -0,0 +1,22 @@ +import unittest +from unittest.mock import MagicMock +from tests.factory import create_default_model + +# TODO add more complex tests + +class TestConductElection(unittest.TestCase): + def setUp(self): + self.model = create_default_model(num_areas=1) + self.model.initialize_area = MagicMock() + + def test_election_returns_integer_turnout(self): + area = self.model.areas[0] + turnout = area._conduct_election() + self.assertIsInstance(turnout, int) + + def test_no_participation_scenario(self): + for agent in self.model.voting_agents: + agent.assets = 0 + area = self.model.areas[0] + turnout = area._conduct_election() + self.assertEqual(turnout, 0) \ No newline at end of file diff --git a/tests/test_create_personalities.py b/tests/test_create_personalities.py new file mode 100644 index 0000000..98a90cd --- /dev/null +++ b/tests/test_create_personalities.py @@ -0,0 +1,58 @@ +import unittest +import numpy as np +from itertools import permutations +from tests.factory import create_default_model +from unittest.mock import MagicMock + + +class TestParticipationModel(unittest.TestCase): + + def setUp(self): + """Create a fresh model instance before each test and mock `initialize_area`.""" + self.model = create_default_model( + height=10, width=10, num_agents=100, num_colors=4, + num_personalities=10, area_size_variance=0.2, + num_areas=4, av_area_height=5, av_area_width=5, + heterogeneity=0.5, + ) + self.model.initialize_area = MagicMock() + + + def test_create_personalities_shape(self): + """Test that the generated personalities array has the correct shape.""" + for n_personalities in range(2, 15): + personalities = self.model.create_personalities(n_personalities) + self.assertEqual(personalities.shape, + (n_personalities, self.model.num_colors)) + + def test_create_personalities_uniqueness(self): + """Test that the generated personalities are unique.""" + n_personalities = 12 + personalities = 
self.model.create_personalities(n_personalities) + unique_personalities = set(map(tuple, personalities)) + self.assertEqual(len(unique_personalities), n_personalities) + + def test_create_personalities_max_limit(self): + """Test that the method raises an error when + n exceeds the total number of permutations.""" + assert self.model.num_colors == 4 # 4! = 24 unique permutations + n_personalities = 25 + with self.assertRaises(ValueError): + self.model.create_personalities(n_personalities) + + def test_create_personalities_minimum_input(self): + """Test that the method can handle generating a single personality.""" + personalities = self.model.create_personalities(1) + self.assertEqual(personalities.shape, (1, self.model.num_colors)) + + def test_create_personalities_full_permutation(self): + """Test that generating the full set of permutations does return all.""" + num_colors = self.model.num_colors + n_personalities = np.math.factorial(num_colors) + personalities = self.model.create_personalities(n_personalities) + expected_permutations = set(permutations(range(num_colors))) + self.assertEqual(set(map(tuple, personalities)), expected_permutations) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tests/test_distribute_rewards.py b/tests/test_distribute_rewards.py new file mode 100644 index 0000000..4197a3e --- /dev/null +++ b/tests/test_distribute_rewards.py @@ -0,0 +1,15 @@ +import unittest +from unittest.mock import MagicMock +from tests.factory import create_default_model + +class TestDistributeRewards(unittest.TestCase): + def setUp(self): + self.model = create_default_model(num_areas=1) + self.model.initialize_area = MagicMock() + + def test_distribute(self): + area = self.model.areas[0] + area._conduct_election() # Ensure there's a result + area._distribute_rewards() + for agent in area.agents: + self.assertGreaterEqual(agent.assets, 0) diff --git a/tests/test_initialize_all_areas.py 
b/tests/test_initialize_all_areas.py new file mode 100644 index 0000000..bea2faa --- /dev/null +++ b/tests/test_initialize_all_areas.py @@ -0,0 +1,97 @@ +import unittest +from unittest.mock import MagicMock +from numpy import sqrt +from tests.factory import create_default_model # Import from factory.py + + +class TestParticipationModelInitializeAllAreas(unittest.TestCase): + + def setUp(self): + """Create a fresh model instance before each test and mock `initialize_area`.""" + self.model = create_default_model( + num_areas=4, # Override num_areas to 4 + height=10, # Set grid height + width=10, # Set grid width + av_area_height=5, # Set average area height + av_area_width=5, # Set average area width + ) + self.model.initialize_area = MagicMock() # Mock `initialize_area` for side effect tracking + + def test_initialize_all_areas_uniform_distribution(self): + """Test that areas are initialized uniformly if num_areas is a perfect square.""" + + # Check if the areas are initialized in a roughly uniform grid-like pattern + expected_calls = [(0, 0), (5, 0), (0, 5), (5, 5)] + # Check if 4 areas were initialized + self.assertEqual(self.model.num_areas, 4) # Check num_areas==4 + idx_fields = [area.idx_field for area in self.model.areas] + # Collect idx_fields from all areas + for idx_field in idx_fields: + assert idx_field in expected_calls + + def test_initialize_all_areas_with_non_square_number(self): + """Test that the method handles non-square numbers by adding extra areas randomly.""" + model = create_default_model( + num_areas=5, # Override num_areas to 5 + ) + # model.initialize_all_areas() # Runs on initialization + # Check that 5 areas were initialized after calling the function + self.assertEqual(model.num_areas, 5) + + def test_initialize_all_areas_no_areas(self): + """Test that the method does nothing if num_areas is 0.""" + model = create_default_model( + num_areas=0, # Set num_areas to 0 + ) + assert model.num_areas == 0 # Verify no areas were initialized + + 
def test_initialize_all_areas_random_additional_areas(self): + """Test that additional areas are placed randomly if num_areas exceeds uniform grid capacity.""" + model = create_default_model( + num_areas=5, # Override num_areas to 5 + height=10, + width=10, + av_area_height=5, + av_area_width=5 + ) + + # Check that the number of initialized areas matches num_areas + self.assertEqual(model.num_areas, 5) # Check that exactly 5 areas are initialized + + # Check that at least one area was placed outside the uniform pattern + idx_fields = [area.idx_field for area in model.areas] + expected_calls = [(0, 0), (5, 0), (0, 5), (5, 5)] + random_area_detected = any( + idx_field not in expected_calls for idx_field in idx_fields + ) + self.assertTrue(random_area_detected) + + def test_initialize_all_areas_handles_non_square_distribution(self): + """Test that the number of areas matches `num_areas` even for non-square cases.""" + model = create_default_model( + num_areas=6, # Override num_areas to 6 + ) + # Check that exactly 6 areas are initialized + self.assertEqual(model.num_areas, 6) + + def test_initialize_all_areas_calculates_distances_correctly(self): + """Test that area distances are calculated correctly.""" + model = create_default_model( + num_areas=4, # Override num_areas to 4 + height=10, + width=10, + av_area_height=5, + av_area_width=5 + ) + # Calculate the expected distances + roo_apx = round(sqrt(model.num_areas)) + expected_distance_x = model.grid.width // roo_apx + expected_distance_y = model.grid.height // roo_apx + + # Check the calculated distances + self.assertEqual(expected_distance_x, 5) + self.assertEqual(expected_distance_y, 5) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/tests/test_set_dimensions.py b/tests/test_set_dimensions.py new file mode 100644 index 0000000..936218c --- /dev/null +++ b/tests/test_set_dimensions.py @@ -0,0 +1,28 @@ +import unittest +from tests.factory import create_default_model + 
+class TestSetDimensions(unittest.TestCase): + def setUp(self): + self.model = create_default_model( + num_areas=1, + height=10, + width=10, + av_area_height=5, + av_area_width=5, + area_size_variance=0 + ) + + def test_dimensions_no_variance(self): + area = self.model.areas[0] + self.assertEqual(area._width, 5) + self.assertEqual(area._height, 5) + + def test_dimensions_out_of_range(self): + with self.assertRaises(ValueError): + bad_model = create_default_model( + num_areas=1, + av_area_width=5, + av_area_height=5, + area_size_variance=2 + ) + _ = bad_model.areas[0] \ No newline at end of file diff --git a/tests/test_tally_votes.py b/tests/test_tally_votes.py new file mode 100644 index 0000000..e7f4eda --- /dev/null +++ b/tests/test_tally_votes.py @@ -0,0 +1,21 @@ +import unittest +import numpy as np +from unittest.mock import MagicMock +from tests.factory import create_default_model + +class TestTallyVotes(unittest.TestCase): + def setUp(self): + self.model = create_default_model(num_areas=1) + self.model.initialize_area = MagicMock() + + def test_tally_votes_array(self): + area = self.model.areas[0] + votes = area._tally_votes() + self.assertIsInstance(votes, np.ndarray) + + def test_tally_votes_empty(self): + for agent in self.model.voting_agents: + agent.assets = 0 + area = self.model.areas[0] + votes = area._tally_votes() + self.assertEqual(votes.size, 0) diff --git a/tests/test_update_color_distribution.py b/tests/test_update_color_distribution.py new file mode 100644 index 0000000..4e74f9e --- /dev/null +++ b/tests/test_update_color_distribution.py @@ -0,0 +1,23 @@ +import unittest +import numpy as np +from unittest.mock import MagicMock +from tests.factory import create_default_model + +class TestUpdateColorDistribution(unittest.TestCase): + def setUp(self): + self.model = create_default_model( + num_areas=1, + num_colors=3 + ) + self.model.initialize_area = MagicMock() + + def test_color_distribution(self): + area = self.model.areas[0] + old_dist = 
np.copy(area._color_distribution) + # Manually change some cell colors + for cell in area.cells[:3]: + cell.color = 1 + area._update_color_distribution() + new_dist = area._color_distribution + self.assertFalse(np.array_equal(old_dist, new_dist)) + self.assertAlmostEqual(np.sum(new_dist), 1.0, places=5) From 9acdade6aff84d757c18b703446a35bcab59cc5f Mon Sep 17 00:00:00 2001 From: jurikane Date: Mon, 16 Jun 2025 00:40:48 +0800 Subject: [PATCH 38/38] update README --- .gitignore | 4 ++ README.md | 81 +++++++++++++++------------------------- np_performance_test_1.py | 15 -------- np_performance_test_2.py | 64 ------------------------------- 4 files changed, 34 insertions(+), 130 deletions(-) delete mode 100644 np_performance_test_1.py delete mode 100644 np_performance_test_2.py diff --git a/.gitignore b/.gitignore index 3b9f4f3..d446339 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.* +!.gitignore /.idea .DS_Store __pycache__/ @@ -22,3 +24,5 @@ templates *.cache ai_info.txt convert_docstrings.py +TODO.txt +democracy_sim/simulation_output diff --git a/README.md b/README.md index 09752bf..111744b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![Pages](https://github.com/jurikane/DemocracySim/actions/workflows/ci.yml/badge.svg)](https://jurikane.github.io/DemocracySim/) [![pytest main](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml/badge.svg?branch=main)](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml) -[![codecov](https://codecov.io/gh/jurikane/DemocracySim/graph/badge.svg?token=QVNSXWIGNE)](https://codecov.io/gh/jurikane/DemocracySim) +[![codecov](https://codecov.io/gh/jurikane/DemocracySim/branch/main/graph/badge.svg)](https://codecov.io/gh/jurikane/DemocracySim) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [//]: # ([![pytest 
dev](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml/badge.svg?branch=dev)](https://github.com/jurikane/DemocracySim/actions/workflows/python-app.yml)) @@ -18,53 +18,32 @@ This project is kindly supported by [OpenPetition](https://osd.foundation). For details see the [documentation](https://jurikane.github.io/DemocracySim/) on GitHub-pages. -## Short overview in German - -• Multi-Agenten Simulation - - untersucht werden soll die Partizipation der Agenten an den Wahlen - - Auswirkung von verschiedenen Wahlverfahren auf die Partizipation - - Verlauf der Partizipation über die Zeit (Umgebungsänderung, Änderung der Vermögensverteilung, ...) - -• Umgebung: - - Gitterstruktur ohne Ränder - - jedes Viereck im Gitter ist ein Feld, eine Menge von (zusammenhängenden) Feldern ist ein Gebiet - - Jedes Feld im Gitter hat eine von vier Farben (r, g, b, w) diese verändert sich mit einer bestimmten - "Mutationsrate", die Änderung ist abhängig von den Ergebnissen der letzten Gruppenentscheidung - über ein Gebiet, welches das Feld einschließt (d.h. die Umgebung reagiert auf die Gruppenentscheidungen) - -• Agenten - - Intelligenz: top-down approach d.h. 
die Agenten bekommen durch ein training eine einfache KI - (auf Basis von Entscheidungsbäumen um das Verhalten nachvollziehen zu können) - - Haben bestimmtes Budget (Motivation) - - Entscheidungen (Agenten können): - - umliegende Felder erkunden ("sich bilden") - kostet - - an Wahlen teilnehmen - kostet - - abwarten - geringe kosten - - "Agentenpersönlichkeit": jede Agentin besitzt zwei Präferenzrelationen über die Farben r, g und b - - ⇨ 15 Agententypen (zufällig und normalverteilt) - - diese wirken sich auf die Belohnungen der Abstimmungsergebnisse aus - -• Wahlen - - Abgestimmt wird über die Häufigkeitsverteilung der 4 Farben im Wahlgebiet (objektive Wahrheit soll - "kluge Gruppenentscheidung" simulieren) - - Belohnung wird an alle Agenten im Wahlgebiet ausgeschüttet: - - je näher (kendal-tau Distanz) die abgestimmte Verteilung (Wahlergebnis) an der wahren Verteilung liegt, - umso größer die Belohnung - - eine Hälfte der Belohnung geht an alle zu gleichen Teilen - - zweite Hälfte wird entsprechend der "Agentenpersönlichkeit" (siehe oben) ausgeschüttet - - - ⇨ abstimmende Agenten im Zwiespalt: - - so abstimmen wie die Verteilung vermutlich wirklich ist (gute Entscheidung für alle - nach bestem wissen) - - oder eher egoistisch, sodass jetzt und in Zukunft möglichst viel an den Agenten selbst geht - (beachte: Das Ergebnis beeinflusst auch die zukünftige Verteilung im Gebiet) - -Interessante Fragen: -- machen die verschiedenen Wahlverfahren einen Unterschied und wenn ja welchen? -- wie verhalten sich Agententypen die in der Minderheit/Mehrheit sind? -- wie wirkt sich (nicht) Partizipation langfristig aus? -- wie wirkt sich die Verteilung von Vermögen auf die Partizipation aus und umgekehrt? -- welche Muster entstehen in den Gebieten (lokal, regional, global)? -Auch interessant: -- was passiert, wenn die Gruppe der abstimmenden Agenten zufällig ausgewählt wird -(„Bürgerräte“ also kostenlose oder vergütete Teilnahme von x% aller Agenten)? 
-(werden die Entscheidungen „besser“, wie verteilt sich der Wohlstand, …) +## Overview + +**DemocracySim** is a multi-agent simulation framework designed to study democratic participation +and group decision-making in a dynamic, evolving environment. +Agents interact within a grid-based world, form beliefs about their surroundings, +and vote in elections that influence both their individual outcomes and the state of the system. + +The environment consists of a toroidal grid of colored fields, where neighboring cells form territories. +Each territory holds regular elections in which agents vote on the observed color distribution. +The results of these elections not only influence how agents are rewarded +but also shape the environment itself through controlled mutation processes. + +Agents have limited resources and face decisions about whether to participate in elections, or remain inactive. +Each agent belongs to a personality type defined by preferences over the possible field colors, +with types distributed to create majority and minority dynamics. +During elections, agents face a strategic trade-off between voting for what benefits them personally +and voting for what they believe to be the most accurate representation of their territory—decisions +that impact both immediate rewards and the system’s future state. + +The simulation tracks a range of metrics including participation rates, collective accuracy, +reward inequality (Gini index), and behavioral indicators such as altruism and diversity of expressed opinions. +**DemocracySim** also allows for the evaluation of group performance under different normative goals—utilitarian, +egalitarian, or Rawlsian—by comparing actual outcomes to theoretically optimal decisions. 
+ +By modeling participation dilemmas, reward mechanisms, and personality-driven behavior, +**DemocracySim** provides a controlled environment for investigating how democratic systems +respond to different institutional rules and individual incentives. +It is intended both as a research tool and as a foundation for future explorations into deliberation, representation, +and fairness in collective choice. diff --git a/np_performance_test_1.py b/np_performance_test_1.py deleted file mode 100644 index d2d677a..0000000 --- a/np_performance_test_1.py +++ /dev/null @@ -1,15 +0,0 @@ -import time -import numpy as np -np.random.seed(42) -a = np.random.uniform(size=(300, 300)) -runtimes = 10 - -timecosts = [] -for _ in range(runtimes): - s_time = time.time() - for i in range(100): - a += 1 - np.linalg.svd(a) - timecosts.append(time.time() - s_time) - -print(f'mean of {runtimes} runs: {np.mean(timecosts):.5f}s') diff --git a/np_performance_test_2.py b/np_performance_test_2.py deleted file mode 100644 index 743283a..0000000 --- a/np_performance_test_2.py +++ /dev/null @@ -1,64 +0,0 @@ -# SOURCE: https://gist.github.com/markus-beuckelmann/8bc25531b11158431a5b09a45abd6276 - -import numpy as np -from time import time -from datetime import datetime - -start_time = datetime.now() - -# Let's take the randomness out of random numbers (for reproducibility) -np.random.seed(0) - -size = 4096 -A, B = np.random.random((size, size)), np.random.random((size, size)) -C, D = np.random.random((size * 128,)), np.random.random((size * 128,)) -E = np.random.random((int(size / 2), int(size / 4))) -F = np.random.random((int(size / 2), int(size / 2))) -F = np.dot(F, F.T) -G = np.random.random((int(size / 2), int(size / 2))) - -# Matrix multiplication -N = 20 -t = time() -for i in range(N): - np.dot(A, B) -delta = time() - t -print('Dotted two %dx%d matrices in %0.2f s.' 
% (size, size, delta / N)) -del A, B - -# Vector multiplication -N = 5000 -t = time() -for i in range(N): - np.dot(C, D) -delta = time() - t -print('Dotted two vectors of length %d in %0.2f ms.' % (size * 128, 1e3 * delta / N)) -del C, D - -# Singular Value Decomposition (SVD) -N = 3 -t = time() -for i in range(N): - np.linalg.svd(E, full_matrices = False) -delta = time() - t -print("SVD of a %dx%d matrix in %0.2f s." % (size / 2, size / 4, delta / N)) -del E - -# Cholesky Decomposition -N = 3 -t = time() -for i in range(N): - np.linalg.cholesky(F) -delta = time() - t -print("Cholesky decomposition of a %dx%d matrix in %0.2f s." % (size / 2, size / 2, delta / N)) - -# Eigendecomposition -t = time() -for i in range(N): - np.linalg.eig(G) -delta = time() - t -print("Eigendecomposition of a %dx%d matrix in %0.2f s." % (size / 2, size / 2, delta / N)) - -print('') -end_time = datetime.now() -print(f'TOTAL TIME = {(end_time - start_time).seconds} seconds')