From 062e2b9e687d6ae02315d3aaa418ef673f65f2a6 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Thu, 21 Jul 2022 06:50:02 +0100 Subject: [PATCH 01/13] a try --- examples/burgers/config.py | 38 ++++ examples/burgers/testing_cases.txt | 1 + examples/burgers_try/config.py | 38 ++++ examples/burgers_try/meshgen.py | 2 + examples/burgers_try/network.py | 43 +++++ examples/burgers_try/testing_cases.txt | 1 + examples/makefile | 5 + examples/models/burgers.py | 173 ++++++++++++++++++ examples/models/burgers_try.py | 200 +++++++++++++++++++++ nn_adapt/solving_time.py | 232 +++++++++++++++++++++++++ 10 files changed, 733 insertions(+) create mode 100644 examples/burgers/config.py create mode 100644 examples/burgers/testing_cases.txt create mode 100644 examples/burgers_try/config.py create mode 100644 examples/burgers_try/meshgen.py create mode 100644 examples/burgers_try/network.py create mode 100644 examples/burgers_try/testing_cases.txt create mode 100644 examples/models/burgers.py create mode 100644 examples/models/burgers_try.py create mode 100644 nn_adapt/solving_time.py diff --git a/examples/burgers/config.py b/examples/burgers/config.py new file mode 100644 index 0000000..18fcc5d --- /dev/null +++ b/examples/burgers/config.py @@ -0,0 +1,38 @@ +from models.burgers import * +from nn_adapt.ann import sample_uniform +import numpy as np + + +testing_cases = ["demo"] + + +def initialise(case, discrete=False): + """ + Given some training case (for which ``case`` + is an integer) or testing case (for which + ``case`` is a string), set up the physical + problems defining the Burgers problem. + + For training data, these values are chosen + randomly. + """ + parameters.case = case + parameters.discrete = discrete + if isinstance(case, int): + parameters.turbine_coords = [] + np.random.seed(100 * case) + + # Random initial speed from 0.01 m/s to 6 m/s + parameters.initial_speed = sample_uniform(0.01, 6.0) + + # Random viscosity from 0.00001 m^2/s to 1 m^2/s + parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1) + return + elif "demo" in case: + parameters.viscosity_coefficient = 0.0001 + parameters.initial_speed = 1.0 + else: + raise ValueError(f"Test case {case} not recognised") + + if "reversed" in case: + parameters.initial_speed *= -1 diff --git a/examples/burgers/testing_cases.txt b/examples/burgers/testing_cases.txt new file mode 100644 index 0000000..1549b67 --- /dev/null +++ b/examples/burgers/testing_cases.txt @@ -0,0 +1 @@ +demo diff --git a/examples/burgers_try/config.py b/examples/burgers_try/config.py new file mode 100644 index 0000000..c290e52 --- /dev/null +++ b/examples/burgers_try/config.py @@ -0,0 +1,38 @@ +from models.burgers_try import * +from nn_adapt.ann import sample_uniform +import numpy as np + + +testing_cases = ["demo"] + + +def initialise(case, discrete=False): + """ + Given some training case (for which ``case`` + is an integer) or testing case (for which + ``case`` is a string), set up the physical + problems defining the Burgers problem. + + For training data, these values are chosen + randomly. 
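+
+    A minimal usage sketch (assuming the module is imported
+    as ``config``)::
+
+        config.initialise(3)        # training case: seeded random parameters
+        config.initialise("demo")   # testing case: fixed demo parameters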
+ """ + parameters.case = case + parameters.discrete = discrete + if isinstance(case, int): + parameters.turbine_coords = [] + np.random.seed(100 * case) + + # Random initial speed from 0.01 m/s to 6 m/s + parameters.initial_speed = sample_uniform(0.01, 6.0) + + # Random viscosity from 0.00001 m^2/s to 1 m^2/s + parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1) + return + elif "demo" in case: + parameters.viscosity_coefficient = 0.0001 + parameters.initial_speed = 1.0 + else: + raise ValueError(f"Test case {case} not recognised") + + if "reversed" in case: + parameters.initial_speed *= -1 diff --git a/examples/burgers_try/meshgen.py b/examples/burgers_try/meshgen.py new file mode 100644 index 0000000..3467cea --- /dev/null +++ b/examples/burgers_try/meshgen.py @@ -0,0 +1,2 @@ +def generate_geo(config, reverse=False): + return diff --git a/examples/burgers_try/network.py b/examples/burgers_try/network.py new file mode 100644 index 0000000..b7f9907 --- /dev/null +++ b/examples/burgers_try/network.py @@ -0,0 +1,43 @@ +from nn_adapt.layout import NetLayoutBase + + +class NetLayout(NetLayoutBase): + """ + Default configuration + ===================== + + Input layer: + ------------ + [coarse-grained DWR] + + [viscosity coefficient] + + [element size] + + [element orientation] + + [element shape] + + [boundary element?] + + [12 forward DoFs per element] + + [12 adjoint DoFs per element] + = 30 + + Hidden layer: + ------------- + + 60 neurons + + Output layer: + ------------- + + [1 error indicator value] + """ + + inputs = ( + "estimator_coarse", + "physics_viscosity", + "mesh_d", + "mesh_h1", + "mesh_h2", + "mesh_bnd", + "forward_dofs", + "adjoint_dofs", + ) + num_hidden_neurons = 60 + dofs_per_element = 12 diff --git a/examples/burgers_try/testing_cases.txt b/examples/burgers_try/testing_cases.txt new file mode 100644 index 0000000..1549b67 --- /dev/null +++ b/examples/burgers_try/testing_cases.txt @@ -0,0 +1 @@ +demo diff --git a/examples/makefile b/examples/makefile index 061bea1..8a0e939 100644 --- a/examples/makefile +++ b/examples/makefile @@ -3,8 +3,13 @@ all: setup network convergence profile snapshot # --- Configurable parameters APPROACHES = anisotropic +<<<<<<< Updated upstream MODEL = turbine NUM_TRAINING_CASES = 100 +======= +MODEL = burgers +NUM_TRAINING_CASES = 1 +>>>>>>> Stashed changes TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all diff --git a/examples/models/burgers.py b/examples/models/burgers.py new file mode 100644 index 0000000..bc53a8d --- /dev/null +++ b/examples/models/burgers.py @@ -0,0 +1,173 @@ +from firedrake import * +from firedrake.petsc import PETSc +import nn_adapt.model +import nn_adapt.solving + + +class Parameters(nn_adapt.model.Parameters): + """ + Class encapsulating all parameters required for a simple + Burgers equation test case. + """ + + qoi_name = "right boundary integral" + qoi_unit = r"m\,s^{-1}" + + # Adaptation parameters + h_min = 1.0e-10 # Minimum metric magnitude + h_max = 1.0 # Maximum metric magnitude + + # Physical parameters + viscosity_coefficient = 0.0001 + initial_speed = 1.0 + + # Timestepping parameters + timestep = 0.05 + + solver_parameters = {} + adjoint_solver_parameters = {} + + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. 
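+
+        :arg mesh: the current :class:`MeshGeometry`
+        :return: a constant :math:`\mathbb P0` field equal to one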
+ """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def drag(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def viscosity(self, mesh): + """ + Compute the viscosity coefficient on the current `mesh`. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(self.viscosity_coefficient) + + def ic(self, mesh): + """ + Initial condition + """ + x, y = SpatialCoordinate(mesh) + expr = self.initial_speed * sin(pi * x) + return as_vector([expr, 0]) + + +PETSc.Sys.popErrorHandler() +parameters = Parameters() + + +def get_function_space(mesh): + r""" + Construct the :math:`\mathbb P2` finite element space + used for the prognostic solution. + """ + return VectorFunctionSpace(mesh, "CG", 2) + + +class Solver(nn_adapt.solving.Solver): + """ + Solver object based on current mesh and state. + """ + + def __init__(self, mesh, ic, **kwargs): + """ + :arg mesh: the mesh to define the solver on + :arg ic: the current state / initial condition + """ + self.mesh = mesh + + # Collect parameters + dt = Constant(parameters.timestep) + nu = parameters.viscosity(mesh) + + # Define variational formulation + V = self.function_space + self.u = Function(V) + self.u_ = Function(V) + v = TestFunction(V) + self._form = ( + inner((self.u - self.u_) / dt, v) * dx + + inner(dot(self.u, nabla_grad(self.u)), v) * dx + + nu * inner(grad(self.u), grad(v)) * dx + ) + problem = NonlinearVariationalProblem(self._form, self.u) + + # Set initial condition + self.u_.project(parameters.ic(mesh)) + + # Create solver + self._solver = NonlinearVariationalSolver(problem) + + @property + def function_space(self): + r""" + The :math:`\mathbb P2` finite element space. + """ + return get_function_space(self.mesh) + + @property + def form(self): + """ + The weak form of Burgers equation + """ + return self._form + + @property + def solution(self): + return self.u + + def iterate(self, **kwargs): + """ + Take a single timestep of Burgers equation + """ + self._solver.solve() + self.u_.assign(self.u) + + +def get_initial_condition(function_space): + """ + Compute an initial condition based on the initial + speed parameter. + """ + u = Function(function_space) + u.interpolate(parameters.ic(function_space.mesh())) + return u + + +def get_qoi(mesh): + """ + Extract the quantity of interest function from the :class:`Parameters` + object. + + It should have one argument - the prognostic solution. + """ + + def qoi(sol): + return inner(sol, sol) * ds(2) + + return qoi + + +# Initial mesh for all test cases +initial_mesh = UnitSquareMesh(30, 30) + +# a = Solver(mesh = initial_mesh, ic = 0, kwargs='0') +# a.iterate() +# b = a.solution +# import matplotlib.pyplot as plt +# fig, axes = plt.subplots() +# tricontourf(b, axes=axes) +# plt.show() diff --git a/examples/models/burgers_try.py b/examples/models/burgers_try.py new file mode 100644 index 0000000..b771552 --- /dev/null +++ b/examples/models/burgers_try.py @@ -0,0 +1,200 @@ +from firedrake import * +from firedrake.petsc import PETSc +import nn_adapt.model +import nn_adapt.solving + +''' +A memory hungry method solving time dependent PDE. +''' + +class Parameters(nn_adapt.model.Parameters): + """ + Class encapsulating all parameters required for a simple + Burgers equation test case. 
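+
+    Unlike the steady variant in ``models/burgers.py``, this class
+    also carries a step count (``steps``) alongside the timestep,
+    so that a solver can march the equation over a sequence of
+    meshes, one per timestep.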
+ """ + + qoi_name = "right boundary integral" + qoi_unit = r"m\,s^{-1}" + + # Adaptation parameters + h_min = 1.0e-10 # Minimum metric magnitude + h_max = 1.0 # Maximum metric magnitude + + # Physical parameters + viscosity_coefficient = 0.0001 + initial_speed = 1.0 + + # Timestepping parameters + timestep = 0.05 + steps = 20 + + solver_parameters = {} + adjoint_solver_parameters = {} + + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def drag(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def viscosity(self, mesh): + """ + Compute the viscosity coefficient on the current `mesh`. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(self.viscosity_coefficient) + + def ic(self, mesh): + """ + Initial condition + """ + x, y = SpatialCoordinate(mesh) + expr = self.initial_speed * sin(pi * x) + return as_vector([expr, 0]) + + +PETSc.Sys.popErrorHandler() +parameters = Parameters() + + +def get_function_space(mesh): + r""" + Construct the :math:`\mathbb P2` finite element space + used for the prognostic solution. + """ + return VectorFunctionSpace(mesh, "CG", 2) + + +class Solver(nn_adapt.solving.Solver): + """ + Solver object based on current mesh and state. + """ + + def __init__(self, meshes, ic, **kwargs): + """ + :arg mesh: the mesh to define the solver on + :arg ic: the current state / initial condition + """ + self.meshes = meshes + + # Collect parameters + self._step = 0 + self.total_steps = parameters.steps + self.dt = Constant(parameters.timestep) + assert len(meshes) == self.total_steps + + self.nu = [parameters.viscosity(meshes[i]) for i in range(self.total_steps)] + + # Define variational formulation + V = self.function_space + self.u = [Function(V[i]) for i in range(self.total_steps)] + self.u_ = [Function(V[i]) for i in range(self.total_steps)] + v = [TestFunction(V[i]) for i in range(self.total_steps)] + self._form = [( + inner((self.u[i] - self.u_[i]) / self.dt, v[i]) * dx + + inner(dot(self.u[i], nabla_grad(self.u[i])), v[i]) * dx + + self.nu[i] * inner(grad(self.u[i]), grad(v[i])) * dx + ) for i in range(self.total_steps)] + + # Set initial condition + self.u_[0].project(parameters.ic(meshes[0])) + + @property + def function_space(self): + r""" + The :math:`\mathbb P2` finite element space. 
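+
+        One space is constructed per mesh, so a list of length
+        ``total_steps`` is returned.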
+ """ + return [get_function_space(self.meshes[i]) for i in range(self.total_steps)] + + @property + def form(self): + """ + The weak form of Burgers equation + """ + return self._form + + @property + def solution(self): + return self.u + + @property + def step(self): + return self._step + + def iterate(self, **kwargs): + """ + Take a single timestep of Burgers equation + """ + for i in range(self.total_steps): + print(f"iter {self._step}") + + # Set problem + problem = NonlinearVariationalProblem(self._form[self._step], self.u[self._step]) + + # Create solver + self._solver = NonlinearVariationalSolver(problem) + self._solver.solve() + + # Set timestep + self._step += 1 + + if self._step+1 < self.total_steps: + self.u_[self._step+1].assign(self.u[self._step]) + + +def get_initial_condition(function_space): + """ + Compute an initial condition based on the initial + speed parameter. + """ + u = Function(function_space) + u.interpolate(parameters.ic(function_space.mesh())) + return u + + +def get_qoi(mesh): + """ + Extract the quantity of interest function from the :class:`Parameters` + object. + + It should have one argument - the prognostic solution. + """ + + def qoi(sol): + return inner(sol, sol) * ds(2) + + return qoi + + +# Initial mesh for all test cases +initial_mesh = [UnitSquareMesh(30, 30) for _ in range(20)] + + + +# A simple pretest +a = Solver(meshes = initial_mesh, ic = 0, kwargs='0') +a.iterate() +b = a.solution +import matplotlib.pyplot as plt +# fig, axes = plt.subplots(20) +# for i in range(20): +# tricontourf(b[i], axes=axes[i]) +fig, axes = plt.subplots() +tricontourf(b[10], axes=axes) +plt.show() diff --git a/nn_adapt/solving_time.py b/nn_adapt/solving_time.py new file mode 100644 index 0000000..98c6386 --- /dev/null +++ b/nn_adapt/solving_time.py @@ -0,0 +1,232 @@ +""" +Time dependent goal-oriented error estimation +""" +""" +Functions for solving problems defined by configuration +files and performing goal-oriented error estimation. +""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. + """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def get_solutions( + mesh, + config, + solve_adjoint=True, + refined_mesh=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. + Trying to work it out. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? 
+ :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + total_step = config.total_steps + + # Solve forward problem in base space + V = config.get_function_space(mesh) + out = {"times": {"forward": -perf_counter()}} + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.Solver(mesh, ic, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + J = config.get_qoi(mesh)(q) + qoi = assemble(J) + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + if convergence_checker.check_qoi(qoi): + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + q_star = Function(V) + F = solver_obj.form + dFdq = derivative(F, q, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + out["adjoint"] = q_star + out["times"]["adjoint"] += perf_counter() + if refined_mesh is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_mesh) + q_plus = Function(V) + solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) + q_plus = solver_obj.solution + J = config.get_qoi(refined_mesh)(q_plus) + F = solver_obj.form + tm.prolong(q, q_plus) + q_star_plus = Function(V) + dFdq = derivative(F, q_plus, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q_plus, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) + out["enriched_adjoint"] = q_star_plus + out["times"]["estimator"] += perf_counter() + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed :class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? 
+ :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + with PETSc.Log.Event("Enrichment"): + mesh, ref_mesh = MeshHierarchy(mesh, 1) + + # Solve the forward and adjoint problems + out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + + # Prolong + V_plus = adj_sol_plus.function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(adj_sol_plus) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. + + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr From 56354a53d5d18b22879184dea2fc9c6e4102fad7 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Mon, 25 Jul 2022 04:05:39 +0100 Subject: [PATCH 02/13] added memory-hungry solver and adjoint solver --- examples/a_test.py | 39 +++++++++ examples/makefile | 4 +- examples/models/burgers.py | 22 +++++ examples/models/burgers_try.py | 146 ++++++++++++++++++++++----------- examples/run_adapt.py | 7 ++ nn_adapt/solving.py | 1 + nn_adapt/solving_time.py | 83 ++++++++++++++----- 7 files changed, 232 insertions(+), 70 deletions(-) create mode 100644 examples/a_test.py diff --git a/examples/a_test.py b/examples/a_test.py new file mode 100644 index 0000000..72d4827 --- /dev/null +++ b/examples/a_test.py @@ -0,0 +1,39 @@ +from nn_adapt.features import * +from nn_adapt.metric import * +from nn_adapt.parse import Parser +from nn_adapt.solving_time import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt +from firedrake.petsc import PETSc + +import importlib +import numpy as np +from time import perf_counter + +import matplotlib.pyplot as plt + +tt_steps = 10 + +setup1 = importlib.import_module(f"burgers_try.config") +meshes = [UnitSquareMesh(30, 30) for _ in range(tt_steps)] +# meshes[5] = UnitSquareMesh(28, 30) + +out = get_time_solutions(meshes=meshes, config=setup1) +fig, axes = plt.subplots(10,2) + +for i in range(tt_steps): + tricontourf(out['forward'][i], axes=axes[i][0]) + tricontourf(out['adjoint'][i], axes=axes[i][1]) + +plt.savefig("test1.jpg") + +# mesh = UnitSquareMesh(30, 30) +# setup2 = importlib.import_module(f"burgers.config") +# 
out = get_solutions(mesh=mesh, config=setup2) +# fig, axes = plt.subplots(2) +# tricontourf(out['forward'], axes=axes[0]) +# tricontourf(out['adjoint'], axes=axes[1]) + +# plt.savefig("test2.jpg") + diff --git a/examples/makefile b/examples/makefile index 259ce67..98fe612 100644 --- a/examples/makefile +++ b/examples/makefile @@ -3,8 +3,8 @@ all: setup network test # --- Configurable parameters APPROACHES = anisotropic -MODEL = steady_turbine -NUM_TRAINING_CASES = 100 +MODEL = burgers +NUM_TRAINING_CASES = 1 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all diff --git a/examples/models/burgers.py b/examples/models/burgers.py index f6fdea0..1c70683 100644 --- a/examples/models/burgers.py +++ b/examples/models/burgers.py @@ -163,3 +163,25 @@ def qoi(sol): # Initial mesh for all test cases initial_mesh = UnitSquareMesh(30, 30) + +# # A simple pretest +# a = Solver(mesh = initial_mesh, ic = 0, kwargs='0') +# b = [] +# a.iterate() +# b.append(a.solution) +# a.iterate() +# b.append(a.solution) +# a.iterate() +# b.append(a.solution) + +# import matplotlib.pyplot as plt + +# # fig, axes = plt.subplots() +# # tricontourf(b, axes=axes) + +# fig, axes = plt.subplots(3) +# tricontourf(b[0], axes=axes[0]) +# tricontourf(b[1], axes=axes[1]) +# tricontourf(b[2], axes=axes[2]) + +# plt.show() diff --git a/examples/models/burgers_try.py b/examples/models/burgers_try.py index b771552..4cd9621 100644 --- a/examples/models/burgers_try.py +++ b/examples/models/burgers_try.py @@ -26,7 +26,7 @@ class Parameters(nn_adapt.model.Parameters): # Timestepping parameters timestep = 0.05 - steps = 20 + tt_steps = 10 solver_parameters = {} adjoint_solver_parameters = {} @@ -86,6 +86,66 @@ class Solver(nn_adapt.solving.Solver): Solver object based on current mesh and state. """ + def __init__(self, mesh, ic, **kwargs): + """ + :arg mesh: the mesh to define the solver on + :arg ic: the current state / initial condition + """ + self.mesh = mesh + + # Collect parameters + dt = Constant(parameters.timestep) + nu = parameters.viscosity(mesh) + + # Define variational formulation + V = self.function_space + u = Function(V) + u_ = Function(V) + v = TestFunction(V) + self._form = ( + inner((u - u_) / dt, v) * dx + + inner(dot(u, nabla_grad(u)), v) * dx + + nu * inner(grad(u), grad(v)) * dx + ) + problem = NonlinearVariationalProblem(self._form, u) + + # Set initial condition + u_.project(parameters.ic(mesh)) + + # Create solver + self._solver = NonlinearVariationalSolver(problem) + self._solution = u + + @property + def function_space(self): + r""" + The :math:`\mathbb P2` finite element space. + """ + return get_function_space(self.mesh) + + @property + def form(self): + """ + The weak form of Burgers equation + """ + return self._form + + @property + def solution(self): + return self._solution + + def iterate(self, **kwargs): + """ + Take a single timestep of Burgers equation + """ + self._solver.solve() + + +class time_dependent_Solver(nn_adapt.solving.Solver): + """ + Solver object based on current mesh and state. 
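+
+    A separate solution, lagged solution and variational form is
+    stored for every timestep (one per mesh), so memory usage grows
+    linearly with ``tt_steps``, which is what makes this approach
+    memory hungry.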
+ """ + def __init__(self, meshes, ic, **kwargs): """ :arg mesh: the mesh to define the solver on @@ -94,24 +154,25 @@ def __init__(self, meshes, ic, **kwargs): self.meshes = meshes # Collect parameters - self._step = 0 - self.total_steps = parameters.steps - self.dt = Constant(parameters.timestep) - assert len(meshes) == self.total_steps + self.tt_steps = parameters.tt_steps + dt = Constant(parameters.timestep) + assert self.tt_steps == len(self.meshes) - self.nu = [parameters.viscosity(meshes[i]) for i in range(self.total_steps)] + nu = [parameters.viscosity(meshes[i]) for i in range(self.tt_steps)] # Define variational formulation V = self.function_space - self.u = [Function(V[i]) for i in range(self.total_steps)] - self.u_ = [Function(V[i]) for i in range(self.total_steps)] - v = [TestFunction(V[i]) for i in range(self.total_steps)] + self.u = [Function(V[i]) for i in range(self.tt_steps)] + self.u_ = [Function(V[i]) for i in range(self.tt_steps)] + self.v = [TestFunction(V[i]) for i in range(self.tt_steps)] self._form = [( - inner((self.u[i] - self.u_[i]) / self.dt, v[i]) * dx - + inner(dot(self.u[i], nabla_grad(self.u[i])), v[i]) * dx - + self.nu[i] * inner(grad(self.u[i]), grad(v[i])) * dx - ) for i in range(self.total_steps)] - + inner((self.u[i] - self.u_[i]) / dt, self.v[i]) * dx + + inner(dot(self.u[i], nabla_grad(self.u[i])), self.v[i]) * dx + + nu[i] * inner(grad(self.u[i]), grad(self.v[i])) * dx + ) for i in range(self.tt_steps)] + + self._solution = [] + # Set initial condition self.u_[0].project(parameters.ic(meshes[0])) @@ -120,7 +181,7 @@ def function_space(self): r""" The :math:`\mathbb P2` finite element space. """ - return [get_function_space(self.meshes[i]) for i in range(self.total_steps)] + return [get_function_space(self.meshes[i]) for i in range(self.tt_steps)] @property def form(self): @@ -131,31 +192,21 @@ def form(self): @property def solution(self): - return self.u - - @property - def step(self): - return self._step + return self._solution def iterate(self, **kwargs): """ Take a single timestep of Burgers equation """ - for i in range(self.total_steps): - print(f"iter {self._step}") - - # Set problem - problem = NonlinearVariationalProblem(self._form[self._step], self.u[self._step]) - - # Create solver - self._solver = NonlinearVariationalSolver(problem) - self._solver.solve() + solve(self._form[0] == 0, self.u[0]) + self._solution.append(self.u[0]) + + for step in range(1, self.tt_steps): + self.u_[step].project(self.u[step-1]) - # Set timestep - self._step += 1 + solve(self._form[step] == 0, self.u[step]) + self._solution.append(self.u[step]) - if self._step+1 < self.total_steps: - self.u_[self._step+1].assign(self.u[self._step]) def get_initial_condition(function_space): @@ -182,19 +233,22 @@ def qoi(sol): return qoi -# Initial mesh for all test cases -initial_mesh = [UnitSquareMesh(30, 30) for _ in range(20)] +# # Initial mesh for all test cases +# initial_mesh = [UnitSquareMesh(30, 30), UnitSquareMesh(50, 30)] + + +# # A simple pretest +# a = time_dependent_Solver(meshes = initial_mesh, ic = 0, kwargs='0') +# a.iterate() +# b = a.solution +# import matplotlib.pyplot as plt +# # fig, axes = plt.subplots(20) +# # for i in range(20): +# # tricontourf(b[i], axes=axes[i]) +# fig, axes = plt.subplots(2) +# tricontourf(b[0], axes=axes[0]) +# tricontourf(b[1], axes=axes[1]) -# A simple pretest -a = Solver(meshes = initial_mesh, ic = 0, kwargs='0') -a.iterate() -b = a.solution -import matplotlib.pyplot as plt -# fig, axes = plt.subplots(20) -# for i in range(20): 
-# tricontourf(b[i], axes=axes[i]) -fig, axes = plt.subplots() -tricontourf(b[10], axes=axes) -plt.show() +# plt.show() diff --git a/examples/run_adapt.py b/examples/run_adapt.py index 69d9cd4..7cd14a0 100644 --- a/examples/run_adapt.py +++ b/examples/run_adapt.py @@ -16,6 +16,7 @@ import importlib import numpy as np from time import perf_counter +import matplotlib.pyplot as plt set_log_level(ERROR) @@ -96,6 +97,12 @@ if "metric" not in out: break adj_sol, dwr, metric = out["adjoint"], out["dwr"], out["metric"] + + fig, axes = plt.subplots(1,2) + tricontourf(fwd_sol, axes=axes[0]) + tricontourf(adj_sol, axes=axes[1]) + plt.savefig("out.jpg") + if not no_outputs: fwd_file.write(*fwd_sol.split()) adj_file.write(*adj_sol.split()) diff --git a/nn_adapt/solving.py b/nn_adapt/solving.py index b7a950c..258e4c0 100644 --- a/nn_adapt/solving.py +++ b/nn_adapt/solving.py @@ -123,6 +123,7 @@ def get_solutions( solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) out["adjoint"] = q_star out["times"]["adjoint"] += perf_counter() + if refined_mesh is None: return out diff --git a/nn_adapt/solving_time.py b/nn_adapt/solving_time.py index 98c6386..ffc4b13 100644 --- a/nn_adapt/solving_time.py +++ b/nn_adapt/solving_time.py @@ -63,8 +63,8 @@ def solution(self): pass -def get_solutions( - mesh, +def get_time_solutions( + meshes, config, solve_adjoint=True, refined_mesh=None, @@ -93,41 +93,80 @@ def get_solutions( and enriched adjoint solution (if requested) """ - total_step = config.total_steps + tt_steps = config.parameters.tt_steps # Solve forward problem in base space - V = config.get_function_space(mesh) + V = [config.get_function_space(meshes[step]) for step in range(tt_steps)] out = {"times": {"forward": -perf_counter()}} - with PETSc.Log.Event("Forward solve"): - if init is None: - ic = config.get_initial_condition(V) - else: - ic = init(V) - solver_obj = config.Solver(mesh, ic, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - J = config.get_qoi(mesh)(q) - qoi = assemble(J) + # with PETSc.Log.Event("Forward solve"): + # if init is None: + # ic = config.get_initial_condition(V) + # else: + # ic = init(V) + solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + qoi = [] + j_list = [] + for step in range(tt_steps): + J = config.get_qoi(meshes[step])(q[step]) + j_list.append(J) + qoi.append(assemble(J)) out["times"]["forward"] += perf_counter() out["qoi"] = qoi out["forward"] = q if convergence_checker is not None: - if convergence_checker.check_qoi(qoi): + cirt = 0 + for step in range(tt_steps): + if not convergence_checker.check_qoi(qoi[step]): + cirt = 1 + if cirt == 1: return out if not solve_adjoint: return out # Solve adjoint problem in base space out["times"]["adjoint"] = -perf_counter() - with PETSc.Log.Event("Adjoint solve"): - sp = config.parameters.adjoint_solver_parameters - q_star = Function(V) - F = solver_obj.form - dFdq = derivative(F, q, TrialFunction(V)) + # with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + F = solver_obj.form + adj_solution = [] + + + # q_star = Function(V[tt_steps-1]) + # dFdq = derivative(F[tt_steps-1], q[tt_steps-1], TrialFunction(V[tt_steps-1])) + # dFdq_transpose = adjoint(dFdq) + # dJdq = derivative(j_list[tt_steps-1], q[tt_steps-1], TestFunction(V[tt_steps-1])) + # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + # adj_solution.append(q_star) + + # for step in range(tt_steps-2, -1, -1): + # print(step) + # q_star_next = 
Function(V[step]) + # q_star_next.project(q_star) + + # q_star = Function(V[step]) + + # dFdq = derivative(F[step], q_star_next, TrialFunction(V[step])) + # dFdq_transpose = adjoint(dFdq) + # dJdq = derivative(j_list[step], q[step], TestFunction(V[step])) + # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + + # adj_solution.append(q_star) + + + for step in range(tt_steps-1, -1, -1): + + q_star = Function(V[step]) + + dFdq = derivative(F[step], q[step], TrialFunction(V[step])) dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q, TestFunction(V)) + dJdq = derivative(j_list[step], q[step], TestFunction(V[step])) solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - out["adjoint"] = q_star + + adj_solution.append(q_star) + + out["adjoint"] = adj_solution.reverse() out["times"]["adjoint"] += perf_counter() if refined_mesh is None: return out From 4429c223f497027b3ce306c319623799d01b9564 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Wed, 27 Jul 2022 02:03:00 +0100 Subject: [PATCH 03/13] adjoint solver successful --- build/lib/nn_adapt/__init__.py | 0 build/lib/nn_adapt/ann.py | 147 +++++++++++++++ build/lib/nn_adapt/features.py | 250 ++++++++++++++++++++++++++ build/lib/nn_adapt/layout.py | 61 +++++++ build/lib/nn_adapt/metric.py | 107 +++++++++++ build/lib/nn_adapt/model.py | 43 +++++ build/lib/nn_adapt/parse.py | 142 +++++++++++++++ build/lib/nn_adapt/plotting.py | 15 ++ build/lib/nn_adapt/solving.py | 227 +++++++++++++++++++++++ build/lib/nn_adapt/solving_time.py | 279 +++++++++++++++++++++++++++++ build/lib/nn_adapt/utility.py | 66 +++++++ examples/a_test.py | 2 +- examples/models/burgers_try.py | 99 ++++++---- nn_adapt/solving.py | 2 +- nn_adapt/solving_time.py | 67 +++---- 15 files changed, 1428 insertions(+), 79 deletions(-) create mode 100644 build/lib/nn_adapt/__init__.py create mode 100644 build/lib/nn_adapt/ann.py create mode 100644 build/lib/nn_adapt/features.py create mode 100644 build/lib/nn_adapt/layout.py create mode 100644 build/lib/nn_adapt/metric.py create mode 100644 build/lib/nn_adapt/model.py create mode 100644 build/lib/nn_adapt/parse.py create mode 100644 build/lib/nn_adapt/plotting.py create mode 100644 build/lib/nn_adapt/solving.py create mode 100644 build/lib/nn_adapt/solving_time.py create mode 100644 build/lib/nn_adapt/utility.py diff --git a/build/lib/nn_adapt/__init__.py b/build/lib/nn_adapt/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/nn_adapt/ann.py b/build/lib/nn_adapt/ann.py new file mode 100644 index 0000000..26d17ad --- /dev/null +++ b/build/lib/nn_adapt/ann.py @@ -0,0 +1,147 @@ +""" +Classes and functions related to using neural networks. +""" +import random +import numpy as np +import torch +from torch import nn + + +# Set device +if torch.cuda.device_count() > 0 and torch.cuda.is_available(): + dev = torch.cuda.current_device() + print(f"Cuda installed. Running on GPU {dev}.") + device = torch.device(f"cuda:{dev}") + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.enabled = True +else: + print("No GPU available.") + device = torch.device("cpu") + + +def set_seed(seed): + """ + Set all random seeds to a fixed value + + :arg seed: the seed value + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def sample_uniform(l, u): + """ + Sample from the continuous uniform + distribution :math:`U(l, u)`. 
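+
+    For example, the Burgers configuration draws a random initial
+    speed with ``sample_uniform(0.01, 6.0)``.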
+ + :arg l: the lower bound + :arg u: the upper bound + """ + return l + (u - l) * np.random.rand() + + +class SingleLayerFCNN(nn.Module): + """ + Fully Connected Neural Network (FCNN) + for goal-oriented metric-based mesh + adaptation with a single hidden layer. + """ + + def __init__(self, layout, preproc="arctan"): + """ + :arg layout: class instance inherited from + :class:`NetLayoutBase`, with numbers of + inputs, hidden neurons and outputs + specified. + :kwarg preproc: pre-processing function to + apply to the input data + """ + super().__init__() + + # Define preprocessing function + if preproc == "none": + self.preproc1 = lambda x: x + if preproc == "arctan": + self.preproc1 = torch.arctan + elif preproc == "tanh": + self.preproc1 = torch.tanh + elif preproc == "logabs": + self.preproc1 = lambda x: torch.log(torch.abs(x)) + else: + raise ValueError(f'Preprocessor "{preproc}" not recognised.') + + # Define layers + self.linear1 = nn.Linear(layout.num_inputs, layout.num_hidden_neurons) + self.linear2 = nn.Linear(layout.num_hidden_neurons, 1) + + # Define activation functions + self.activate1 = nn.Sigmoid() + + def forward(self, x): + p = self.preproc1(x) + z1 = self.linear1(p) + a1 = self.activate1(z1) + z2 = self.linear2(a1) + return z2 + + +def propagate(data_loader, model, loss_fn, optimizer=None): + """ + Propagate data from a :class:`DataLoader` object + through the neural network. + + If ``optimizer`` is not ``None`` then training is + performed. Otherwise, validation is performed. + + :arg data_loader: PyTorch :class:`DataLoader` instance + :arg model: PyTorch :class:`Module` instance + :arg loss_fn: PyTorch loss function instance + :arg optimizer: PyTorch optimizer instance + """ + num_batches = len(data_loader) + cumulative_loss = 0 + + for x, y in data_loader: + + # Compute prediction and loss + prediction = model(x.to(device)) + loss = loss_fn(prediction, y.to(device)) + cumulative_loss += loss.item() + + # Backpropagation + if optimizer is not None: + optimizer.zero_grad() + loss.backward() + optimizer.step() + + return cumulative_loss / num_batches + + +def collect_features(feature_dict, layout): + """ + Given a dictionary of feature arrays, stack their + data appropriately to be fed into a neural network. + + :arg feature_dict: dictionary containing feature data + :arg layout: :class:`NetLayout` instance + """ + features = {key: val for key, val in feature_dict.items() if key in layout.inputs} + dofs = [feature for key, feature in features.items() if "dofs" in key] + nodofs = [feature for key, feature in features.items() if "dofs" not in key] + return np.hstack((np.vstack(nodofs).transpose(), np.hstack(dofs))) + + +def Loss(): + """ + Custom loss function. + + Needed when there is only one output value. + """ + + def mse(output, target): + target = target.reshape(*output.shape) + return torch.nn.MSELoss(reduction="sum")(output, target) + + return mse diff --git a/build/lib/nn_adapt/features.py b/build/lib/nn_adapt/features.py new file mode 100644 index 0000000..f185f89 --- /dev/null +++ b/build/lib/nn_adapt/features.py @@ -0,0 +1,250 @@ +""" +Functions for extracting feature data from configuration +files, meshes and solution fields. 
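+
+The main entry points are :func:`extract_features`, which turns a
+forward/adjoint solution pair into a dictionary of per-element
+feature arrays, and :func:`get_values_at_elements`, which gathers
+the degrees of freedom associated with each element.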
+""" +import firedrake +from firedrake.petsc import PETSc +from firedrake import op2 +import numpy as np +from pyroteus.metric import * +import ufl +from nn_adapt.solving import dwr_indicator +from collections import Iterable + + +__all__ = ["extract_features", "get_values_at_elements"] + + +@PETSc.Log.EventDecorator("Extract components") +def extract_components(matrix): + r""" + Extract components of a matrix that describe its + size, orientation and shape. + + The latter two components are combined in such + a way that we avoid errors relating to arguments + zero and :math:`2\pi` being equal. + """ + density, quotients, evecs = density_and_quotients(matrix, reorder=True) + fs = density.function_space() + ar = firedrake.interpolate(ufl.sqrt(quotients[1]), fs) + armin = ar.vector().gather().min() + assert armin >= 1.0, f"An element has aspect ratio is less than one ({armin})" + theta = firedrake.interpolate(ufl.atan(evecs[1, 1] / evecs[1, 0]), fs) + h1 = firedrake.interpolate(ufl.cos(theta) ** 2 / ar + ufl.sin(theta) ** 2 * ar, fs) + h2 = firedrake.interpolate((1 / ar - ar) * ufl.sin(theta) * ufl.cos(theta), fs) + return density, h1, h2 + + +@PETSc.Log.EventDecorator("Extract elementwise") +def get_values_at_elements(f): + """ + Extract the values for all degrees of freedom associated + with each element. + + :arg f: some :class:`Function` + :return: a vector :class:`Function` holding all DoFs of `f` + """ + fs = f.function_space() + mesh = fs.mesh() + dim = mesh.topological_dimension() + if dim == 2: + assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" + elif dim == 3: + assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" + else: + raise ValueError(f"Dimension {dim} not supported") + el = fs.ufl_element() + if el.sub_elements() == []: + p = el.degree() + size = el.value_size() * (p + 1) * (p + 2) // 2 + else: + size = 0 + for sel in el.sub_elements(): + p = sel.degree() + size += sel.value_size() * (p + 1) * (p + 2) // 2 + P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size) + values = firedrake.Function(P0_vec) + kernel = "for (int i=0; i < vertexwise.dofs; i++) elementwise[i] += vertexwise[i];" + keys = {"vertexwise": (f, op2.READ), "elementwise": (values, op2.INC)} + firedrake.par_loop(kernel, ufl.dx, keys) + return values + + +@PETSc.Log.EventDecorator("Extract at centroids") +def get_values_at_centroids(f): + """ + Extract the values for the function at each element centroid, + along with all derivatives up to the :math:`p^{th}`, where + :math:`p` is the polynomial degree. 
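+
+    For a :math:`\mathbb P2` component, for instance, this yields
+    the centroid value, the two first derivative components and
+    the three unique second derivative components.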
+ + :arg f: some :class:`Function` + :return: a vector :class:`Function` holding all DoFs of `f` + """ + fs = f.function_space() + mesh = fs.mesh() + dim = mesh.topological_dimension() + if dim == 2: + assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" + elif dim == 3: + assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" + else: + raise ValueError(f"Dimension {dim} not supported") + el = fs.ufl_element() + if el.sub_elements() == []: + p = el.degree() + degrees = [p] + size = el.value_size() * (p + 1) * (p + 2) // 2 + funcs = [f] + else: + size = 0 + degrees = [sel.degree() for sel in el.sub_elements()] + for sel, p in zip(el.sub_elements(), degrees): + size += sel.value_size() * (p + 1) * (p + 2) // 2 + funcs = f + values = firedrake.Function(firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size)) + P0 = firedrake.FunctionSpace(mesh, "DG", 0) + P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0) + P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) + i = 0 + for func, p in zip(funcs, degrees): + values.dat.data[:, i] = firedrake.project(func, P0).dat.data_ro + i += 1 + if p == 0: + continue + g = firedrake.project(ufl.grad(func), P0_vec) + values.dat.data[:, i] = g.dat.data_ro[:, 0] + values.dat.data[:, i + 1] = g.dat.data_ro[:, 1] + i += 2 + if p == 1: + continue + H = firedrake.project(ufl.grad(ufl.grad(func)), P0_ten) + values.dat.data[:, i] = H.dat.data_ro[:, 0, 0] + values.dat.data[:, i + 1] = 0.5 * ( + H.dat.data_ro[:, 0, 1] + H.dat.data_ro[:, 1, 0] + ) + values.dat.data[:, i + 2] = H.dat.data_ro[:, 1, 1] + i += 3 + if p > 2: + raise NotImplementedError( + "Polynomial degrees greater than 2 not yet considered" + ) + return values + + +def split_into_scalars(f): + """ + Given a :class:`Function`, split it into + components from its constituent scalar + spaces. + + If it is not mixed then no splitting is + required. + + :arg f: the mixed :class:`Function` + :return: a dictionary containing the + nested structure of the mixed function + """ + V = f.function_space() + if V.value_size > 1: + if not isinstance(V.node_count, Iterable): + assert len(V.shape) == 1, "Tensor spaces not supported" + el = V.ufl_element() + fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) + return {0: [firedrake.interpolate(f[i], fs) for i in range(V.shape[0])]} + subspaces = [V.sub(i) for i in range(len(V.node_count))] + ret = {} + for i, (Vi, fi) in enumerate(zip(subspaces, f.split())): + if len(Vi.shape) == 0: + ret[i] = [fi] + else: + assert len(Vi.shape) == 1, "Tensor spaces not supported" + el = Vi.ufl_element() + fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) + ret[i] = [firedrake.interpolate(fi[j], fs) for j in range(Vi.shape[0])] + return ret + else: + return {0: [f]} + + +def extract_array(f, mesh=None, centroid=False, project=False): + r""" + Extract a cell-wise data array from a :class:`Constant` or + :class:`Function`. + + For constants and scalar fields, this will be an :math:`n\times 1` + array, where :math:`n` is the number of mesh elements. For a mixed + field with :math:`m` components, it will be :math:`n\times m`. 
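+
+    For example, a :class:`Constant` is broadcast to one (repeated)
+    value per element, while a mixed field contributes one column
+    per constituent scalar component.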
+ + :arg f: the :class:`Constant` or :class:`Function` + :kwarg mesh: the underlying :class:`MeshGeometry` + :kwarg project: if ``True``, project the field into + :math:`\mathbb P0` space + """ + mesh = mesh or f.ufl_domain() + if isinstance(f, firedrake.Constant): + ones = np.ones(mesh.num_cells()) + assert len(f.values()) == 1 + return f.values()[0] * ones + elif not isinstance(f, firedrake.Function): + raise ValueError(f"Unexpected input type {type(f)}") + if project: + if len(f.function_space().shape) > 0: + raise NotImplementedError("Can currently only project scalar fields") # TODO + element = f.ufl_element() + if (element.family(), element.degree()) != ("Discontinuous Lagrange", 0): + P0 = firedrake.FunctionSpace(mesh, "DG", 0) + f = firedrake.project(f, P0) + s = sum([fi for i, fi in split_into_scalars(f).items()], start=[]) + get = get_values_at_centroids if centroid else get_values_at_elements + if len(s) == 1: + return get(s[0]).dat.data + else: + return np.hstack([get(si).dat.data for si in s]) + + +@PETSc.Log.EventDecorator("Extract features") +def extract_features(config, fwd_sol, adj_sol): + """ + Extract features from the outputs of a run. + + :arg config: the configuration file + :arg fwd_sol: the forward solution + :arg adj_sol: the adjoint solution + :return: a list of feature arrays + """ + mesh = fwd_sol.function_space().mesh() + + # Coarse-grained DWR estimator + with PETSc.Log.Event("Extract estimator"): + dwr = dwr_indicator(config, mesh, fwd_sol, adj_sol) + + # Features describing the mesh element + with PETSc.Log.Event("Analyse element"): + P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) + + # Element size, orientation and shape + J = ufl.Jacobian(mesh) + JTJ = firedrake.interpolate(ufl.dot(ufl.transpose(J), J), P0_ten) + d, h1, h2 = (extract_array(p) for p in extract_components(JTJ)) + + # Is the element on the boundary? + p0test = firedrake.TestFunction(dwr.function_space()) + bnd = firedrake.assemble(p0test * ufl.ds).dat.data + + # Combine the features together + features = { + "estimator_coarse": extract_array(dwr), + "physics_drag": extract_array(config.parameters.drag(mesh)), + "physics_viscosity": extract_array(config.parameters.viscosity(mesh), project=True), + "physics_bathymetry": extract_array(config.parameters.bathymetry(mesh), project=True), + "mesh_d": d, + "mesh_h1": h1, + "mesh_h2": h2, + "mesh_bnd": bnd, + "forward_dofs": extract_array(fwd_sol, centroid=True), + "adjoint_dofs": extract_array(adj_sol, centroid=True), + } + for key, value in features.items(): + assert not np.isnan(value).any() + return features diff --git a/build/lib/nn_adapt/layout.py b/build/lib/nn_adapt/layout.py new file mode 100644 index 0000000..060502f --- /dev/null +++ b/build/lib/nn_adapt/layout.py @@ -0,0 +1,61 @@ +""" +Classes for defining the layout of a neural network. +""" + + +class NetLayoutBase(object): + """ + Base class for specifying the number + of inputs, hidden neurons and outputs + in a neural network. + + The derived class should give values + for each of these parameters. 
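+
+    For example, the Burgers layout added in this patch reduces to
+    the following (inputs abridged)::
+
+        class NetLayout(NetLayoutBase):
+            inputs = ("estimator_coarse", "physics_viscosity", ...)
+            num_hidden_neurons = 60
+            dofs_per_element = 12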
+ """ + + # TODO: Allow more general networks + + colours = { + "estimator": "b", + "physics": "C0", + "mesh": "deepskyblue", + "forward": "mediumturquoise", + "adjoint": "mediumseagreen", + } + + def __init__(self): + if not hasattr(self, "inputs"): + raise ValueError("Need to set self.inputs") + colours = set(self.colours.keys()) + for i in self.inputs: + okay = False + for c in colours: + if i.startswith(c): + okay = True + break + if not okay: + raise ValueError("Input names must begin with one of {colours}") + if not hasattr(self, "num_hidden_neurons"): + raise ValueError("Need to set self.num_hidden_neurons") + if not hasattr(self, "dofs_per_element"): + raise ValueError("Need to set self.dofs_per_element") + + def count_inputs(self, prefix): + """ + Count all scalar inputs that start with a given `prefix`. + """ + cnt = 0 + for i in self.inputs: + if i.startswith(prefix): + if i in ("forward_dofs", "adjoint_dofs"): + cnt += self.dofs_per_element + else: + cnt += 1 + return cnt + + @property + def num_inputs(self): + """ + The total number of scalar inputs. + """ + return self.count_inputs("") diff --git a/build/lib/nn_adapt/metric.py b/build/lib/nn_adapt/metric.py new file mode 100644 index 0000000..22ed97e --- /dev/null +++ b/build/lib/nn_adapt/metric.py @@ -0,0 +1,107 @@ +""" +Functions for generating Riemannian metrics from solution +fields. +""" +from pyroteus import * +from nn_adapt.features import split_into_scalars +from nn_adapt.solving import * +from firedrake.meshadapt import RiemannianMetric +from time import perf_counter + + +def get_hessians(f, **kwargs): + """ + Compute Hessians for each component of + a :class:`Function`. + + Any keyword arguments are passed to + ``recover_hessian``. + + :arg f: the function + :return: list of Hessians of each + component + """ + kwargs.setdefault("method", "Clement") + return [ + space_normalise(hessian_metric(recover_hessian(fij, **kwargs)), 4000.0, "inf") + for i, fi in split_into_scalars(f).items() + for fij in fi + ] + + +def go_metric( + mesh, + config, + enrichment_method="h", + target_complexity=4000.0, + average=True, + interpolant="Clement", + anisotropic=False, + retall=False, + convergence_checker=None, + **kwargs, +): + """ + Compute an anisotropic goal-oriented + metric field, based on a mesh and + a configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? + :kwarg target_complexity: target complexity + of the goal-oriented metric + :kwarg average: should the Hessian components + be combined using averaging (or intersection)? + :kwarg interpolant: which method to use to + interpolate into the target space? + :kwarg anisotropic: toggle isotropic vs. 
+ anisotropic metric + :kwarg h_min: minimum magnitude + :kwarg h_max: maximum magnitude + :kwarg a_max: maximum anisotropy + :kwarg retall: if ``True``, the error indicator, + forward solution and adjoint solution + are returned, in addition to the metric + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + """ + h_min = kwargs.pop("h_min", 1.0e-30) + h_max = kwargs.pop("h_max", 1.0e+30) + a_max = kwargs.pop("a_max", 1.0e+30) + out = indicate_errors( + mesh, + config, + enrichment_method=enrichment_method, + retall=True, + convergence_checker=convergence_checker, + **kwargs, + ) + if retall and "adjoint" not in out: + return out + out["estimator"] = out["dwr"].vector().gather().sum() + if convergence_checker is not None: + if convergence_checker.check_estimator(out["estimator"]): + return out + + out["times"]["metric"] = -perf_counter() + with PETSc.Log.Event("Metric construction"): + if anisotropic: + hessian = combine_metrics(*get_hessians(out["forward"]), average=average) + else: + hessian = None + metric = anisotropic_metric( + out["dwr"], + hessian=hessian, + target_complexity=target_complexity, + target_space=TensorFunctionSpace(mesh, "CG", 1), + interpolant=interpolant, + ) + space_normalise(metric, target_complexity, "inf") + enforce_element_constraints(metric, h_min, h_max, a_max) + out["metric"] = RiemannianMetric(mesh) + out["metric"].assign(metric) + out["times"]["metric"] += perf_counter() + return out if retall else out["metric"] diff --git a/build/lib/nn_adapt/model.py b/build/lib/nn_adapt/model.py new file mode 100644 index 0000000..f3587f9 --- /dev/null +++ b/build/lib/nn_adapt/model.py @@ -0,0 +1,43 @@ +import abc + + +class Parameters(abc.ABC): + """ + Abstract base class defining the API for parameter + classes that describe PDE models. + """ + + def __init__(self): + self.case = None + if not hasattr(self, "qoi_name"): + raise NotImplementedError("qoi_name attribute must be set") + if not hasattr(self, "qoi_unit"): + raise NotImplementedError("qoi_unit attribute must be set") + + @abc.abstractmethod + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + """ + pass + + @abc.abstractmethod + def drag(self, mesh): + """ + Compute the drag coefficient on the current `mesh`. + """ + pass + + @abc.abstractmethod + def viscosity(self, mesh): + """ + Compute the viscosity coefficient on the current `mesh`. + """ + pass + + @abc.abstractmethod + def ic(self, mesh): + """ + Compute the initial condition on the current `mesh`. 
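+
+        Implementations typically return a UFL expression, to be
+        interpolated or projected into the prognostic space.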
+ """ + pass diff --git a/build/lib/nn_adapt/parse.py b/build/lib/nn_adapt/parse.py new file mode 100644 index 0000000..7565690 --- /dev/null +++ b/build/lib/nn_adapt/parse.py @@ -0,0 +1,142 @@ +import argparse +import git +import numpy as np + + +__all__ = ["Parser"] + + +def _check_in_range(value, typ, l, u): + tvalue = typ(value) + if not (tvalue >= l and tvalue <= u): + raise argparse.ArgumentTypeError(f"{value} is not in [{l}, {u}]") + return tvalue + + +def _check_strictly_in_range(value, typ, l, u): + tvalue = typ(value) + if not (tvalue >= l and tvalue <= u): + raise argparse.ArgumentTypeError(f"{value} is not in ({l}, {u})") + return tvalue + + +nonnegative_float = lambda value: _check_in_range(value, float, 0, np.inf) +nonnegative_int = lambda value: _check_in_range(value, int, 0, np.inf) +positive_float = lambda value: _check_strictly_in_range(value, float, 0, np.inf) +positive_int = lambda value: _check_strictly_in_range(value, int, 0, np.inf) + + +def bounded_float(l, u): + def chk(value): + return _check_in_range(value, float, l, u) + + return chk + + +def bounded_int(l, u): + def chk(value): + return _check_in_range(value, int, l, u) + + return chk + + +class Parser(argparse.ArgumentParser): + """ + Custom :class:`ArgumentParser` for `nn_adapt`. + """ + + def __init__(self, prog): + super().__init__( + self, prog, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + self.add_argument("model", help="The model", type=str) + self.add_argument("test_case", help="The configuration file number or name") + self.add_argument( + "--optimise", + help="Turn off plotting and debugging", + action="store_true", + ) + + def parse_convergence_criteria(self): + self.add_argument( + "--miniter", + help="Minimum number of iterations", + type=positive_int, + default=3, + ) + self.add_argument( + "--maxiter", + help="Maximum number of iterations", + type=positive_int, + default=35, + ) + self.add_argument( + "--qoi_rtol", + help="Relative tolerance for QoI", + type=positive_float, + default=0.001, + ) + self.add_argument( + "--element_rtol", + help="Element count tolerance", + type=positive_float, + default=0.001, + ) + self.add_argument( + "--estimator_rtol", + help="Error estimator tolerance", + type=positive_float, + default=0.001, + ) + + def parse_num_refinements(self, default=4): + self.add_argument( + "--num_refinements", + help="Number of mesh refinements", + type=positive_int, + default=default, + ) + + def parse_approach(self): + self.add_argument( + "-a", + "--approach", + help="Adaptive approach to consider", + choices=["isotropic", "anisotropic"], + default="anisotropic", + ) + self.add_argument( + "--transfer", + help="Transfer the solution from the previous mesh as initial guess", + action="store_true", + ) + + def parse_target_complexity(self): + self.add_argument( + "--base_complexity", + help="Base metric complexity", + type=positive_float, + default=200.0, + ) + self.add_argument( + "--target_complexity", + help="Target metric complexity", + type=positive_float, + default=4000.0, + ) + + def parse_preproc(self): + self.add_argument( + "--preproc", + help="Data preprocess function", + type=str, + choices=["none", "arctan", "tanh", "logabs"], + default="arctan", + ) + + def parse_tag(self): + self.add_argument( + "--tag", + help="Model tag (defaults to current git commit sha)", + default=git.Repo(search_parent_directories=True).head.object.hexsha, + ) diff --git a/build/lib/nn_adapt/plotting.py b/build/lib/nn_adapt/plotting.py new file mode 100644 index 0000000..3822c3a 
--- /dev/null +++ b/build/lib/nn_adapt/plotting.py @@ -0,0 +1,15 @@ +""" +Configuration for plotting. +""" +import matplotlib +import matplotlib.pyplot as plt # noqa + + +matplotlib.rc("text", usetex=True) +matplotlib.rcParams["mathtext.fontset"] = "custom" +matplotlib.rcParams["mathtext.rm"] = "Bitstream Vera Sans" +matplotlib.rcParams["mathtext.it"] = "Bitstream Vera Sans:italic" +matplotlib.rcParams["mathtext.bf"] = "Bitstream Vera Sans:bold" +matplotlib.rcParams["mathtext.fontset"] = "stix" +matplotlib.rcParams["font.family"] = "STIXGeneral" +matplotlib.rcParams["font.size"] = 12 diff --git a/build/lib/nn_adapt/solving.py b/build/lib/nn_adapt/solving.py new file mode 100644 index 0000000..1ba4ee9 --- /dev/null +++ b/build/lib/nn_adapt/solving.py @@ -0,0 +1,227 @@ +""" +Functions for solving problems defined by configuration +files and performing goal-oriented error estimation. +""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. + """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def get_solutions( + mesh, + config, + solve_adjoint=True, + refined_mesh=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? 
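# Minimal concrete subclass of the Solver ABC above, sketched as a steady
# Poisson problem to show the expected API surface (all names here are
# illustrative, not from the repo):
from firedrake import *

class PoissonSolver(Solver):
    def __init__(self, mesh, ic, **kwargs):
        self._V = FunctionSpace(mesh, "CG", 1)
        self._q = Function(self._V, name="Solution")
        v = TestFunction(self._V)
        self._F = inner(grad(self._q), grad(v)) * dx - Constant(1.0) * v * dx

    @property
    def function_space(self):
        return self._V

    @property
    def form(self):
        return self._F

    def iterate(self, **kwargs):
        bcs = DirichletBC(self._V, 0, "on_boundary")
        solve(self._F == 0, self._q, bcs=bcs)

    @property
    def solution(self):
        return self._q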
+ :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + # Solve forward problem in base space + V = config.get_function_space(mesh) + out = {"times": {"forward": -perf_counter()}} + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.Solver(mesh, ic, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + J = config.get_qoi(mesh)(q) + qoi = assemble(J) + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + if convergence_checker.check_qoi(qoi): + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + q_star = Function(V) + F = solver_obj.form + dFdq = derivative(F, q, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + out["adjoint"] = q_star + out["times"]["adjoint"] += perf_counter() + + if refined_mesh is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_mesh) + q_plus = Function(V) + solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) + q_plus = solver_obj.solution + J = config.get_qoi(refined_mesh)(q_plus) + F = solver_obj.form + tm.prolong(q, q_plus) + q_star_plus = Function(V) + dFdq = derivative(F, q_plus, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q_plus, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) + out["enriched_adjoint"] = q_star_plus + out["times"]["estimator"] += perf_counter() + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed :class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? 
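# The two adjoint solves in get_solutions above follow the standard
# discrete-adjoint pattern: for a residual F(q) = 0 and QoI J(q), the
# adjoint q* satisfies
#     (dF/dq)^T q* = dJ/dq,
# which is exactly what adjoint(derivative(F, q, TrialFunction(V))) paired
# with derivative(J, q, TestFunction(V)) assembles. The enriched-space
# variant repeats this after prolonging q via the TransferManager, giving
# the higher-order adjoint used for error estimation.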
+ :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + with PETSc.Log.Event("Enrichment"): + mesh, ref_mesh = MeshHierarchy(mesh, 1) + + # Solve the forward and adjoint problems + out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + + # Prolong + V_plus = adj_sol_plus.function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(adj_sol_plus) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. + + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr diff --git a/build/lib/nn_adapt/solving_time.py b/build/lib/nn_adapt/solving_time.py new file mode 100644 index 0000000..bc5bb82 --- /dev/null +++ b/build/lib/nn_adapt/solving_time.py @@ -0,0 +1,279 @@ +""" +Time dependent goal-oriented error estimation +""" +""" +Functions for solving problems defined by configuration +files and performing goal-oriented error estimation. +""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. + """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def get_time_solutions( + meshes, + config, + solve_adjoint=True, + refined_mesh=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. + Trying to work it out. 
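# Dual-weighted residual recap for indicate_errors/dwr_indicator above: with
# forward solution u_h and enriched adjoint u*_+, the QoI error satisfies
#     J(u) - J(u_h) ~= R(u_h; u*_+ - P u*_h),
# where P prolongs the coarse adjoint into the enriched space. The code
# realises the weight u*_+ - P u*_h as adj_error, hands it to
# get_dwr_indicator together with the residual form, and projects the
# resulting indicator back onto the P0 space of the base mesh.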
+ + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? + :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + tt_steps = config.parameters.tt_steps + + # Solve forward problem in base space + V = [config.get_function_space(meshes[step]) for step in range(tt_steps)] + out = {"times": {"forward": -perf_counter()}} + # with PETSc.Log.Event("Forward solve"): + # if init is None: + # ic = config.get_initial_condition(V) + # else: + # ic = init(V) + solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + qoi = [] + j_list = [] + for step in range(tt_steps): + J = config.get_qoi(meshes[step])(q[step]) + j_list.append(J) + qoi.append(assemble(J)) + # qoi.append(assemble(J)) + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + cirt = 0 + for step in range(tt_steps): + if not convergence_checker.check_qoi(qoi[step]): + cirt = 1 + if cirt == 1: + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + # with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + F = solver_obj.form + adj_solution = [] + + + # q_star = Function(V[tt_steps-1]) + # dFdq = derivative(F[tt_steps-1], q[tt_steps-1], TrialFunction(V[tt_steps-1])) + # dFdq_transpose = adjoint(dFdq) + # dJdq = derivative(j_list[tt_steps-1], q[tt_steps-1], TestFunction(V[tt_steps-1])) + # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + # adj_solution.append(q_star) + + # for step in range(tt_steps-2, -1, -1): + # print(step) + # q_star_next = Function(V[step]) + # q_star_next.project(q_star) + + # q_star = Function(V[step]) + + # dFdq = derivative(F[step], q_star_next, TrialFunction(V[step])) + # dFdq_transpose = adjoint(dFdq) + # dJdq = derivative(j_list[step], q[step], TestFunction(V[step])) + # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + + # adj_solution.append(q_star) + + + for step in range(tt_steps-1, -1, -1): + + q_star = Function(V[step]) + + q_mask = Function(V[step]) + q_mask.project(q[tt_steps-1]) + + dFdq = derivative(F[step], q[step], TrialFunction(V[step])) + dFdq_transpose = adjoint(dFdq) + + J_cmesh = Function(V[step]) + J_cmesh.project(J) + + dJdq = derivative(J_cmesh, q[step], TestFunction(V[step])) + solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + + adj_solution.append(q_star) + + out["adjoint"] = adj_solution + out["times"]["adjoint"] += perf_counter() + if refined_mesh is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_mesh) + q_plus = Function(V) + solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) + q_plus = solver_obj.solution + J = config.get_qoi(refined_mesh)(q_plus) + F = solver_obj.form + tm.prolong(q, q_plus) + q_star_plus = Function(V) + dFdq = derivative(F, q_plus, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q_plus, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) + 
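# Note on the backward sweep above: a genuinely time-dependent adjoint runs
# from the final step down to step 0, feeding each solve the adjoint of the
# *following* step as terminal data -- the pattern sketched in the
# commented-out block. The active loop instead re-projects J at every step,
# so the per-step solves are decoupled; this matches the work-in-progress
# caveat in the docstring ("Trying to work it out").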
out["enriched_adjoint"] = q_star_plus + out["times"]["estimator"] += perf_counter() + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed :class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? + :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + with PETSc.Log.Event("Enrichment"): + mesh, ref_mesh = MeshHierarchy(mesh, 1) + + # Solve the forward and adjoint problems + out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + + # Prolong + V_plus = adj_sol_plus.function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(adj_sol_plus) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. + + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr diff --git a/build/lib/nn_adapt/utility.py b/build/lib/nn_adapt/utility.py new file mode 100644 index 0000000..229de16 --- /dev/null +++ b/build/lib/nn_adapt/utility.py @@ -0,0 +1,66 @@ +__all__ = ["ConvergenceTracker"] + + +class ConvergenceTracker: + """ + Class for checking convergence of fixed point + iteration loops. + """ + + def __init__(self, mesh, parsed_args): + self.qoi_old = None + self.elements_old = mesh.num_cells() + self.estimator_old = None + self.converged_reason = None + self.qoi_rtol = parsed_args.qoi_rtol + self.element_rtol = parsed_args.element_rtol + self.estimator_rtol = parsed_args.estimator_rtol + self.fp_iteration = 0 + self.miniter = parsed_args.miniter + self.maxiter = parsed_args.maxiter + assert self.maxiter >= self.miniter + + def check_maxiter(self): + """ + Check for reaching maximum number of iterations. 
+ """ + converged = False + if self.fp_iteration >= self.maxiter: + self.converged_reason = "reaching maximum iteration count" + converged = True + return converged + + def _chk(self, val, old, rtol, reason): + converged = False + if old is not None and self.fp_iteration >= self.miniter: + if abs(val - old) < rtol * abs(old): + self.converged_reason = reason + converged = True + return converged + + def check_qoi(self, val): + """ + Check for QoI convergence. + """ + r = "QoI convergence" + converged = self._chk(val, self.qoi_old, self.qoi_rtol, r) + self.qoi_old = val + return converged + + def check_estimator(self, val): + """ + Check for error estimator convergence. + """ + r = "error estimator convergence" + converged = self._chk(val, self.estimator_old, self.estimator_rtol, r) + self.estimator_old = val + return converged + + def check_elements(self, val): + """ + Check for mesh element count convergence. + """ + r = "element count convergence" + converged = self._chk(val, self.elements_old, self.element_rtol, r) + self.elements_old = val + return converged diff --git a/examples/a_test.py b/examples/a_test.py index 72d4827..fbb422e 100644 --- a/examples/a_test.py +++ b/examples/a_test.py @@ -25,7 +25,7 @@ for i in range(tt_steps): tricontourf(out['forward'][i], axes=axes[i][0]) tricontourf(out['adjoint'][i], axes=axes[i][1]) - + plt.savefig("test1.jpg") # mesh = UnitSquareMesh(30, 30) diff --git a/examples/models/burgers_try.py b/examples/models/burgers_try.py index 4cd9621..194739e 100644 --- a/examples/models/burgers_try.py +++ b/examples/models/burgers_try.py @@ -1,5 +1,8 @@ +from copy import deepcopy from firedrake import * from firedrake.petsc import PETSc +from firedrake_adjoint import * +from firedrake.adjoint import get_solve_blocks import nn_adapt.model import nn_adapt.solving @@ -66,7 +69,9 @@ def ic(self, mesh): """ x, y = SpatialCoordinate(mesh) expr = self.initial_speed * sin(pi * x) - return as_vector([expr, 0]) + yside = self.initial_speed * sin(pi * y) + yside = 0 + return as_vector([expr, yside]) PETSc.Sys.popErrorHandler() @@ -155,33 +160,18 @@ def __init__(self, meshes, ic, **kwargs): # Collect parameters self.tt_steps = parameters.tt_steps - dt = Constant(parameters.timestep) + self.dt = Constant(parameters.timestep) assert self.tt_steps == len(self.meshes) - nu = [parameters.viscosity(meshes[i]) for i in range(self.tt_steps)] - - # Define variational formulation - V = self.function_space - self.u = [Function(V[i]) for i in range(self.tt_steps)] - self.u_ = [Function(V[i]) for i in range(self.tt_steps)] - self.v = [TestFunction(V[i]) for i in range(self.tt_steps)] - self._form = [( - inner((self.u[i] - self.u_[i]) / dt, self.v[i]) * dx - + inner(dot(self.u[i], nabla_grad(self.u[i])), self.v[i]) * dx - + nu[i] * inner(grad(self.u[i]), grad(self.v[i])) * dx - ) for i in range(self.tt_steps)] - - self._solution = [] - - # Set initial condition - self.u_[0].project(parameters.ic(meshes[0])) + # Physical parameters + self.nu = Constant(parameters.viscosity_coefficient) @property def function_space(self): r""" The :math:`\mathbb P2` finite element space. 
""" - return [get_function_space(self.meshes[i]) for i in range(self.tt_steps)] + return get_function_space(self.meshes) @property def form(self): @@ -192,21 +182,64 @@ def form(self): @property def solution(self): - return self._solution + return self._solutions + + def adjoint_setup(self): + J_form = inner(self._u, self._u)*ds(2) + J = assemble(J_form) + + tape = get_working_tape() + g = compute_gradient(J, Control(self.nu)) + + solve_blocks = get_solve_blocks() + + # 'Initial condition' for both adjoint + dJdu = assemble(derivative(J_form, self._u)) + + return dJdu, solve_blocks def iterate(self, **kwargs): """ - Take a single timestep of Burgers equation + Get the final timestep of Burgers equation """ - solve(self._form[0] == 0, self.u[0]) - self._solution.append(self.u[0]) - - for step in range(1, self.tt_steps): - self.u_[step].project(self.u[step-1]) - - solve(self._form[step] == 0, self.u[step]) - self._solution.append(self.u[step]) + stop_annotating(); + + # Assign initial condition + V = get_function_space(self.meshes[0]) + ic = parameters.ic(self.meshes[0]) + u = Function(V) + u.project(ic) + + _solutions = [u.copy(deepcopy=True)] + + # solve forward + step = 0 + + for step in range(self.tt_steps): + # Define P2 function space and corresponding test function + V = get_function_space(self.meshes[step]) + v = TestFunction(V) + + # Create Functions for the solution and time-lagged solution + u_ = Function(V) + u_.project(u) + u = Function(V, name="Velocity") + + # Define nonlinear form + F = (inner((u - u_)/self.dt, v) + inner(dot(u, nabla_grad(u)), v) + self.nu*inner(grad(u), grad(v)))*dx + + solve(F == 0, u) + + # Store forward solution at exports so we can plot again later + _solutions.append(u.copy(deepcopy=True)) + + self._form = F + self._solutions = _solutions + self._u = u + + stop_annotating(); + def get_initial_condition(function_space): @@ -226,9 +259,11 @@ def get_qoi(mesh): It should have one argument - the prognostic solution. 
""" - + def qoi(sol): - return inner(sol, sol) * ds(2) + sol_temp = Function(mesh) + sol_temp.project(sol) + return inner(sol_temp, sol_temp) * ds(2) return qoi diff --git a/nn_adapt/solving.py b/nn_adapt/solving.py index 258e4c0..1ba4ee9 100644 --- a/nn_adapt/solving.py +++ b/nn_adapt/solving.py @@ -123,7 +123,7 @@ def get_solutions( solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) out["adjoint"] = q_star out["times"]["adjoint"] += perf_counter() - + if refined_mesh is None: return out diff --git a/nn_adapt/solving_time.py b/nn_adapt/solving_time.py index ffc4b13..da17454 100644 --- a/nn_adapt/solving_time.py +++ b/nn_adapt/solving_time.py @@ -8,6 +8,8 @@ from firedrake import * from firedrake.petsc import PETSc from firedrake.mg.embedded import TransferManager +from firedrake_adjoint import * +from firedrake.adjoint import get_solve_blocks from pyroteus.error_estimation import get_dwr_indicator import abc from time import perf_counter @@ -106,21 +108,14 @@ def get_time_solutions( solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs) solver_obj.iterate() q = solver_obj.solution - qoi = [] - j_list = [] - for step in range(tt_steps): - J = config.get_qoi(meshes[step])(q[step]) - j_list.append(J) - qoi.append(assemble(J)) + J = config.get_qoi(V[tt_steps-1])(q[tt_steps-1]) + qoi = assemble(J) + out["times"]["forward"] += perf_counter() out["qoi"] = qoi out["forward"] = q if convergence_checker is not None: - cirt = 0 - for step in range(tt_steps): - if not convergence_checker.check_qoi(qoi[step]): - cirt = 1 - if cirt == 1: + if not convergence_checker.check_qoi(qoi): return out if not solve_adjoint: return out @@ -128,45 +123,27 @@ def get_time_solutions( # Solve adjoint problem in base space out["times"]["adjoint"] = -perf_counter() # with PETSc.Log.Event("Adjoint solve"): - sp = config.parameters.adjoint_solver_parameters - F = solver_obj.form + adj_solution = [] + # sp = config.parameters.adjoint_solver_parameters + # tape = get_working_tape() + # nu = Constant(config.parameters.viscosity_coefficient) + # g = compute_gradient(qoi, Control(nu)) + # solve_blocks = get_solve_blocks() + # # 'Initial condition' for both adjoint + # dJdu = assemble(derivative(J, q[-1])) - # q_star = Function(V[tt_steps-1]) - # dFdq = derivative(F[tt_steps-1], q[tt_steps-1], TrialFunction(V[tt_steps-1])) - # dFdq_transpose = adjoint(dFdq) - # dJdq = derivative(j_list[tt_steps-1], q[tt_steps-1], TestFunction(V[tt_steps-1])) - # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - # adj_solution.append(q_star) - - # for step in range(tt_steps-2, -1, -1): - # print(step) - # q_star_next = Function(V[step]) - # q_star_next.project(q_star) - - # q_star = Function(V[step]) - - # dFdq = derivative(F[step], q_star_next, TrialFunction(V[step])) - # dFdq_transpose = adjoint(dFdq) - # dJdq = derivative(j_list[step], q[step], TestFunction(V[step])) - # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - - # adj_solution.append(q_star) + # adj_solution = [] + dJdu, solve_blocks = solver_obj.adjoint_setup() - - for step in range(tt_steps-1, -1, -1): - - q_star = Function(V[step]) - - dFdq = derivative(F[step], q[step], TrialFunction(V[step])) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(j_list[step], q[step], TestFunction(V[step])) - solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - - adj_solution.append(q_star) + for step in range(tt_steps-1): + adjoint_solution = solve_blocks[step].adj_sol + adj_solution.append(adjoint_solution) + + adj_solution.append(dJdu) - 
out["adjoint"] = adj_solution.reverse() + out["adjoint"] = adj_solution out["times"]["adjoint"] += perf_counter() if refined_mesh is None: return out From 9c11ea402e85f5a95adbe1ed8bf56e53a8c7c57c Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Thu, 28 Jul 2022 19:41:44 +0100 Subject: [PATCH 04/13] multiple meshes for multiple steps --- build/lib/nn_adapt/__init__.py | 0 build/lib/nn_adapt/ann.py | 147 --------------- build/lib/nn_adapt/features.py | 250 -------------------------- build/lib/nn_adapt/layout.py | 61 ------- build/lib/nn_adapt/metric.py | 107 ----------- build/lib/nn_adapt/model.py | 43 ----- build/lib/nn_adapt/parse.py | 142 --------------- build/lib/nn_adapt/plotting.py | 15 -- build/lib/nn_adapt/solving.py | 227 ----------------------- build/lib/nn_adapt/solving_time.py | 279 ----------------------------- build/lib/nn_adapt/utility.py | 66 ------- examples/a_test.py | 25 ++- examples/models/burgers.py | 25 +-- examples/models/burgers_try.py | 21 +-- nn_adapt/solving_time.py | 123 +++++++------ 15 files changed, 92 insertions(+), 1439 deletions(-) delete mode 100644 build/lib/nn_adapt/__init__.py delete mode 100644 build/lib/nn_adapt/ann.py delete mode 100644 build/lib/nn_adapt/features.py delete mode 100644 build/lib/nn_adapt/layout.py delete mode 100644 build/lib/nn_adapt/metric.py delete mode 100644 build/lib/nn_adapt/model.py delete mode 100644 build/lib/nn_adapt/parse.py delete mode 100644 build/lib/nn_adapt/plotting.py delete mode 100644 build/lib/nn_adapt/solving.py delete mode 100644 build/lib/nn_adapt/solving_time.py delete mode 100644 build/lib/nn_adapt/utility.py diff --git a/build/lib/nn_adapt/__init__.py b/build/lib/nn_adapt/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/build/lib/nn_adapt/ann.py b/build/lib/nn_adapt/ann.py deleted file mode 100644 index 26d17ad..0000000 --- a/build/lib/nn_adapt/ann.py +++ /dev/null @@ -1,147 +0,0 @@ -""" -Classes and functions related to using neural networks. -""" -import random -import numpy as np -import torch -from torch import nn - - -# Set device -if torch.cuda.device_count() > 0 and torch.cuda.is_available(): - dev = torch.cuda.current_device() - print(f"Cuda installed. Running on GPU {dev}.") - device = torch.device(f"cuda:{dev}") - torch.backends.cudnn.benchmark = True - torch.backends.cudnn.enabled = True -else: - print("No GPU available.") - device = torch.device("cpu") - - -def set_seed(seed): - """ - Set all random seeds to a fixed value - - :arg seed: the seed value - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def sample_uniform(l, u): - """ - Sample from the continuous uniform - distribution :math:`U(l, u)`. - - :arg l: the lower bound - :arg u: the upper bound - """ - return l + (u - l) * np.random.rand() - - -class SingleLayerFCNN(nn.Module): - """ - Fully Connected Neural Network (FCNN) - for goal-oriented metric-based mesh - adaptation with a single hidden layer. - """ - - def __init__(self, layout, preproc="arctan"): - """ - :arg layout: class instance inherited from - :class:`NetLayoutBase`, with numbers of - inputs, hidden neurons and outputs - specified. 
- :kwarg preproc: pre-processing function to - apply to the input data - """ - super().__init__() - - # Define preprocessing function - if preproc == "none": - self.preproc1 = lambda x: x - if preproc == "arctan": - self.preproc1 = torch.arctan - elif preproc == "tanh": - self.preproc1 = torch.tanh - elif preproc == "logabs": - self.preproc1 = lambda x: torch.log(torch.abs(x)) - else: - raise ValueError(f'Preprocessor "{preproc}" not recognised.') - - # Define layers - self.linear1 = nn.Linear(layout.num_inputs, layout.num_hidden_neurons) - self.linear2 = nn.Linear(layout.num_hidden_neurons, 1) - - # Define activation functions - self.activate1 = nn.Sigmoid() - - def forward(self, x): - p = self.preproc1(x) - z1 = self.linear1(p) - a1 = self.activate1(z1) - z2 = self.linear2(a1) - return z2 - - -def propagate(data_loader, model, loss_fn, optimizer=None): - """ - Propagate data from a :class:`DataLoader` object - through the neural network. - - If ``optimizer`` is not ``None`` then training is - performed. Otherwise, validation is performed. - - :arg data_loader: PyTorch :class:`DataLoader` instance - :arg model: PyTorch :class:`Module` instance - :arg loss_fn: PyTorch loss function instance - :arg optimizer: PyTorch optimizer instance - """ - num_batches = len(data_loader) - cumulative_loss = 0 - - for x, y in data_loader: - - # Compute prediction and loss - prediction = model(x.to(device)) - loss = loss_fn(prediction, y.to(device)) - cumulative_loss += loss.item() - - # Backpropagation - if optimizer is not None: - optimizer.zero_grad() - loss.backward() - optimizer.step() - - return cumulative_loss / num_batches - - -def collect_features(feature_dict, layout): - """ - Given a dictionary of feature arrays, stack their - data appropriately to be fed into a neural network. - - :arg feature_dict: dictionary containing feature data - :arg layout: :class:`NetLayout` instance - """ - features = {key: val for key, val in feature_dict.items() if key in layout.inputs} - dofs = [feature for key, feature in features.items() if "dofs" in key] - nodofs = [feature for key, feature in features.items() if "dofs" not in key] - return np.hstack((np.vstack(nodofs).transpose(), np.hstack(dofs))) - - -def Loss(): - """ - Custom loss function. - - Needed when there is only one output value. - """ - - def mse(output, target): - target = target.reshape(*output.shape) - return torch.nn.MSELoss(reduction="sum")(output, target) - - return mse diff --git a/build/lib/nn_adapt/features.py b/build/lib/nn_adapt/features.py deleted file mode 100644 index f185f89..0000000 --- a/build/lib/nn_adapt/features.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -Functions for extracting feature data from configuration -files, meshes and solution fields. -""" -import firedrake -from firedrake.petsc import PETSc -from firedrake import op2 -import numpy as np -from pyroteus.metric import * -import ufl -from nn_adapt.solving import dwr_indicator -from collections import Iterable - - -__all__ = ["extract_features", "get_values_at_elements"] - - -@PETSc.Log.EventDecorator("Extract components") -def extract_components(matrix): - r""" - Extract components of a matrix that describe its - size, orientation and shape. - - The latter two components are combined in such - a way that we avoid errors relating to arguments - zero and :math:`2\pi` being equal. 
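# End-to-end sketch of how the deleted ann.py pieces fit together in
# training (synthetic data and layout; every name below is illustrative).
# Aside: the preproc dispatch above opens with a bare `if preproc == "none"`
# followed by a separate if/elif/else chain, so preproc="none" sets
# self.preproc1 and then still falls into the final else and raises; the
# first branch presumably wants to be part of the chain.
import torch
from torch.utils.data import DataLoader, TensorDataset

class _Layout:                 # minimal stand-in for a NetLayoutBase subclass
    num_inputs = 30
    num_hidden_neurons = 60

x = torch.randn(256, _Layout.num_inputs)
y = torch.randn(256, 1)
loader = DataLoader(TensorDataset(x, y), batch_size=32)

model = SingleLayerFCNN(_Layout()).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = Loss()
for epoch in range(10):
    train_loss = propagate(loader, model, loss_fn, optimizer)
    val_loss = propagate(loader, model, loss_fn)   # no optimizer => validation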
- """ - density, quotients, evecs = density_and_quotients(matrix, reorder=True) - fs = density.function_space() - ar = firedrake.interpolate(ufl.sqrt(quotients[1]), fs) - armin = ar.vector().gather().min() - assert armin >= 1.0, f"An element has aspect ratio is less than one ({armin})" - theta = firedrake.interpolate(ufl.atan(evecs[1, 1] / evecs[1, 0]), fs) - h1 = firedrake.interpolate(ufl.cos(theta) ** 2 / ar + ufl.sin(theta) ** 2 * ar, fs) - h2 = firedrake.interpolate((1 / ar - ar) * ufl.sin(theta) * ufl.cos(theta), fs) - return density, h1, h2 - - -@PETSc.Log.EventDecorator("Extract elementwise") -def get_values_at_elements(f): - """ - Extract the values for all degrees of freedom associated - with each element. - - :arg f: some :class:`Function` - :return: a vector :class:`Function` holding all DoFs of `f` - """ - fs = f.function_space() - mesh = fs.mesh() - dim = mesh.topological_dimension() - if dim == 2: - assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" - elif dim == 3: - assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" - else: - raise ValueError(f"Dimension {dim} not supported") - el = fs.ufl_element() - if el.sub_elements() == []: - p = el.degree() - size = el.value_size() * (p + 1) * (p + 2) // 2 - else: - size = 0 - for sel in el.sub_elements(): - p = sel.degree() - size += sel.value_size() * (p + 1) * (p + 2) // 2 - P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size) - values = firedrake.Function(P0_vec) - kernel = "for (int i=0; i < vertexwise.dofs; i++) elementwise[i] += vertexwise[i];" - keys = {"vertexwise": (f, op2.READ), "elementwise": (values, op2.INC)} - firedrake.par_loop(kernel, ufl.dx, keys) - return values - - -@PETSc.Log.EventDecorator("Extract at centroids") -def get_values_at_centroids(f): - """ - Extract the values for the function at each element centroid, - along with all derivatives up to the :math:`p^{th}`, where - :math:`p` is the polynomial degree. 
- - :arg f: some :class:`Function` - :return: a vector :class:`Function` holding all DoFs of `f` - """ - fs = f.function_space() - mesh = fs.mesh() - dim = mesh.topological_dimension() - if dim == 2: - assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" - elif dim == 3: - assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" - else: - raise ValueError(f"Dimension {dim} not supported") - el = fs.ufl_element() - if el.sub_elements() == []: - p = el.degree() - degrees = [p] - size = el.value_size() * (p + 1) * (p + 2) // 2 - funcs = [f] - else: - size = 0 - degrees = [sel.degree() for sel in el.sub_elements()] - for sel, p in zip(el.sub_elements(), degrees): - size += sel.value_size() * (p + 1) * (p + 2) // 2 - funcs = f - values = firedrake.Function(firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size)) - P0 = firedrake.FunctionSpace(mesh, "DG", 0) - P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0) - P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) - i = 0 - for func, p in zip(funcs, degrees): - values.dat.data[:, i] = firedrake.project(func, P0).dat.data_ro - i += 1 - if p == 0: - continue - g = firedrake.project(ufl.grad(func), P0_vec) - values.dat.data[:, i] = g.dat.data_ro[:, 0] - values.dat.data[:, i + 1] = g.dat.data_ro[:, 1] - i += 2 - if p == 1: - continue - H = firedrake.project(ufl.grad(ufl.grad(func)), P0_ten) - values.dat.data[:, i] = H.dat.data_ro[:, 0, 0] - values.dat.data[:, i + 1] = 0.5 * ( - H.dat.data_ro[:, 0, 1] + H.dat.data_ro[:, 1, 0] - ) - values.dat.data[:, i + 2] = H.dat.data_ro[:, 1, 1] - i += 3 - if p > 2: - raise NotImplementedError( - "Polynomial degrees greater than 2 not yet considered" - ) - return values - - -def split_into_scalars(f): - """ - Given a :class:`Function`, split it into - components from its constituent scalar - spaces. - - If it is not mixed then no splitting is - required. - - :arg f: the mixed :class:`Function` - :return: a dictionary containing the - nested structure of the mixed function - """ - V = f.function_space() - if V.value_size > 1: - if not isinstance(V.node_count, Iterable): - assert len(V.shape) == 1, "Tensor spaces not supported" - el = V.ufl_element() - fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) - return {0: [firedrake.interpolate(f[i], fs) for i in range(V.shape[0])]} - subspaces = [V.sub(i) for i in range(len(V.node_count))] - ret = {} - for i, (Vi, fi) in enumerate(zip(subspaces, f.split())): - if len(Vi.shape) == 0: - ret[i] = [fi] - else: - assert len(Vi.shape) == 1, "Tensor spaces not supported" - el = Vi.ufl_element() - fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) - ret[i] = [firedrake.interpolate(fi[j], fs) for j in range(Vi.shape[0])] - return ret - else: - return {0: [f]} - - -def extract_array(f, mesh=None, centroid=False, project=False): - r""" - Extract a cell-wise data array from a :class:`Constant` or - :class:`Function`. - - For constants and scalar fields, this will be an :math:`n\times 1` - array, where :math:`n` is the number of mesh elements. For a mixed - field with :math:`m` components, it will be :math:`n\times m`. 
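# DoF-count check for the helpers above, on triangles: a degree-p scalar
# Lagrange element has (p+1)(p+2)/2 nodes, so a 2D P2 velocity field packs
# value_size * (p+1)(p+2)/2 = 2 * 6 = 12 values per element -- the
# dofs_per_element = 12 used by the network layouts.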
- - :arg f: the :class:`Constant` or :class:`Function` - :kwarg mesh: the underlying :class:`MeshGeometry` - :kwarg project: if ``True``, project the field into - :math:`\mathbb P0` space - """ - mesh = mesh or f.ufl_domain() - if isinstance(f, firedrake.Constant): - ones = np.ones(mesh.num_cells()) - assert len(f.values()) == 1 - return f.values()[0] * ones - elif not isinstance(f, firedrake.Function): - raise ValueError(f"Unexpected input type {type(f)}") - if project: - if len(f.function_space().shape) > 0: - raise NotImplementedError("Can currently only project scalar fields") # TODO - element = f.ufl_element() - if (element.family(), element.degree()) != ("Discontinuous Lagrange", 0): - P0 = firedrake.FunctionSpace(mesh, "DG", 0) - f = firedrake.project(f, P0) - s = sum([fi for i, fi in split_into_scalars(f).items()], start=[]) - get = get_values_at_centroids if centroid else get_values_at_elements - if len(s) == 1: - return get(s[0]).dat.data - else: - return np.hstack([get(si).dat.data for si in s]) - - -@PETSc.Log.EventDecorator("Extract features") -def extract_features(config, fwd_sol, adj_sol): - """ - Extract features from the outputs of a run. - - :arg config: the configuration file - :arg fwd_sol: the forward solution - :arg adj_sol: the adjoint solution - :return: a list of feature arrays - """ - mesh = fwd_sol.function_space().mesh() - - # Coarse-grained DWR estimator - with PETSc.Log.Event("Extract estimator"): - dwr = dwr_indicator(config, mesh, fwd_sol, adj_sol) - - # Features describing the mesh element - with PETSc.Log.Event("Analyse element"): - P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) - - # Element size, orientation and shape - J = ufl.Jacobian(mesh) - JTJ = firedrake.interpolate(ufl.dot(ufl.transpose(J), J), P0_ten) - d, h1, h2 = (extract_array(p) for p in extract_components(JTJ)) - - # Is the element on the boundary? - p0test = firedrake.TestFunction(dwr.function_space()) - bnd = firedrake.assemble(p0test * ufl.ds).dat.data - - # Combine the features together - features = { - "estimator_coarse": extract_array(dwr), - "physics_drag": extract_array(config.parameters.drag(mesh)), - "physics_viscosity": extract_array(config.parameters.viscosity(mesh), project=True), - "physics_bathymetry": extract_array(config.parameters.bathymetry(mesh), project=True), - "mesh_d": d, - "mesh_h1": h1, - "mesh_h2": h2, - "mesh_bnd": bnd, - "forward_dofs": extract_array(fwd_sol, centroid=True), - "adjoint_dofs": extract_array(adj_sol, centroid=True), - } - for key, value in features.items(): - assert not np.isnan(value).any() - return features diff --git a/build/lib/nn_adapt/layout.py b/build/lib/nn_adapt/layout.py deleted file mode 100644 index 060502f..0000000 --- a/build/lib/nn_adapt/layout.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Classes for defining the layout of a neural network. -""" - - -class NetLayoutBase(object): - """ - Base class for specifying the number - of inputs, hidden neurons and outputs - in a neural network. - - The derived class should give values - for each of these parameters. 
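# Minimal NetLayoutBase subclass, sketched to show how the class body below
# counts inputs (names illustrative): each plain input contributes one
# scalar, while "forward_dofs"/"adjoint_dofs" expand to dofs_per_element
# scalars -- and collect_features (ann.py) must produce a matrix with
# exactly num_inputs columns for the network to consume.
class TinyLayout(NetLayoutBase):
    inputs = ("estimator_coarse", "mesh_d", "forward_dofs")
    num_hidden_neurons = 10
    dofs_per_element = 12

assert TinyLayout().num_inputs == 1 + 1 + 12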
- """ - - # TODO: Allow more general networks - - colours = { - "estimator": "b", - "physics": "C0", - "mesh": "deepskyblue", - "forward": "mediumturquoise", - "adjoint": "mediumseagreen", - } - - def __init__(self): - if not hasattr(self, "inputs"): - raise ValueError("Need to set self.inputs") - colours = set(self.colours.keys()) - for i in self.inputs: - okay = False - for c in colours: - if i.startswith(c): - okay = True - break - if not okay: - raise ValueError("Input names must begin with one of {colours}") - if not hasattr(self, "num_hidden_neurons"): - raise ValueError("Need to set self.num_hidden_neurons") - if not hasattr(self, "dofs_per_element"): - raise ValueError("Need to set self.dofs_per_element") - - def count_inputs(self, prefix): - """ - Count all scalar inputs that start with a given `prefix`. - """ - cnt = 0 - for i in self.inputs: - if i.startswith(prefix): - if i in ("forward_dofs", "adjoint_dofs"): - cnt += self.dofs_per_element - else: - cnt += 1 - return cnt - - @property - def num_inputs(self): - """ - The total number of scalar inputs. - """ - return self.count_inputs("") diff --git a/build/lib/nn_adapt/metric.py b/build/lib/nn_adapt/metric.py deleted file mode 100644 index 22ed97e..0000000 --- a/build/lib/nn_adapt/metric.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Functions for generating Riemannian metrics from solution -fields. -""" -from pyroteus import * -from nn_adapt.features import split_into_scalars -from nn_adapt.solving import * -from firedrake.meshadapt import RiemannianMetric -from time import perf_counter - - -def get_hessians(f, **kwargs): - """ - Compute Hessians for each component of - a :class:`Function`. - - Any keyword arguments are passed to - ``recover_hessian``. - - :arg f: the function - :return: list of Hessians of each - component - """ - kwargs.setdefault("method", "Clement") - return [ - space_normalise(hessian_metric(recover_hessian(fij, **kwargs)), 4000.0, "inf") - for i, fi in split_into_scalars(f).items() - for fij in fi - ] - - -def go_metric( - mesh, - config, - enrichment_method="h", - target_complexity=4000.0, - average=True, - interpolant="Clement", - anisotropic=False, - retall=False, - convergence_checker=None, - **kwargs, -): - """ - Compute an anisotropic goal-oriented - metric field, based on a mesh and - a configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? - :kwarg target_complexity: target complexity - of the goal-oriented metric - :kwarg average: should the Hessian components - be combined using averaging (or intersection)? - :kwarg interpolant: which method to use to - interpolate into the target space? - :kwarg anisotropic: toggle isotropic vs. 
- anisotropic metric - :kwarg h_min: minimum magnitude - :kwarg h_max: maximum magnitude - :kwarg a_max: maximum anisotropy - :kwarg retall: if ``True``, the error indicator, - forward solution and adjoint solution - are returned, in addition to the metric - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - """ - h_min = kwargs.pop("h_min", 1.0e-30) - h_max = kwargs.pop("h_max", 1.0e+30) - a_max = kwargs.pop("a_max", 1.0e+30) - out = indicate_errors( - mesh, - config, - enrichment_method=enrichment_method, - retall=True, - convergence_checker=convergence_checker, - **kwargs, - ) - if retall and "adjoint" not in out: - return out - out["estimator"] = out["dwr"].vector().gather().sum() - if convergence_checker is not None: - if convergence_checker.check_estimator(out["estimator"]): - return out - - out["times"]["metric"] = -perf_counter() - with PETSc.Log.Event("Metric construction"): - if anisotropic: - hessian = combine_metrics(*get_hessians(out["forward"]), average=average) - else: - hessian = None - metric = anisotropic_metric( - out["dwr"], - hessian=hessian, - target_complexity=target_complexity, - target_space=TensorFunctionSpace(mesh, "CG", 1), - interpolant=interpolant, - ) - space_normalise(metric, target_complexity, "inf") - enforce_element_constraints(metric, h_min, h_max, a_max) - out["metric"] = RiemannianMetric(mesh) - out["metric"].assign(metric) - out["times"]["metric"] += perf_counter() - return out if retall else out["metric"] diff --git a/build/lib/nn_adapt/model.py b/build/lib/nn_adapt/model.py deleted file mode 100644 index f3587f9..0000000 --- a/build/lib/nn_adapt/model.py +++ /dev/null @@ -1,43 +0,0 @@ -import abc - - -class Parameters(abc.ABC): - """ - Abstract base class defining the API for parameter - classes that describe PDE models. - """ - - def __init__(self): - self.case = None - if not hasattr(self, "qoi_name"): - raise NotImplementedError("qoi_name attribute must be set") - if not hasattr(self, "qoi_unit"): - raise NotImplementedError("qoi_unit attribute must be set") - - @abc.abstractmethod - def bathymetry(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - """ - pass - - @abc.abstractmethod - def drag(self, mesh): - """ - Compute the drag coefficient on the current `mesh`. - """ - pass - - @abc.abstractmethod - def viscosity(self, mesh): - """ - Compute the viscosity coefficient on the current `mesh`. - """ - pass - - @abc.abstractmethod - def ic(self, mesh): - """ - Compute the initial condition on the current `mesh`. 
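# Typical adaptation-loop usage of go_metric above, sketched (config, mesh,
# maxiter and the adapt() entry point are all assumptions -- adapt stands in
# for whatever routine the repo pairs with firedrake.meshadapt.RiemannianMetric):
from firedrake.meshadapt import adapt   # assumed available

for _ in range(maxiter):
    out = go_metric(mesh, config, target_complexity=4000.0, retall=True)
    if "metric" not in out:              # converged before metric construction
        break
    mesh = adapt(mesh, out["metric"])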
- """ - pass diff --git a/build/lib/nn_adapt/parse.py b/build/lib/nn_adapt/parse.py deleted file mode 100644 index 7565690..0000000 --- a/build/lib/nn_adapt/parse.py +++ /dev/null @@ -1,142 +0,0 @@ -import argparse -import git -import numpy as np - - -__all__ = ["Parser"] - - -def _check_in_range(value, typ, l, u): - tvalue = typ(value) - if not (tvalue >= l and tvalue <= u): - raise argparse.ArgumentTypeError(f"{value} is not in [{l}, {u}]") - return tvalue - - -def _check_strictly_in_range(value, typ, l, u): - tvalue = typ(value) - if not (tvalue >= l and tvalue <= u): - raise argparse.ArgumentTypeError(f"{value} is not in ({l}, {u})") - return tvalue - - -nonnegative_float = lambda value: _check_in_range(value, float, 0, np.inf) -nonnegative_int = lambda value: _check_in_range(value, int, 0, np.inf) -positive_float = lambda value: _check_strictly_in_range(value, float, 0, np.inf) -positive_int = lambda value: _check_strictly_in_range(value, int, 0, np.inf) - - -def bounded_float(l, u): - def chk(value): - return _check_in_range(value, float, l, u) - - return chk - - -def bounded_int(l, u): - def chk(value): - return _check_in_range(value, int, l, u) - - return chk - - -class Parser(argparse.ArgumentParser): - """ - Custom :class:`ArgumentParser` for `nn_adapt`. - """ - - def __init__(self, prog): - super().__init__( - self, prog, formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - self.add_argument("model", help="The model", type=str) - self.add_argument("test_case", help="The configuration file number or name") - self.add_argument( - "--optimise", - help="Turn off plotting and debugging", - action="store_true", - ) - - def parse_convergence_criteria(self): - self.add_argument( - "--miniter", - help="Minimum number of iterations", - type=positive_int, - default=3, - ) - self.add_argument( - "--maxiter", - help="Maximum number of iterations", - type=positive_int, - default=35, - ) - self.add_argument( - "--qoi_rtol", - help="Relative tolerance for QoI", - type=positive_float, - default=0.001, - ) - self.add_argument( - "--element_rtol", - help="Element count tolerance", - type=positive_float, - default=0.001, - ) - self.add_argument( - "--estimator_rtol", - help="Error estimator tolerance", - type=positive_float, - default=0.001, - ) - - def parse_num_refinements(self, default=4): - self.add_argument( - "--num_refinements", - help="Number of mesh refinements", - type=positive_int, - default=default, - ) - - def parse_approach(self): - self.add_argument( - "-a", - "--approach", - help="Adaptive approach to consider", - choices=["isotropic", "anisotropic"], - default="anisotropic", - ) - self.add_argument( - "--transfer", - help="Transfer the solution from the previous mesh as initial guess", - action="store_true", - ) - - def parse_target_complexity(self): - self.add_argument( - "--base_complexity", - help="Base metric complexity", - type=positive_float, - default=200.0, - ) - self.add_argument( - "--target_complexity", - help="Target metric complexity", - type=positive_float, - default=4000.0, - ) - - def parse_preproc(self): - self.add_argument( - "--preproc", - help="Data preprocess function", - type=str, - choices=["none", "arctan", "tanh", "logabs"], - default="arctan", - ) - - def parse_tag(self): - self.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=git.Repo(search_parent_directories=True).head.object.hexsha, - ) diff --git a/build/lib/nn_adapt/plotting.py b/build/lib/nn_adapt/plotting.py deleted file mode 100644 index 
3822c3a..0000000 --- a/build/lib/nn_adapt/plotting.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -Configuration for plotting. -""" -import matplotlib -import matplotlib.pyplot as plt # noqa - - -matplotlib.rc("text", usetex=True) -matplotlib.rcParams["mathtext.fontset"] = "custom" -matplotlib.rcParams["mathtext.rm"] = "Bitstream Vera Sans" -matplotlib.rcParams["mathtext.it"] = "Bitstream Vera Sans:italic" -matplotlib.rcParams["mathtext.bf"] = "Bitstream Vera Sans:bold" -matplotlib.rcParams["mathtext.fontset"] = "stix" -matplotlib.rcParams["font.family"] = "STIXGeneral" -matplotlib.rcParams["font.size"] = 12 diff --git a/build/lib/nn_adapt/solving.py b/build/lib/nn_adapt/solving.py deleted file mode 100644 index 1ba4ee9..0000000 --- a/build/lib/nn_adapt/solving.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -Functions for solving problems defined by configuration -files and performing goal-oriented error estimation. -""" -from firedrake import * -from firedrake.petsc import PETSc -from firedrake.mg.embedded import TransferManager -from pyroteus.error_estimation import get_dwr_indicator -import abc -from time import perf_counter - - -tm = TransferManager() - - -class Solver(abc.ABC): - """ - Base class that defines the API for solver objects. - """ - - @abc.abstractmethod - def __init__(self, mesh, ic, **kwargs): - """ - Setup the solver. - - :arg mesh: the mesh to define the solver on - :arg ic: the initial condition - """ - pass - - @property - @abc.abstractmethod - def function_space(self): - """ - The function space that the PDE is solved in. - """ - pass - - @property - @abc.abstractmethod - def form(self): - """ - Return the weak form. - """ - pass - - @abc.abstractmethod - def iterate(self, **kwargs): - """ - Solve the PDE. - """ - pass - - @property - @abc.abstractmethod - def solution(self): - """ - Return the solution field. - """ - pass - - -def get_solutions( - mesh, - config, - solve_adjoint=True, - refined_mesh=None, - init=None, - convergence_checker=None, - **kwargs, -): - """ - Solve forward and adjoint equations on a - given mesh. - - This works only for steady-state problems. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg solve_adjoint: should we solve the - adjoint problem? 
- :kwarg refined_mesh: refined mesh to compute - enriched adjoint solution on - :kwarg init: custom initial condition function - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - :return: forward solution, adjoint solution - and enriched adjoint solution (if requested) - """ - - # Solve forward problem in base space - V = config.get_function_space(mesh) - out = {"times": {"forward": -perf_counter()}} - with PETSc.Log.Event("Forward solve"): - if init is None: - ic = config.get_initial_condition(V) - else: - ic = init(V) - solver_obj = config.Solver(mesh, ic, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - J = config.get_qoi(mesh)(q) - qoi = assemble(J) - out["times"]["forward"] += perf_counter() - out["qoi"] = qoi - out["forward"] = q - if convergence_checker is not None: - if convergence_checker.check_qoi(qoi): - return out - if not solve_adjoint: - return out - - # Solve adjoint problem in base space - out["times"]["adjoint"] = -perf_counter() - with PETSc.Log.Event("Adjoint solve"): - sp = config.parameters.adjoint_solver_parameters - q_star = Function(V) - F = solver_obj.form - dFdq = derivative(F, q, TrialFunction(V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q, TestFunction(V)) - solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - out["adjoint"] = q_star - out["times"]["adjoint"] += perf_counter() - - if refined_mesh is None: - return out - - # Solve adjoint problem in enriched space - out["times"]["estimator"] = -perf_counter() - with PETSc.Log.Event("Enrichment"): - V = config.get_function_space(refined_mesh) - q_plus = Function(V) - solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) - q_plus = solver_obj.solution - J = config.get_qoi(refined_mesh)(q_plus) - F = solver_obj.form - tm.prolong(q, q_plus) - q_star_plus = Function(V) - dFdq = derivative(F, q_plus, TrialFunction(V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q_plus, TestFunction(V)) - solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) - out["enriched_adjoint"] = q_star_plus - out["times"]["estimator"] += perf_counter() - return out - - -def split_into_components(f): - r""" - Extend the :attr:`split` method to apply - to non-mixed :class:`Function`\s. - """ - return [f] if f.function_space().value_size == 1 else f.split() - - -def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): - """ - Indicate errors according to ``dwr_indicator``, - using the solver given in the configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? 
- :kwarg retall: if ``True``, return the forward - solution and adjoint solution in addition - to the dual-weighted residual error indicator - """ - if not enrichment_method == "h": - raise NotImplementedError # TODO - with PETSc.Log.Event("Enrichment"): - mesh, ref_mesh = MeshHierarchy(mesh, 1) - - # Solve the forward and adjoint problems - out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) - if retall and "adjoint" not in out: - return out - - out["times"]["estimator"] -= perf_counter() - with PETSc.Log.Event("Enrichment"): - adj_sol_plus = out["enriched_adjoint"] - - # Prolong - V_plus = adj_sol_plus.function_space() - fwd_sol_plg = Function(V_plus) - tm.prolong(out["forward"], fwd_sol_plg) - adj_sol_plg = Function(V_plus) - tm.prolong(out["adjoint"], adj_sol_plg) - - # Subtract prolonged adjoint solution from enriched version - adj_error = Function(V_plus) - adj_sols_plus = split_into_components(adj_sol_plus) - adj_sols_plg = split_into_components(adj_sol_plg) - for i, err in enumerate(split_into_components(adj_error)): - err += adj_sols_plus[i] - adj_sols_plg[i] - - # Evaluate errors - out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) - out["times"]["estimator"] += perf_counter() - - return out if retall else out["dwr"] - - -def dwr_indicator(config, mesh, q, q_star): - r""" - Evaluate the DWR error indicator as a :math:`\mathbb P0` field. - - :arg mesh: the current mesh - :arg q: the forward solution, transferred into enriched space - :arg q_star: the adjoint solution in enriched space - """ - mesh_plus = q.function_space().mesh() - - # Extract indicator in enriched space - solver_obj = config.Solver(mesh_plus, q) - F = solver_obj.form - V = solver_obj.function_space - dwr_plus = get_dwr_indicator(F, q_star, test_space=V) - - # Project down to base space - P0 = FunctionSpace(mesh, "DG", 0) - dwr = project(dwr_plus, P0) - dwr.interpolate(abs(dwr)) - return dwr diff --git a/build/lib/nn_adapt/solving_time.py b/build/lib/nn_adapt/solving_time.py deleted file mode 100644 index bc5bb82..0000000 --- a/build/lib/nn_adapt/solving_time.py +++ /dev/null @@ -1,279 +0,0 @@ -""" -Time dependent goal-oriented error estimation -""" -""" -Functions for solving problems defined by configuration -files and performing goal-oriented error estimation. -""" -from firedrake import * -from firedrake.petsc import PETSc -from firedrake.mg.embedded import TransferManager -from pyroteus.error_estimation import get_dwr_indicator -import abc -from time import perf_counter - - -tm = TransferManager() - - -class Solver(abc.ABC): - """ - Base class that defines the API for solver objects. - """ - - @abc.abstractmethod - def __init__(self, mesh, ic, **kwargs): - """ - Setup the solver. - - :arg mesh: the mesh to define the solver on - :arg ic: the initial condition - """ - pass - - @property - @abc.abstractmethod - def function_space(self): - """ - The function space that the PDE is solved in. - """ - pass - - @property - @abc.abstractmethod - def form(self): - """ - Return the weak form. - """ - pass - - @abc.abstractmethod - def iterate(self, **kwargs): - """ - Solve the PDE. - """ - pass - - @property - @abc.abstractmethod - def solution(self): - """ - Return the solution field. - """ - pass - - -def get_time_solutions( - meshes, - config, - solve_adjoint=True, - refined_mesh=None, - init=None, - convergence_checker=None, - **kwargs, -): - """ - Solve forward and adjoint equations on a - given mesh. - - This works only for steady-state problems. - Trying to work it out. 
- - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg solve_adjoint: should we solve the - adjoint problem? - :kwarg refined_mesh: refined mesh to compute - enriched adjoint solution on - :kwarg init: custom initial condition function - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - :return: forward solution, adjoint solution - and enriched adjoint solution (if requested) - """ - - tt_steps = config.parameters.tt_steps - - # Solve forward problem in base space - V = [config.get_function_space(meshes[step]) for step in range(tt_steps)] - out = {"times": {"forward": -perf_counter()}} - # with PETSc.Log.Event("Forward solve"): - # if init is None: - # ic = config.get_initial_condition(V) - # else: - # ic = init(V) - solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - qoi = [] - j_list = [] - for step in range(tt_steps): - J = config.get_qoi(meshes[step])(q[step]) - j_list.append(J) - qoi.append(assemble(J)) - # qoi.append(assemble(J)) - out["times"]["forward"] += perf_counter() - out["qoi"] = qoi - out["forward"] = q - if convergence_checker is not None: - cirt = 0 - for step in range(tt_steps): - if not convergence_checker.check_qoi(qoi[step]): - cirt = 1 - if cirt == 1: - return out - if not solve_adjoint: - return out - - # Solve adjoint problem in base space - out["times"]["adjoint"] = -perf_counter() - # with PETSc.Log.Event("Adjoint solve"): - sp = config.parameters.adjoint_solver_parameters - F = solver_obj.form - adj_solution = [] - - - # q_star = Function(V[tt_steps-1]) - # dFdq = derivative(F[tt_steps-1], q[tt_steps-1], TrialFunction(V[tt_steps-1])) - # dFdq_transpose = adjoint(dFdq) - # dJdq = derivative(j_list[tt_steps-1], q[tt_steps-1], TestFunction(V[tt_steps-1])) - # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - # adj_solution.append(q_star) - - # for step in range(tt_steps-2, -1, -1): - # print(step) - # q_star_next = Function(V[step]) - # q_star_next.project(q_star) - - # q_star = Function(V[step]) - - # dFdq = derivative(F[step], q_star_next, TrialFunction(V[step])) - # dFdq_transpose = adjoint(dFdq) - # dJdq = derivative(j_list[step], q[step], TestFunction(V[step])) - # solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - - # adj_solution.append(q_star) - - - for step in range(tt_steps-1, -1, -1): - - q_star = Function(V[step]) - - q_mask = Function(V[step]) - q_mask.project(q[tt_steps-1]) - - dFdq = derivative(F[step], q[step], TrialFunction(V[step])) - dFdq_transpose = adjoint(dFdq) - - J_cmesh = Function(V[step]) - J_cmesh.project(J) - - dJdq = derivative(J_cmesh, q[step], TestFunction(V[step])) - solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - - adj_solution.append(q_star) - - out["adjoint"] = adj_solution - out["times"]["adjoint"] += perf_counter() - if refined_mesh is None: - return out - - # Solve adjoint problem in enriched space - out["times"]["estimator"] = -perf_counter() - with PETSc.Log.Event("Enrichment"): - V = config.get_function_space(refined_mesh) - q_plus = Function(V) - solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) - q_plus = solver_obj.solution - J = config.get_qoi(refined_mesh)(q_plus) - F = solver_obj.form - tm.prolong(q, q_plus) - q_star_plus = Function(V) - dFdq = derivative(F, q_plus, TrialFunction(V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q_plus, TestFunction(V)) - solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) - 
out["enriched_adjoint"] = q_star_plus - out["times"]["estimator"] += perf_counter() - return out - - -def split_into_components(f): - r""" - Extend the :attr:`split` method to apply - to non-mixed :class:`Function`\s. - """ - return [f] if f.function_space().value_size == 1 else f.split() - - -def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): - """ - Indicate errors according to ``dwr_indicator``, - using the solver given in the configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? - :kwarg retall: if ``True``, return the forward - solution and adjoint solution in addition - to the dual-weighted residual error indicator - """ - if not enrichment_method == "h": - raise NotImplementedError # TODO - with PETSc.Log.Event("Enrichment"): - mesh, ref_mesh = MeshHierarchy(mesh, 1) - - # Solve the forward and adjoint problems - out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) - if retall and "adjoint" not in out: - return out - - out["times"]["estimator"] -= perf_counter() - with PETSc.Log.Event("Enrichment"): - adj_sol_plus = out["enriched_adjoint"] - - # Prolong - V_plus = adj_sol_plus.function_space() - fwd_sol_plg = Function(V_plus) - tm.prolong(out["forward"], fwd_sol_plg) - adj_sol_plg = Function(V_plus) - tm.prolong(out["adjoint"], adj_sol_plg) - - # Subtract prolonged adjoint solution from enriched version - adj_error = Function(V_plus) - adj_sols_plus = split_into_components(adj_sol_plus) - adj_sols_plg = split_into_components(adj_sol_plg) - for i, err in enumerate(split_into_components(adj_error)): - err += adj_sols_plus[i] - adj_sols_plg[i] - - # Evaluate errors - out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) - out["times"]["estimator"] += perf_counter() - - return out if retall else out["dwr"] - - -def dwr_indicator(config, mesh, q, q_star): - r""" - Evaluate the DWR error indicator as a :math:`\mathbb P0` field. - - :arg mesh: the current mesh - :arg q: the forward solution, transferred into enriched space - :arg q_star: the adjoint solution in enriched space - """ - mesh_plus = q.function_space().mesh() - - # Extract indicator in enriched space - solver_obj = config.Solver(mesh_plus, q) - F = solver_obj.form - V = solver_obj.function_space - dwr_plus = get_dwr_indicator(F, q_star, test_space=V) - - # Project down to base space - P0 = FunctionSpace(mesh, "DG", 0) - dwr = project(dwr_plus, P0) - dwr.interpolate(abs(dwr)) - return dwr diff --git a/build/lib/nn_adapt/utility.py b/build/lib/nn_adapt/utility.py deleted file mode 100644 index 229de16..0000000 --- a/build/lib/nn_adapt/utility.py +++ /dev/null @@ -1,66 +0,0 @@ -__all__ = ["ConvergenceTracker"] - - -class ConvergenceTracker: - """ - Class for checking convergence of fixed point - iteration loops. - """ - - def __init__(self, mesh, parsed_args): - self.qoi_old = None - self.elements_old = mesh.num_cells() - self.estimator_old = None - self.converged_reason = None - self.qoi_rtol = parsed_args.qoi_rtol - self.element_rtol = parsed_args.element_rtol - self.estimator_rtol = parsed_args.estimator_rtol - self.fp_iteration = 0 - self.miniter = parsed_args.miniter - self.maxiter = parsed_args.maxiter - assert self.maxiter >= self.miniter - - def check_maxiter(self): - """ - Check for reaching maximum number of iterations. 
- """ - converged = False - if self.fp_iteration >= self.maxiter: - self.converged_reason = "reaching maximum iteration count" - converged = True - return converged - - def _chk(self, val, old, rtol, reason): - converged = False - if old is not None and self.fp_iteration >= self.miniter: - if abs(val - old) < rtol * abs(old): - self.converged_reason = reason - converged = True - return converged - - def check_qoi(self, val): - """ - Check for QoI convergence. - """ - r = "QoI convergence" - converged = self._chk(val, self.qoi_old, self.qoi_rtol, r) - self.qoi_old = val - return converged - - def check_estimator(self, val): - """ - Check for error estimator convergence. - """ - r = "error estimator convergence" - converged = self._chk(val, self.estimator_old, self.estimator_rtol, r) - self.estimator_old = val - return converged - - def check_elements(self, val): - """ - Check for mesh element count convergence. - """ - r = "element count convergence" - converged = self._chk(val, self.elements_old, self.element_rtol, r) - self.elements_old = val - return converged diff --git a/examples/a_test.py b/examples/a_test.py index fbb422e..7e40379 100644 --- a/examples/a_test.py +++ b/examples/a_test.py @@ -16,17 +16,28 @@ tt_steps = 10 setup1 = importlib.import_module(f"burgers_try.config") -meshes = [UnitSquareMesh(30, 30) for _ in range(tt_steps)] +meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] # meshes[5] = UnitSquareMesh(28, 30) -out = get_time_solutions(meshes=meshes, config=setup1) -fig, axes = plt.subplots(10,2) +# mesh, ref_mesh = MeshHierarchy(meshes[1], 1) +# print(mesh) +# print(ref_mesh) +# fig, axes = plt.subplots(2) +# triplot(mesh, axes=axes[0]) +# triplot(ref_mesh, axes=axes[1]) +# plt.show() + +out = indicate_time_errors(meshes=meshes, config=setup1) +print(out) + +# fig, axes = plt.subplots(10,2) + +# for i in range(tt_steps): +# tricontourf(out['forward'][i], axes=axes[i][0]) +# tricontourf(out['adjoint'][i], axes=axes[i][1]) -for i in range(tt_steps): - tricontourf(out['forward'][i], axes=axes[i][0]) - tricontourf(out['adjoint'][i], axes=axes[i][1]) +# plt.savefig("test1.jpg") -plt.savefig("test1.jpg") # mesh = UnitSquareMesh(30, 30) # setup2 = importlib.import_module(f"burgers.config") diff --git a/examples/models/burgers.py b/examples/models/burgers.py index 1c70683..09f988d 100644 --- a/examples/models/burgers.py +++ b/examples/models/burgers.py @@ -3,6 +3,9 @@ import nn_adapt.model import nn_adapt.solving +from firedrake_adjoint import * +from firedrake.adjoint import get_solve_blocks + class Parameters(nn_adapt.model.Parameters): """ @@ -163,25 +166,3 @@ def qoi(sol): # Initial mesh for all test cases initial_mesh = UnitSquareMesh(30, 30) - -# # A simple pretest -# a = Solver(mesh = initial_mesh, ic = 0, kwargs='0') -# b = [] -# a.iterate() -# b.append(a.solution) -# a.iterate() -# b.append(a.solution) -# a.iterate() -# b.append(a.solution) - -# import matplotlib.pyplot as plt - -# # fig, axes = plt.subplots() -# # tricontourf(b, axes=axes) - -# fig, axes = plt.subplots(3) -# tricontourf(b[0], axes=axes[0]) -# tricontourf(b[1], axes=axes[1]) -# tricontourf(b[2], axes=axes[2]) - -# plt.show() diff --git a/examples/models/burgers_try.py b/examples/models/burgers_try.py index 194739e..cd96ea8 100644 --- a/examples/models/burgers_try.py +++ b/examples/models/burgers_try.py @@ -188,7 +188,6 @@ def adjoint_setup(self): J_form = inner(self._u, self._u)*ds(2) J = assemble(J_form) - tape = get_working_tape() g = compute_gradient(J, Control(self.nu)) solve_blocks = 
get_solve_blocks() @@ -202,7 +201,6 @@ def iterate(self, **kwargs): """ Get the final timestep of Burgers equation """ - stop_annotating(); # Assign initial condition V = get_function_space(self.meshes[0]) @@ -210,7 +208,10 @@ def iterate(self, **kwargs): u = Function(V) u.project(ic) - _solutions = [u.copy(deepcopy=True)] + _solutions = [] + + tape = get_working_tape() + tape.clear_tape() # solve forward step = 0 @@ -241,7 +242,6 @@ def iterate(self, **kwargs): stop_annotating(); - def get_initial_condition(function_space): """ Compute an initial condition based on the initial @@ -269,21 +269,12 @@ def qoi(sol): # # Initial mesh for all test cases -# initial_mesh = [UnitSquareMesh(30, 30), UnitSquareMesh(50, 30)] +# initial_mesh = [UnitSquareMesh(30, 30) for i in range(parameters.tt_steps)] # # A simple pretest # a = time_dependent_Solver(meshes = initial_mesh, ic = 0, kwargs='0') # a.iterate() # b = a.solution -# import matplotlib.pyplot as plt - -# # fig, axes = plt.subplots(20) -# # for i in range(20): -# # tricontourf(b[i], axes=axes[i]) - -# fig, axes = plt.subplots(2) -# tricontourf(b[0], axes=axes[0]) -# tricontourf(b[1], axes=axes[1]) -# plt.show() +# print(b[0].function_space()) diff --git a/nn_adapt/solving_time.py b/nn_adapt/solving_time.py index da17454..8c6572c 100644 --- a/nn_adapt/solving_time.py +++ b/nn_adapt/solving_time.py @@ -14,6 +14,8 @@ import abc from time import perf_counter +import matplotlib.pyplot as plt + tm = TransferManager() @@ -69,7 +71,7 @@ def get_time_solutions( meshes, config, solve_adjoint=True, - refined_mesh=None, + refined_meshes=None, init=None, convergence_checker=None, **kwargs, @@ -98,18 +100,18 @@ def get_time_solutions( tt_steps = config.parameters.tt_steps # Solve forward problem in base space - V = [config.get_function_space(meshes[step]) for step in range(tt_steps)] + V = config.get_function_space(meshes[-1]) out = {"times": {"forward": -perf_counter()}} - # with PETSc.Log.Event("Forward solve"): - # if init is None: - # ic = config.get_initial_condition(V) - # else: - # ic = init(V) - solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - J = config.get_qoi(V[tt_steps-1])(q[tt_steps-1]) - qoi = assemble(J) + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + J = config.get_qoi(V)(q[-1]) + qoi = assemble(J) out["times"]["forward"] += perf_counter() out["qoi"] = qoi @@ -119,52 +121,47 @@ def get_time_solutions( return out if not solve_adjoint: return out - + # Solve adjoint problem in base space out["times"]["adjoint"] = -perf_counter() - # with PETSc.Log.Event("Adjoint solve"): - - adj_solution = [] - # sp = config.parameters.adjoint_solver_parameters - # tape = get_working_tape() - # nu = Constant(config.parameters.viscosity_coefficient) - # g = compute_gradient(qoi, Control(nu)) - # solve_blocks = get_solve_blocks() - - # # 'Initial condition' for both adjoint - # dJdu = assemble(derivative(J, q[-1])) - - # adj_solution = [] - dJdu, solve_blocks = solver_obj.adjoint_setup() - - for step in range(tt_steps-1): - adjoint_solution = solve_blocks[step].adj_sol - adj_solution.append(adjoint_solution) - - adj_solution.append(dJdu) + with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + adj_solution = [] + dJdu, solve_blocks = solver_obj.adjoint_setup() + + for 
step in range(tt_steps-1): + adjoint_solution = solve_blocks[step].adj_sol + adj_solution.append(adjoint_solution) + + # initial condition for adjoint solution + adj_solution.append(dJdu) out["adjoint"] = adj_solution out["times"]["adjoint"] += perf_counter() - if refined_mesh is None: + if refined_meshes is None: return out # Solve adjoint problem in enriched space out["times"]["estimator"] = -perf_counter() with PETSc.Log.Event("Enrichment"): - V = config.get_function_space(refined_mesh) + V = config.get_function_space(refined_meshes[-1]) q_plus = Function(V) - solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) - q_plus = solver_obj.solution - J = config.get_qoi(refined_mesh)(q_plus) - F = solver_obj.form - tm.prolong(q, q_plus) - q_star_plus = Function(V) - dFdq = derivative(F, q_plus, TrialFunction(V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q_plus, TestFunction(V)) - solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) - out["enriched_adjoint"] = q_star_plus + solver_obj_plus = config.time_dependent_Solver(refined_meshes, q_plus, **kwargs) + solver_obj_plus.iterate() + q_plus = solver_obj_plus.solution + # J = config.get_qoi(refined_mesh[-1])(q_plus[-1]) + adj_solution_plus = [] + dJdu_plus, solve_blocks_plus = solver_obj_plus.adjoint_setup() + + for step in range(tt_steps-1): + adjoint_solution_plus = solve_blocks_plus[step].adj_sol + adj_solution_plus.append(adjoint_solution_plus) + + adj_solution_plus.append(dJdu_plus) + + out["enriched_adjoint"] = adj_solution_plus out["times"]["estimator"] += perf_counter() + return out @@ -176,7 +173,7 @@ def split_into_components(f): return [f] if f.function_space().value_size == 1 else f.split() -def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): +def indicate_time_errors(meshes, config, enrichment_method="h", retall=False, **kwargs): """ Indicate errors according to ``dwr_indicator``, using the solver given in the configuration file. 
@@ -192,34 +189,44 @@ def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs) """ if not enrichment_method == "h": raise NotImplementedError # TODO - with PETSc.Log.Event("Enrichment"): - mesh, ref_mesh = MeshHierarchy(mesh, 1) + # with PETSc.Log.Event("Enrichment"): + mesh_list = [] + ref_mesh_list = [] + tt_steps = len(meshes) + for i in range(tt_steps): + mesh, ref_mesh = MeshHierarchy(meshes[i], 1) + mesh_list.append(mesh) + ref_mesh_list.append(ref_mesh) # Solve the forward and adjoint problems - out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) + out = get_time_solutions(meshes=mesh_list, config=config, refined_meshes=ref_mesh_list, **kwargs) if retall and "adjoint" not in out: return out out["times"]["estimator"] -= perf_counter() - with PETSc.Log.Event("Enrichment"): - adj_sol_plus = out["enriched_adjoint"] - + # with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + dwr_list = [] + + for step in range(tt_steps): # Prolong - V_plus = adj_sol_plus.function_space() + V_plus = out["enriched_adjoint"][step].function_space() fwd_sol_plg = Function(V_plus) - tm.prolong(out["forward"], fwd_sol_plg) + tm.prolong(out["forward"][step], fwd_sol_plg) adj_sol_plg = Function(V_plus) - tm.prolong(out["adjoint"], adj_sol_plg) + tm.prolong(out["adjoint"][step], adj_sol_plg) # Subtract prolonged adjoint solution from enriched version adj_error = Function(V_plus) - adj_sols_plus = split_into_components(adj_sol_plus) + adj_sols_plus = split_into_components(out["enriched_adjoint"][step]) adj_sols_plg = split_into_components(adj_sol_plg) for i, err in enumerate(split_into_components(adj_error)): err += adj_sols_plus[i] - adj_sols_plg[i] # Evaluate errors - out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) + dwr_list.append(dwr_indicator(config, mesh, fwd_sol_plg, adj_error)) + out["dwr"] = dwr_list + out["times"]["estimator"] += perf_counter() return out if retall else out["dwr"] From 631caaf1d32f5b622669df001ef269be66640c33 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Fri, 29 Jul 2022 02:44:13 +0100 Subject: [PATCH 05/13] solver_one2n(one mesh for n steps) check point --- examples/a_test.py | 22 +- .../{burgers_try => burgers_n2n}/config.py | 2 +- .../{burgers_try => burgers_n2n}/meshgen.py | 0 .../{burgers_try => burgers_n2n}/network.py | 0 .../testing_cases.txt | 0 examples/burgers_one2n/config.py | 38 +++ examples/burgers_one2n/meshgen.py | 2 + examples/burgers_one2n/network.py | 43 +++ examples/burgers_one2n/testing_cases.txt | 1 + .../models/{burgers_try.py => burgers_n2n.py} | 12 +- examples/models/burgers_one2n.py | 276 ++++++++++++++++++ nn_adapt/{solving_time.py => solving_n2n.py} | 24 +- nn_adapt/solving_one2n.py | 244 ++++++++++++++++ 13 files changed, 631 insertions(+), 33 deletions(-) rename examples/{burgers_try => burgers_n2n}/config.py (97%) rename examples/{burgers_try => burgers_n2n}/meshgen.py (100%) rename examples/{burgers_try => burgers_n2n}/network.py (100%) rename examples/{burgers_try => burgers_n2n}/testing_cases.txt (100%) create mode 100644 examples/burgers_one2n/config.py create mode 100644 examples/burgers_one2n/meshgen.py create mode 100644 examples/burgers_one2n/network.py create mode 100644 examples/burgers_one2n/testing_cases.txt rename examples/models/{burgers_try.py => burgers_n2n.py} (95%) create mode 100644 examples/models/burgers_one2n.py rename nn_adapt/{solving_time.py => solving_n2n.py} (91%) create mode 100644 nn_adapt/solving_one2n.py diff --git a/examples/a_test.py 
b/examples/a_test.py index 7e40379..aeab83f 100644 --- a/examples/a_test.py +++ b/examples/a_test.py @@ -1,7 +1,8 @@ from nn_adapt.features import * from nn_adapt.metric import * from nn_adapt.parse import Parser -from nn_adapt.solving_time import * +from nn_adapt.solving_one2n import * +from nn_adapt.solving_n2n import * from nn_adapt.solving import * from nn_adapt.utility import ConvergenceTracker from firedrake.meshadapt import adapt @@ -15,20 +16,15 @@ tt_steps = 10 -setup1 = importlib.import_module(f"burgers_try.config") +setup1 = importlib.import_module(f"burgers_n2n.config") meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] -# meshes[5] = UnitSquareMesh(28, 30) +out1 = indicate_errors_n2n(meshes=meshes, config=setup1) +print(out1) -# mesh, ref_mesh = MeshHierarchy(meshes[1], 1) -# print(mesh) -# print(ref_mesh) -# fig, axes = plt.subplots(2) -# triplot(mesh, axes=axes[0]) -# triplot(ref_mesh, axes=axes[1]) -# plt.show() - -out = indicate_time_errors(meshes=meshes, config=setup1) -print(out) +mesh = UnitSquareMesh(20, 20) +setup2 = importlib.import_module(f"burgers_one2n.config") +out2 = indicate_errors_one2n(mesh=mesh, config=setup2) +print(out2) # fig, axes = plt.subplots(10,2) diff --git a/examples/burgers_try/config.py b/examples/burgers_n2n/config.py similarity index 97% rename from examples/burgers_try/config.py rename to examples/burgers_n2n/config.py index c290e52..cff2483 100644 --- a/examples/burgers_try/config.py +++ b/examples/burgers_n2n/config.py @@ -1,4 +1,4 @@ -from models.burgers_try import * +from models.burgers_n2n import * from nn_adapt.ann import sample_uniform import numpy as np diff --git a/examples/burgers_try/meshgen.py b/examples/burgers_n2n/meshgen.py similarity index 100% rename from examples/burgers_try/meshgen.py rename to examples/burgers_n2n/meshgen.py diff --git a/examples/burgers_try/network.py b/examples/burgers_n2n/network.py similarity index 100% rename from examples/burgers_try/network.py rename to examples/burgers_n2n/network.py diff --git a/examples/burgers_try/testing_cases.txt b/examples/burgers_n2n/testing_cases.txt similarity index 100% rename from examples/burgers_try/testing_cases.txt rename to examples/burgers_n2n/testing_cases.txt diff --git a/examples/burgers_one2n/config.py b/examples/burgers_one2n/config.py new file mode 100644 index 0000000..45debeb --- /dev/null +++ b/examples/burgers_one2n/config.py @@ -0,0 +1,38 @@ +from models.burgers_one2n import * +from nn_adapt.ann import sample_uniform +import numpy as np + + +testing_cases = ["demo"] + + +def initialise(case, discrete=False): + """ + Given some training case (for which ``case`` + is an integer) or testing case (for which + ``case`` is a string), set up the physical + problems defining the Burgers problem. + + For training data, these values are chosen + randomly. 
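+
+    A sketch of typical usage (illustrative values, not recorded
+    outputs)::
+
+        initialise(2)       # training case: seeds numpy with 200
+        initialise("demo")  # testing case: nu = 0.0001, speed = 1.0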
+    """
+    parameters.case = case
+    parameters.discrete = discrete
+    if isinstance(case, int):
+        parameters.turbine_coords = []
+        np.random.seed(100 * case)
+
+        # Random initial speed from 0.01 m/s to 6 m/s
+        parameters.initial_speed = sample_uniform(0.01, 6.0)
+
+        # Random viscosity from 0.0001 m^2/s to 1 m^2/s
+        parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1)
+        return
+    elif "demo" in case:
+        parameters.viscosity_coefficient = 0.0001
+        parameters.initial_speed = 1.0
+    else:
+        raise ValueError(f"Test case {case} not recognised")
+
+    if "reversed" in case:
+        parameters.initial_speed *= -1
diff --git a/examples/burgers_one2n/meshgen.py b/examples/burgers_one2n/meshgen.py
new file mode 100644
index 0000000..3467cea
--- /dev/null
+++ b/examples/burgers_one2n/meshgen.py
@@ -0,0 +1,2 @@
+def generate_geo(config, reverse=False):
+    return
diff --git a/examples/burgers_one2n/network.py b/examples/burgers_one2n/network.py
new file mode 100644
index 0000000..b7f9907
--- /dev/null
+++ b/examples/burgers_one2n/network.py
@@ -0,0 +1,43 @@
+from nn_adapt.layout import NetLayoutBase
+
+
+class NetLayout(NetLayoutBase):
+    """
+    Default configuration
+    =====================
+
+    Input layer:
+    ------------
+        [coarse-grained DWR]
+      + [viscosity coefficient]
+      + [element size]
+      + [element orientation]
+      + [element shape]
+      + [boundary element?]
+      + [12 forward DoFs per element]
+      + [12 adjoint DoFs per element]
+      = 30
+
+    Hidden layer:
+    -------------
+
+    60 neurons
+
+    Output layer:
+    -------------
+
+    [1 error indicator value]
+    """
+
+    inputs = (
+        "estimator_coarse",
+        "physics_viscosity",
+        "mesh_d",
+        "mesh_h1",
+        "mesh_h2",
+        "mesh_bnd",
+        "forward_dofs",
+        "adjoint_dofs",
+    )
+    num_hidden_neurons = 60
+    dofs_per_element = 12
diff --git a/examples/burgers_one2n/testing_cases.txt b/examples/burgers_one2n/testing_cases.txt
new file mode 100644
index 0000000..1549b67
--- /dev/null
+++ b/examples/burgers_one2n/testing_cases.txt
@@ -0,0 +1 @@
+demo
diff --git a/examples/models/burgers_try.py b/examples/models/burgers_n2n.py
similarity index 95%
rename from examples/models/burgers_try.py
rename to examples/models/burgers_n2n.py
index cd96ea8..a21b6e7 100644
--- a/examples/models/burgers_try.py
+++ b/examples/models/burgers_n2n.py
@@ -146,7 +146,7 @@ def iterate(self, **kwargs):
         self._solver.solve()
 
 
-class time_dependent_Solver(nn_adapt.solving.Solver):
+class Solver_n2n(nn_adapt.solving.Solver):
     """
     Solver object based on current mesh and state.
""" @@ -214,8 +214,6 @@ def iterate(self, **kwargs): tape.clear_tape() # solve forward - step = 0 - for step in range(self.tt_steps): # Define P2 function space and corresponding test function V = get_function_space(self.meshes[step]) @@ -261,15 +259,13 @@ def get_qoi(mesh): """ def qoi(sol): - sol_temp = Function(mesh) - sol_temp.project(sol) - return inner(sol_temp, sol_temp) * ds(2) + return inner(sol, sol) * ds(2) return qoi -# # Initial mesh for all test cases -# initial_mesh = [UnitSquareMesh(30, 30) for i in range(parameters.tt_steps)] +# Initial mesh for all test cases +initial_mesh = [UnitSquareMesh(30, 30) for i in range(parameters.tt_steps)] # # A simple pretest diff --git a/examples/models/burgers_one2n.py b/examples/models/burgers_one2n.py new file mode 100644 index 0000000..7075421 --- /dev/null +++ b/examples/models/burgers_one2n.py @@ -0,0 +1,276 @@ +from copy import deepcopy +from firedrake import * +from firedrake.petsc import PETSc +from firedrake_adjoint import * +from firedrake.adjoint import get_solve_blocks +import nn_adapt.model +import nn_adapt.solving + +''' +A memory hungry method solving time dependent PDE. +''' + +class Parameters(nn_adapt.model.Parameters): + """ + Class encapsulating all parameters required for a simple + Burgers equation test case. + """ + + qoi_name = "right boundary integral" + qoi_unit = r"m\,s^{-1}" + + # Adaptation parameters + h_min = 1.0e-10 # Minimum metric magnitude + h_max = 1.0 # Maximum metric magnitude + + # Physical parameters + viscosity_coefficient = 0.0001 + initial_speed = 1.0 + + # Timestepping parameters + timestep = 0.05 + tt_steps = 10 + + solver_parameters = {} + adjoint_solver_parameters = {} + + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def drag(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def viscosity(self, mesh): + """ + Compute the viscosity coefficient on the current `mesh`. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(self.viscosity_coefficient) + + def ic(self, mesh): + """ + Initial condition + """ + x, y = SpatialCoordinate(mesh) + expr = self.initial_speed * sin(pi * x) + yside = self.initial_speed * sin(pi * y) + yside = 0 + return as_vector([expr, yside]) + + +PETSc.Sys.popErrorHandler() +parameters = Parameters() + + +def get_function_space(mesh): + r""" + Construct the :math:`\mathbb P2` finite element space + used for the prognostic solution. + """ + return VectorFunctionSpace(mesh, "CG", 2) + + +class Solver(nn_adapt.solving.Solver): + """ + Solver object based on current mesh and state. 
+    """
+
+    def __init__(self, mesh, ic, **kwargs):
+        """
+        :arg mesh: the mesh to define the solver on
+        :arg ic: the current state / initial condition
+        """
+        self.mesh = mesh
+
+        # Collect parameters
+        dt = Constant(parameters.timestep)
+        nu = parameters.viscosity(mesh)
+
+        # Define variational formulation
+        V = self.function_space
+        u = Function(V)
+        u_ = Function(V)
+        v = TestFunction(V)
+        self._form = (
+            inner((u - u_) / dt, v) * dx
+            + inner(dot(u, nabla_grad(u)), v) * dx
+            + nu * inner(grad(u), grad(v)) * dx
+        )
+        problem = NonlinearVariationalProblem(self._form, u)
+
+        # Set initial condition
+        u_.project(parameters.ic(mesh))
+
+        # Create solver
+        self._solver = NonlinearVariationalSolver(problem)
+        self._solution = u
+
+    @property
+    def function_space(self):
+        r"""
+        The :math:`\mathbb P2` finite element space.
+        """
+        return get_function_space(self.mesh)
+
+    @property
+    def form(self):
+        """
+        The weak form of Burgers equation
+        """
+        return self._form
+
+    @property
+    def solution(self):
+        return self._solution
+
+    def iterate(self, **kwargs):
+        """
+        Take a single timestep of Burgers equation
+        """
+        self._solver.solve()
+
+
+class Solver_one2n(nn_adapt.solving.Solver):
+    """
+    Solver object that advances Burgers equation through all
+    ``tt_steps`` timesteps on a single mesh.
+    """
+
+    def __init__(self, mesh, ic, **kwargs):
+        """
+        :arg mesh: the mesh to define the solver on
+        :arg ic: the current state / initial condition
+        """
+        self.mesh = mesh
+
+        # Collect parameters
+        self.tt_steps = parameters.tt_steps
+        dt = Constant(parameters.timestep)
+
+        # Physical parameters
+        nu = parameters.viscosity(mesh)
+        self.nu = nu
+
+        # Define variational formulation
+        V = self.function_space
+        self.u = Function(V)
+        self.u_ = Function(V)
+        self.v = TestFunction(V)
+
+        self._form = (
+            inner((self.u - self.u_) / dt, self.v) * dx
+            + inner(dot(self.u, nabla_grad(self.u)), self.v) * dx
+            + nu * inner(grad(self.u), grad(self.v)) * dx
+        )
+
+        # Define initial conditions
+        # NOTE: the ``ic`` argument is ignored; the parameterised
+        # initial condition is used instead
+        ic = parameters.ic(self.mesh)
+        self.u.project(ic)
+
+        # Set solutions
+        self._solutions = []
+
+    @property
+    def function_space(self):
+        r"""
+        The :math:`\mathbb P2` finite element space.
+        """
+        return get_function_space(self.mesh)
+
+    @property
+    def form(self):
+        """
+        The weak form of Burgers equation
+        """
+        return self._form
+
+    @property
+    def solution(self):
+        return self._solutions
+
+    @property
+    def adj_solution(self):
+        return self._adj_solution
+
+    def adjoint_iteration(self):
+        """
+        Compute the adjoint solutions of Burgers equation,
+        one per timestep, from the annotated tape
+        """
+        J_form = inner(self.u, self.u) * ds(2)
+        J = assemble(J_form)
+
+        g = compute_gradient(J, Control(self.nu))
+
+        solve_blocks = get_solve_blocks()
+
+        # 'Initial condition' for the adjoint problem
+        dJdu = assemble(derivative(J_form, self.u))
+
+        self._adj_solution = []
+        for step in range(self.tt_steps-1):
+            adj_sol = solve_blocks[step].adj_sol
+            self._adj_solution.append(adj_sol)
+        self._adj_solution.append(dJdu)
+
+    def iterate(self, **kwargs):
+        """
+        Get the forward solutions of Burgers equation
+        """
+        tape = get_working_tape()
+        tape.clear_tape()
+
+        # solve forward
+        for _ in range(self.tt_steps):
+
+            # Update the time-lagged solution
+            self.u_.project(self.u)
+
+            solve(self._form == 0, self.u)
+
+            # Store the forward solution at each timestep so it can be plotted later
+            self._solutions.append(self.u.copy(deepcopy=True))
+
+        stop_annotating()
+
+
+def get_initial_condition(function_space):
+    """
+    Compute an initial condition based on the initial
+    speed parameter.
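+
+    For example (sketch)::
+
+        V = get_function_space(UnitSquareMesh(30, 30))
+        u0 = get_initial_condition(V)  # u = sin(pi*x), v = 0 for unit speed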
+    """
+    u = Function(function_space)
+    u.interpolate(parameters.ic(function_space.mesh()))
+    return u
+
+
+def get_qoi(mesh):
+    """
+    Extract the quantity of interest function from the :class:`Parameters`
+    object.
+
+    It should have one argument - the prognostic solution.
+    """
+
+    def qoi(sol):
+        return inner(sol, sol) * ds(2)
+
+    return qoi
+
+
+# Initial mesh for all test cases
+initial_mesh = UnitSquareMesh(30, 30)
diff --git a/nn_adapt/solving_time.py b/nn_adapt/solving_n2n.py
similarity index 91%
rename from nn_adapt/solving_time.py
rename to nn_adapt/solving_n2n.py
index 8c6572c..02f59ad 100644
--- a/nn_adapt/solving_time.py
+++ b/nn_adapt/solving_n2n.py
@@ -67,7 +67,7 @@ def solution(self):
         pass
 
 
-def get_time_solutions(
+def get_solutions_n2n(
     meshes,
     config,
     solve_adjoint=True,
@@ -107,11 +107,15 @@ def get_time_solutions(
             ic = config.get_initial_condition(V)
         else:
             ic = init(V)
-        solver_obj = config.time_dependent_Solver(meshes, ic=0, **kwargs)
+        solver_obj = config.Solver_n2n(meshes, ic=0, **kwargs)
         solver_obj.iterate()
         q = solver_obj.solution
-        J = config.get_qoi(V)(q[-1])
-        qoi = assemble(J)
+        # Average the QoI over the timesteps
+        qoi = 0
+        for step in range(tt_steps):
+            J = config.get_qoi(V)(q[step])
+            qoi += assemble(J)
+        qoi = qoi / tt_steps
 
     out["times"]["forward"] += perf_counter()
     out["qoi"] = qoi
@@ -135,8 +139,7 @@ def get_time_solutions(
 
         # initial condition for adjoint solution
         adj_solution.append(dJdu)
-
-    out["adjoint"] = adj_solution
+    out["adjoint"] = adj_solution
     out["times"]["adjoint"] += perf_counter()
     if refined_meshes is None:
         return out
@@ -146,7 +149,7 @@ def get_time_solutions(
     with PETSc.Log.Event("Enrichment"):
         V = config.get_function_space(refined_meshes[-1])
         q_plus = Function(V)
-        solver_obj_plus = config.time_dependent_Solver(refined_meshes, q_plus, **kwargs)
+        solver_obj_plus = config.Solver_n2n(refined_meshes, q_plus, **kwargs)
         solver_obj_plus.iterate()
         q_plus = solver_obj_plus.solution
         # J = config.get_qoi(refined_mesh[-1])(q_plus[-1])
@@ -158,8 +161,7 @@ def get_time_solutions(
             adj_solution_plus.append(adjoint_solution_plus)
 
         adj_solution_plus.append(dJdu_plus)
-
-    out["enriched_adjoint"] = adj_solution_plus
+    out["enriched_adjoint"] = adj_solution_plus
     out["times"]["estimator"] += perf_counter()
 
     return out
@@ -173,7 +175,7 @@ def split_into_components(f):
     return [f] if f.function_space().value_size == 1 else f.split()
 
 
-def indicate_time_errors(meshes, config, enrichment_method="h", retall=False, **kwargs):
+def indicate_errors_n2n(meshes, config, enrichment_method="h", retall=False, **kwargs):
     """
     Indicate errors according to ``dwr_indicator``,
     using the solver given in the configuration file.
@@ -199,7 +201,7 @@ def indicate_time_errors(meshes, config, enrichment_method="h", retall=False, **
         ref_mesh_list.append(ref_mesh)
 
     # Solve the forward and adjoint problems
-    out = get_time_solutions(meshes=mesh_list, config=config, refined_meshes=ref_mesh_list, **kwargs)
+    out = get_solutions_n2n(meshes=mesh_list, config=config, refined_meshes=ref_mesh_list, **kwargs)
     if retall and "adjoint" not in out:
         return out
 
diff --git a/nn_adapt/solving_one2n.py b/nn_adapt/solving_one2n.py
new file mode 100644
index 0000000..2c2bf74
--- /dev/null
+++ b/nn_adapt/solving_one2n.py
@@ -0,0 +1,244 @@
+"""
+Time dependent goal-oriented error estimation.
+
+Functions for solving problems defined by configuration
+files and performing goal-oriented error estimation.
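+
+In this "one2n" variant a single mesh is used for all ``tt_steps``
+timesteps (cf. ``solving_n2n``, which pairs one mesh with each
+timestep).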
+""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from firedrake_adjoint import * +from firedrake.adjoint import get_solve_blocks +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + +import matplotlib.pyplot as plt + + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. + """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def get_solutions_one2n( + mesh, + config, + solve_adjoint=True, + refined_mesh=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. + Trying to work it out. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? + :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + tt_steps = config.parameters.tt_steps + + # Solve forward problem in base space + V = config.get_function_space(mesh) + out = {"times": {"forward": -perf_counter()}} + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.Solver_one2n(mesh, ic, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + # Calculate QoI + qoi = 0 + for step in range(tt_steps): + J = config.get_qoi(V)(q[step]) + qoi += assemble(J) + qoi = qoi / tt_steps + + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + if not convergence_checker.check_qoi(qoi): + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + with PETSc.Log.Event("Adjoint solve"): + solver_obj.adjoint_iteration() + out["adjoint"] = solver_obj.adj_solution + + out["times"]["adjoint"] += perf_counter() + if refined_mesh is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_mesh) + q_plus = Function(V) + solver_obj_plus = config.Solver_one2n(refined_mesh, q_plus, **kwargs) + solver_obj_plus.iterate() + q_plus = solver_obj_plus.solution + # J = config.get_qoi(refined_mesh[-1])(q_plus[-1]) + adj_solution_plus = [] + solver_obj_plus.adjoint_iteration() + adj_solution_plus = solver_obj_plus.adj_solution + out["enriched_adjoint"] = adj_solution_plus + + out["times"]["estimator"] += perf_counter() + + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed 
:class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors_one2n(mesh, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? + :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + with PETSc.Log.Event("Enrichment"): + tt_steps = config.parameters.tt_steps + mesh, ref_mesh = MeshHierarchy(mesh, 1) + + # Solve the forward and adjoint problems + out = get_solutions_one2n(mesh=mesh, config=config, refined_mesh=ref_mesh, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + + with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + dwr = 0 + dwr_list = [] + + for step in range(tt_steps): + # Prolong + V_plus = adj_sol_plus[step].function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"][step], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"][step], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(adj_sol_plus[step]) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + dwr += dwr_indicator(config, mesh, fwd_sol_plg, adj_error) + dwr_list.append(dwr_indicator(config, mesh, fwd_sol_plg, adj_error)) + + out["dwr"] = dwr_list + + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. 
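+
+    Loosely, each element value approximates the residual of the
+    weak form ``F`` weighted by the adjoint error ``q_star`` (the
+    enriched adjoint minus its prolonged counterpart), in the spirit
+    of the usual dual-weighted residual estimate; this description
+    is a sketch rather than a formal statement.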
+ + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr From 4f1a3a6371609da682140aafba815d4de2396ed7 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Fri, 5 Aug 2022 04:27:47 +0100 Subject: [PATCH 06/13] one2n and n2n examples, also some changes to nn_adapt and adaptation --- .DS_Store | Bin 0 -> 6148 bytes adaptation_n2n/.DS_Store | Bin 0 -> 6148 bytes adaptation_n2n/a_test.py | 36 ++ .../burgers_n2n/config.py | 0 .../burgers_n2n/meshgen.py | 0 .../burgers_n2n/network.py | 0 .../burgers_n2n/testing_cases.txt | 0 adaptation_n2n/compute_importance.py | 102 ++++++ adaptation_n2n/makefile | 319 ++++++++++++++++++ adaptation_n2n/meshgen.py | 34 ++ .../models/burgers_n2n.py | 0 adaptation_n2n/plot_config.py | 65 ++++ adaptation_n2n/plot_convergence.py | 179 ++++++++++ adaptation_n2n/plot_importance.py | 75 ++++ adaptation_n2n/plot_progress.py | 48 +++ adaptation_n2n/plot_timings.py | 72 ++++ adaptation_n2n/run_adapt.py | 153 +++++++++ adaptation_n2n/run_adapt_ml.py | 167 +++++++++ adaptation_n2n/run_adaptation_loop.py | 154 +++++++++ adaptation_n2n/run_adaptation_loop_ml.py | 195 +++++++++++ adaptation_n2n/run_fixed_mesh.py | 48 +++ adaptation_n2n/run_uniform_refinement.py | 76 +++++ adaptation_n2n/test_and_train.py | 266 +++++++++++++++ adaptation_one2n/.DS_Store | Bin 0 -> 6148 bytes adaptation_one2n/a_test.py | 36 ++ adaptation_one2n/burgers_one2n/config.py | 160 +++++++++ .../burgers_one2n/meshgen.py | 0 .../burgers_one2n/network.py | 0 .../burgers_one2n/testing_cases.txt | 0 adaptation_one2n/compute_importance.py | 102 ++++++ adaptation_one2n/makefile | 319 ++++++++++++++++++ adaptation_one2n/meshgen.py | 34 ++ .../models/burgers_one2n.py | 0 adaptation_one2n/plot_config.py | 65 ++++ adaptation_one2n/plot_convergence.py | 179 ++++++++++ adaptation_one2n/plot_importance.py | 75 ++++ adaptation_one2n/plot_progress.py | 48 +++ adaptation_one2n/plot_timings.py | 72 ++++ adaptation_one2n/run_adapt.py | 149 ++++++++ adaptation_one2n/run_adapt_ml.py | 167 +++++++++ adaptation_one2n/run_adaptation_loop.py | 154 +++++++++ adaptation_one2n/run_adaptation_loop_ml.py | 195 +++++++++++ adaptation_one2n/run_fixed_mesh.py | 48 +++ adaptation_one2n/run_uniform_refinement.py | 76 +++++ adaptation_one2n/test_and_train.py | 266 +++++++++++++++ examples/.DS_Store | Bin 0 -> 6148 bytes examples/a_test.py | 46 --- examples/burgers/meshgen.py | 99 +++++- examples/burgers_one2n/config.py | 38 --- examples/makefile | 2 +- examples/models/.DS_Store | Bin 0 -> 6148 bytes examples/models/burgers.py | 8 + examples/run_adapt.py | 7 - nn_adapt/features.py | 9 + nn_adapt/metric_one2n.py | 112 ++++++ nn_adapt/solving_n2n.py | 4 - nn_adapt/solving_one2n.py | 24 +- 57 files changed, 4375 insertions(+), 108 deletions(-) create mode 100644 .DS_Store create mode 100644 adaptation_n2n/.DS_Store create mode 100644 adaptation_n2n/a_test.py rename {examples => adaptation_n2n}/burgers_n2n/config.py (100%) rename {examples => adaptation_n2n}/burgers_n2n/meshgen.py (100%) rename {examples => adaptation_n2n}/burgers_n2n/network.py (100%) rename {examples => 
adaptation_n2n}/burgers_n2n/testing_cases.txt (100%) create mode 100644 adaptation_n2n/compute_importance.py create mode 100644 adaptation_n2n/makefile create mode 100644 adaptation_n2n/meshgen.py rename {examples => adaptation_n2n}/models/burgers_n2n.py (100%) create mode 100644 adaptation_n2n/plot_config.py create mode 100644 adaptation_n2n/plot_convergence.py create mode 100644 adaptation_n2n/plot_importance.py create mode 100644 adaptation_n2n/plot_progress.py create mode 100644 adaptation_n2n/plot_timings.py create mode 100644 adaptation_n2n/run_adapt.py create mode 100644 adaptation_n2n/run_adapt_ml.py create mode 100644 adaptation_n2n/run_adaptation_loop.py create mode 100644 adaptation_n2n/run_adaptation_loop_ml.py create mode 100644 adaptation_n2n/run_fixed_mesh.py create mode 100644 adaptation_n2n/run_uniform_refinement.py create mode 100644 adaptation_n2n/test_and_train.py create mode 100644 adaptation_one2n/.DS_Store create mode 100644 adaptation_one2n/a_test.py create mode 100644 adaptation_one2n/burgers_one2n/config.py rename {examples => adaptation_one2n}/burgers_one2n/meshgen.py (100%) rename {examples => adaptation_one2n}/burgers_one2n/network.py (100%) rename {examples => adaptation_one2n}/burgers_one2n/testing_cases.txt (100%) create mode 100644 adaptation_one2n/compute_importance.py create mode 100644 adaptation_one2n/makefile create mode 100644 adaptation_one2n/meshgen.py rename {examples => adaptation_one2n}/models/burgers_one2n.py (100%) create mode 100644 adaptation_one2n/plot_config.py create mode 100644 adaptation_one2n/plot_convergence.py create mode 100644 adaptation_one2n/plot_importance.py create mode 100644 adaptation_one2n/plot_progress.py create mode 100644 adaptation_one2n/plot_timings.py create mode 100644 adaptation_one2n/run_adapt.py create mode 100644 adaptation_one2n/run_adapt_ml.py create mode 100644 adaptation_one2n/run_adaptation_loop.py create mode 100644 adaptation_one2n/run_adaptation_loop_ml.py create mode 100644 adaptation_one2n/run_fixed_mesh.py create mode 100644 adaptation_one2n/run_uniform_refinement.py create mode 100644 adaptation_one2n/test_and_train.py create mode 100644 examples/.DS_Store delete mode 100644 examples/a_test.py delete mode 100644 examples/burgers_one2n/config.py create mode 100644 examples/models/.DS_Store create mode 100644 nn_adapt/metric_one2n.py diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..47e942be35914e107650bf0d163d4d66d15d1344 GIT binary patch literal 6148 zcmeHKK~BR!475uRK}aY^j{Al3f=~r<us{{DB|9kq7Vr{=}8>+GQh{un`h$tnZ1S&Y#$FM(dk{X=vo&J=xg2AqL)2KMwamHL0$-~X=%`IR%^4E!qwc$iP} zF;cSH+DT4oZGc`uMZ~XBT!vs0OEG+<6dysOz#e1*%nTbvSRno*5NYti8TeHOJ^@oS BW(WWP literal 0 HcmV?d00001 diff --git a/adaptation_n2n/.DS_Store b/adaptation_n2n/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..6a931b9e14b9dd13623164089ed4b57dd30d02b5 GIT binary patch literal 6148 zcmeHKy-EZz5S}?#Sg^U$GNtXkK|I#h=4wIC6)kqjHP`0_d@CQ!Z+;XP1A>K|G6Tss z$;{;ETQBMyo)VTtZ@!V|J{Dut3;CffmZ1Vz7myKe=Ca ztcDg&?8yiF%+BdmsXK8tj5-(y29^w*S#vG-{|$bbW|F@QiB>QW4E!?&xEprE z7Mt?B^~?6;u1zTSC?eulMS(zHJOa>=bL3KuW>2ESFFRI48AbXv92gG)B_yg~;1?Kp E1KXJ=KL7v# literal 0 HcmV?d00001 diff --git a/adaptation_n2n/a_test.py b/adaptation_n2n/a_test.py new file mode 100644 index 0000000..f3281b2 --- /dev/null +++ b/adaptation_n2n/a_test.py @@ -0,0 +1,36 @@ +from nn_adapt.features import * +from nn_adapt.features import extract_array +from nn_adapt.metric import * +from nn_adapt.parse import Parser +from 
nn_adapt.solving_one2n import * +from nn_adapt.solving_n2n import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt +from firedrake.petsc import PETSc + +import importlib +import numpy as np + +tt_steps = 10 + +# setup1 = importlib.import_module(f"burgers_n2n.config") +# meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] +# out1 = indicate_errors_n2n(meshes=meshes, config=setup1) +# print(out1) + +# mesh = UnitSquareMesh(20, 20) +# setup2 = importlib.import_module(f"burgers_one2n.config") +# out2 = indicate_errors_one2n(mesh=mesh, config=setup2) +# print(out2) + +# mesh = UnitSquareMesh(20, 20) +# setup2 = importlib.import_module(f"burgers_one2n.config") +# out2 = get_solutions_one2n(mesh=mesh, config=setup2) +# test_array = time_integrate(out2["forward"]) +# print(extract_array(test_array, centroid=True)) + +a = None +b = 1 +print(a or b) + diff --git a/examples/burgers_n2n/config.py b/adaptation_n2n/burgers_n2n/config.py similarity index 100% rename from examples/burgers_n2n/config.py rename to adaptation_n2n/burgers_n2n/config.py diff --git a/examples/burgers_n2n/meshgen.py b/adaptation_n2n/burgers_n2n/meshgen.py similarity index 100% rename from examples/burgers_n2n/meshgen.py rename to adaptation_n2n/burgers_n2n/meshgen.py diff --git a/examples/burgers_n2n/network.py b/adaptation_n2n/burgers_n2n/network.py similarity index 100% rename from examples/burgers_n2n/network.py rename to adaptation_n2n/burgers_n2n/network.py diff --git a/examples/burgers_n2n/testing_cases.txt b/adaptation_n2n/burgers_n2n/testing_cases.txt similarity index 100% rename from examples/burgers_n2n/testing_cases.txt rename to adaptation_n2n/burgers_n2n/testing_cases.txt diff --git a/adaptation_n2n/compute_importance.py b/adaptation_n2n/compute_importance.py new file mode 100644 index 0000000..3e2bf4c --- /dev/null +++ b/adaptation_n2n/compute_importance.py @@ -0,0 +1,102 @@ +""" +Compute the sensitivities of a network trained on a +particular ``model`` to its input parameters. 
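+
+Roughly: the gradient of the summed network outputs with respect
+to each input feature is averaged over the training data and
+scaled by the mean magnitude of that feature.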
+""" +from nn_adapt.ann import * +from nn_adapt.parse import argparse, positive_int +from nn_adapt.plotting import * + +import git +import importlib +import numpy as np + + +# Parse model +parser = argparse.ArgumentParser( + prog="compute_importance.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine"], +) +parser.add_argument( + "num_training_cases", + help="The number of training cases", + type=positive_int, +) +parser.add_argument( + "-a", + "--approaches", + nargs="+", + help="Adaptive approaches to consider", + choices=["isotropic", "anisotropic"], + default=["anisotropic"], +) +parser.add_argument( + "--adaptation_steps", + help="Steps to learn from", + type=positive_int, + default=3, +) +parser.add_argument( + "--preproc", + help="Data preprocess function", + type=str, + choices=["none", "arctan", "tanh", "logabs"], + default="arctan", +) +parser.add_argument( + "--tag", + help="Model tag (defaults to current git commit sha)", + default=git.Repo(search_parent_directories=True).head.object.hexsha, +) +parsed_args = parser.parse_args() +model = parsed_args.model +preproc = parsed_args.preproc +tag = parsed_args.tag + +# Load the model +layout = importlib.import_module(f"{model}.network").NetLayout() +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) +nn.eval() +loss_fn = Loss() + +# Compute (averaged) sensitivities of the network to the inputs +dJdm = torch.zeros(layout.num_inputs) +data_dir = f"{model}/data" +approaches = parsed_args.approaches +values = np.zeros((0, layout.num_inputs)) +for step in range(parsed_args.adaptation_steps): + for approach in approaches: + for test_case in range(1, parsed_args.num_training_cases + 1): + if test_case == 1 and approach != approaches[0]: + continue + suffix = f"{test_case}_GO{approach}_{step}" + + # Load some data and mark inputs as independent + data = { + key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") + for key in layout.inputs + } + features = collect_features(data, layout) + values = np.vstack((values, features)) + features = torch.from_numpy(features).type(torch.float32) + features.requires_grad_(True) + + # Run the model and sum the outputs + out = nn(features).sum(axis=0) + + # Backpropagate to get the gradient of the outputs w.r.t. 
the inputs
            out.backward()
            dJdm += features.grad.mean(axis=0)

# Compute representative values for each parameter
dm = np.abs(np.mean(values, axis=0))

# Multiply by the variability
sensitivity = dJdm.abs().detach().numpy() * dm
np.save(f"{model}/data/sensitivities_{tag}.npy", sensitivity)
diff --git a/adaptation_n2n/makefile b/adaptation_n2n/makefile
new file mode 100644
index 0000000..d26d52a
--- /dev/null
+++ b/adaptation_n2n/makefile
@@ -0,0 +1,319 @@
+all: setup network test
+
+# --- Configurable parameters
+
+APPROACHES = anisotropic
+MODEL = burgers_n2n
+NUM_TRAINING_CASES = 1
+TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt)
+PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1
+TAG = all
+
+# --- Parameters that should not need modifying
+
+TRAINING_CASES = $(shell seq 1 ${NUM_TRAINING_CASES})
+CASES = ${TRAINING_CASES} ${TESTING_CASES}
+
+# --- Setup directories and meshes
+
+setup: dir mesh plot_config
+
+# Create the directory structure
+# ==============================
+#
+# $(MODEL)
+#    ├── data
+#    ├── outputs
+#    │    └── $(TESTING_CASES)
+#    └── plots
+dir:
+	mkdir -p $(MODEL)/data
+	mkdir -p $(MODEL)/outputs
+	mkdir -p $(MODEL)/plots
+	for case in $(TESTING_CASES); do \
+		mkdir -p $(MODEL)/outputs/$$case; \
+	done
+
+# Generate meshes
+# ===============
+#
+# Meshes are generated for all training and testing cases.
+# * First, a gmsh geometry file is generated using the
+#   `meshgen.py` script. The definitions of these cases
+#   are based on the contents of $(MODEL)/config.py.
+#   For the `turbine` case, the training data is generated
+#   randomly.
+# * Then the geometry files are used to construct meshes
+#   in the .msh format.
+#
+# Gmsh is set to use the "pack" algorithm, which means that
+# the initial meshes are quasi-uniform. That is, they are as
+# close to uniform as they can be, given that the turbines
+# are to be explicitly meshed.
mesh:
+	touch timing.log
+	d=$$(date +%s) && \
+	for case in $(CASES); do \
+		python3 meshgen.py $(MODEL) $$case; \
+		if [ -e $(MODEL)/meshes/$$case.geo ]; then \
+			gmsh -2 -algo pack $(MODEL)/meshes/$$case.geo -o $(MODEL)/meshes/$$case.msh; \
+		fi; \
+	done && \
+	date >> timing.log && \
+	git log -n 1 --oneline >> timing.log && \
+	echo "Meshes built in $$(($$(date +%s)-d)) seconds" >> timing.log
+
+# Plot configurations
+# ===================
+#
+# Plot the configurations for a subset of the training cases
+# and the testing cases that are listed in $(MODEL)/config.py.
+# The domain geometry and turbine locations are shown, along
+# with the physical parameters used.
+plot_config:
+	python3 plot_config.py $(MODEL) 'train'
+	python3 plot_config.py $(MODEL) 'test'
+
+# Clean the model directory
+# =========================
+#
+# Delete all logs, data, outputs, plots and compiled code associated
+# with the model. Note that this is a very destructive thing to do!
+clean:
+	rm -rf timing.log
+	rm -rf $(MODEL)/data
+	rm -rf $(MODEL)/outputs
+	rm -rf $(MODEL)/plots
+	rm -rf $(MODEL)/__pycache__
+
+# --- Construct the neural network
+
+network: features train plot_progress plot_importance
+
+# Generate feature data
+# =====================
+#
+# This involves applying mesh adaptation to all of the cases in the
+# training data. In each case, feature data and "target" error indicator
+# data are extracted and saved to file.
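+#
+# For a single training case this amounts to an invocation of the
+# form (illustrative, with case number 1):
+#
+#   python3 run_adapt.py $(MODEL) 1 -a anisotropic --no_outputs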
+features: + touch timing.log + d=$$(date +%s) && \ + for case in $(TRAINING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adapt.py $(MODEL) $$case -a $$approach --no_outputs $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Features generated in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Train the network +# ================= +# +# Train a neural network based on the feature and target data that has +# been saved to file, for a specified number of training cases. The +# network is tagged (using the environment variable $(TAG)) to distinguish +# the model and its outputs. +train: + touch timing.log + d=$$(date +%s) && \ + python3 test_and_train.py -m $(MODEL) -n $(NUM_TRAINING_CASES) --tag $(TAG) && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Training completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ + echo "" >> timing.log + +# Plot loss functions +# =================== +# +# Once the network has been trained, plot the training and validation loss +# curves against iteration count. +plot_progress: + python3 plot_progress.py $(MODEL) --tag $(TAG) + +# Feature importance experiment +# ============================= +# +# Perform an experiment that tests how sensitive the trained network is to +# each of its inputs (i.e. the features). If it is particularly sensitive to +# one of the features then we deduce that the feature is in some sense +# "important" to the network. +plot_importance: + python3 compute_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) + python3 plot_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) + +# --- Test the neural network + +test: snapshot_go snapshot_ml uniform go ml plot_convergence + +# Apply goal-oriented adaptation to the test cases +# ================================================ +# +# Apply goal-oriented mesh adaptation to the testing cases, thereby +# generating lots of output data in Paraview format. These include +# the meshes, solution fields, error indicators and metrics. +snapshot_go: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adapt.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Goal-oriented snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Apply data-driven adaptation to the test cases +# ============================================== +# +# Apply data-driven adaptation based on the trained network to the testing +# cases, thereby generating lots of output data in Paraview format. These +# include the meshes, solution fields, error indicators and metrics. +snapshot_ml: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adapt_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Data-driven snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Convergence analysis for uniform refinement +# =========================================== +# +# Run the model on a sequence of uniformly refined meshes. 
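+#
+# Per testing case this is a single call of the form (illustrative):
+#
+#   python3 run_uniform_refinement.py $(MODEL) demo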
+uniform: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_uniform_refinement.py $(MODEL) $$case; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Uniform refinement completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Convergence analysis for goal-oriented adaptation +# ================================================= +# +# Run the model with the standard goal-oriented approach for +# a range of target metric complexities. +go: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adaptation_loop.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Goal-oriented adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Convergence analysis for data-driven adaptation +# =============================================== +# +# Run the model with the data-driven approach based on the +# trained network for a range of target metric complexities. +ml: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adaptation_loop_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Data-driven adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Plot convergence curves +# ======================= +# +# Plot the data points generated during the `uniform`, `go` and +# `ml` recipes and annotate with lines of best fit, where appropriate. +plot_convergence: + for case in $(TESTING_CASES); do \ + python3 plot_convergence.py $(MODEL) $$case --tag $(TAG); \ + done + +# --- Profiling experiments + +# NOTE: The following recipes are somewhat redundant. Similar information +# can be obtained from the outputs of the `uniform`, `go` and `ml` +# recipes by running `plot_timings.py` with the appropriate input +# parameters. + +# Profiling for uniform refinement +# ================================ +# +# Run the model on a fine fixed mesh generated by refining the initial +# mesh four times and output the PETSc logging information in a format +# that can be then turned into a flamegraph. +profile_uni: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_fixed_mesh.py $(MODEL) $$case --optimise --num_refinements 4 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Uniform refinement profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ + echo "" >> timing.log + for case in $(TESTING_CASES); do \ + flamegraph.pl --title "Uniform refinement ($$case)" logview.txt > $(MODEL)/outputs/$$case/uni.svg && \ + rm logview.txt; \ + done + +# Profiling for goal-oriented adaptation +# ====================================== +# +# Run the model using the standard goal-oriented approach with a fairly +# high target metric complexity of 64,000 and output the PETSc logging +# information in a format that can be then turned into a flamegraph. 
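+#
+# As with profile_uni, the log is written via
+# `-log_view :logview.txt:ascii_flamegraph` during the run and then
+# rendered with flamegraph.pl, as in the recipe body below.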
+profile_go: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_adapt.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Goal-oriented adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + for case in $(TESTING_CASES); do \ + flamegraph.pl --title "Goal-oriented adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/go.svg && \ + rm logview.txt; \ + done + +# Profiling for data-driven adaptation +# ==================================== +# +# Run the model using the data-driven adaptation approach based on the +# trained network with a fairly high target metric complexity of 64,000 +# and output the PETSc logging information in a format that can be then +# turned into a flamegraph. +profile_ml: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_adapt_ml.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) --tag all -log_view :logview.txt:ascii_flamegraph; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Data-driven adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + for case in $(TESTING_CASES); do \ + flamegraph.pl --title "Data-driven adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/ml.svg && \ + rm logview.txt; \ + done diff --git a/adaptation_n2n/meshgen.py b/adaptation_n2n/meshgen.py new file mode 100644 index 0000000..1067e7c --- /dev/null +++ b/adaptation_n2n/meshgen.py @@ -0,0 +1,34 @@ +""" +Generate the mesh for configuration ``case`` +of a given ``model``. +""" +import argparse +import importlib +import sys + + +# Parse for test case +parser = argparse.ArgumentParser(prog="meshgen.py") +parser.add_argument("model", help="The model") +parser.add_argument("case", help="The configuration file name") +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +reverse = False +try: + case = int(parsed_args.case) + assert case > 0 +except ValueError: + case = parsed_args.case + reverse = "reversed" in case + +# Load setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(case) +meshgen = importlib.import_module(f"{model}.meshgen") + +# Write geometry file +code = meshgen.generate_geo(setup, reverse=reverse) +if code is None: + sys.exit(0) +with open(f"{model}/meshes/{case}.geo", "w+") as meshfile: + meshfile.write(code) diff --git a/examples/models/burgers_n2n.py b/adaptation_n2n/models/burgers_n2n.py similarity index 100% rename from examples/models/burgers_n2n.py rename to adaptation_n2n/models/burgers_n2n.py diff --git a/adaptation_n2n/plot_config.py b/adaptation_n2n/plot_config.py new file mode 100644 index 0000000..159b4d7 --- /dev/null +++ b/adaptation_n2n/plot_config.py @@ -0,0 +1,65 @@ +""" +Plot the problem configurations for a given ``model``. +The ``mode`` is chosen from 'train' and 'test'. 
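+For training cases, the panel grid is controlled by ``--num_rows``
+and ``--num_cols``.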
+""" +from firedrake import Mesh +from nn_adapt.parse import argparse, positive_int +from nn_adapt.plotting import * + +import importlib + + +# Parse model +parser = argparse.ArgumentParser( + prog="plot_config.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine"], +) +parser.add_argument( + "mode", + help="Training or testing?", + type=str, + choices=["train", "test"], +) +parser.add_argument( + "--num_cols", + help="Number of columns in the plot", + type=positive_int, + default=4, +) +parser.add_argument( + "--num_rows", + help="Number of rows in the plot", + type=positive_int, + default=4, +) +parsed_args = parser.parse_args() +model = parsed_args.model +mode = parsed_args.mode +setup = importlib.import_module(f"{model}.config") +cases = setup.testing_cases +ncols = parsed_args.num_cols +if mode == "test": + ncols = len(cases) +nrows = parsed_args.num_rows +if mode == "test": + nrows = 1 +N = ncols * nrows +if mode == "train": + cases = range(1, N + 1) +p = importlib.import_module(f"{model}.plotting") + +# Plot all configurations +fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(3 * ncols, 1.5 * nrows)) +for i, case in enumerate(cases): + ax = axes[i] if nrows == 1 else axes[i // ncols, i % nrows] + setup.initialise(case, discrete=True) + mesh = Mesh(f"{model}/meshes/{case}.msh") + p.plot_config(setup, mesh, ax) +plt.tight_layout() +plt.savefig(f"{model}/plots/{mode}_config.pdf") diff --git a/adaptation_n2n/plot_convergence.py b/adaptation_n2n/plot_convergence.py new file mode 100644 index 0000000..d8e2255 --- /dev/null +++ b/adaptation_n2n/plot_convergence.py @@ -0,0 +1,179 @@ +""" +Plot QoI convergence curves under uniform refinement, +goal-oriented mesh adaptation and data-driven mesh +adaptation, for a given ``test_case`` and ``model``. 
+""" +from nn_adapt.parse import Parser +from nn_adapt.plotting import * + +import importlib +from matplotlib.ticker import FormatStrFormatter +import numpy as np +import os +import sys + + +# Parse user input +parser = Parser("plot_convergence.py") +parser.parse_tag() +parsed_args = parser.parse_args() +model = parsed_args.model +test_case = parsed_args.test_case +tag = parsed_args.tag + +# Formatting +matplotlib.rcParams["font.size"] = 20 +approaches = { + "uniform": { + "label": "Uniform refinement", + "color": "cornflowerblue", + "marker": "x", + "linestyle": "-", + }, + "GOanisotropic": { + "label": "Goal-oriented adaptation", + "color": "orange", + "marker": "o", + "linestyle": "-", + }, + "MLanisotropic": { + "label": "Data-driven adaptation", + "color": "g", + "marker": "^", + "linestyle": "-", + }, +} +xlim = { + "dofs": [3.0e03, 3.0e06], + "times": [1.0e0, 2.0e03], +} + +# Load configuration +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +qoi_name = setup.parameters.qoi_name.capitalize() + +# Load outputs +dofs, qois, times, niter = {}, {}, {}, {} +for approach in approaches.copy(): + ext = f"_{tag}" if approach[:2] == "ML" else "" + try: + dofs[approach] = np.load(f"{model}/data/dofs_{approach}_{test_case}{ext}.npy") + qois[approach] = np.load(f"{model}/data/qois_{approach}_{test_case}{ext}.npy") + times[approach] = np.load(f"{model}/data/times_all_{approach}_{test_case}{ext}.npy") + niter[approach] = np.load(f"{model}/data/niter_{approach}_{test_case}{ext}.npy") + print(f"Iteration count for {approach}: {niter[approach]}") + except IOError: + print(f"Cannot load {approach} data for test case {test_case}") + approaches.pop(approach) + continue +if len(approaches.keys()) == 0: + print("Nothing to plot.") + sys.exit(0) + +# Drop first iteration because timings include compilation # FIXME: Why? 
+dofs["uniform"] = dofs["uniform"][1:] +qois["uniform"] = qois["uniform"][1:] +times["uniform"] = times["uniform"][1:] +niter["uniform"] = niter["uniform"][1:] + +# Plot QoI curves against DoF count +fig, axes = plt.subplots() +start = max(np.load(f"{model}/data/qois_uniform_{test_case}.npy")) +conv = np.load(f"{model}/data/qois_uniform_{test_case}.npy")[-1] +axes.hlines(conv, *xlim["dofs"], "k", label="Converged QoI") +for approach, metadata in approaches.items(): + axes.semilogx(dofs[approach], qois[approach], **metadata) +axes.set_xlim(xlim["dofs"]) +if test_case in ["aligned", "offset"]: + axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) +axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) +axes.set_xlabel("DoF count") +axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + r"}$)") +axes.grid(True) +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_vs_dofs_{test_case}_{tag}.pdf") + +# Plot QoI curves against CPU time +fig, axes = plt.subplots() +axes.hlines(conv, *xlim["times"], "k", label="Converged QoI") +for approach, metadata in approaches.items(): + axes.semilogx(times[approach], qois[approach], **metadata) + for n, t, q in zip(niter[approach], times[approach], qois[approach]): + axes.annotate(str(n), (1.1 * t, q), color=metadata["color"], fontsize=14) +axes.set_xlim(xlim["times"]) +if test_case in ["aligned", "offset"]: + axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) +axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) +axes.set_xlabel(r"CPU time ($\mathrm{s}$)") +axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + "}$)") +axes.grid(True) +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_vs_cputime_{test_case}_{tag}.pdf") +plt.close() + +# Plot CPU time curves against DoF count +fig, axes = plt.subplots() +for approach, metadata in approaches.items(): + axes.loglog(dofs[approach], times[approach], **metadata) + for n, t, d in zip(niter[approach], times[approach], dofs[approach]): + axes.annotate(str(n), (1.1 * d, t), color=metadata["color"], fontsize=14) +axes.set_xlabel("DoF count") +axes.set_ylabel(r"CPU time ($\mathrm{s}$)") +axes.set_xlim(xlim["dofs"]) +axes.set_ylim(xlim["times"]) +axes.grid(True, which="both") +plt.tight_layout() +plt.savefig(f"{model}/plots/cputime_vs_dofs_{test_case}_{tag}.pdf") +plt.close() + +qois["uniform"] = qois["uniform"][:-1] +dofs["uniform"] = dofs["uniform"][:-1] +times["uniform"] = times["uniform"][:-1] + +# Plot QoI error curves against DoF count +errors = {} +fig, axes = plt.subplots() +for approach, metadata in approaches.items(): + errors[approach] = np.abs((qois[approach] - conv) / conv) + x, y = dofs[approach], errors[approach] + a, b = np.polyfit(np.log(x), np.log(y), 1) + print(f"QoI error vs. 
DoFs {approach}: gradient {a:.2f}") + axes.scatter(x, y, **metadata) + axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) +axes.set_xlabel("DoF count") +axes.set_ylabel(r"QoI error ($\%$)") +axes.grid(True, which="both") +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_error_vs_dofs_{test_case}_{tag}.pdf") +plt.close() + +# Plot legend +fname = f"{model}/plots/legend.pdf" +if not os.path.exists(fname): + fig2, axes2 = plt.subplots() + lines, labels = axes.get_legend_handles_labels() + legend = axes2.legend(lines, labels, frameon=False, ncol=3) + fig2.canvas.draw() + axes2.set_axis_off() + bbox = legend.get_window_extent().transformed(fig2.dpi_scale_trans.inverted()) + plt.savefig(fname, bbox_inches=bbox) + +# Plot QoI error curves against CPU time +fig, axes = plt.subplots() +for approach, metadata in approaches.items(): + x, y = times[approach], errors[approach] + if approach == "uniform": + a, b = np.polyfit(np.log(x), np.log(y), 1) + print(f"QoI error vs. time {approach}: gradient {a:.2f}") + axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) + axes.scatter(x, y, **metadata) + for n, t, e in zip(niter[approach], x, errors[approach]): + axes.annotate(str(n), (1.1 * t, e), color=metadata["color"], fontsize=14) +axes.set_xlabel(r"CPU time ($\mathrm{s}$)") +axes.set_ylabel(r"QoI error ($\%$)") +axes.grid(True, which="both") +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_error_vs_cputime_{test_case}_{tag}.pdf") +plt.close() diff --git a/adaptation_n2n/plot_importance.py b/adaptation_n2n/plot_importance.py new file mode 100644 index 0000000..596d80f --- /dev/null +++ b/adaptation_n2n/plot_importance.py @@ -0,0 +1,75 @@ +""" +Plot the sensitivities of a network trained on a +particular ``model`` to its input parameters. +""" +from nn_adapt.parse import argparse, positive_int +from nn_adapt.plotting import * + +import git +import importlib +import numpy as np + + +# Parse model +parser = argparse.ArgumentParser( + prog="plot_importance.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine"], +) +parser.add_argument( + "num_training_cases", + help="The number of training cases", + type=positive_int, +) +parser.add_argument( + "-a", + "--approaches", + nargs="+", + help="Adaptive approaches to consider", + choices=["isotropic", "anisotropic"], + default=["anisotropic"], +) +parser.add_argument( + "--adaptation_steps", + help="Steps to learn from", + type=positive_int, + default=3, +) +parser.add_argument( + "--tag", + help="Model tag (defaults to current git commit sha)", + default=None, +) +parsed_args = parser.parse_args() +model = parsed_args.model +tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha + +# Separate sensitivity information by variable +data = np.load(f"{model}/data/sensitivities_{tag}.npy") +layout = importlib.import_module(f"{model}.network").NetLayout() +p = importlib.import_module(f"{model}.plotting") +sensitivities = p.process_sensitivities(data, layout) + +# Plot increases as a stacked bar chart +colours = ("b", "C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "0.3") +deriv = ("", "_x", "_y", "_{xx}", "_{xy}", "_{yy}") +N = len(sensitivities.keys()) +bottom = np.zeros(N) +fig, axes = plt.subplots(figsize=(1.5 * N, 4)) +for i, colour in enumerate(colours): + arr = np.array([S[i] for S in sensitivities.values()]) + label = r"$f%s(\mathbf x_K)$" % deriv[i] + axes.bar(sensitivities.keys(), arr, 
bottom=bottom, color=colour, label=label) + bottom += arr +xlim = axes.get_xlim() +axes.set_xlabel("Input parameters") +axes.set_ylabel("Network sensitivity") +axes.legend(ncol=2) +axes.grid(True) +plt.tight_layout() +plt.savefig(f"{model}/plots/importance_{tag}.pdf") diff --git a/adaptation_n2n/plot_progress.py b/adaptation_n2n/plot_progress.py new file mode 100644 index 0000000..09482a3 --- /dev/null +++ b/adaptation_n2n/plot_progress.py @@ -0,0 +1,48 @@ +""" +Plot the training and validation loss curves for a network +trained on a particular ``model``. +""" +from nn_adapt.parse import argparse +from nn_adapt.plotting import * + +import git +import numpy as np + + +# Parse model +parser = argparse.ArgumentParser( + prog="plot_progress.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine", "burgers"], +) +parser.add_argument( + "--tag", + help="Model tag (defaults to current git commit sha)", + default=None, +) +parsed_args = parser.parse_args() +model = parsed_args.model +tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha + +# Load data +train_losses = np.load(f"{model}/data/train_losses_{tag}.npy") +validation_losses = np.load(f"{model}/data/validation_losses_{tag}.npy") +epochs = np.arange(len(train_losses)) + 1 + +# Plot losses +fig, axes = plt.subplots() +kw = dict(linewidth=0.5) +axes.loglog(epochs, train_losses, label="Training", color="deepskyblue", **kw) +axes.loglog(epochs, validation_losses, label="Validation", color="darkgreen", **kw) +axes.set_xlabel("Number of epochs") +axes.set_ylabel("Average loss") +axes.legend() +axes.grid(True) +axes.set_xlim([1, epochs[-1]]) +plt.tight_layout() +plt.savefig(f"{model}/plots/losses_{tag}.pdf") diff --git a/adaptation_n2n/plot_timings.py b/adaptation_n2n/plot_timings.py new file mode 100644 index 0000000..d482a6e --- /dev/null +++ b/adaptation_n2n/plot_timings.py @@ -0,0 +1,72 @@ +from nn_adapt.parse import Parser, nonnegative_int +from nn_adapt.plotting import * +import numpy as np + + +def get_times(model, approach, case, it, tag=None): + """ + Gather the timing data for some approach applied + to a given test case. 
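+    The component timings are read from the ``times_*`` arrays
+    written by the adaptation loop scripts.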
+ + :arg model: the PDE being solved + :arg approach: the mesh adaptation approach + :arg case: the test case name or number + :arg it: the run + :kwarg tag: the tag for the network + """ + ext = f"_{tag}" if approach[:2] == "ML" else "" + qoi = np.load(f"{model}/data/qois_{approach}_{case}{ext}.npy")[it] + conv = np.load(f"{model}/data/qois_uniform_{case}.npy")[-1] + print(f"{approach} QoI error: {abs((qoi-conv)/conv)*100:.3f} %") + split = { + "Forward solve": np.load(f"{model}/data/times_forward_{approach}_{case}{ext}.npy")[it], + "Adjoint solve": np.load(f"{model}/data/times_adjoint_{approach}_{case}{ext}.npy")[it], + "Error estimation": np.load(f"{model}/data/times_estimator_{approach}_{case}{ext}.npy")[it], + "Metric construction": np.load(f"{model}/data/times_metric_{approach}_{case}{ext}.npy")[it], + "Mesh adaptation": np.load(f"{model}/data/times_adapt_{approach}_{case}{ext}.npy")[it], + } + total = sum(split.values()) + for key, value in split.items(): + print(f"{approach} {key}: {value/total*100:.3f} %") + niter = np.load(f"{model}/data/niter_{approach}_{case}{ext}.npy")[it] + print(f"niter = {niter}") + return split + + +# Parse user input +parser = Parser(prog="plot_timings.py") +parser.parse_tag() +parser.add_argument( + "--iter", + help="Iteration", + type=nonnegative_int, + default=21, +) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +tag = parsed_args.tag +it = parsed_args.iter +approaches = ["GOanisotropic", "MLanisotropic"] + +# Plot bar chart +fig, axes = plt.subplots(figsize=(6, 4.5)) +colours = ["C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "darkgreen", "0.3"] +data = { + "Goal-oriented": get_times(model, "GOanisotropic", test_case, it, tag=tag), + "Data-driven": get_times(model, "MLanisotropic", test_case, it, tag=tag), +} +bottom = np.zeros(len(data.keys())) +for i, key in enumerate(data["Goal-oriented"].keys()): + arr = np.array([d[key] for d in data.values()]) + axes.bar(data.keys(), arr, bottom=bottom, label=key, color=colours[i]) + bottom += arr +axes.bar_label(axes.containers[-1]) +axes.legend(loc="upper right") +axes.set_ylabel("Runtime [seconds]") +plt.tight_layout() +plt.savefig(f"{model}/plots/timings_{test_case}_{it}_{tag}.pdf") diff --git a/adaptation_n2n/run_adapt.py b/adaptation_n2n/run_adapt.py new file mode 100644 index 0000000..9055cf3 --- /dev/null +++ b/adaptation_n2n/run_adapt.py @@ -0,0 +1,153 @@ +""" +Run a given ``test_case`` of a ``model`` using goal-oriented +mesh adaptation in a fixed point iteration loop. + +This is the script where feature data is harvested to train +the neural network on. 
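+(Note that the feature extraction block is currently commented
+out below.)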
+""" +from nn_adapt.features import * +from nn_adapt.metric_one2n import * +from nn_adapt.parse import Parser +from nn_adapt.solving import * +from nn_adapt.solving_one2n import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt +from firedrake.petsc import PETSc + +import importlib +import numpy as np +from time import perf_counter +import matplotlib.pyplot as plt + + +set_log_level(ERROR) + +# Parse for test case and number of refinements +parser = Parser("run_adapt.py") +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_target_complexity() +parser.add_argument("--no_outputs", help="Turn off file outputs", action="store_true") +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +base_complexity = parsed_args.base_complexity +target_complexity = parsed_args.target_complexity +optimise = parsed_args.optimise +no_outputs = parsed_args.no_outputs or optimise +if not no_outputs: + from pyroteus.utility import File + +# Setup +start_time = perf_counter() +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + +# Run adaptation loop +kwargs = { + "interpolant": "Clement", + "enrichment_method": "h", + "average": True, + "anisotropic": approach == "anisotropic", + "retall": True, + "h_min": setup.parameters.h_min, + "h_max": setup.parameters.h_max, + "a_max": 1.0e5, +} +ct = ConvergenceTracker(mesh, parsed_args) +tt_steps = setup.parameters.tt_steps +if not no_outputs: + output_dir = f"{model}/outputs/{test_case}/GO/{approach}" + fwd_file = [File(f"{output_dir}/forward{step}.pvd") for step in range(tt_steps)] + adj_file = [File(f"{output_dir}/adjoint{step}.pvd") for step in range(tt_steps)] + ee_file = File(f"{output_dir}/estimator.pvd") + metric_file = File(f"{output_dir}/metric.pvd") + mesh_file = File(f"{output_dir}/mesh.pvd") + mesh_file.write(mesh.coordinates) +print(f"Test case {test_case}") +print(" Mesh 0") +print(f" Element count = {ct.elements_old}") +data_dir = f"{model}/data" +for ct.fp_iteration in range(ct.maxiter + 1): + suffix = f"{test_case}_GO{approach}_{ct.fp_iteration}" + + # Ramp up the target complexity + kwargs["target_complexity"] = ramp_complexity( + base_complexity, target_complexity, ct.fp_iteration + ) + + # Compute goal-oriented metric + out = go_metric_one2n(mesh, setup, convergence_checker=ct, **kwargs) + qoi, fwd_sol = out["qoi"], out["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + if "adjoint" not in out: + break + estimator = out["estimator"] + print(f" Error estimator = {estimator}") + if "metric" not in out: + break + adj_sol, dwr, metric = out["adjoint"], out["dwr"], out["metric"] + + # fig, axes = plt.subplots(1,2) + # tricontourf(fwd_sol, axes=axes[0]) + # tricontourf(adj_sol, axes=axes[1]) + # plt.savefig("out.jpg") + + if not no_outputs: + for step in range(tt_steps): + fwd_file[step].write(*fwd_sol[step].split()) + adj_file[step].write(*adj_sol[step].split()) + ee_file.write(dwr) + metric_file.write(metric.function) + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial 
guess. + """ + ic = Function(V) + try: + ic.project(fwd_sol[-1]) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol[-1].split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # # Extract features + # if not optimise: + # features = extract_features(setup, fwd_sol, adj_sol) + # target = dwr.dat.data.flatten() + # assert not np.isnan(target).any() + # for key, value in features.items(): + # np.save(f"{data_dir}/feature_{key}_{suffix}", value) + # np.save(f"{data_dir}/target_{suffix}", target) + + # Adapt the mesh and check for element count convergence + with PETSc.Log.Event("Mesh adaptation"): + mesh = adapt(mesh, metric) + if not no_outputs: + mesh_file.write(mesh.coordinates) + elements = mesh.num_cells() + print(f" Mesh {ct.fp_iteration+1}") + print(f" Element count = {elements}") + if ct.check_elements(elements): + break + ct.check_maxiter() +print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") +print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_n2n/run_adapt_ml.py b/adaptation_n2n/run_adapt_ml.py new file mode 100644 index 0000000..9c79d29 --- /dev/null +++ b/adaptation_n2n/run_adapt_ml.py @@ -0,0 +1,167 @@ +""" +Run a given ``test_case`` of a ``model`` using data-driven +mesh adaptation in a fixed point iteration loop. +""" +from nn_adapt.ann import * +from nn_adapt.features import * +from nn_adapt.parse import Parser +from nn_adapt.metric import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import * + +import importlib +from time import perf_counter + + +# Parse user input +parser = Parser("run_adapt_ml.py") +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_preproc() +parser.parse_target_complexity() +parser.parse_tag() +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +base_complexity = parsed_args.base_complexity +target_complexity = parsed_args.target_complexity +preproc = parsed_args.preproc +optimise = parsed_args.optimise +tag = parsed_args.tag +if not optimise: + from pyroteus.utility import File + +# Setup +start_time = perf_counter() +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + +# Load the model +layout = importlib.import_module(f"{model}.network").NetLayout() +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) +nn.eval() + +# Run adaptation loop +ct = ConvergenceTracker(mesh, parsed_args) +if not optimise: + output_dir = f"{model}/outputs/{test_case}/ML/{approach}/{tag}" + fwd_file = File(f"{output_dir}/forward.pvd") + adj_file = File(f"{output_dir}/adjoint.pvd") + ee_file = File(f"{output_dir}/estimator.pvd") + metric_file = File(f"{output_dir}/metric.pvd") + mesh_file = File(f"{output_dir}/mesh.pvd") + mesh_file.write(mesh.coordinates) +kwargs = {} +print(f"Test case {test_case}") +print(" Mesh 0") +print(f" Element count = {ct.elements_old}") +for ct.fp_iteration in range(ct.maxiter + 1): + + # Ramp up the target complexity + target_ramp = 
ramp_complexity(base_complexity, target_complexity, ct.fp_iteration) + + # Solve forward and adjoint and compute Hessians + out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) + qoi, fwd_sol = out["qoi"], out["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + if "adjoint" not in out: + break + adj_sol = out["adjoint"] + if not optimise: + fwd_file.write(*fwd_sol.split()) + adj_file.write(*adj_sol.split()) + P0 = FunctionSpace(mesh, "DG", 0) + P1_ten = TensorFunctionSpace(mesh, "CG", 1) + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. + """ + ic = Function(V) + try: + ic.project(fwd_sol) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol.split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Extract features + with PETSc.Log.Event("Network"): + features = collect_features(extract_features(setup, fwd_sol, adj_sol), layout) + + # Run model + with PETSc.Log.Event("Propagate"): + test_targets = np.array([]) + with torch.no_grad(): + for i in range(features.shape[0]): + test_x = torch.Tensor(features[i]).to(device) + test_prediction = nn(test_x) + test_targets = np.concatenate( + (test_targets, np.array(test_prediction.cpu())) + ) + dwr = Function(P0) + dwr.dat.data[:] = np.abs(test_targets) + + # Check for error estimator convergence + with PETSc.Log.Event("Error estimation"): + estimator = dwr.vector().gather().sum() + print(f" Error estimator = {estimator}") + if ct.check_estimator(estimator): + break + if not optimise: + ee_file.write(dwr) + + # Construct metric + with PETSc.Log.Event("Metric construction"): + if approach == "anisotropic": + hessian = combine_metrics(*get_hessians(fwd_sol), average=True) + else: + hessian = None + M = anisotropic_metric( + dwr, + hessian=hessian, + target_complexity=target_ramp, + target_space=P1_ten, + interpolant="Clement", + ) + space_normalise(M, target_ramp, "inf") + enforce_element_constraints( + M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 + ) + metric = RiemannianMetric(mesh) + metric.assign(M) + if not optimise: + metric_file.write(M) + + # Adapt the mesh and check for element count convergence + with PETSc.Log.Event("Mesh adaptation"): + mesh = adapt(mesh, metric) + if not optimise: + mesh_file.write(mesh.coordinates) + elements = mesh.num_cells() + print(f" Mesh {ct.fp_iteration+1}") + print(f" Element count = {elements}") + if ct.check_elements(elements): + break + ct.check_maxiter() +print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") +print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_n2n/run_adaptation_loop.py b/adaptation_n2n/run_adaptation_loop.py new file mode 100644 index 0000000..126616a --- /dev/null +++ b/adaptation_n2n/run_adaptation_loop.py @@ -0,0 +1,154 @@ +""" +Run a given ``test_case`` of a ``model`` using goal-oriented +mesh adaptation in a fixed point iteration loop, for a sequence +of increasing target metric complexities, +""" +from nn_adapt.features import * +from nn_adapt.parse import Parser, positive_float +from nn_adapt.metric import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt + +import importlib +import numpy as np +from time import perf_counter + + 
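+# Suppress Firedrake log output below ERROR level; progress is
+# reported via explicit print statements instead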
+set_log_level(ERROR) + +# Parse user input +parser = Parser("run_adaptation_loop.py") +parser.parse_num_refinements(default=24) +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_target_complexity() +parser.add_argument( + "--factor", + help="Power by which to increase target metric complexity", + type=positive_float, + default=0.25, +) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +num_refinements = parsed_args.num_refinements +base_complexity = parsed_args.base_complexity +f = parsed_args.factor + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit + +# Run adaptation loop +qois, dofs, elements, estimators, niter = [], [], [], [], [] +components = ("forward", "adjoint", "estimator", "metric", "adapt") +times = {c: [] for c in components} +times["all"] = [] +print(f"Test case {test_case}") +for i in range(num_refinements + 1): + try: + target_complexity = 100.0 * 2 ** (f * i) + kwargs = { + "enrichment_method": "h", + "interpolant": "Clement", + "average": True, + "anisotropic": approach == "anisotropic", + "retall": True, + "h_min": setup.parameters.h_min, + "h_max": setup.parameters.h_max, + "a_max": 1.0e5, + } + if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh + else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + ct = ConvergenceTracker(mesh, parsed_args) + print(f" Target {target_complexity}\n Mesh 0") + print(f" Element count = {ct.elements_old}") + times["all"].append(-perf_counter()) + for c in components: + times[c].append(0.0) + for ct.fp_iteration in range(ct.maxiter + 1): + + # Ramp up the target complexity + kwargs["target_complexity"] = ramp_complexity( + base_complexity, target_complexity, ct.fp_iteration + ) + + # Compute goal-oriented metric + out = go_metric(mesh, setup, convergence_checker=ct, **kwargs) + qoi = out["qoi"] + times["forward"][-1] += out["times"]["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + if "adjoint" not in out: + break + estimator = out["estimator"] + times["adjoint"][-1] += out["times"]["adjoint"] + times["estimator"][-1] += out["times"]["estimator"] + print(f" Error estimator = {estimator}") + if "metric" not in out: + break + times["metric"][-1] += out["times"]["metric"] + fwd_sol, adj_sol = ( + out["forward"], + out["adjoint"], + ) + dwr, metric = out["dwr"], out["metric"] + dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. 
+ """ + ic = Function(V) + try: + ic.project(fwd_sol) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol.split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Adapt the mesh + out["times"]["adapt"] = -perf_counter() + mesh = adapt(mesh, metric) + out["times"]["adapt"] += perf_counter() + times["adapt"][-1] += out["times"]["adapt"] + print(f" Mesh {ct.fp_iteration+1}") + cells = mesh.num_cells() + print(f" Element count = {cells}") + if ct.check_elements(cells): + break + ct.check_maxiter() + print( + f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" + ) + times["all"][-1] += perf_counter() + qois.append(qoi) + dofs.append(dof) + elements.append(cells) + estimators.append(estimator) + niter.append(ct.fp_iteration + 1) + np.save(f"{model}/data/qois_GO{approach}_{test_case}", qois) + np.save(f"{model}/data/dofs_GO{approach}_{test_case}", dofs) + np.save(f"{model}/data/elements_GO{approach}_{test_case}", elements) + np.save(f"{model}/data/estimators_GO{approach}_{test_case}", estimators) + np.save(f"{model}/data/niter_GO{approach}_{test_case}", niter) + np.save(f"{model}/data/times_all_GO{approach}_{test_case}", times["all"]) + for c in components: + np.save(f"{model}/data/times_{c}_GO{approach}_{test_case}", times[c]) + except ConvergenceError: + print("Skipping due to convergence error") + continue diff --git a/adaptation_n2n/run_adaptation_loop_ml.py b/adaptation_n2n/run_adaptation_loop_ml.py new file mode 100644 index 0000000..018329b --- /dev/null +++ b/adaptation_n2n/run_adaptation_loop_ml.py @@ -0,0 +1,195 @@ +""" +Run a given ``test_case`` of a ``model`` using data-driven +mesh adaptation in a fixed point iteration loop, for a sequence +of increasing target metric complexities, +""" +from nn_adapt.ann import * +from nn_adapt.features import * +from nn_adapt.parse import Parser, positive_float +from nn_adapt.metric import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import * + +import importlib +import numpy as np +from time import perf_counter + + +set_log_level(ERROR) + +# Parse user input +parser = Parser("run_adaptation_loop_ml.py") +parser.parse_num_refinements(default=24) +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_preproc() +parser.parse_tag() +parser.parse_target_complexity() +parser.add_argument( + "--factor", + help="Power by which to increase target metric complexity", + type=positive_float, + default=0.25, +) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +num_refinements = parsed_args.num_refinements +preproc = parsed_args.preproc +tag = parsed_args.tag +base_complexity = parsed_args.base_complexity +f = parsed_args.factor + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit + +# Load the model +layout = importlib.import_module(f"{model}.network").NetLayout() +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) +nn.eval() + +# Run adaptation loop +qois, dofs, elements, estimators, niter = [], [], [], [], [] +components = ("forward", "adjoint", "estimator", "metric", "adapt") +times = {c: [] for c in components} 
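+# One list entry per target complexity, accumulated over the fixed
+# point iterations. With the default factor of 0.25, the targets are
+# 100, 119, 141, 168, 200, ..., doubling every four refinements.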
+times["all"] = [] +print(f"Test case {test_case}") +for i in range(num_refinements + 1): + try: + target_complexity = 100.0 * 2 ** (f * i) + if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh + else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + ct = ConvergenceTracker(mesh, parsed_args) + kwargs = {} + print(f" Target {target_complexity}\n Mesh 0") + print(f" Element count = {ct.elements_old}") + times["all"].append(-perf_counter()) + for c in components: + times[c].append(0.0) + for ct.fp_iteration in range(ct.maxiter + 1): + + # Ramp up the target complexity + target_ramp = ramp_complexity( + base_complexity, target_complexity, ct.fp_iteration + ) + + # Solve forward and adjoint and compute Hessians + out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) + qoi = out["qoi"] + times["forward"][-1] += out["times"]["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + if "adjoint" not in out: + break + times["adjoint"][-1] += out["times"]["adjoint"] + fwd_sol, adj_sol = out["forward"], out["adjoint"] + dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. + """ + ic = Function(V) + try: + ic.project(fwd_sol) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol.split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Extract features + out["times"]["estimator"] = -perf_counter() + features = extract_features(setup, fwd_sol, adj_sol) + features = collect_features(features, layout) + + # Run model + test_targets = np.array([]) + with torch.no_grad(): + for i in range(features.shape[0]): + test_x = torch.Tensor(features[i]).to(device) + test_prediction = nn(test_x) + test_targets = np.concatenate( + (test_targets, np.array(test_prediction.cpu())) + ) + P0 = FunctionSpace(mesh, "DG", 0) + dwr = Function(P0) + dwr.dat.data[:] = np.abs(test_targets) + + # Check for error estimator convergence + estimator = dwr.vector().gather().sum() + out["times"]["estimator"] += perf_counter() + times["estimator"][-1] += out["times"]["estimator"] + print(f" Error estimator = {estimator}") + if ct.check_estimator(estimator): + break + + # Construct metric + out["times"]["metric"] = -perf_counter() + if approach == "anisotropic": + hessian = combine_metrics(*get_hessians(fwd_sol), average=True) + else: + hessian = None + P1_ten = TensorFunctionSpace(mesh, "CG", 1) + M = anisotropic_metric( + dwr, + hessian=hessian, + target_complexity=target_ramp, + target_space=P1_ten, + interpolant="Clement", + ) + space_normalise(M, target_ramp, "inf") + enforce_element_constraints( + M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 + ) + metric = RiemannianMetric(mesh) + metric.assign(M) + out["times"]["metric"] += perf_counter() + times["metric"][-1] += out["times"]["metric"] + + # Adapt the mesh and check for element count convergence + out["times"]["adapt"] = -perf_counter() + mesh = adapt(mesh, metric) + out["times"]["adapt"] += perf_counter() + times["adapt"][-1] += out["times"]["adapt"] + print(f" Mesh {ct.fp_iteration+1}") + cells = mesh.num_cells() + print(f" Element count = {cells}") + if ct.check_elements(cells): + break + ct.check_maxiter() + print( + f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" + ) + times["all"][-1] += perf_counter() + qois.append(qoi) + dofs.append(dof) + 
elements.append(cells) + estimators.append(estimator) + niter.append(ct.fp_iteration + 1) + np.save(f"{model}/data/qois_ML{approach}_{test_case}_{tag}", qois) + np.save(f"{model}/data/dofs_ML{approach}_{test_case}_{tag}", dofs) + np.save(f"{model}/data/elements_ML{approach}_{test_case}_{tag}", elements) + np.save(f"{model}/data/estimators_ML{approach}_{test_case}_{tag}", estimators) + np.save(f"{model}/data/niter_ML{approach}_{test_case}_{tag}", niter) + np.save(f"{model}/data/times_all_ML{approach}_{test_case}_{tag}", times["all"]) + for c in components: + np.save(f"{model}/data/times_{c}_ML{approach}_{test_case}_{tag}", times[c]) + except ConvergenceError: + print("Skipping due to convergence error") + continue diff --git a/adaptation_n2n/run_fixed_mesh.py b/adaptation_n2n/run_fixed_mesh.py new file mode 100644 index 0000000..f8ad698 --- /dev/null +++ b/adaptation_n2n/run_fixed_mesh.py @@ -0,0 +1,48 @@ +""" +Run a given ``test_case`` of a ``model`` on the initial mesh alone. +""" +from nn_adapt.parse import Parser +from nn_adapt.solving import * +from thetis import print_output +from firedrake.petsc import PETSc +import importlib +from time import perf_counter + + +start_time = perf_counter() + +# Parse user input +parser = Parser("run_fixed_mesh.py") +parser.parse_num_refinements(default=0) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") +if parsed_args.num_refinements > 0: + with PETSc.Log.Event("Hierarchy"): + mesh = MeshHierarchy(mesh, parsed_args.num_refinements)[-1] + +# Solve and evaluate QoI +out = get_solutions(mesh, setup, solve_adjoint=not parsed_args.optimise) +qoi = out["qoi"] +print_output(f"QoI for test case {test_case} = {qoi:.8f} {unit}") +if not parsed_args.optimise: + File(f"{model}/outputs/{test_case}/fixed/forward.pvd").write( + *out["forward"].split() + ) + File(f"{model}/outputs/{test_case}/fixed/adjoint.pvd").write( + *out["adjoint"].split() + ) +print_output(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_n2n/run_uniform_refinement.py b/adaptation_n2n/run_uniform_refinement.py new file mode 100644 index 0000000..be3b3a8 --- /dev/null +++ b/adaptation_n2n/run_uniform_refinement.py @@ -0,0 +1,76 @@ +""" +Run a given ``test_case`` of a ``model`` on a sequence of +uniformly refined meshes generated from the initial mesh. 
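+QoI values, DoF counts, element counts and timings are saved to
+file on each level, for use in the convergence plots.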
+""" +from nn_adapt.parse import Parser +from nn_adapt.solving import * +from thetis import print_output +import importlib +import numpy as np +from time import perf_counter + + +start_time = perf_counter() + +# Parse user input +parser = Parser("run_uniform_refinement.py") +parser.parse_num_refinements(default=3) +parser.add_argument( + "--prolong", help="Use previous solution as initial guess", action="store_true" +) +parsed_args = parser.parse_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +num_refinements = parsed_args.num_refinements + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +mesh = Mesh(f"{model}/meshes/{test_case}.msh") +mh = [mesh] + list(MeshHierarchy(mesh, num_refinements)) +tm = TransferManager() +kwargs = {} + +# Run uniform refinement +qois, dofs, elements, times, niter = [], [], [], [], [] +setup_time = perf_counter() - start_time +print_output(f"Test case {test_case}") +print_output(f"Setup time: {setup_time:.2f} seconds") +for i, mesh in enumerate(mh): + start_time = perf_counter() + print_output(f" Mesh {i}") + print_output(f" Element count = {mesh.num_cells()}") + out = get_solutions(mesh, setup, solve_adjoint=False, **kwargs) + qoi, fwd_sol = out["qoi"], out["forward"] + + def prolong(V): + """ + After the first iteration, prolong the previous + solution as the initial guess. + """ + ic = Function(V) + tm.prolong(fwd_sol, ic) + return ic + + if parsed_args.prolong: + kwargs["init"] = prolong + fs = fwd_sol.function_space() + time = perf_counter() - start_time + print_output(f" Quantity of Interest = {qoi} {unit}") + print_output(f" Runtime: {time:.2f} seconds") + qois.append(qoi) + dofs.append(sum(fs.dof_count)) + times.append(time) + elements.append(mesh.num_cells()) + niter.append(1) + np.save(f"{model}/data/qois_uniform_{test_case}", qois) + np.save(f"{model}/data/dofs_uniform_{test_case}", dofs) + np.save(f"{model}/data/elements_uniform_{test_case}", elements) + np.save(f"{model}/data/times_all_uniform_{test_case}", times) + np.save(f"{model}/data/niter_uniform_{test_case}", niter) +print_output(f"Setup time: {setup_time:.2f} seconds") diff --git a/adaptation_n2n/test_and_train.py b/adaptation_n2n/test_and_train.py new file mode 100644 index 0000000..e3cfd9b --- /dev/null +++ b/adaptation_n2n/test_and_train.py @@ -0,0 +1,266 @@ +""" +Train a network on ``num_training_cases`` problem +specifications of a given ``model``. 
+""" +from nn_adapt.ann import * +from nn_adapt.parse import argparse, bounded_float, nonnegative_int, positive_float, positive_int + +import git +import importlib +import numpy as np +import os +from sklearn import model_selection +from time import perf_counter +import torch.optim.lr_scheduler as lr_scheduler + + +# Configuration +pwd = os.path.abspath(os.path.dirname(__file__)) +models = [ + name for name in os.listdir(pwd) + if os.path.isdir(name) and name not in ("__pycache__", "models") +] +parser = argparse.ArgumentParser( + prog="test_and_train.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "-m", + "--model", + help="The equation set being solved", + type=str, + choices=models, + default="steady_turbine", +) +parser.add_argument( + "-n", + "--num_training_cases", + help="The number of test cases to train on", + type=positive_int, + default=100, +) +parser.add_argument( + "-a", + "--approaches", + nargs="+", + help="Adaptive approaches to consider", + choices=["isotropic", "anisotropic"], + default=["anisotropic"], +) +parser.add_argument( + "--adaptation_steps", + help="Steps to learn from", + type=positive_int, + default=3, +) +parser.add_argument( + "--lr", + help="Initial learning rate", + type=positive_float, + default=1.0e-03, +) +parser.add_argument( + "--lr_adapt_num_steps", + help="Frequency of learning rate adaptation", + type=nonnegative_int, + default=0, +) +parser.add_argument( + "--lr_adapt_factor", + help="Learning rate reduction factor", + type=bounded_float(0, 1), + default=0.99, +) +parser.add_argument( + "--lr_adapt_threshold", + help="Learning rate threshold", + type=bounded_float(0, 1), + default=1.0e-04, +) +parser.add_argument( + "--lr_adapt_patience", + help="The number of iterations before early adapting the learning rate", + type=positive_int, + default=np.inf, +) +parser.add_argument( + "--num_epochs", + help="The number of iterations", + type=positive_int, + default=2000, +) +parser.add_argument( + "--stopping_patience", + help="The number of iterations before early stopping", + type=positive_int, + default=np.inf, +) +parser.add_argument( + "--preproc", + help="Data preprocess function", + type=str, + choices=["none", "arctan", "tanh", "logabs"], + default="arctan", +) +parser.add_argument( + "--batch_size", + help="Data points per training iteration", + type=positive_int, + default=500, +) +parser.add_argument( + "--test_batch_size", + help="Data points per validation iteration", + type=positive_int, + default=500, +) +parser.add_argument( + "--test_size", + help="Data proportion for validation", + type=bounded_float(0, 1), + default=0.3, +) +parser.add_argument( + "--seed", + help="Seed for random number generator", + type=positive_int, + default=42, +) +parser.add_argument( + "--tag", + help="Tag for labelling the model (defaults to current git sha)", + type=str, + default=git.Repo(search_parent_directories=True).head.object.hexsha, +) +parsed_args = parser.parse_args() +model = parsed_args.model +approaches = parsed_args.approaches +preproc = parsed_args.preproc +num_epochs = parsed_args.num_epochs +lr = parsed_args.lr +lr_adapt_num_steps = parsed_args.lr_adapt_num_steps +lr_adapt_factor = parsed_args.lr_adapt_factor +lr_adapt_threshold = parsed_args.lr_adapt_threshold +lr_adapt_patience = parsed_args.lr_adapt_patience +stopping_patience = parsed_args.stopping_patience +test_size = parsed_args.test_size +batch_size = parsed_args.batch_size +test_batch_size = parsed_args.test_batch_size +seed = 
parsed_args.seed +tag = parsed_args.tag + +# Load network layout +layout = importlib.import_module(f"{model}.network").NetLayout() + +# Setup model +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +optimizer = torch.optim.Adam(nn.parameters(), lr=lr) +scheduler1 = lr_scheduler.ReduceLROnPlateau( + optimizer, + factor=lr_adapt_factor, + threshold=lr_adapt_threshold, + patience=lr_adapt_patience, + verbose=True, +) +if lr_adapt_num_steps > 0: + scheduler2 = lr_scheduler.StepLR( + optimizer, + lr_adapt_num_steps, + gamma=lr_adapt_factor + ) +else: + scheduler2 = None +criterion = Loss() + +# Increase batch size if running on GPU +cuda = all(p.is_cuda for p in nn.parameters()) +print(f"Model parameters are{'' if cuda else ' not'} using GPU cores.") +if cuda: + dtype = torch.float32 + batch_size *= 4 + test_batch_size *= 4 +else: + dtype = torch.float + +# Load data +concat = lambda a, b: b if a is None else np.concatenate((a, b), axis=0) +features = None +targets = None +data_dir = f"{model}/data" +for step in range(parsed_args.adaptation_steps): + for approach in approaches: + for case in range(1, parsed_args.num_training_cases + 1): + if case == 1 and approach != approaches[0]: + continue + suffix = f"{case}_GO{approach}_{step}" + feature = { + key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") + for key in layout.inputs + } + features = concat(features, collect_features(feature)) + target = np.load(f"{data_dir}/target_{suffix}.npy") + targets = concat(targets, target) +print(f"Total number of features: {len(features.flatten())}") +print(f"Total number of targets: {len(targets)}") +features = torch.from_numpy(features).type(dtype) +targets = torch.from_numpy(targets).type(dtype) + +# Get train and validation datasets +xtrain, xval, ytrain, yval = model_selection.train_test_split( + features, targets, test_size=test_size, random_state=seed +) +train_data = torch.utils.data.TensorDataset(torch.Tensor(xtrain), torch.Tensor(ytrain)) +train_loader = torch.utils.data.DataLoader( + train_data, batch_size=batch_size, shuffle=True, num_workers=0 +) +validate_data = torch.utils.data.TensorDataset(torch.Tensor(xval), torch.Tensor(yval)) +validate_loader = torch.utils.data.DataLoader( + validate_data, batch_size=test_batch_size, shuffle=False, num_workers=0 +) + +# Train +train_losses, validation_losses, lr_adapt_steps = [], [], [] +set_seed(seed) +previous_loss = np.inf +trigger_times = 0 +for epoch in range(num_epochs): + + # Training step + start_time = perf_counter() + train = propagate(train_loader, nn, criterion, optimizer) + mid_time = perf_counter() + train_time = mid_time - start_time + + # Validation step + val = propagate(validate_loader, nn, criterion) + validation_time = perf_counter() - mid_time + + # Adapt learning rate + scheduler1.step(val) + if scheduler2 is not None: + scheduler2.step() + if epoch % lr_adapt_num_steps == 0: + lr_adapt_steps.append(epoch) + np.save(f"{model}/data/lr_adapt_steps_{tag}", lr_adapt_steps) + + # Stash progress + print( + f"Epoch {epoch:4d}/{num_epochs:d}" + f" avg loss: {train:.4e} / {val:.4e}" + f" wallclock: {train_time:.2f}s / {validation_time:.2f}s" + ) + train_losses.append(train) + validation_losses.append(val) + np.save(f"{model}/data/train_losses_{tag}", train_losses) + np.save(f"{model}/data/validation_losses_{tag}", validation_losses) + torch.save(nn.state_dict(), f"{model}/model_{tag}.pt") + + # Test for convergence + if val > previous_loss: + trigger_times += 1 + if trigger_times >= stopping_patience: + print("Early stopping") + 
break + else: + trigger_times = 0 + previous_loss = val diff --git a/adaptation_one2n/.DS_Store b/adaptation_one2n/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5acd31712950cf5014acdc43701c916eb3dbb5d2 GIT binary patch literal 6148 zcmeHKy-ou$47Q<&lUO=3<`wz|p}MXtT^Ue01jC)urF-5D-l;2}e?Xjwgv5XfvZds6 zVmm+I>5`a;czqaFL`x#-Py|_&hDdkQbmq<}|xx?H4prp@AN$ zwf;}msr3DBEIY(>-Q~;i?)LR&9zT`g8;||-A7kh=zgw&-7zhS}fneb04B*Zd>8=c; z4hDjOVBmuRIUf>=V0NsAdUT-D6aXmCXcg#^OGr#|%#PI%76@A?&_dZ$47PCeC-=*a z)zHF;J^5gt`8|1IogL#Rbtle-Q3nITz?^||YcA#f-{6;NCi(M_Xaxhoz&~Sv7yY(h zV^e;&e%YSfwF%`OMMV6nC=lqgM*tdfj$EtJ>`8R^WyfkLqe#Do1LGl}ghUk#`~m}S DgX1Y# literal 0 HcmV?d00001 diff --git a/adaptation_one2n/a_test.py b/adaptation_one2n/a_test.py new file mode 100644 index 0000000..f3281b2 --- /dev/null +++ b/adaptation_one2n/a_test.py @@ -0,0 +1,36 @@ +from nn_adapt.features import * +from nn_adapt.features import extract_array +from nn_adapt.metric import * +from nn_adapt.parse import Parser +from nn_adapt.solving_one2n import * +from nn_adapt.solving_n2n import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt +from firedrake.petsc import PETSc + +import importlib +import numpy as np + +tt_steps = 10 + +# setup1 = importlib.import_module(f"burgers_n2n.config") +# meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] +# out1 = indicate_errors_n2n(meshes=meshes, config=setup1) +# print(out1) + +# mesh = UnitSquareMesh(20, 20) +# setup2 = importlib.import_module(f"burgers_one2n.config") +# out2 = indicate_errors_one2n(mesh=mesh, config=setup2) +# print(out2) + +# mesh = UnitSquareMesh(20, 20) +# setup2 = importlib.import_module(f"burgers_one2n.config") +# out2 = get_solutions_one2n(mesh=mesh, config=setup2) +# test_array = time_integrate(out2["forward"]) +# print(extract_array(test_array, centroid=True)) + +a = None +b = 1 +print(a or b) + diff --git a/adaptation_one2n/burgers_one2n/config.py b/adaptation_one2n/burgers_one2n/config.py new file mode 100644 index 0000000..344f728 --- /dev/null +++ b/adaptation_one2n/burgers_one2n/config.py @@ -0,0 +1,160 @@ +from models.burgers_one2n import * +from nn_adapt.ann import sample_uniform +import numpy as np + + +testing_cases = ["demo"] + + +def initialise(case, discrete=False): + """ + Given some training case (for which ``case`` + is an integer) or testing case (for which + ``case`` is a string), set up the physical + problems defining the Burgers problem. + + For training data, these values are chosen + randomly. + """ + parameters.case = case + parameters.discrete = discrete + if isinstance(case, int): + parameters.turbine_coords = [] + np.random.seed(100 * case) + + # Random initial speed from 0.01 m/s to 6 m/s + parameters.initial_speed = sample_uniform(0.01, 6.0) + + # Random viscosity from 0.00001 m^2/s to 1 m^2/s + parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1) + return + elif "demo" in case: + parameters.viscosity_coefficient = 0.0001 + parameters.initial_speed = 1.0 + else: + raise ValueError(f"Test case {test_case} not recognised") + + if "reversed" in case: + parameters.initial_speed *= -1 + + +# def l2dist(xy, xyt): +# r""" +# Usual :math:`\ell_2` distance between +# two points in Euclidean space. 
+# """ +# diff = np.array(xy) - np.array(xyt) +# return np.sqrt(np.dot(diff, diff)) + + +# def initialise(case, discrete=False): +# """ +# Given some training case (for which ``case`` +# is an integer) or testing case (for which +# ``case`` is a string), set up the physical +# problems and turbine locations defining the +# tidal farm modelling problem. + +# For training data, these values are chosen +# randomly. +# """ +# parameters.case = case +# parameters.discrete = discrete +# if isinstance(case, int): +# parameters.turbine_coords = [] +# np.random.seed(100 * case) + +# # Random depth from 20m to 100m +# parameters.depth = sample_uniform(20.0, 100.0) + +# # Random inflow speed from 0.5 m/s to 6 m/s +# parameters.inflow_speed = sample_uniform(0.5, 6.0) + +# # Random viscosity from 0.1 m^2/s to 1 m^2/s +# parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) + +# # Randomise turbine configuration such that all +# # turbines are at least 50m from the domain +# # boundaries and each other +# num_turbines = np.random.randint(1, 8) +# tc = parameters.turbine_coords +# i = 0 +# while i < num_turbines: +# x = 50.0 + 1100.0 * np.random.rand() +# y = 50.0 + 400.0 * np.random.rand() +# valid = True +# for xyt in tc: +# if l2dist((x, y), xyt) < 50.0: +# valid = False +# if valid: +# tc.append((x, y)) +# i += 1 +# return +# elif "aligned" in case: +# parameters.viscosity_coefficient = 0.5 +# parameters.depth = 40.0 +# parameters.inflow_speed = 5.0 +# parameters.turbine_coords = [(456, 250), (744, 250)] +# elif "offset" in case: +# parameters.viscosity_coefficient = 0.5 +# parameters.depth = 40.0 +# parameters.inflow_speed = 5.0 +# parameters.turbine_coords = [(456, 232), (744, 268)] +# elif "trench" in case: +# bmin, bmax = Constant(160.0), Constant(200.0) +# w = Constant(500.0) + +# def bathy(mesh): +# y = SpatialCoordinate(mesh)[1] / w +# P0 = FunctionSpace(mesh, "DG", 0) +# b = Function(P0) +# b.interpolate(bmin + (bmax - bmin) * y * (1 - y)) +# return b + +# parameters.viscosity_coefficient = 2.0 +# parameters.bathymetry = bathy +# parameters.inflow_speed = 10.0 +# parameters.turbine_coords = [(456, 232), (744, 268)] +# elif "headland" in case: +# parameters.viscosity_coefficient = 100.0 +# parameters.depth = 40.0 +# parameters.inflow_speed = 5.0 +# parameters.turbine_diameter = 80.0 +# parameters.turbine_width = 100.0 +# parameters.turbine_coords = [(600, 250)] +# parameters.correct_thrust = False +# parameters.solver_parameters = { +# "mat_type": "aij", +# "snes_type": "newtonls", +# "snes_linesearch_type": "bt", +# "snes_rtol": 1.0e-08, +# "snes_max_it": 100, +# "snes_monitor": None, +# "ksp_type": "preonly", +# "ksp_converged_reason": None, +# "pc_type": "lu", +# "pc_factor_mat_solver_type": "mumps", +# } +# elif "pipe" in case: +# u_in = Constant(5.0) +# parameters.inflow_speed = u_in +# w = Constant(200.0) + +# def inflow(mesh): +# y = SpatialCoordinate(mesh)[1] / w +# yy = ((y - 0.5) / 0.5) ** 2 +# u_expr = conditional(yy < 1, exp(1 - 1 / (1 - yy)), 0) +# return as_vector([u_expr, 0]) + +# parameters.viscosity_coefficient = 20.0 +# parameters.depth = 40.0 +# parameters.u_inflow = inflow +# parameters.ic = lambda mesh: as_vector([u_in, 0.0]) +# parameters.turbine_coords = [(550, 300), (620, 390)] +# parameters.qoi_unit = "kW" +# parameters.density = Constant(1030.0 * 1.0e-03) +# else: +# raise ValueError(f"Test case {test_case} not recognised") + +# if "reversed" in case: +# parameters.inflow_speed *= -1 diff --git a/examples/burgers_one2n/meshgen.py 
b/adaptation_one2n/burgers_one2n/meshgen.py
similarity index 100%
rename from examples/burgers_one2n/meshgen.py
rename to adaptation_one2n/burgers_one2n/meshgen.py
diff --git a/examples/burgers_one2n/network.py b/adaptation_one2n/burgers_one2n/network.py
similarity index 100%
rename from examples/burgers_one2n/network.py
rename to adaptation_one2n/burgers_one2n/network.py
diff --git a/examples/burgers_one2n/testing_cases.txt b/adaptation_one2n/burgers_one2n/testing_cases.txt
similarity index 100%
rename from examples/burgers_one2n/testing_cases.txt
rename to adaptation_one2n/burgers_one2n/testing_cases.txt
diff --git a/adaptation_one2n/compute_importance.py b/adaptation_one2n/compute_importance.py
new file mode 100644
index 0000000..3e2bf4c
--- /dev/null
+++ b/adaptation_one2n/compute_importance.py
@@ -0,0 +1,102 @@
+"""
+Compute the sensitivities of a network trained on a
+particular ``model`` to its input parameters.
+"""
+from nn_adapt.ann import *
+from nn_adapt.parse import argparse, positive_int
+from nn_adapt.plotting import *
+
+import git
+import importlib
+import numpy as np
+
+
+# Parse model
+parser = argparse.ArgumentParser(
+    prog="compute_importance.py",
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+)
+parser.add_argument(
+    "model",
+    help="The model",
+    type=str,
+    choices=["steady_turbine", "burgers_one2n"],
+)
+parser.add_argument(
+    "num_training_cases",
+    help="The number of training cases",
+    type=positive_int,
+)
+parser.add_argument(
+    "-a",
+    "--approaches",
+    nargs="+",
+    help="Adaptive approaches to consider",
+    choices=["isotropic", "anisotropic"],
+    default=["anisotropic"],
+)
+parser.add_argument(
+    "--adaptation_steps",
+    help="Steps to learn from",
+    type=positive_int,
+    default=3,
+)
+parser.add_argument(
+    "--preproc",
+    help="Data preprocess function",
+    type=str,
+    choices=["none", "arctan", "tanh", "logabs"],
+    default="arctan",
+)
+parser.add_argument(
+    "--tag",
+    help="Model tag (defaults to current git commit sha)",
+    default=git.Repo(search_parent_directories=True).head.object.hexsha,
+)
+parsed_args = parser.parse_args()
+model = parsed_args.model
+preproc = parsed_args.preproc
+tag = parsed_args.tag
+
+# Load the model
+layout = importlib.import_module(f"{model}.network").NetLayout()
+nn = SingleLayerFCNN(layout, preproc=preproc).to(device)
+nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt"))
+nn.eval()
+loss_fn = Loss()
+
+# Compute (averaged) sensitivities of the network to the inputs
+dJdm = torch.zeros(layout.num_inputs)
+data_dir = f"{model}/data"
+approaches = parsed_args.approaches
+values = np.zeros((0, layout.num_inputs))
+for step in range(parsed_args.adaptation_steps):
+    for approach in approaches:
+        for test_case in range(1, parsed_args.num_training_cases + 1):
+            if test_case == 1 and approach != approaches[0]:
+                continue
+            suffix = f"{test_case}_GO{approach}_{step}"
+
+            # Load some data and mark inputs as independent
+            data = {
+                key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy")
+                for key in layout.inputs
+            }
+            features = collect_features(data, layout)
+            values = np.vstack((values, features))
+            features = torch.from_numpy(features).type(torch.float32)
+            features.requires_grad_(True)
+
+            # Run the model and sum the outputs
+            out = nn(features).sum(axis=0)
+
+            # Backpropagate to get the gradient of the outputs w.r.t.
the inputs + out.backward() + dJdm += features.grad.mean(axis=0) + +# Compute representative values for each parameter +dm = np.abs(np.mean(values, axis=0)) + +# Multiply by the variability +sensitivity = dJdm.abs().detach().numpy() * dm +np.save(f"{model}/data/sensitivities_{tag}.npy", sensitivity) diff --git a/adaptation_one2n/makefile b/adaptation_one2n/makefile new file mode 100644 index 0000000..d26d52a --- /dev/null +++ b/adaptation_one2n/makefile @@ -0,0 +1,319 @@ +all: setup network test + +# --- Configurable parameters + +APPROACHES = anisotropic +MODEL = burgers_one2n +NUM_TRAINING_CASES = 1 +TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) +PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 +TAG = all + +# --- Parameters that should not need modifying + +TRAINING_CASES = $(shell seq 1 ${NUM_TRAINING_CASES}) +CASES = ${TRAINING_CASES} ${TESTING_CASES} + +# --- Setup directories and meshes + +setup: dir mesh plot_config + +# Create the directory structure +# ============================== +# +# $(MODEL) +#    ├── data +#    ├── outputs +# │    └── $(TESTING_CASES) +#    └── plots +dir: + mkdir -p $(MODEL)/data + mkdir -p $(MODEL)/outputs + mkdir -p $(MODEL)/plots + for case in $(TESTING_CASES); do \ + mkdir -p $(MODEL)/outputs/$$case; \ + done + +# Generate meshes +# =============== +# +# Meshes are generated for all training and testing cases. +# * First, a gmsh geometry file is generated using the +# `meshgen.py` script. The definitions of these cases +# are based on the contents of $(MODEL)/config.py. +# For the `turbine` case, the training data is generated +# randomly. +# * Then the geometry files are used to construct meshes +# in the .msh format. +# +# Gmsh is set to use the "pack" algorithm, which means that +# the initial meshes are quasi-uniform. That is, they are as +# close to uniform as they can be, given that the turbines +# are to be explicitly meshed. +mesh: + touch timing.log + d=$$(date +%s) && \ + for case in $(CASES); do \ + python3 meshgen.py $(MODEL) $$case; \ + if [ -e $(MODEL)/meshes/$$case.geo ]; then \ + gmsh -2 -algo pack $(MODEL)/meshes/$$case.geo -o $(MODEL)/meshes/$$case.msh; \ + fi; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Meshes built in $$(($$(date +%s)-d)) seconds" >> timing.log + +# Plot configurations +# =================== +# +# Plot the configurations for a subset of the training cases +# and the testing cases that are listed in $(MODEL)/config.py. +# The domain geometry and turbine locations are shown, along +# with the physical parameters used. +plot_config: + python3 plot_config.py $(MODEL) 'train' + python3 plot_config.py $(MODEL) 'test' + +# Clean the model directory +# ========================= +# +# Delete all logs, data, outputs, plots and compiled code associated +# with the model. Note that this is a very destructive thing to do! +clean: + rm -rf timing.log + rm -rf $(MODEL)/data + rm -rf $(MODEL)/outputs + rm -rf $(MODEL)/plots + rm -rf $(MODEL)/__pycache__ + +# --- Construct the neural network + +network: features train plot_progress plot_importance + +# Generate feature data +# ===================== +# +# This involves applying mesh adaptation to all of the cases in the +# training data. In each case, feature data and "target" error indicator +# data are extracted and saved to file. 
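+#
+# Each run of run_adapt.py saves arrays named like
+#     $(MODEL)/data/feature_<key>_<case>_GO<approach>_<step>.npy
+#     $(MODEL)/data/target_<case>_GO<approach>_<step>.npy
+# which test_and_train.py later concatenates into the training set.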
+features: + touch timing.log + d=$$(date +%s) && \ + for case in $(TRAINING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adapt.py $(MODEL) $$case -a $$approach --no_outputs $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Features generated in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Train the network +# ================= +# +# Train a neural network based on the feature and target data that has +# been saved to file, for a specified number of training cases. The +# network is tagged (using the environment variable $(TAG)) to distinguish +# the model and its outputs. +train: + touch timing.log + d=$$(date +%s) && \ + python3 test_and_train.py -m $(MODEL) -n $(NUM_TRAINING_CASES) --tag $(TAG) && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Training completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ + echo "" >> timing.log + +# Plot loss functions +# =================== +# +# Once the network has been trained, plot the training and validation loss +# curves against iteration count. +plot_progress: + python3 plot_progress.py $(MODEL) --tag $(TAG) + +# Feature importance experiment +# ============================= +# +# Perform an experiment that tests how sensitive the trained network is to +# each of its inputs (i.e. the features). If it is particularly sensitive to +# one of the features then we deduce that the feature is in some sense +# "important" to the network. +plot_importance: + python3 compute_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) + python3 plot_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) + +# --- Test the neural network + +test: snapshot_go snapshot_ml uniform go ml plot_convergence + +# Apply goal-oriented adaptation to the test cases +# ================================================ +# +# Apply goal-oriented mesh adaptation to the testing cases, thereby +# generating lots of output data in Paraview format. These include +# the meshes, solution fields, error indicators and metrics. +snapshot_go: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adapt.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Goal-oriented snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Apply data-driven adaptation to the test cases +# ============================================== +# +# Apply data-driven adaptation based on the trained network to the testing +# cases, thereby generating lots of output data in Paraview format. These +# include the meshes, solution fields, error indicators and metrics. +snapshot_ml: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adapt_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Data-driven snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Convergence analysis for uniform refinement +# =========================================== +# +# Run the model on a sequence of uniformly refined meshes. 
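+#
+# Note that the coarsest mesh appears twice in the mesh hierarchy used by
+# run_uniform_refinement.py, so the first data point (whose timing includes
+# just-in-time compilation of the solver kernels) can later be discarded by
+# plot_convergence.py.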
+uniform: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_uniform_refinement.py $(MODEL) $$case; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Uniform refinement completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Convergence analysis for goal-oriented adaptation +# ================================================= +# +# Run the model with the standard goal-oriented approach for +# a range of target metric complexities. +go: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adaptation_loop.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Goal-oriented adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Convergence analysis for data-driven adaptation +# =============================================== +# +# Run the model with the data-driven approach based on the +# trained network for a range of target metric complexities. +ml: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 run_adaptation_loop_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \ + done; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Data-driven adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + +# Plot convergence curves +# ======================= +# +# Plot the data points generated during the `uniform`, `go` and +# `ml` recipes and annotate with lines of best fit, where appropriate. +plot_convergence: + for case in $(TESTING_CASES); do \ + python3 plot_convergence.py $(MODEL) $$case --tag $(TAG); \ + done + +# --- Profiling experiments + +# NOTE: The following recipes are somewhat redundant. Similar information +# can be obtained from the outputs of the `uniform`, `go` and `ml` +# recipes by running `plot_timings.py` with the appropriate input +# parameters. + +# Profiling for uniform refinement +# ================================ +# +# Run the model on a fine fixed mesh generated by refining the initial +# mesh four times and output the PETSc logging information in a format +# that can be then turned into a flamegraph. +profile_uni: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_fixed_mesh.py $(MODEL) $$case --optimise --num_refinements 4 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Uniform refinement profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ + echo "" >> timing.log + for case in $(TESTING_CASES); do \ + flamegraph.pl --title "Uniform refinement ($$case)" logview.txt > $(MODEL)/outputs/$$case/uni.svg && \ + rm logview.txt; \ + done + +# Profiling for goal-oriented adaptation +# ====================================== +# +# Run the model using the standard goal-oriented approach with a fairly +# high target metric complexity of 64,000 and output the PETSc logging +# information in a format that can be then turned into a flamegraph. 
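+#
+# Note: the `-log_view :logview.txt:ascii_flamegraph` option makes PETSc
+# write its event timings in the folded-stack format consumed by
+# flamegraph.pl (from the FlameGraph project), which must be on the PATH.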
+profile_go: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_adapt.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Goal-oriented adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + for case in $(TESTING_CASES); do \ + flamegraph.pl --title "Goal-oriented adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/go.svg && \ + rm logview.txt; \ + done + +# Profiling for data-driven adaptation +# ==================================== +# +# Run the model using the data-driven adaptation approach based on the +# trained network with a fairly high target metric complexity of 64,000 +# and output the PETSc logging information in a format that can be then +# turned into a flamegraph. +profile_ml: + touch timing.log + d=$$(date +%s) && \ + for case in $(TESTING_CASES); do \ + python3 run_adapt_ml.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) --tag all -log_view :logview.txt:ascii_flamegraph; \ + done && \ + date >> timing.log && \ + git log -n 1 --oneline >> timing.log && \ + echo "Data-driven adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log + echo "" >> timing.log + for case in $(TESTING_CASES); do \ + flamegraph.pl --title "Data-driven adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/ml.svg && \ + rm logview.txt; \ + done diff --git a/adaptation_one2n/meshgen.py b/adaptation_one2n/meshgen.py new file mode 100644 index 0000000..1067e7c --- /dev/null +++ b/adaptation_one2n/meshgen.py @@ -0,0 +1,34 @@ +""" +Generate the mesh for configuration ``case`` +of a given ``model``. +""" +import argparse +import importlib +import sys + + +# Parse for test case +parser = argparse.ArgumentParser(prog="meshgen.py") +parser.add_argument("model", help="The model") +parser.add_argument("case", help="The configuration file name") +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +reverse = False +try: + case = int(parsed_args.case) + assert case > 0 +except ValueError: + case = parsed_args.case + reverse = "reversed" in case + +# Load setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(case) +meshgen = importlib.import_module(f"{model}.meshgen") + +# Write geometry file +code = meshgen.generate_geo(setup, reverse=reverse) +if code is None: + sys.exit(0) +with open(f"{model}/meshes/{case}.geo", "w+") as meshfile: + meshfile.write(code) diff --git a/examples/models/burgers_one2n.py b/adaptation_one2n/models/burgers_one2n.py similarity index 100% rename from examples/models/burgers_one2n.py rename to adaptation_one2n/models/burgers_one2n.py diff --git a/adaptation_one2n/plot_config.py b/adaptation_one2n/plot_config.py new file mode 100644 index 0000000..159b4d7 --- /dev/null +++ b/adaptation_one2n/plot_config.py @@ -0,0 +1,65 @@ +""" +Plot the problem configurations for a given ``model``. +The ``mode`` is chosen from 'train' and 'test'. 
+""" +from firedrake import Mesh +from nn_adapt.parse import argparse, positive_int +from nn_adapt.plotting import * + +import importlib + + +# Parse model +parser = argparse.ArgumentParser( + prog="plot_config.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine"], +) +parser.add_argument( + "mode", + help="Training or testing?", + type=str, + choices=["train", "test"], +) +parser.add_argument( + "--num_cols", + help="Number of columns in the plot", + type=positive_int, + default=4, +) +parser.add_argument( + "--num_rows", + help="Number of rows in the plot", + type=positive_int, + default=4, +) +parsed_args = parser.parse_args() +model = parsed_args.model +mode = parsed_args.mode +setup = importlib.import_module(f"{model}.config") +cases = setup.testing_cases +ncols = parsed_args.num_cols +if mode == "test": + ncols = len(cases) +nrows = parsed_args.num_rows +if mode == "test": + nrows = 1 +N = ncols * nrows +if mode == "train": + cases = range(1, N + 1) +p = importlib.import_module(f"{model}.plotting") + +# Plot all configurations +fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(3 * ncols, 1.5 * nrows)) +for i, case in enumerate(cases): + ax = axes[i] if nrows == 1 else axes[i // ncols, i % nrows] + setup.initialise(case, discrete=True) + mesh = Mesh(f"{model}/meshes/{case}.msh") + p.plot_config(setup, mesh, ax) +plt.tight_layout() +plt.savefig(f"{model}/plots/{mode}_config.pdf") diff --git a/adaptation_one2n/plot_convergence.py b/adaptation_one2n/plot_convergence.py new file mode 100644 index 0000000..d8e2255 --- /dev/null +++ b/adaptation_one2n/plot_convergence.py @@ -0,0 +1,179 @@ +""" +Plot QoI convergence curves under uniform refinement, +goal-oriented mesh adaptation and data-driven mesh +adaptation, for a given ``test_case`` and ``model``. 
+""" +from nn_adapt.parse import Parser +from nn_adapt.plotting import * + +import importlib +from matplotlib.ticker import FormatStrFormatter +import numpy as np +import os +import sys + + +# Parse user input +parser = Parser("plot_convergence.py") +parser.parse_tag() +parsed_args = parser.parse_args() +model = parsed_args.model +test_case = parsed_args.test_case +tag = parsed_args.tag + +# Formatting +matplotlib.rcParams["font.size"] = 20 +approaches = { + "uniform": { + "label": "Uniform refinement", + "color": "cornflowerblue", + "marker": "x", + "linestyle": "-", + }, + "GOanisotropic": { + "label": "Goal-oriented adaptation", + "color": "orange", + "marker": "o", + "linestyle": "-", + }, + "MLanisotropic": { + "label": "Data-driven adaptation", + "color": "g", + "marker": "^", + "linestyle": "-", + }, +} +xlim = { + "dofs": [3.0e03, 3.0e06], + "times": [1.0e0, 2.0e03], +} + +# Load configuration +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +qoi_name = setup.parameters.qoi_name.capitalize() + +# Load outputs +dofs, qois, times, niter = {}, {}, {}, {} +for approach in approaches.copy(): + ext = f"_{tag}" if approach[:2] == "ML" else "" + try: + dofs[approach] = np.load(f"{model}/data/dofs_{approach}_{test_case}{ext}.npy") + qois[approach] = np.load(f"{model}/data/qois_{approach}_{test_case}{ext}.npy") + times[approach] = np.load(f"{model}/data/times_all_{approach}_{test_case}{ext}.npy") + niter[approach] = np.load(f"{model}/data/niter_{approach}_{test_case}{ext}.npy") + print(f"Iteration count for {approach}: {niter[approach]}") + except IOError: + print(f"Cannot load {approach} data for test case {test_case}") + approaches.pop(approach) + continue +if len(approaches.keys()) == 0: + print("Nothing to plot.") + sys.exit(0) + +# Drop first iteration because timings include compilation # FIXME: Why? 
+dofs["uniform"] = dofs["uniform"][1:] +qois["uniform"] = qois["uniform"][1:] +times["uniform"] = times["uniform"][1:] +niter["uniform"] = niter["uniform"][1:] + +# Plot QoI curves against DoF count +fig, axes = plt.subplots() +start = max(np.load(f"{model}/data/qois_uniform_{test_case}.npy")) +conv = np.load(f"{model}/data/qois_uniform_{test_case}.npy")[-1] +axes.hlines(conv, *xlim["dofs"], "k", label="Converged QoI") +for approach, metadata in approaches.items(): + axes.semilogx(dofs[approach], qois[approach], **metadata) +axes.set_xlim(xlim["dofs"]) +if test_case in ["aligned", "offset"]: + axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) +axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) +axes.set_xlabel("DoF count") +axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + r"}$)") +axes.grid(True) +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_vs_dofs_{test_case}_{tag}.pdf") + +# Plot QoI curves against CPU time +fig, axes = plt.subplots() +axes.hlines(conv, *xlim["times"], "k", label="Converged QoI") +for approach, metadata in approaches.items(): + axes.semilogx(times[approach], qois[approach], **metadata) + for n, t, q in zip(niter[approach], times[approach], qois[approach]): + axes.annotate(str(n), (1.1 * t, q), color=metadata["color"], fontsize=14) +axes.set_xlim(xlim["times"]) +if test_case in ["aligned", "offset"]: + axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) +axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) +axes.set_xlabel(r"CPU time ($\mathrm{s}$)") +axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + "}$)") +axes.grid(True) +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_vs_cputime_{test_case}_{tag}.pdf") +plt.close() + +# Plot CPU time curves against DoF count +fig, axes = plt.subplots() +for approach, metadata in approaches.items(): + axes.loglog(dofs[approach], times[approach], **metadata) + for n, t, d in zip(niter[approach], times[approach], dofs[approach]): + axes.annotate(str(n), (1.1 * d, t), color=metadata["color"], fontsize=14) +axes.set_xlabel("DoF count") +axes.set_ylabel(r"CPU time ($\mathrm{s}$)") +axes.set_xlim(xlim["dofs"]) +axes.set_ylim(xlim["times"]) +axes.grid(True, which="both") +plt.tight_layout() +plt.savefig(f"{model}/plots/cputime_vs_dofs_{test_case}_{tag}.pdf") +plt.close() + +qois["uniform"] = qois["uniform"][:-1] +dofs["uniform"] = dofs["uniform"][:-1] +times["uniform"] = times["uniform"][:-1] + +# Plot QoI error curves against DoF count +errors = {} +fig, axes = plt.subplots() +for approach, metadata in approaches.items(): + errors[approach] = np.abs((qois[approach] - conv) / conv) + x, y = dofs[approach], errors[approach] + a, b = np.polyfit(np.log(x), np.log(y), 1) + print(f"QoI error vs. 
DoFs {approach}: gradient {a:.2f}") + axes.scatter(x, y, **metadata) + axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) +axes.set_xlabel("DoF count") +axes.set_ylabel(r"QoI error ($\%$)") +axes.grid(True, which="both") +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_error_vs_dofs_{test_case}_{tag}.pdf") +plt.close() + +# Plot legend +fname = f"{model}/plots/legend.pdf" +if not os.path.exists(fname): + fig2, axes2 = plt.subplots() + lines, labels = axes.get_legend_handles_labels() + legend = axes2.legend(lines, labels, frameon=False, ncol=3) + fig2.canvas.draw() + axes2.set_axis_off() + bbox = legend.get_window_extent().transformed(fig2.dpi_scale_trans.inverted()) + plt.savefig(fname, bbox_inches=bbox) + +# Plot QoI error curves against CPU time +fig, axes = plt.subplots() +for approach, metadata in approaches.items(): + x, y = times[approach], errors[approach] + if approach == "uniform": + a, b = np.polyfit(np.log(x), np.log(y), 1) + print(f"QoI error vs. time {approach}: gradient {a:.2f}") + axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) + axes.scatter(x, y, **metadata) + for n, t, e in zip(niter[approach], x, errors[approach]): + axes.annotate(str(n), (1.1 * t, e), color=metadata["color"], fontsize=14) +axes.set_xlabel(r"CPU time ($\mathrm{s}$)") +axes.set_ylabel(r"QoI error ($\%$)") +axes.grid(True, which="both") +plt.tight_layout() +plt.savefig(f"{model}/plots/qoi_error_vs_cputime_{test_case}_{tag}.pdf") +plt.close() diff --git a/adaptation_one2n/plot_importance.py b/adaptation_one2n/plot_importance.py new file mode 100644 index 0000000..596d80f --- /dev/null +++ b/adaptation_one2n/plot_importance.py @@ -0,0 +1,75 @@ +""" +Plot the sensitivities of a network trained on a +particular ``model`` to its input parameters. 
+""" +from nn_adapt.parse import argparse, positive_int +from nn_adapt.plotting import * + +import git +import importlib +import numpy as np + + +# Parse model +parser = argparse.ArgumentParser( + prog="plot_importance.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine"], +) +parser.add_argument( + "num_training_cases", + help="The number of training cases", + type=positive_int, +) +parser.add_argument( + "-a", + "--approaches", + nargs="+", + help="Adaptive approaches to consider", + choices=["isotropic", "anisotropic"], + default=["anisotropic"], +) +parser.add_argument( + "--adaptation_steps", + help="Steps to learn from", + type=positive_int, + default=3, +) +parser.add_argument( + "--tag", + help="Model tag (defaults to current git commit sha)", + default=None, +) +parsed_args = parser.parse_args() +model = parsed_args.model +tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha + +# Separate sensitivity information by variable +data = np.load(f"{model}/data/sensitivities_{tag}.npy") +layout = importlib.import_module(f"{model}.network").NetLayout() +p = importlib.import_module(f"{model}.plotting") +sensitivities = p.process_sensitivities(data, layout) + +# Plot increases as a stacked bar chart +colours = ("b", "C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "0.3") +deriv = ("", "_x", "_y", "_{xx}", "_{xy}", "_{yy}") +N = len(sensitivities.keys()) +bottom = np.zeros(N) +fig, axes = plt.subplots(figsize=(1.5 * N, 4)) +for i, colour in enumerate(colours): + arr = np.array([S[i] for S in sensitivities.values()]) + label = r"$f%s(\mathbf x_K)$" % deriv[i] + axes.bar(sensitivities.keys(), arr, bottom=bottom, color=colour, label=label) + bottom += arr +xlim = axes.get_xlim() +axes.set_xlabel("Input parameters") +axes.set_ylabel("Network sensitivity") +axes.legend(ncol=2) +axes.grid(True) +plt.tight_layout() +plt.savefig(f"{model}/plots/importance_{tag}.pdf") diff --git a/adaptation_one2n/plot_progress.py b/adaptation_one2n/plot_progress.py new file mode 100644 index 0000000..09482a3 --- /dev/null +++ b/adaptation_one2n/plot_progress.py @@ -0,0 +1,48 @@ +""" +Plot the training and validation loss curves for a network +trained on a particular ``model``. 
+""" +from nn_adapt.parse import argparse +from nn_adapt.plotting import * + +import git +import numpy as np + + +# Parse model +parser = argparse.ArgumentParser( + prog="plot_progress.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "model", + help="The model", + type=str, + choices=["steady_turbine", "burgers"], +) +parser.add_argument( + "--tag", + help="Model tag (defaults to current git commit sha)", + default=None, +) +parsed_args = parser.parse_args() +model = parsed_args.model +tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha + +# Load data +train_losses = np.load(f"{model}/data/train_losses_{tag}.npy") +validation_losses = np.load(f"{model}/data/validation_losses_{tag}.npy") +epochs = np.arange(len(train_losses)) + 1 + +# Plot losses +fig, axes = plt.subplots() +kw = dict(linewidth=0.5) +axes.loglog(epochs, train_losses, label="Training", color="deepskyblue", **kw) +axes.loglog(epochs, validation_losses, label="Validation", color="darkgreen", **kw) +axes.set_xlabel("Number of epochs") +axes.set_ylabel("Average loss") +axes.legend() +axes.grid(True) +axes.set_xlim([1, epochs[-1]]) +plt.tight_layout() +plt.savefig(f"{model}/plots/losses_{tag}.pdf") diff --git a/adaptation_one2n/plot_timings.py b/adaptation_one2n/plot_timings.py new file mode 100644 index 0000000..d482a6e --- /dev/null +++ b/adaptation_one2n/plot_timings.py @@ -0,0 +1,72 @@ +from nn_adapt.parse import Parser, nonnegative_int +from nn_adapt.plotting import * +import numpy as np + + +def get_times(model, approach, case, it, tag=None): + """ + Gather the timing data for some approach applied + to a given test case. + + :arg model: the PDE being solved + :arg approach: the mesh adaptation approach + :arg case: the test case name or number + :arg it: the run + :kwarg tag: the tag for the network + """ + ext = f"_{tag}" if approach[:2] == "ML" else "" + qoi = np.load(f"{model}/data/qois_{approach}_{case}{ext}.npy")[it] + conv = np.load(f"{model}/data/qois_uniform_{case}.npy")[-1] + print(f"{approach} QoI error: {abs((qoi-conv)/conv)*100:.3f} %") + split = { + "Forward solve": np.load(f"{model}/data/times_forward_{approach}_{case}{ext}.npy")[it], + "Adjoint solve": np.load(f"{model}/data/times_adjoint_{approach}_{case}{ext}.npy")[it], + "Error estimation": np.load(f"{model}/data/times_estimator_{approach}_{case}{ext}.npy")[it], + "Metric construction": np.load(f"{model}/data/times_metric_{approach}_{case}{ext}.npy")[it], + "Mesh adaptation": np.load(f"{model}/data/times_adapt_{approach}_{case}{ext}.npy")[it], + } + total = sum(split.values()) + for key, value in split.items(): + print(f"{approach} {key}: {value/total*100:.3f} %") + niter = np.load(f"{model}/data/niter_{approach}_{case}{ext}.npy")[it] + print(f"niter = {niter}") + return split + + +# Parse user input +parser = Parser(prog="plot_timings.py") +parser.parse_tag() +parser.add_argument( + "--iter", + help="Iteration", + type=nonnegative_int, + default=21, +) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +tag = parsed_args.tag +it = parsed_args.iter +approaches = ["GOanisotropic", "MLanisotropic"] + +# Plot bar chart +fig, axes = plt.subplots(figsize=(6, 4.5)) +colours = ["C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "darkgreen", "0.3"] +data = { + "Goal-oriented": get_times(model, "GOanisotropic", test_case, it, 
tag=tag), + "Data-driven": get_times(model, "MLanisotropic", test_case, it, tag=tag), +} +bottom = np.zeros(len(data.keys())) +for i, key in enumerate(data["Goal-oriented"].keys()): + arr = np.array([d[key] for d in data.values()]) + axes.bar(data.keys(), arr, bottom=bottom, label=key, color=colours[i]) + bottom += arr +axes.bar_label(axes.containers[-1]) +axes.legend(loc="upper right") +axes.set_ylabel("Runtime [seconds]") +plt.tight_layout() +plt.savefig(f"{model}/plots/timings_{test_case}_{it}_{tag}.pdf") diff --git a/adaptation_one2n/run_adapt.py b/adaptation_one2n/run_adapt.py new file mode 100644 index 0000000..9f04f8b --- /dev/null +++ b/adaptation_one2n/run_adapt.py @@ -0,0 +1,149 @@ +""" +Run a given ``test_case`` of a ``model`` using goal-oriented +mesh adaptation in a fixed point iteration loop. + +This is the script where feature data is harvested to train +the neural network on. +""" +from nn_adapt.features import * +from nn_adapt.metric_one2n import * +from nn_adapt.parse import Parser +from nn_adapt.solving import * +from nn_adapt.solving_one2n import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt +from firedrake.petsc import PETSc + +import importlib +import numpy as np +from time import perf_counter +import matplotlib.pyplot as plt + + +set_log_level(ERROR) + +# Parse for test case and number of refinements +parser = Parser("run_adapt.py") +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_target_complexity() +parser.add_argument("--no_outputs", help="Turn off file outputs", action="store_true") +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +base_complexity = parsed_args.base_complexity +target_complexity = parsed_args.target_complexity +optimise = parsed_args.optimise +no_outputs = parsed_args.no_outputs or optimise +if not no_outputs: + from pyroteus.utility import File + +# Setup +start_time = perf_counter() +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + +# Run adaptation loop +kwargs = { + "interpolant": "Clement", + "enrichment_method": "h", + "average": True, + "anisotropic": approach == "anisotropic", + "retall": True, + "h_min": setup.parameters.h_min, + "h_max": setup.parameters.h_max, + "a_max": 1.0e5, +} +ct = ConvergenceTracker(mesh, parsed_args) +tt_steps = setup.parameters.tt_steps +if not no_outputs: + output_dir = f"{model}/outputs/{test_case}/GO/{approach}" + fwd_file = [File(f"{output_dir}/forward{step}.pvd") for step in range(tt_steps)] + adj_file = [File(f"{output_dir}/adjoint{step}.pvd") for step in range(tt_steps)] + ee_file = File(f"{output_dir}/estimator.pvd") + metric_file = File(f"{output_dir}/metric.pvd") + mesh_file = File(f"{output_dir}/mesh.pvd") + mesh_file.write(mesh.coordinates) +print(f"Test case {test_case}") +print(" Mesh 0") +print(f" Element count = {ct.elements_old}") +data_dir = f"{model}/data" +for ct.fp_iteration in range(ct.maxiter + 1): + suffix = f"{test_case}_GO{approach}_{ct.fp_iteration}" + + # Ramp up the target complexity + kwargs["target_complexity"] = ramp_complexity( + base_complexity, target_complexity, ct.fp_iteration + ) + + # Compute goal-oriented metric + out = 
go_metric_one2n(mesh, setup, convergence_checker=ct, **kwargs) + qoi, fwd_sol = out["qoi"], out["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + if "adjoint" not in out: + break + estimator = out["estimator"] + print(f" Error estimator = {estimator}") + if "metric" not in out: + break + adj_sol, dwr, metric = out["adjoint"], out["dwr"], out["metric"] + if not no_outputs: + for step in range(tt_steps): + fwd_file[step].write(*fwd_sol[step].split()) + adj_file[step].write(*adj_sol[step].split()) + ee_file.write(dwr) + metric_file.write(metric.function) + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. + """ + ic = Function(V) + try: + ic.project(fwd_sol[-1]) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol[-1].split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Extract features + if not optimise: + fwd_sol_integrate = time_integrate(fwd_sol) + adj_sol_integrate = time_integrate(adj_sol) + features = extract_features(setup, fwd_sol_integrate, adj_sol_integrate) + target = dwr.dat.data.flatten() + assert not np.isnan(target).any() + for key, value in features.items(): + np.save(f"{data_dir}/feature_{key}_{suffix}", value) + np.save(f"{data_dir}/target_{suffix}", target) + + # Adapt the mesh and check for element count convergence + with PETSc.Log.Event("Mesh adaptation"): + mesh = adapt(mesh, metric) + if not no_outputs: + mesh_file.write(mesh.coordinates) + elements = mesh.num_cells() + print(f" Mesh {ct.fp_iteration+1}") + print(f" Element count = {elements}") + if ct.check_elements(elements): + break + ct.check_maxiter() +print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") +print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_one2n/run_adapt_ml.py b/adaptation_one2n/run_adapt_ml.py new file mode 100644 index 0000000..9c79d29 --- /dev/null +++ b/adaptation_one2n/run_adapt_ml.py @@ -0,0 +1,167 @@ +""" +Run a given ``test_case`` of a ``model`` using data-driven +mesh adaptation in a fixed point iteration loop. 
+""" +from nn_adapt.ann import * +from nn_adapt.features import * +from nn_adapt.parse import Parser +from nn_adapt.metric import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import * + +import importlib +from time import perf_counter + + +# Parse user input +parser = Parser("run_adapt_ml.py") +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_preproc() +parser.parse_target_complexity() +parser.parse_tag() +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +base_complexity = parsed_args.base_complexity +target_complexity = parsed_args.target_complexity +preproc = parsed_args.preproc +optimise = parsed_args.optimise +tag = parsed_args.tag +if not optimise: + from pyroteus.utility import File + +# Setup +start_time = perf_counter() +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + +# Load the model +layout = importlib.import_module(f"{model}.network").NetLayout() +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) +nn.eval() + +# Run adaptation loop +ct = ConvergenceTracker(mesh, parsed_args) +if not optimise: + output_dir = f"{model}/outputs/{test_case}/ML/{approach}/{tag}" + fwd_file = File(f"{output_dir}/forward.pvd") + adj_file = File(f"{output_dir}/adjoint.pvd") + ee_file = File(f"{output_dir}/estimator.pvd") + metric_file = File(f"{output_dir}/metric.pvd") + mesh_file = File(f"{output_dir}/mesh.pvd") + mesh_file.write(mesh.coordinates) +kwargs = {} +print(f"Test case {test_case}") +print(" Mesh 0") +print(f" Element count = {ct.elements_old}") +for ct.fp_iteration in range(ct.maxiter + 1): + + # Ramp up the target complexity + target_ramp = ramp_complexity(base_complexity, target_complexity, ct.fp_iteration) + + # Solve forward and adjoint and compute Hessians + out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) + qoi, fwd_sol = out["qoi"], out["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + if "adjoint" not in out: + break + adj_sol = out["adjoint"] + if not optimise: + fwd_file.write(*fwd_sol.split()) + adj_file.write(*adj_sol.split()) + P0 = FunctionSpace(mesh, "DG", 0) + P1_ten = TensorFunctionSpace(mesh, "CG", 1) + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. 
+ """ + ic = Function(V) + try: + ic.project(fwd_sol) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol.split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Extract features + with PETSc.Log.Event("Network"): + features = collect_features(extract_features(setup, fwd_sol, adj_sol), layout) + + # Run model + with PETSc.Log.Event("Propagate"): + test_targets = np.array([]) + with torch.no_grad(): + for i in range(features.shape[0]): + test_x = torch.Tensor(features[i]).to(device) + test_prediction = nn(test_x) + test_targets = np.concatenate( + (test_targets, np.array(test_prediction.cpu())) + ) + dwr = Function(P0) + dwr.dat.data[:] = np.abs(test_targets) + + # Check for error estimator convergence + with PETSc.Log.Event("Error estimation"): + estimator = dwr.vector().gather().sum() + print(f" Error estimator = {estimator}") + if ct.check_estimator(estimator): + break + if not optimise: + ee_file.write(dwr) + + # Construct metric + with PETSc.Log.Event("Metric construction"): + if approach == "anisotropic": + hessian = combine_metrics(*get_hessians(fwd_sol), average=True) + else: + hessian = None + M = anisotropic_metric( + dwr, + hessian=hessian, + target_complexity=target_ramp, + target_space=P1_ten, + interpolant="Clement", + ) + space_normalise(M, target_ramp, "inf") + enforce_element_constraints( + M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 + ) + metric = RiemannianMetric(mesh) + metric.assign(M) + if not optimise: + metric_file.write(M) + + # Adapt the mesh and check for element count convergence + with PETSc.Log.Event("Mesh adaptation"): + mesh = adapt(mesh, metric) + if not optimise: + mesh_file.write(mesh.coordinates) + elements = mesh.num_cells() + print(f" Mesh {ct.fp_iteration+1}") + print(f" Element count = {elements}") + if ct.check_elements(elements): + break + ct.check_maxiter() +print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") +print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_one2n/run_adaptation_loop.py b/adaptation_one2n/run_adaptation_loop.py new file mode 100644 index 0000000..126616a --- /dev/null +++ b/adaptation_one2n/run_adaptation_loop.py @@ -0,0 +1,154 @@ +""" +Run a given ``test_case`` of a ``model`` using goal-oriented +mesh adaptation in a fixed point iteration loop, for a sequence +of increasing target metric complexities, +""" +from nn_adapt.features import * +from nn_adapt.parse import Parser, positive_float +from nn_adapt.metric import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import adapt + +import importlib +import numpy as np +from time import perf_counter + + +set_log_level(ERROR) + +# Parse user input +parser = Parser("run_adaptation_loop.py") +parser.parse_num_refinements(default=24) +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_target_complexity() +parser.add_argument( + "--factor", + help="Power by which to increase target metric complexity", + type=positive_float, + default=0.25, +) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +num_refinements = parsed_args.num_refinements +base_complexity = parsed_args.base_complexity +f = parsed_args.factor + +# 
Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit + +# Run adaptation loop +qois, dofs, elements, estimators, niter = [], [], [], [], [] +components = ("forward", "adjoint", "estimator", "metric", "adapt") +times = {c: [] for c in components} +times["all"] = [] +print(f"Test case {test_case}") +for i in range(num_refinements + 1): + try: + target_complexity = 100.0 * 2 ** (f * i) + kwargs = { + "enrichment_method": "h", + "interpolant": "Clement", + "average": True, + "anisotropic": approach == "anisotropic", + "retall": True, + "h_min": setup.parameters.h_min, + "h_max": setup.parameters.h_max, + "a_max": 1.0e5, + } + if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh + else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + ct = ConvergenceTracker(mesh, parsed_args) + print(f" Target {target_complexity}\n Mesh 0") + print(f" Element count = {ct.elements_old}") + times["all"].append(-perf_counter()) + for c in components: + times[c].append(0.0) + for ct.fp_iteration in range(ct.maxiter + 1): + + # Ramp up the target complexity + kwargs["target_complexity"] = ramp_complexity( + base_complexity, target_complexity, ct.fp_iteration + ) + + # Compute goal-oriented metric + out = go_metric(mesh, setup, convergence_checker=ct, **kwargs) + qoi = out["qoi"] + times["forward"][-1] += out["times"]["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + if "adjoint" not in out: + break + estimator = out["estimator"] + times["adjoint"][-1] += out["times"]["adjoint"] + times["estimator"][-1] += out["times"]["estimator"] + print(f" Error estimator = {estimator}") + if "metric" not in out: + break + times["metric"][-1] += out["times"]["metric"] + fwd_sol, adj_sol = ( + out["forward"], + out["adjoint"], + ) + dwr, metric = out["dwr"], out["metric"] + dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. 
+ """ + ic = Function(V) + try: + ic.project(fwd_sol) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol.split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Adapt the mesh + out["times"]["adapt"] = -perf_counter() + mesh = adapt(mesh, metric) + out["times"]["adapt"] += perf_counter() + times["adapt"][-1] += out["times"]["adapt"] + print(f" Mesh {ct.fp_iteration+1}") + cells = mesh.num_cells() + print(f" Element count = {cells}") + if ct.check_elements(cells): + break + ct.check_maxiter() + print( + f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" + ) + times["all"][-1] += perf_counter() + qois.append(qoi) + dofs.append(dof) + elements.append(cells) + estimators.append(estimator) + niter.append(ct.fp_iteration + 1) + np.save(f"{model}/data/qois_GO{approach}_{test_case}", qois) + np.save(f"{model}/data/dofs_GO{approach}_{test_case}", dofs) + np.save(f"{model}/data/elements_GO{approach}_{test_case}", elements) + np.save(f"{model}/data/estimators_GO{approach}_{test_case}", estimators) + np.save(f"{model}/data/niter_GO{approach}_{test_case}", niter) + np.save(f"{model}/data/times_all_GO{approach}_{test_case}", times["all"]) + for c in components: + np.save(f"{model}/data/times_{c}_GO{approach}_{test_case}", times[c]) + except ConvergenceError: + print("Skipping due to convergence error") + continue diff --git a/adaptation_one2n/run_adaptation_loop_ml.py b/adaptation_one2n/run_adaptation_loop_ml.py new file mode 100644 index 0000000..018329b --- /dev/null +++ b/adaptation_one2n/run_adaptation_loop_ml.py @@ -0,0 +1,195 @@ +""" +Run a given ``test_case`` of a ``model`` using data-driven +mesh adaptation in a fixed point iteration loop, for a sequence +of increasing target metric complexities, +""" +from nn_adapt.ann import * +from nn_adapt.features import * +from nn_adapt.parse import Parser, positive_float +from nn_adapt.metric import * +from nn_adapt.solving import * +from nn_adapt.utility import ConvergenceTracker +from firedrake.meshadapt import * + +import importlib +import numpy as np +from time import perf_counter + + +set_log_level(ERROR) + +# Parse user input +parser = Parser("run_adaptation_loop_ml.py") +parser.parse_num_refinements(default=24) +parser.parse_approach() +parser.parse_convergence_criteria() +parser.parse_preproc() +parser.parse_tag() +parser.parse_target_complexity() +parser.add_argument( + "--factor", + help="Power by which to increase target metric complexity", + type=positive_float, + default=0.25, +) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +approach = parsed_args.approach +num_refinements = parsed_args.num_refinements +preproc = parsed_args.preproc +tag = parsed_args.tag +base_complexity = parsed_args.base_complexity +f = parsed_args.factor + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit + +# Load the model +layout = importlib.import_module(f"{model}.network").NetLayout() +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) +nn.eval() + +# Run adaptation loop +qois, dofs, elements, estimators, niter = [], [], [], [], [] +components = ("forward", "adjoint", "estimator", "metric", "adapt") +times = {c: [] for c in 
components} +times["all"] = [] +print(f"Test case {test_case}") +for i in range(num_refinements + 1): + try: + target_complexity = 100.0 * 2 ** (f * i) + if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh + else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") + ct = ConvergenceTracker(mesh, parsed_args) + kwargs = {} + print(f" Target {target_complexity}\n Mesh 0") + print(f" Element count = {ct.elements_old}") + times["all"].append(-perf_counter()) + for c in components: + times[c].append(0.0) + for ct.fp_iteration in range(ct.maxiter + 1): + + # Ramp up the target complexity + target_ramp = ramp_complexity( + base_complexity, target_complexity, ct.fp_iteration + ) + + # Solve forward and adjoint and compute Hessians + out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) + qoi = out["qoi"] + times["forward"][-1] += out["times"]["forward"] + print(f" Quantity of Interest = {qoi} {unit}") + if "adjoint" not in out: + break + times["adjoint"][-1] += out["times"]["adjoint"] + fwd_sol, adj_sol = out["forward"], out["adjoint"] + dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + print(f" DoF count = {dof}") + + def proj(V): + """ + After the first iteration, project the previous + solution as the initial guess. + """ + ic = Function(V) + try: + ic.project(fwd_sol) + except NotImplementedError: + for c_init, c in zip(ic.split(), fwd_sol.split()): + c_init.project(c) + return ic + + # Use previous solution for initial guess + if parsed_args.transfer: + kwargs["init"] = proj + + # Extract features + out["times"]["estimator"] = -perf_counter() + features = extract_features(setup, fwd_sol, adj_sol) + features = collect_features(features, layout) + + # Run model + test_targets = np.array([]) + with torch.no_grad(): + for i in range(features.shape[0]): + test_x = torch.Tensor(features[i]).to(device) + test_prediction = nn(test_x) + test_targets = np.concatenate( + (test_targets, np.array(test_prediction.cpu())) + ) + P0 = FunctionSpace(mesh, "DG", 0) + dwr = Function(P0) + dwr.dat.data[:] = np.abs(test_targets) + + # Check for error estimator convergence + estimator = dwr.vector().gather().sum() + out["times"]["estimator"] += perf_counter() + times["estimator"][-1] += out["times"]["estimator"] + print(f" Error estimator = {estimator}") + if ct.check_estimator(estimator): + break + + # Construct metric + out["times"]["metric"] = -perf_counter() + if approach == "anisotropic": + hessian = combine_metrics(*get_hessians(fwd_sol), average=True) + else: + hessian = None + P1_ten = TensorFunctionSpace(mesh, "CG", 1) + M = anisotropic_metric( + dwr, + hessian=hessian, + target_complexity=target_ramp, + target_space=P1_ten, + interpolant="Clement", + ) + space_normalise(M, target_ramp, "inf") + enforce_element_constraints( + M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 + ) + metric = RiemannianMetric(mesh) + metric.assign(M) + out["times"]["metric"] += perf_counter() + times["metric"][-1] += out["times"]["metric"] + + # Adapt the mesh and check for element count convergence + out["times"]["adapt"] = -perf_counter() + mesh = adapt(mesh, metric) + out["times"]["adapt"] += perf_counter() + times["adapt"][-1] += out["times"]["adapt"] + print(f" Mesh {ct.fp_iteration+1}") + cells = mesh.num_cells() + print(f" Element count = {cells}") + if ct.check_elements(cells): + break + ct.check_maxiter() + print( + f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" + ) + times["all"][-1] += perf_counter() + qois.append(qoi) + 
dofs.append(dof) + elements.append(cells) + estimators.append(estimator) + niter.append(ct.fp_iteration + 1) + np.save(f"{model}/data/qois_ML{approach}_{test_case}_{tag}", qois) + np.save(f"{model}/data/dofs_ML{approach}_{test_case}_{tag}", dofs) + np.save(f"{model}/data/elements_ML{approach}_{test_case}_{tag}", elements) + np.save(f"{model}/data/estimators_ML{approach}_{test_case}_{tag}", estimators) + np.save(f"{model}/data/niter_ML{approach}_{test_case}_{tag}", niter) + np.save(f"{model}/data/times_all_ML{approach}_{test_case}_{tag}", times["all"]) + for c in components: + np.save(f"{model}/data/times_{c}_ML{approach}_{test_case}_{tag}", times[c]) + except ConvergenceError: + print("Skipping due to convergence error") + continue diff --git a/adaptation_one2n/run_fixed_mesh.py b/adaptation_one2n/run_fixed_mesh.py new file mode 100644 index 0000000..f8ad698 --- /dev/null +++ b/adaptation_one2n/run_fixed_mesh.py @@ -0,0 +1,48 @@ +""" +Run a given ``test_case`` of a ``model`` on the initial mesh alone. +""" +from nn_adapt.parse import Parser +from nn_adapt.solving import * +from thetis import print_output +from firedrake.petsc import PETSc +import importlib +from time import perf_counter + + +start_time = perf_counter() + +# Parse user input +parser = Parser("run_fixed_mesh.py") +parser.parse_num_refinements(default=0) +parsed_args, unknown_args = parser.parse_known_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") +if parsed_args.num_refinements > 0: + with PETSc.Log.Event("Hierarchy"): + mesh = MeshHierarchy(mesh, parsed_args.num_refinements)[-1] + +# Solve and evaluate QoI +out = get_solutions(mesh, setup, solve_adjoint=not parsed_args.optimise) +qoi = out["qoi"] +print_output(f"QoI for test case {test_case} = {qoi:.8f} {unit}") +if not parsed_args.optimise: + File(f"{model}/outputs/{test_case}/fixed/forward.pvd").write( + *out["forward"].split() + ) + File(f"{model}/outputs/{test_case}/fixed/adjoint.pvd").write( + *out["adjoint"].split() + ) +print_output(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_one2n/run_uniform_refinement.py b/adaptation_one2n/run_uniform_refinement.py new file mode 100644 index 0000000..be3b3a8 --- /dev/null +++ b/adaptation_one2n/run_uniform_refinement.py @@ -0,0 +1,76 @@ +""" +Run a given ``test_case`` of a ``model`` on a sequence of +uniformly refined meshes generated from the initial mesh. 
+""" +from nn_adapt.parse import Parser +from nn_adapt.solving import * +from thetis import print_output +import importlib +import numpy as np +from time import perf_counter + + +start_time = perf_counter() + +# Parse user input +parser = Parser("run_uniform_refinement.py") +parser.parse_num_refinements(default=3) +parser.add_argument( + "--prolong", help="Use previous solution as initial guess", action="store_true" +) +parsed_args = parser.parse_args() +model = parsed_args.model +try: + test_case = int(parsed_args.test_case) + assert test_case > 0 +except ValueError: + test_case = parsed_args.test_case +num_refinements = parsed_args.num_refinements + +# Setup +setup = importlib.import_module(f"{model}.config") +setup.initialise(test_case) +unit = setup.parameters.qoi_unit +mesh = Mesh(f"{model}/meshes/{test_case}.msh") +mh = [mesh] + list(MeshHierarchy(mesh, num_refinements)) +tm = TransferManager() +kwargs = {} + +# Run uniform refinement +qois, dofs, elements, times, niter = [], [], [], [], [] +setup_time = perf_counter() - start_time +print_output(f"Test case {test_case}") +print_output(f"Setup time: {setup_time:.2f} seconds") +for i, mesh in enumerate(mh): + start_time = perf_counter() + print_output(f" Mesh {i}") + print_output(f" Element count = {mesh.num_cells()}") + out = get_solutions(mesh, setup, solve_adjoint=False, **kwargs) + qoi, fwd_sol = out["qoi"], out["forward"] + + def prolong(V): + """ + After the first iteration, prolong the previous + solution as the initial guess. + """ + ic = Function(V) + tm.prolong(fwd_sol, ic) + return ic + + if parsed_args.prolong: + kwargs["init"] = prolong + fs = fwd_sol.function_space() + time = perf_counter() - start_time + print_output(f" Quantity of Interest = {qoi} {unit}") + print_output(f" Runtime: {time:.2f} seconds") + qois.append(qoi) + dofs.append(sum(fs.dof_count)) + times.append(time) + elements.append(mesh.num_cells()) + niter.append(1) + np.save(f"{model}/data/qois_uniform_{test_case}", qois) + np.save(f"{model}/data/dofs_uniform_{test_case}", dofs) + np.save(f"{model}/data/elements_uniform_{test_case}", elements) + np.save(f"{model}/data/times_all_uniform_{test_case}", times) + np.save(f"{model}/data/niter_uniform_{test_case}", niter) +print_output(f"Setup time: {setup_time:.2f} seconds") diff --git a/adaptation_one2n/test_and_train.py b/adaptation_one2n/test_and_train.py new file mode 100644 index 0000000..e3cfd9b --- /dev/null +++ b/adaptation_one2n/test_and_train.py @@ -0,0 +1,266 @@ +""" +Train a network on ``num_training_cases`` problem +specifications of a given ``model``. 
+""" +from nn_adapt.ann import * +from nn_adapt.parse import argparse, bounded_float, nonnegative_int, positive_float, positive_int + +import git +import importlib +import numpy as np +import os +from sklearn import model_selection +from time import perf_counter +import torch.optim.lr_scheduler as lr_scheduler + + +# Configuration +pwd = os.path.abspath(os.path.dirname(__file__)) +models = [ + name for name in os.listdir(pwd) + if os.path.isdir(name) and name not in ("__pycache__", "models") +] +parser = argparse.ArgumentParser( + prog="test_and_train.py", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument( + "-m", + "--model", + help="The equation set being solved", + type=str, + choices=models, + default="steady_turbine", +) +parser.add_argument( + "-n", + "--num_training_cases", + help="The number of test cases to train on", + type=positive_int, + default=100, +) +parser.add_argument( + "-a", + "--approaches", + nargs="+", + help="Adaptive approaches to consider", + choices=["isotropic", "anisotropic"], + default=["anisotropic"], +) +parser.add_argument( + "--adaptation_steps", + help="Steps to learn from", + type=positive_int, + default=3, +) +parser.add_argument( + "--lr", + help="Initial learning rate", + type=positive_float, + default=1.0e-03, +) +parser.add_argument( + "--lr_adapt_num_steps", + help="Frequency of learning rate adaptation", + type=nonnegative_int, + default=0, +) +parser.add_argument( + "--lr_adapt_factor", + help="Learning rate reduction factor", + type=bounded_float(0, 1), + default=0.99, +) +parser.add_argument( + "--lr_adapt_threshold", + help="Learning rate threshold", + type=bounded_float(0, 1), + default=1.0e-04, +) +parser.add_argument( + "--lr_adapt_patience", + help="The number of iterations before early adapting the learning rate", + type=positive_int, + default=np.inf, +) +parser.add_argument( + "--num_epochs", + help="The number of iterations", + type=positive_int, + default=2000, +) +parser.add_argument( + "--stopping_patience", + help="The number of iterations before early stopping", + type=positive_int, + default=np.inf, +) +parser.add_argument( + "--preproc", + help="Data preprocess function", + type=str, + choices=["none", "arctan", "tanh", "logabs"], + default="arctan", +) +parser.add_argument( + "--batch_size", + help="Data points per training iteration", + type=positive_int, + default=500, +) +parser.add_argument( + "--test_batch_size", + help="Data points per validation iteration", + type=positive_int, + default=500, +) +parser.add_argument( + "--test_size", + help="Data proportion for validation", + type=bounded_float(0, 1), + default=0.3, +) +parser.add_argument( + "--seed", + help="Seed for random number generator", + type=positive_int, + default=42, +) +parser.add_argument( + "--tag", + help="Tag for labelling the model (defaults to current git sha)", + type=str, + default=git.Repo(search_parent_directories=True).head.object.hexsha, +) +parsed_args = parser.parse_args() +model = parsed_args.model +approaches = parsed_args.approaches +preproc = parsed_args.preproc +num_epochs = parsed_args.num_epochs +lr = parsed_args.lr +lr_adapt_num_steps = parsed_args.lr_adapt_num_steps +lr_adapt_factor = parsed_args.lr_adapt_factor +lr_adapt_threshold = parsed_args.lr_adapt_threshold +lr_adapt_patience = parsed_args.lr_adapt_patience +stopping_patience = parsed_args.stopping_patience +test_size = parsed_args.test_size +batch_size = parsed_args.batch_size +test_batch_size = parsed_args.test_batch_size +seed = 
parsed_args.seed +tag = parsed_args.tag + +# Load network layout +layout = importlib.import_module(f"{model}.network").NetLayout() + +# Setup model +nn = SingleLayerFCNN(layout, preproc=preproc).to(device) +optimizer = torch.optim.Adam(nn.parameters(), lr=lr) +scheduler1 = lr_scheduler.ReduceLROnPlateau( + optimizer, + factor=lr_adapt_factor, + threshold=lr_adapt_threshold, + patience=lr_adapt_patience, + verbose=True, +) +if lr_adapt_num_steps > 0: + scheduler2 = lr_scheduler.StepLR( + optimizer, + lr_adapt_num_steps, + gamma=lr_adapt_factor + ) +else: + scheduler2 = None +criterion = Loss() + +# Increase batch size if running on GPU +cuda = all(p.is_cuda for p in nn.parameters()) +print(f"Model parameters are{'' if cuda else ' not'} using GPU cores.") +if cuda: + dtype = torch.float32 + batch_size *= 4 + test_batch_size *= 4 +else: + dtype = torch.float + +# Load data +concat = lambda a, b: b if a is None else np.concatenate((a, b), axis=0) +features = None +targets = None +data_dir = f"{model}/data" +for step in range(parsed_args.adaptation_steps): + for approach in approaches: + for case in range(1, parsed_args.num_training_cases + 1): + if case == 1 and approach != approaches[0]: + continue + suffix = f"{case}_GO{approach}_{step}" + feature = { + key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") + for key in layout.inputs + } + features = concat(features, collect_features(feature)) + target = np.load(f"{data_dir}/target_{suffix}.npy") + targets = concat(targets, target) +print(f"Total number of features: {len(features.flatten())}") +print(f"Total number of targets: {len(targets)}") +features = torch.from_numpy(features).type(dtype) +targets = torch.from_numpy(targets).type(dtype) + +# Get train and validation datasets +xtrain, xval, ytrain, yval = model_selection.train_test_split( + features, targets, test_size=test_size, random_state=seed +) +train_data = torch.utils.data.TensorDataset(torch.Tensor(xtrain), torch.Tensor(ytrain)) +train_loader = torch.utils.data.DataLoader( + train_data, batch_size=batch_size, shuffle=True, num_workers=0 +) +validate_data = torch.utils.data.TensorDataset(torch.Tensor(xval), torch.Tensor(yval)) +validate_loader = torch.utils.data.DataLoader( + validate_data, batch_size=test_batch_size, shuffle=False, num_workers=0 +) + +# Train +train_losses, validation_losses, lr_adapt_steps = [], [], [] +set_seed(seed) +previous_loss = np.inf +trigger_times = 0 +for epoch in range(num_epochs): + + # Training step + start_time = perf_counter() + train = propagate(train_loader, nn, criterion, optimizer) + mid_time = perf_counter() + train_time = mid_time - start_time + + # Validation step + val = propagate(validate_loader, nn, criterion) + validation_time = perf_counter() - mid_time + + # Adapt learning rate + scheduler1.step(val) + if scheduler2 is not None: + scheduler2.step() + if epoch % lr_adapt_num_steps == 0: + lr_adapt_steps.append(epoch) + np.save(f"{model}/data/lr_adapt_steps_{tag}", lr_adapt_steps) + + # Stash progress + print( + f"Epoch {epoch:4d}/{num_epochs:d}" + f" avg loss: {train:.4e} / {val:.4e}" + f" wallclock: {train_time:.2f}s / {validation_time:.2f}s" + ) + train_losses.append(train) + validation_losses.append(val) + np.save(f"{model}/data/train_losses_{tag}", train_losses) + np.save(f"{model}/data/validation_losses_{tag}", validation_losses) + torch.save(nn.state_dict(), f"{model}/model_{tag}.pt") + + # Test for convergence + if val > previous_loss: + trigger_times += 1 + if trigger_times >= stopping_patience: + print("Early stopping") + 
break + else: + trigger_times = 0 + previous_loss = val diff --git a/examples/.DS_Store b/examples/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Fri, 12 Aug 2022 14:37:51 +0100 Subject: [PATCH 07/13] some tests --- adaptation_one2n/a_test.py | 49 +++- adaptation_one2n/burgers_one2n/meshgen.py | 99 +++++++- adaptation_one2n/makefile | 2 +- adaptation_one2n/models/burgers_one2n.py | 282 ++++++++++++++++++--- adaptation_one2n/run_adapt.py | 1 - adaptation_one2n/run_adaptation_loop.py | 13 +- adaptation_one2n/run_adaptation_loop_ml.py | 16 +- build/lib/nn_adapt/__init__.py | 0 build/lib/nn_adapt/ann.py | 144 +++++++++++ build/lib/nn_adapt/features.py | 259 +++++++++++++++++++ build/lib/nn_adapt/layout.py | 61 +++++ build/lib/nn_adapt/metric.py | 107 ++++++++ build/lib/nn_adapt/metric_one2n.py | 112 ++++++++ build/lib/nn_adapt/model.py | 43 ++++ build/lib/nn_adapt/parse.py | 142 +++++++++++ build/lib/nn_adapt/plotting.py | 15 ++ build/lib/nn_adapt/solving.py | 227 +++++++++++++++++ build/lib/nn_adapt/solving_n2n.py | 253 ++++++++++++++++++ build/lib/nn_adapt/solving_one2n.py | 246 ++++++++++++++++++ build/lib/nn_adapt/utility.py | 66 +++++ examples/a_text.py | 117 +++++++++ examples/burgers/plot_pipe.py | 32 +++ examples/burgers/plotting.py | 83 ++++++ examples/makefile | 6 +- examples/models/burgers.py | 4 +- examples/run_adaptation_loop.py | 1 + examples/run_adaptation_loop_ml.py | 2 +- examples/turbine/.DS_Store | Bin 0 -> 6148 bytes nn_adapt/ann.py | 13 +- 29 files changed, 2333 insertions(+), 62 deletions(-) create mode 100644 build/lib/nn_adapt/__init__.py create mode 100644 build/lib/nn_adapt/ann.py create mode 100644 build/lib/nn_adapt/features.py create mode 100644 build/lib/nn_adapt/layout.py create mode 100644 build/lib/nn_adapt/metric.py create mode 100644 build/lib/nn_adapt/metric_one2n.py create mode 100644 build/lib/nn_adapt/model.py create mode 100644 build/lib/nn_adapt/parse.py create mode 100644 build/lib/nn_adapt/plotting.py create mode 100644 build/lib/nn_adapt/solving.py create mode 100644 build/lib/nn_adapt/solving_n2n.py create mode 100644 build/lib/nn_adapt/solving_one2n.py create mode 100644 build/lib/nn_adapt/utility.py create mode 100644 examples/a_text.py create mode 100644 examples/burgers/plot_pipe.py create mode 100644 examples/burgers/plotting.py create mode 100644 examples/turbine/.DS_Store diff --git a/adaptation_one2n/a_test.py b/adaptation_one2n/a_test.py index f3281b2..f8d8872 100644 --- a/adaptation_one2n/a_test.py +++ b/adaptation_one2n/a_test.py @@ -24,13 +24,46 @@ # out2 = indicate_errors_one2n(mesh=mesh, config=setup2) # print(out2) -# mesh = UnitSquareMesh(20, 20) -# setup2 = importlib.import_module(f"burgers_one2n.config") -# out2 = get_solutions_one2n(mesh=mesh, config=setup2) -# test_array = time_integrate(out2["forward"]) -# print(extract_array(test_array, centroid=True)) +mesh = UnitSquareMesh(20, 20) +setup2 = importlib.import_module(f"burgers_one2n.config") +out2 = get_solutions_one2n(mesh=mesh, config=setup2) +fwd_sol = out2["forward"] + + +# Adjoint solver +sp = { + "mat_type": "aij", + "snes_type": "newtonls", + "snes_linesearch_type": "bt", + 
"snes_rtol": 1.0e-08, + "snes_max_it": 100, + "ksp_type": "preonly", + "pc_type": "lu", + "pc_factor_mat_solver_type": "mumps", +} + +V = fwd_sol[-1].function_space() +q_star = Function(V) +F = setup2.Solver_one2n(mesh=mesh, ic=0, config=setup2).form +sol_temp = Function(V) +sol_temp.assign(fwd_sol[-1]) +J = setup2.get_qoi(mesh)(fwd_sol[-1]) +dJdq = derivative(J, fwd_sol[-1], TestFunction(V)) +q_star = [] +for i in range(1, 11): + V = fwd_sol[-i].function_space() + q_star = Function(V) + dFdq = derivative(F, fwd_sol[-i], TrialFunction(V)) + print(dFdq) + dFdq_transpose = adjoint(dFdq) + print("this step") + solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + q_star.append(sol_temp) + + +ee_file = File(f"out/adjoint.pvd") +# ee_file.write(*q_star.split()) -a = None -b = 1 -print(a or b) +for i in range(len(q_star)): + ee_file.write(*q_star[i].split()) diff --git a/adaptation_one2n/burgers_one2n/meshgen.py b/adaptation_one2n/burgers_one2n/meshgen.py index 3467cea..d25c405 100644 --- a/adaptation_one2n/burgers_one2n/meshgen.py +++ b/adaptation_one2n/burgers_one2n/meshgen.py @@ -1,2 +1,99 @@ def generate_geo(config, reverse=False): - return + """ + Given a configuration object for a given training + or testing case, generate a gmsh geometry file + defining the initial mesh, with all turbines + meshed explicitly. + + :arg config: the configuration file + :kwarg reverse: should the flow direction be reversed? + """ + if config.parameters.case in ("pipe", "headland"): + return + tc = config.parameters.turbine_coords + num_turbines = len(tc) + f = """// Domain and turbine specification +L = 1200.0; +W = 500.0; +D = 18.0; +dx_outer = 20.0; +dx_inner = 20.0; +""" + for i, xy in enumerate(tc): + f += "xt%d = %f; // x-location of turbine %d\n" % (i, xy[0], i) + f += "yt%d = %f; // y-location of turbine %d\n" % (i, xy[1], i) + f += """ +// Domain and turbine footprints +Point(1) = {0, 0, 0, dx_outer}; +Point(2) = {L, 0, 0, dx_outer}; +Point(3) = {L, W, 0, dx_outer}; +Point(4) = {0, W, 0, dx_outer}; +Line(1) = {1, 2}; +Line(2) = {2, 3}; +Line(3) = {3, 4}; +Line(4) = {4, 1}; +Physical Line(1) = {%d}; // Left boundary +Physical Line(2) = {%d}; // Right boundary +Physical Line(3) = {1, 3}; // Sides +Line Loop(1) = {1, 2, 3, 4}; // outside loop +""" % ( + 2 if reverse else 4, + 4 if reverse else 2, + ) + i = 5 + j = 2 + for k in range(num_turbines): + f += """ +Point(%d) = {xt%d-D/2, yt%d-D/2, 0., dx_inner}; +Point(%d) = {xt%d+D/2, yt%d-D/2, 0., dx_inner}; +Point(%d) = {xt%d+D/2, yt%d+D/2, 0., dx_inner}; +Point(%d) = {xt%d-D/2, yt%d+D/2, 0., dx_inner}; +Line(%d) = {%d, %d}; +Line(%d) = {%d, %d}; +Line(%d) = {%d, %d}; +Line(%d) = {%d, %d}; +Line Loop(%d) = {%d, %d, %d, %d}; +""" % ( + i, + k, + k, + i + 1, + k, + k, + i + 2, + k, + k, + i + 3, + k, + k, + i, + i, + i + 1, + i + 1, + i + 1, + i + 2, + i + 2, + i + 2, + i + 3, + i + 3, + i + 3, + i, + j, + i, + i + 1, + i + 2, + i + 3, + ) + i += 4 + j += 1 + f += """ +// Surfaces +Plane Surface(1) = %s; +Physical Surface(1) = {1}; // outside turbines +""" % set( + range(1, num_turbines + 2) + ) + for i in range(1, num_turbines + 1): + f += "Plane Surface(%d) = {%d};\n" % (i + 1, i + 1) + f += "Physical Surface(%d) = {%d}; // inside turbine %d\n" % (i + 1, i + 1, i) + return f[:-1] diff --git a/adaptation_one2n/makefile b/adaptation_one2n/makefile index d26d52a..f785aa0 100644 --- a/adaptation_one2n/makefile +++ b/adaptation_one2n/makefile @@ -4,7 +4,7 @@ all: setup network test APPROACHES = anisotropic MODEL = burgers_one2n -NUM_TRAINING_CASES = 1 
+NUM_TRAINING_CASES = 100 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all diff --git a/adaptation_one2n/models/burgers_one2n.py b/adaptation_one2n/models/burgers_one2n.py index 7075421..22d88fa 100644 --- a/adaptation_one2n/models/burgers_one2n.py +++ b/adaptation_one2n/models/burgers_one2n.py @@ -5,64 +5,141 @@ from firedrake.adjoint import get_solve_blocks import nn_adapt.model import nn_adapt.solving +from thetis import * + ''' A memory hungry method solving time dependent PDE. ''' - class Parameters(nn_adapt.model.Parameters): """ - Class encapsulating all parameters required for a simple - Burgers equation test case. + Class encapsulating all parameters required for the tidal + farm modelling test case. """ - qoi_name = "right boundary integral" - qoi_unit = r"m\,s^{-1}" + discrete = False + + qoi_name = "power output" + qoi_unit = "MW" # Adaptation parameters - h_min = 1.0e-10 # Minimum metric magnitude - h_max = 1.0 # Maximum metric magnitude + h_min = 1.0e-08 + h_max = 500.0 + + # time steps + tt_steps = 10 + timestep = 0.1 # Physical parameters + viscosity_coefficient = 0.5 + depth = 40.0 + drag_coefficient = Constant(0.0025) + inflow_speed = 5.0 + density = Constant(1030.0 * 1.0e-06) + + # Additional setup viscosity_coefficient = 0.0001 initial_speed = 1.0 - # Timestepping parameters - timestep = 0.05 - tt_steps = 10 + # Turbine parameters + turbine_diameter = 18.0 + turbine_width = None + turbine_coords = [] + thrust_coefficient = 0.8 + correct_thrust = True + + # Solver parameters + solver_parameters = { + "mat_type": "aij", + "snes_type": "newtonls", + "snes_linesearch_type": "bt", + "snes_rtol": 1.0e-08, + "snes_max_it": 100, + "ksp_type": "preonly", + "pc_type": "lu", + "pc_factor_mat_solver_type": "mumps", + } + adjoint_solver_parameters = solver_parameters - solver_parameters = {} - adjoint_solver_parameters = {} + @property + def num_turbines(self): + """ + Count the number of turbines based on the number + of coordinates. + """ + return len(self.turbine_coords) - def bathymetry(self, mesh): + @property + def turbine_ids(self): """ - Compute the bathymetry field on the current `mesh`. + Generate the list of turbine IDs, i.e. cell tags used + in the gmsh geometry file. + """ + if self.discrete: + return list(2 + np.arange(self.num_turbines, dtype=np.int32)) + else: + return ["everywhere"] - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. + @property + def footprint_area(self): """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) + Calculate the area of the turbine footprint in the horizontal. + """ + d = self.turbine_diameter + w = self.turbine_width or d + return d * w - def drag(self, mesh): + @property + def swept_area(self): """ - Compute the bathymetry field on the current `mesh`. + Calculate the area swept by the turbine in the vertical. + """ + return pi * (0.5 * self.turbine_diameter) ** 2 - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. + @property + def cross_sectional_area(self): """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) + Calculate the cross-sectional area of the turbine footprint + in the vertical. 
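The geometric properties above are plain arithmetic on the turbine diameter, width and water depth. A toy stand-in class (hypothetical, not part of the patch) that makes the relationships explicit:

```python
import math

class TurbineGeometry:
    """Toy stand-in for the geometric properties defined above."""

    def __init__(self, diameter=18.0, width=None, depth=40.0):
        self.diameter = diameter
        self.width = width
        self.depth = depth

    @property
    def footprint_area(self):  # horizontal footprint
        return self.diameter * (self.width or self.diameter)

    @property
    def swept_area(self):  # vertical disc swept by the rotor
        return math.pi * (0.5 * self.diameter) ** 2

    @property
    def cross_sectional_area(self):  # vertical cross-section of the footprint
        return self.depth * self.diameter

print(TurbineGeometry().swept_area)  # ~254.47 m^2 for an 18 m rotor
```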
+ """ + return self.depth * self.turbine_diameter - def viscosity(self, mesh): + @property + def corrected_thrust_coefficient(self): """ - Compute the viscosity coefficient on the current `mesh`. + Correct the thrust coefficient to account for the + fact that we use the velocity at the turbine, rather + than an upstream veloicity. + + See [Kramer and Piggott 2016] for details. """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(self.viscosity_coefficient) + Ct = self.thrust_coefficient + if not self.correct_thrust: + return Ct + At = self.swept_area + corr = 4.0 / (1.0 + sqrt(1.0 - Ct * At / self.cross_sectional_area)) ** 2 + return Ct * corr + + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + """ + # NOTE: We assume a constant bathymetry field + P0_2d = get_functionspace(mesh, "DG", 0) + return Function(P0_2d).assign(parameters.depth) + def u_inflow(self, mesh): + """ + Compute the inflow velocity based on the current `mesh`. + """ + # NOTE: We assume a constant inflow + return as_vector([self.inflow_speed, 0]) + + # def ic(self, mesh): + # """ + # Initial condition. + # """ + # return self.u_inflow(mesh) def ic(self, mesh): """ Initial condition @@ -73,6 +150,147 @@ def ic(self, mesh): yside = 0 return as_vector([expr, yside]) + def turbine_density(self, mesh): + """ + Compute the turbine density function on the current `mesh`. + """ + if self.discrete: + return Constant(1.0 / self.footprint_area, domain=mesh) + x, y = SpatialCoordinate(mesh) + r2 = self.turbine_diameter / 2 + r1 = r2 if self.turbine_width is None else self.turbine_width / 2 + + def bump(x0, y0, scale=1.0): + qx = ((x - x0) / r1) ** 2 + qy = ((y - y0) / r2) ** 2 + cond = And(qx < 1, qy < 1) + b = exp(1 - 1 / (1 - qx)) * exp(1 - 1 / (1 - qy)) + return conditional(cond, Constant(scale) * b, 0) + + bumps = 0 + for xy in self.turbine_coords: + bumps += bump(*xy, scale=1 / assemble(bump(*xy) * dx)) + return bumps + + def farm(self, mesh): + """ + Construct a dictionary of :class:`TidalTurbineFarmOptions` + objects based on the current `mesh`. + """ + Ct = self.corrected_thrust_coefficient + farm_options = TidalTurbineFarmOptions() + farm_options.turbine_density = self.turbine_density(mesh) + farm_options.turbine_options.diameter = self.turbine_diameter + farm_options.turbine_options.thrust_coefficient = Ct + return {farm_id: farm_options for farm_id in self.turbine_ids} + + def turbine_drag(self, mesh): + """ + Compute the contribution to the drag coefficient due to the + tidal turbine parametrisation on the current `mesh`. + """ + P0_2d = get_functionspace(mesh, "DG", 0) + p0test = TestFunction(P0_2d) + Ct = self.corrected_thrust_coefficient + At = self.swept_area + Cd = 0.5 * Ct * At * self.turbine_density(mesh) + return sum([p0test * Cd * dx(tag, domain=mesh) for tag in self.turbine_ids]) + + def drag(self, mesh, background=False): + r""" + Create a :math:`\mathbb P0` field for the drag on the current + `mesh`. + + :kwarg background: should we consider the background drag + alone, or should the turbine drag be included? 
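The thrust correction can be exercised in isolation; a sketch using the same formula as ``corrected_thrust_coefficient`` with the default constants from this file:

```python
from math import pi, sqrt

def corrected_thrust(ct=0.8, diameter=18.0, depth=40.0):
    """Kramer & Piggott (2016) correction: rescale C_T because the velocity
    at the turbine, not the upstream velocity, enters the drag term."""
    swept_area = pi * (0.5 * diameter) ** 2
    cross_section = depth * diameter
    return ct * 4.0 / (1.0 + sqrt(1.0 - ct * swept_area / cross_section)) ** 2

print(corrected_thrust())  # ~0.938 with these defaults
```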
+ """ + P0_2d = get_functionspace(mesh, "DG", 0) + ret = Function(P0_2d) + + # Background drag + Cb = self.drag_coefficient + if background: + return ret.assign(Cb) + p0test = TestFunction(P0_2d) + expr = p0test * Cb * dx(domain=mesh) + + # Turbine drag + assemble(expr + self.turbine_drag(mesh), tensor=ret) + return ret + + def viscosity(self, mesh): + r""" + Create a :math:`\mathbb P0` field for the viscosity coefficient + on the current `mesh`. + """ + # NOTE: We assume a constant viscosity coefficient + P0_2d = get_functionspace(mesh, "DG", 0) + return Function(P0_2d).assign(self.viscosity_coefficient) + + +# class Parameters(nn_adapt.model.Parameters): +# """ +# Class encapsulating all parameters required for a simple +# Burgers equation test case. +# """ + +# qoi_name = "right boundary integral" +# qoi_unit = r"m\,s^{-1}" + +# # Adaptation parameters +# h_min = 1.0e-10 # Minimum metric magnitude +# h_max = 1.0 # Maximum metric magnitude + +# # Physical parameters +# viscosity_coefficient = 0.0001 +# initial_speed = 1.0 + +# # Timestepping parameters +# timestep = 0.05 +# tt_steps = 10 + +# solver_parameters = {} +# adjoint_solver_parameters = {} + +# def bathymetry(self, mesh): +# """ +# Compute the bathymetry field on the current `mesh`. + +# Note that there isn't really a concept of bathymetry +# for Burgers equation. It is kept constant and should +# be ignored by the network. +# """ +# P0_2d = FunctionSpace(mesh, "DG", 0) +# return Function(P0_2d).assign(1.0) + +# def drag(self, mesh): +# """ +# Compute the bathymetry field on the current `mesh`. + +# Note that there isn't really a concept of bathymetry +# for Burgers equation. It is kept constant and should +# be ignored by the network. +# """ +# P0_2d = FunctionSpace(mesh, "DG", 0) +# return Function(P0_2d).assign(1.0) + +# def viscosity(self, mesh): +# """ +# Compute the viscosity coefficient on the current `mesh`. 
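The compactly supported bump underlying ``turbine_density`` is easy to sanity-check outside Firedrake; a NumPy sketch (grid extents and radius are arbitrary):

```python
import numpy as np

def bump(x, y, x0, y0, r=9.0):
    """Smooth bump supported on the square of half-width r about (x0, y0),
    mirroring the expression in turbine_density."""
    qx = ((x - x0) / r) ** 2
    qy = ((y - y0) / r) ** 2
    out = np.zeros_like(x, dtype=float)
    inside = (qx < 1) & (qy < 1)
    out[inside] = np.exp(1 - 1 / (1 - qx[inside])) * np.exp(1 - 1 / (1 - qy[inside]))
    return out

x, y = np.meshgrid(np.linspace(0, 100, 201), np.linspace(0, 100, 201))
b = bump(x, y, 50.0, 50.0)
print(b.max())  # 1.0 at the centre, decaying smoothly to zero at the edges
```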
+# """ +# P0_2d = FunctionSpace(mesh, "DG", 0) +# return Function(P0_2d).assign(self.viscosity_coefficient) + +# def ic(self, mesh): +# """ +# Initial condition +# """ +# x, y = SpatialCoordinate(mesh) +# expr = self.initial_speed * sin(pi * x) +# yside = self.initial_speed * sin(pi * y) +# yside = 0 +# return as_vector([expr, yside]) + PETSc.Sys.popErrorHandler() parameters = Parameters() @@ -272,5 +490,5 @@ def qoi(sol): return qoi -# Initial mesh for all test cases -initial_mesh = UnitSquareMesh(30, 30) +# # Initial mesh for all test cases +# initial_mesh = UnitSquareMesh(30, 30) diff --git a/adaptation_one2n/run_adapt.py b/adaptation_one2n/run_adapt.py index 9f04f8b..0b0ea21 100644 --- a/adaptation_one2n/run_adapt.py +++ b/adaptation_one2n/run_adapt.py @@ -8,7 +8,6 @@ from nn_adapt.features import * from nn_adapt.metric_one2n import * from nn_adapt.parse import Parser -from nn_adapt.solving import * from nn_adapt.solving_one2n import * from nn_adapt.utility import ConvergenceTracker from firedrake.meshadapt import adapt diff --git a/adaptation_one2n/run_adaptation_loop.py b/adaptation_one2n/run_adaptation_loop.py index 126616a..1c9f0e5 100644 --- a/adaptation_one2n/run_adaptation_loop.py +++ b/adaptation_one2n/run_adaptation_loop.py @@ -5,8 +5,8 @@ """ from nn_adapt.features import * from nn_adapt.parse import Parser, positive_float -from nn_adapt.metric import * -from nn_adapt.solving import * +from nn_adapt.metric_one2n import * +from nn_adapt.solving_one2n import * from nn_adapt.utility import ConvergenceTracker from firedrake.meshadapt import adapt @@ -53,6 +53,7 @@ times["all"] = [] print(f"Test case {test_case}") for i in range(num_refinements + 1): + print(f"\t{i} / {num_refinements}") try: target_complexity = 100.0 * 2 ** (f * i) kwargs = { @@ -83,7 +84,7 @@ ) # Compute goal-oriented metric - out = go_metric(mesh, setup, convergence_checker=ct, **kwargs) + out = go_metric_one2n(mesh, setup, convergence_checker=ct, **kwargs) qoi = out["qoi"] times["forward"][-1] += out["times"]["forward"] print(f" Quantity of Interest = {qoi} {unit}") @@ -101,7 +102,7 @@ out["adjoint"], ) dwr, metric = out["dwr"], out["metric"] - dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) print(f" DoF count = {dof}") def proj(V): @@ -111,9 +112,9 @@ def proj(V): """ ic = Function(V) try: - ic.project(fwd_sol) + ic.project(fwd_sol[-1]) except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol.split()): + for c_init, c in zip(ic.split(), fwd_sol[-1].split()): c_init.project(c) return ic diff --git a/adaptation_one2n/run_adaptation_loop_ml.py b/adaptation_one2n/run_adaptation_loop_ml.py index 018329b..fb8a1a6 100644 --- a/adaptation_one2n/run_adaptation_loop_ml.py +++ b/adaptation_one2n/run_adaptation_loop_ml.py @@ -6,8 +6,8 @@ from nn_adapt.ann import * from nn_adapt.features import * from nn_adapt.parse import Parser, positive_float -from nn_adapt.metric import * -from nn_adapt.solving import * +from nn_adapt.metric_one2n import * +from nn_adapt.solving_one2n import * from nn_adapt.utility import ConvergenceTracker from firedrake.meshadapt import * @@ -85,7 +85,7 @@ ) # Solve forward and adjoint and compute Hessians - out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) + out = get_solutions_one2n(mesh, setup, convergence_checker=ct, **kwargs) qoi = out["qoi"] times["forward"][-1] += out["times"]["forward"] print(f" Quantity of Interest = {qoi} {unit}") @@ -93,7 +93,7 @@ break 
times["adjoint"][-1] += out["times"]["adjoint"] fwd_sol, adj_sol = out["forward"], out["adjoint"] - dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) + dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) print(f" DoF count = {dof}") def proj(V): @@ -115,8 +115,10 @@ def proj(V): # Extract features out["times"]["estimator"] = -perf_counter() - features = extract_features(setup, fwd_sol, adj_sol) - features = collect_features(features, layout) + fwd_sol_integrate = time_integrate(fwd_sol) + adj_sol_integrate = time_integrate(adj_sol) + features = extract_features(setup, fwd_sol_integrate, adj_sol_integrate) + features = collect_features_sample(features, layout) # Run model test_targets = np.array([]) @@ -142,7 +144,7 @@ def proj(V): # Construct metric out["times"]["metric"] = -perf_counter() if approach == "anisotropic": - hessian = combine_metrics(*get_hessians(fwd_sol), average=True) + hessian = combine_metrics(*get_hessians(fwd_sol_integrate), average=True) else: hessian = None P1_ten = TensorFunctionSpace(mesh, "CG", 1) diff --git a/build/lib/nn_adapt/__init__.py b/build/lib/nn_adapt/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/nn_adapt/ann.py b/build/lib/nn_adapt/ann.py new file mode 100644 index 0000000..1db6ee8 --- /dev/null +++ b/build/lib/nn_adapt/ann.py @@ -0,0 +1,144 @@ +""" +Classes and functions related to using neural networks. +""" +import random +import numpy as np +import torch +from torch import nn + + +# Set device +if torch.cuda.device_count() > 0 and torch.cuda.is_available(): + dev = torch.cuda.current_device() + print(f"Cuda installed. Running on GPU {dev}.") + device = torch.device(f"cuda:{dev}") + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.enabled = True +else: + print("No GPU available.") + device = torch.device("cpu") + + +def set_seed(seed): + """ + Set all random seeds to a fixed value + + :arg seed: the seed value + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def sample_uniform(l, u): + """ + Sample from the continuous uniform + distribution :math:`U(l, u)`. + + :arg l: the lower bound + :arg u: the upper bound + """ + return l + (u - l) * np.random.rand() + + +class SingleLayerFCNN(nn.Module): + """ + Fully Connected Neural Network (FCNN) + for goal-oriented metric-based mesh + adaptation with a single hidden layer. + """ + + def __init__(self, layout, preproc="arctan"): + """ + :arg layout: class instance inherited from + :class:`NetLayoutBase`, with numbers of + inputs, hidden neurons and outputs + specified. 
+        :kwarg preproc: pre-processing function to
+            apply to the input data
+        """
+        super().__init__()
+
+        # Define preprocessing function
+        if preproc == "none":
+            self.preproc1 = lambda x: x
+        elif preproc == "arctan":
+            self.preproc1 = torch.arctan
+        elif preproc == "tanh":
+            self.preproc1 = torch.tanh
+        elif preproc == "logabs":
+            self.preproc1 = lambda x: torch.log(torch.abs(x))
+        else:
+            raise ValueError(f'Preprocessor "{preproc}" not recognised.')
+
+        # Define layers
+        self.linear1 = nn.Linear(layout.num_inputs, layout.num_hidden_neurons)
+        self.linear2 = nn.Linear(layout.num_hidden_neurons, 1)
+
+        # Define activation functions
+        self.activate1 = nn.Sigmoid()
+
+    def forward(self, x):
+        p = self.preproc1(x)
+        z1 = self.linear1(p)
+        a1 = self.activate1(z1)
+        z2 = self.linear2(a1)
+        return z2
+
+
+def propagate(data_loader, model, loss_fn, optimizer=None):
+    """
+    Propagate data from a :class:`DataLoader` object
+    through the neural network.
+
+    If ``optimizer`` is not ``None`` then training is
+    performed. Otherwise, validation is performed.
+
+    :arg data_loader: PyTorch :class:`DataLoader` instance
+    :arg model: PyTorch :class:`Module` instance
+    :arg loss_fn: PyTorch loss function instance
+    :arg optimizer: PyTorch optimizer instance
+    """
+    num_batches = len(data_loader)
+    cumulative_loss = 0
+
+    for x, y in data_loader:
+
+        # Compute prediction and loss
+        prediction = model(x.to(device))
+        loss = loss_fn(prediction, y.to(device))
+        cumulative_loss += loss.item()
+
+        # Backpropagation
+        if optimizer is not None:
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+    return cumulative_loss / num_batches
+
+
+def collect_features(feature_dict):
+    """
+    Given a dictionary of feature arrays, stack their
+    data appropriately to be fed into a neural network.
+
+    :arg feature_dict: dictionary containing feature data
+    """
+    dofs = [feature for key, feature in feature_dict.items() if "dofs" in key]
+    nodofs = [feature for key, feature in feature_dict.items() if "dofs" not in key]
+    return np.hstack((np.vstack(nodofs).transpose(), np.hstack(dofs)))
+
+
+def Loss():
+    """
+    Custom loss function.
+
+    Needed when there is only one output value.
+    """
+
+    def mse(output, target):
+        target = target.reshape(*output.shape)
+        return torch.nn.MSELoss(reduction="sum")(output, target)
+
+    return mse
diff --git a/build/lib/nn_adapt/features.py b/build/lib/nn_adapt/features.py
new file mode 100644
index 0000000..930861d
--- /dev/null
+++ b/build/lib/nn_adapt/features.py
@@ -0,0 +1,259 @@
+"""
+Functions for extracting feature data from configuration
+files, meshes and solution fields.
+"""
+import firedrake
+from firedrake.petsc import PETSc
+from firedrake import op2
+import numpy as np
+from pyroteus.metric import *
+import ufl
+from nn_adapt.solving import dwr_indicator
+from collections.abc import Iterable
+
+
+__all__ = ["extract_features", "get_values_at_elements"]
+
+
+@PETSc.Log.EventDecorator("Extract components")
+def extract_components(matrix):
+    r"""
+    Extract components of a matrix that describe its
+    size, orientation and shape.
+
+    The latter two components are combined in such
+    a way that we avoid errors relating to arguments
+    zero and :math:`2\pi` being equal.
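``collect_features``, defined above, produces one row per mesh element: scalar features become columns and per-element DoF vectors are appended on the right. A toy shape check with made-up data:

```python
import numpy as np

n = 4  # pretend element count
feature_dict = {
    "estimator_coarse": np.ones(n),
    "mesh_d": np.full(n, 2.0),
    "forward_dofs": np.zeros((n, 12)),
}
dofs = [v for k, v in feature_dict.items() if "dofs" in k]
nodofs = [v for k, v in feature_dict.items() if "dofs" not in k]
table = np.hstack((np.vstack(nodofs).transpose(), np.hstack(dofs)))
print(table.shape)  # (4, 14): two scalar features plus 12 forward DoFs per element
```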
+    """
+    density, quotients, evecs = density_and_quotients(matrix, reorder=True)
+    fs = density.function_space()
+    ar = firedrake.interpolate(ufl.sqrt(quotients[1]), fs)
+    armin = ar.vector().gather().min()
+    assert armin >= 1.0, f"An element has aspect ratio less than one ({armin})"
+    theta = firedrake.interpolate(ufl.atan(evecs[1, 1] / evecs[1, 0]), fs)
+    h1 = firedrake.interpolate(ufl.cos(theta) ** 2 / ar + ufl.sin(theta) ** 2 * ar, fs)
+    h2 = firedrake.interpolate((1 / ar - ar) * ufl.sin(theta) * ufl.cos(theta), fs)
+    return density, h1, h2
+
+
+@PETSc.Log.EventDecorator("Extract elementwise")
+def get_values_at_elements(f):
+    """
+    Extract the values for all degrees of freedom associated
+    with each element.
+
+    :arg f: some :class:`Function`
+    :return: a vector :class:`Function` holding all DoFs of `f`
+    """
+    fs = f.function_space()
+    mesh = fs.mesh()
+    dim = mesh.topological_dimension()
+    if dim == 2:
+        assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only"
+    elif dim == 3:
+        assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only"
+    else:
+        raise ValueError(f"Dimension {dim} not supported")
+    el = fs.ufl_element()
+    if el.sub_elements() == []:
+        p = el.degree()
+        size = el.value_size() * (p + 1) * (p + 2) // 2
+    else:
+        size = 0
+        for sel in el.sub_elements():
+            p = sel.degree()
+            size += sel.value_size() * (p + 1) * (p + 2) // 2
+    P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size)
+    values = firedrake.Function(P0_vec)
+    kernel = "for (int i=0; i < vertexwise.dofs; i++) elementwise[i] += vertexwise[i];"
+    keys = {"vertexwise": (f, op2.READ), "elementwise": (values, op2.INC)}
+    firedrake.par_loop(kernel, ufl.dx, keys)
+    return values
+
+
+@PETSc.Log.EventDecorator("Extract at centroids")
+def get_values_at_centroids(f):
+    """
+    Extract the values for the function at each element centroid,
+    along with all derivatives up to the :math:`p^{th}`, where
+    :math:`p` is the polynomial degree.
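The element-vector sizing above uses the Lagrange DoF count on a triangle. As a quick sanity check (helper name is illustrative):

```python
def dofs_per_triangle(degree, value_size=1):
    """Lagrange DoFs on a triangle: value_size * (p + 1) * (p + 2) / 2,
    the sizing formula used above."""
    return value_size * (degree + 1) * (degree + 2) // 2

assert dofs_per_triangle(1) == 3                 # scalar P1
assert dofs_per_triangle(2) == 6                 # scalar P2
assert dofs_per_triangle(2, value_size=2) == 12  # 2D vector P2, cf. dofs_per_element = 12
```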
+ + :arg f: some :class:`Function` + :return: a vector :class:`Function` holding all DoFs of `f` + """ + fs = f.function_space() + mesh = fs.mesh() + dim = mesh.topological_dimension() + if dim == 2: + assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" + elif dim == 3: + assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" + else: + raise ValueError(f"Dimension {dim} not supported") + el = fs.ufl_element() + if el.sub_elements() == []: + p = el.degree() + degrees = [p] + size = el.value_size() * (p + 1) * (p + 2) // 2 + funcs = [f] + else: + size = 0 + degrees = [sel.degree() for sel in el.sub_elements()] + for sel, p in zip(el.sub_elements(), degrees): + size += sel.value_size() * (p + 1) * (p + 2) // 2 + funcs = f + values = firedrake.Function(firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size)) + P0 = firedrake.FunctionSpace(mesh, "DG", 0) + P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0) + P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) + i = 0 + for func, p in zip(funcs, degrees): + values.dat.data[:, i] = firedrake.project(func, P0).dat.data_ro + i += 1 + if p == 0: + continue + g = firedrake.project(ufl.grad(func), P0_vec) + values.dat.data[:, i] = g.dat.data_ro[:, 0] + values.dat.data[:, i + 1] = g.dat.data_ro[:, 1] + i += 2 + if p == 1: + continue + H = firedrake.project(ufl.grad(ufl.grad(func)), P0_ten) + values.dat.data[:, i] = H.dat.data_ro[:, 0, 0] + values.dat.data[:, i + 1] = 0.5 * ( + H.dat.data_ro[:, 0, 1] + H.dat.data_ro[:, 1, 0] + ) + values.dat.data[:, i + 2] = H.dat.data_ro[:, 1, 1] + i += 3 + if p > 2: + raise NotImplementedError( + "Polynomial degrees greater than 2 not yet considered" + ) + return values + + +# def time_integrate(list_like): +# length = len(list_like) +# result = 0 +# for step in range(length): +# result += list_like[step] +# return firedrake.product((result, 1/length)) + + +def split_into_scalars(f): + """ + Given a :class:`Function`, split it into + components from its constituent scalar + spaces. + + If it is not mixed then no splitting is + required. + + :arg f: the mixed :class:`Function` + :return: a dictionary containing the + nested structure of the mixed function + """ + V = f.function_space() + if V.value_size > 1: + if not isinstance(V.node_count, Iterable): + assert len(V.shape) == 1, "Tensor spaces not supported" + el = V.ufl_element() + fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) + return {0: [firedrake.interpolate(f[i], fs) for i in range(V.shape[0])]} + subspaces = [V.sub(i) for i in range(len(V.node_count))] + ret = {} + for i, (Vi, fi) in enumerate(zip(subspaces, f.split())): + if len(Vi.shape) == 0: + ret[i] = [fi] + else: + assert len(Vi.shape) == 1, "Tensor spaces not supported" + el = Vi.ufl_element() + fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) + ret[i] = [firedrake.interpolate(fi[j], fs) for j in range(Vi.shape[0])] + return ret + else: + return {0: [f]} + + +def extract_array(f, mesh=None, centroid=False, project=False): + r""" + Extract a cell-wise data array from a :class:`Constant` or + :class:`Function`. + + For constants and scalar fields, this will be an :math:`n\times 1` + array, where :math:`n` is the number of mesh elements. For a mixed + field with :math:`m` components, it will be :math:`n\times m`. 
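For the centroid variant, each scalar component contributes its value plus derivatives up to the polynomial degree. A comment-level summary of the packing, derived from the projections above:

```python
# Entries packed per element and per scalar component by get_values_at_centroids:
#   p = 0: value                                    -> 1 entry
#   p = 1: value, du/dx, du/dy                      -> 3 entries
#   p = 2: value, grad (2), symmetrised Hessian (3) -> 6 entries
entries = {0: 1, 1: 3, 2: 6}
assert 2 * entries[2] == 12  # a 2D vector P2 field again gives 12 entries per element
```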
+ + :arg f: the :class:`Constant` or :class:`Function` + :kwarg mesh: the underlying :class:`MeshGeometry` + :kwarg project: if ``True``, project the field into + :math:`\mathbb P0` space + """ + mesh = mesh or f.ufl_domain() + if isinstance(f, firedrake.Constant): + ones = np.ones(mesh.num_cells()) + assert len(f.values()) == 1 + return f.values()[0] * ones + elif not isinstance(f, firedrake.Function): + raise ValueError(f"Unexpected input type {type(f)}") + if project: + if len(f.function_space().shape) > 0: + raise NotImplementedError("Can currently only project scalar fields") # TODO + element = f.ufl_element() + if (element.family(), element.degree()) != ("Discontinuous Lagrange", 0): + P0 = firedrake.FunctionSpace(mesh, "DG", 0) + f = firedrake.project(f, P0) + s = sum([fi for i, fi in split_into_scalars(f).items()], start=[]) + get = get_values_at_centroids if centroid else get_values_at_elements + if len(s) == 1: + return get(s[0]).dat.data + else: + return np.hstack([get(si).dat.data for si in s]) + + +@PETSc.Log.EventDecorator("Extract features") +def extract_features(config, fwd_sol, adj_sol): + """ + Extract features from the outputs of a run. + + :arg config: the configuration file + :arg fwd_sol: the forward solution + :arg adj_sol: the adjoint solution + :return: a list of feature arrays + """ + mesh = fwd_sol.function_space().mesh() + + # Coarse-grained DWR estimator + with PETSc.Log.Event("Extract estimator"): + dwr = dwr_indicator(config, mesh, fwd_sol, adj_sol) + + # Features describing the mesh element + with PETSc.Log.Event("Analyse element"): + P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) + + # Element size, orientation and shape + J = ufl.Jacobian(mesh) + JTJ = firedrake.interpolate(ufl.dot(ufl.transpose(J), J), P0_ten) + d, h1, h2 = (extract_array(p) for p in extract_components(JTJ)) + + # Is the element on the boundary? + p0test = firedrake.TestFunction(dwr.function_space()) + bnd = firedrake.assemble(p0test * ufl.ds).dat.data + + # Combine the features together + features = { + "estimator_coarse": extract_array(dwr), + "physics_drag": extract_array(config.parameters.drag(mesh)), + "physics_viscosity": extract_array(config.parameters.viscosity(mesh), project=True), + "physics_bathymetry": extract_array(config.parameters.bathymetry(mesh), project=True), + "mesh_d": d, + "mesh_h1": h1, + "mesh_h2": h2, + "mesh_bnd": bnd, + "forward_dofs": extract_array(fwd_sol, centroid=True), + "adjoint_dofs": extract_array(adj_sol, centroid=True), + } + for key, value in features.items(): + assert not np.isnan(value).any() + return features diff --git a/build/lib/nn_adapt/layout.py b/build/lib/nn_adapt/layout.py new file mode 100644 index 0000000..060502f --- /dev/null +++ b/build/lib/nn_adapt/layout.py @@ -0,0 +1,61 @@ +""" +Classes for defining the layout of a neural network. +""" + + +class NetLayoutBase(object): + """ + Base class for specifying the number + of inputs, hidden neurons and outputs + in a neural network. + + The derived class should give values + for each of these parameters. 
+ """ + + # TODO: Allow more general networks + + colours = { + "estimator": "b", + "physics": "C0", + "mesh": "deepskyblue", + "forward": "mediumturquoise", + "adjoint": "mediumseagreen", + } + + def __init__(self): + if not hasattr(self, "inputs"): + raise ValueError("Need to set self.inputs") + colours = set(self.colours.keys()) + for i in self.inputs: + okay = False + for c in colours: + if i.startswith(c): + okay = True + break + if not okay: + raise ValueError("Input names must begin with one of {colours}") + if not hasattr(self, "num_hidden_neurons"): + raise ValueError("Need to set self.num_hidden_neurons") + if not hasattr(self, "dofs_per_element"): + raise ValueError("Need to set self.dofs_per_element") + + def count_inputs(self, prefix): + """ + Count all scalar inputs that start with a given `prefix`. + """ + cnt = 0 + for i in self.inputs: + if i.startswith(prefix): + if i in ("forward_dofs", "adjoint_dofs"): + cnt += self.dofs_per_element + else: + cnt += 1 + return cnt + + @property + def num_inputs(self): + """ + The total number of scalar inputs. + """ + return self.count_inputs("") diff --git a/build/lib/nn_adapt/metric.py b/build/lib/nn_adapt/metric.py new file mode 100644 index 0000000..22ed97e --- /dev/null +++ b/build/lib/nn_adapt/metric.py @@ -0,0 +1,107 @@ +""" +Functions for generating Riemannian metrics from solution +fields. +""" +from pyroteus import * +from nn_adapt.features import split_into_scalars +from nn_adapt.solving import * +from firedrake.meshadapt import RiemannianMetric +from time import perf_counter + + +def get_hessians(f, **kwargs): + """ + Compute Hessians for each component of + a :class:`Function`. + + Any keyword arguments are passed to + ``recover_hessian``. + + :arg f: the function + :return: list of Hessians of each + component + """ + kwargs.setdefault("method", "Clement") + return [ + space_normalise(hessian_metric(recover_hessian(fij, **kwargs)), 4000.0, "inf") + for i, fi in split_into_scalars(f).items() + for fij in fi + ] + + +def go_metric( + mesh, + config, + enrichment_method="h", + target_complexity=4000.0, + average=True, + interpolant="Clement", + anisotropic=False, + retall=False, + convergence_checker=None, + **kwargs, +): + """ + Compute an anisotropic goal-oriented + metric field, based on a mesh and + a configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? + :kwarg target_complexity: target complexity + of the goal-oriented metric + :kwarg average: should the Hessian components + be combined using averaging (or intersection)? + :kwarg interpolant: which method to use to + interpolate into the target space? + :kwarg anisotropic: toggle isotropic vs. 
+ anisotropic metric + :kwarg h_min: minimum magnitude + :kwarg h_max: maximum magnitude + :kwarg a_max: maximum anisotropy + :kwarg retall: if ``True``, the error indicator, + forward solution and adjoint solution + are returned, in addition to the metric + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + """ + h_min = kwargs.pop("h_min", 1.0e-30) + h_max = kwargs.pop("h_max", 1.0e+30) + a_max = kwargs.pop("a_max", 1.0e+30) + out = indicate_errors( + mesh, + config, + enrichment_method=enrichment_method, + retall=True, + convergence_checker=convergence_checker, + **kwargs, + ) + if retall and "adjoint" not in out: + return out + out["estimator"] = out["dwr"].vector().gather().sum() + if convergence_checker is not None: + if convergence_checker.check_estimator(out["estimator"]): + return out + + out["times"]["metric"] = -perf_counter() + with PETSc.Log.Event("Metric construction"): + if anisotropic: + hessian = combine_metrics(*get_hessians(out["forward"]), average=average) + else: + hessian = None + metric = anisotropic_metric( + out["dwr"], + hessian=hessian, + target_complexity=target_complexity, + target_space=TensorFunctionSpace(mesh, "CG", 1), + interpolant=interpolant, + ) + space_normalise(metric, target_complexity, "inf") + enforce_element_constraints(metric, h_min, h_max, a_max) + out["metric"] = RiemannianMetric(mesh) + out["metric"].assign(metric) + out["times"]["metric"] += perf_counter() + return out if retall else out["metric"] diff --git a/build/lib/nn_adapt/metric_one2n.py b/build/lib/nn_adapt/metric_one2n.py new file mode 100644 index 0000000..ba25a04 --- /dev/null +++ b/build/lib/nn_adapt/metric_one2n.py @@ -0,0 +1,112 @@ +""" +Functions for generating Riemannian metrics from solution +fields. +""" +from pyroteus import * +from nn_adapt.features import split_into_scalars +from nn_adapt.solving import * +from nn_adapt.solving_one2n import * +from firedrake.meshadapt import RiemannianMetric +from time import perf_counter + + +def get_hessians(f, **kwargs): + """ + Compute Hessians for each component of + a :class:`Function`. + + Any keyword arguments are passed to + ``recover_hessian``. + + :arg f: the function + :return: list of Hessians of each + component + """ + kwargs.setdefault("method", "Clement") + return [ + space_normalise(hessian_metric(recover_hessian(fij, **kwargs)), 4000.0, "inf") + for i, fi in split_into_scalars(f).items() + for fij in fi + ] + + +def go_metric_one2n( + mesh, + config, + enrichment_method="h", + target_complexity=4000.0, + average=True, + interpolant="Clement", + anisotropic=False, + retall=False, + convergence_checker=None, + **kwargs, +): + """ + Compute an anisotropic goal-oriented + metric field, based on a mesh and + a configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? + :kwarg target_complexity: target complexity + of the goal-oriented metric + :kwarg average: should the Hessian components + be combined using averaging (or intersection)? + :kwarg interpolant: which method to use to + interpolate into the target space? + :kwarg anisotropic: toggle isotropic vs. 
+ anisotropic metric + :kwarg h_min: minimum magnitude + :kwarg h_max: maximum magnitude + :kwarg a_max: maximum anisotropy + :kwarg retall: if ``True``, the error indicator, + forward solution and adjoint solution + are returned, in addition to the metric + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + """ + h_min = kwargs.pop("h_min", 1.0e-30) + h_max = kwargs.pop("h_max", 1.0e+30) + a_max = kwargs.pop("a_max", 1.0e+30) + out = indicate_errors_one2n( + mesh, + config, + enrichment_method=enrichment_method, + retall=True, + convergence_checker=convergence_checker, + **kwargs, + ) + if retall and "adjoint" not in out: + return out + out["estimator"] = out["dwr"].vector().gather().sum() + if convergence_checker is not None: + if convergence_checker.check_estimator(out["estimator"]): + return out + + tt_steps = len(out["forward"]) + out["times"]["metric"] = -perf_counter() + with PETSc.Log.Event("Metric construction"): + if anisotropic: + hessian_list = [] + for step in range(tt_steps): + hessian_list.append(combine_metrics(*get_hessians(out["forward"][step]), average=average)) + hessian = time_integrate(hessian_list) + else: + hessian = None + metric = anisotropic_metric( + out["dwr"], + hessian=hessian, + target_complexity=target_complexity, + target_space=TensorFunctionSpace(mesh, "CG", 1), + interpolant=interpolant, + ) + space_normalise(metric, target_complexity, "inf") + enforce_element_constraints(metric, h_min, h_max, a_max) + out["metric"] = RiemannianMetric(mesh) + out["metric"].assign(metric) + out["times"]["metric"] += perf_counter() + return out if retall else out["metric"] diff --git a/build/lib/nn_adapt/model.py b/build/lib/nn_adapt/model.py new file mode 100644 index 0000000..f3587f9 --- /dev/null +++ b/build/lib/nn_adapt/model.py @@ -0,0 +1,43 @@ +import abc + + +class Parameters(abc.ABC): + """ + Abstract base class defining the API for parameter + classes that describe PDE models. + """ + + def __init__(self): + self.case = None + if not hasattr(self, "qoi_name"): + raise NotImplementedError("qoi_name attribute must be set") + if not hasattr(self, "qoi_unit"): + raise NotImplementedError("qoi_unit attribute must be set") + + @abc.abstractmethod + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + """ + pass + + @abc.abstractmethod + def drag(self, mesh): + """ + Compute the drag coefficient on the current `mesh`. + """ + pass + + @abc.abstractmethod + def viscosity(self, mesh): + """ + Compute the viscosity coefficient on the current `mesh`. + """ + pass + + @abc.abstractmethod + def ic(self, mesh): + """ + Compute the initial condition on the current `mesh`. 
+        """
+        pass
diff --git a/build/lib/nn_adapt/parse.py b/build/lib/nn_adapt/parse.py
new file mode 100644
index 0000000..7565690
--- /dev/null
+++ b/build/lib/nn_adapt/parse.py
@@ -0,0 +1,142 @@
+import argparse
+import git
+import numpy as np
+
+
+__all__ = ["Parser"]
+
+
+def _check_in_range(value, typ, l, u):
+    tvalue = typ(value)
+    if not (tvalue >= l and tvalue <= u):
+        raise argparse.ArgumentTypeError(f"{value} is not in [{l}, {u}]")
+    return tvalue
+
+
+def _check_strictly_in_range(value, typ, l, u):
+    tvalue = typ(value)
+    if not (tvalue > l and tvalue < u):
+        raise argparse.ArgumentTypeError(f"{value} is not in ({l}, {u})")
+    return tvalue
+
+
+nonnegative_float = lambda value: _check_in_range(value, float, 0, np.inf)
+nonnegative_int = lambda value: _check_in_range(value, int, 0, np.inf)
+positive_float = lambda value: _check_strictly_in_range(value, float, 0, np.inf)
+positive_int = lambda value: _check_strictly_in_range(value, int, 0, np.inf)
+
+
+def bounded_float(l, u):
+    def chk(value):
+        return _check_in_range(value, float, l, u)
+
+    return chk
+
+
+def bounded_int(l, u):
+    def chk(value):
+        return _check_in_range(value, int, l, u)
+
+    return chk
+
+
+class Parser(argparse.ArgumentParser):
+    """
+    Custom :class:`ArgumentParser` for `nn_adapt`.
+    """
+
+    def __init__(self, prog):
+        super().__init__(
+            prog=prog, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+        )
+        self.add_argument("model", help="The model", type=str)
+        self.add_argument("test_case", help="The configuration file number or name")
+        self.add_argument(
+            "--optimise",
+            help="Turn off plotting and debugging",
+            action="store_true",
+        )
+
+    def parse_convergence_criteria(self):
+        self.add_argument(
+            "--miniter",
+            help="Minimum number of iterations",
+            type=positive_int,
+            default=3,
+        )
+        self.add_argument(
+            "--maxiter",
+            help="Maximum number of iterations",
+            type=positive_int,
+            default=35,
+        )
+        self.add_argument(
+            "--qoi_rtol",
+            help="Relative tolerance for QoI",
+            type=positive_float,
+            default=0.001,
+        )
+        self.add_argument(
+            "--element_rtol",
+            help="Element count tolerance",
+            type=positive_float,
+            default=0.001,
+        )
+        self.add_argument(
+            "--estimator_rtol",
+            help="Error estimator tolerance",
+            type=positive_float,
+            default=0.001,
+        )
+
+    def parse_num_refinements(self, default=4):
+        self.add_argument(
+            "--num_refinements",
+            help="Number of mesh refinements",
+            type=positive_int,
+            default=default,
+        )
+
+    def parse_approach(self):
+        self.add_argument(
+            "-a",
+            "--approach",
+            help="Adaptive approach to consider",
+            choices=["isotropic", "anisotropic"],
+            default="anisotropic",
+        )
+        self.add_argument(
+            "--transfer",
+            help="Transfer the solution from the previous mesh as initial guess",
+            action="store_true",
+        )
+
+    def parse_target_complexity(self):
+        self.add_argument(
+            "--base_complexity",
+            help="Base metric complexity",
+            type=positive_float,
+            default=200.0,
+        )
+        self.add_argument(
+            "--target_complexity",
+            help="Target metric complexity",
+            type=positive_float,
+            default=4000.0,
+        )
+
+    def parse_preproc(self):
+        self.add_argument(
+            "--preproc",
+            help="Data preprocess function",
+            type=str,
+            choices=["none", "arctan", "tanh", "logabs"],
+            default="arctan",
+        )
+
+    def parse_tag(self):
+        self.add_argument(
+            "--tag",
+            help="Model tag (defaults to current git commit sha)",
+            default=git.Repo(search_parent_directories=True).head.object.hexsha,
+        )
diff --git a/build/lib/nn_adapt/plotting.py b/build/lib/nn_adapt/plotting.py
new file mode 100644
index 0000000..3822c3a
--- /dev/null +++ b/build/lib/nn_adapt/plotting.py @@ -0,0 +1,15 @@ +""" +Configuration for plotting. +""" +import matplotlib +import matplotlib.pyplot as plt # noqa + + +matplotlib.rc("text", usetex=True) +matplotlib.rcParams["mathtext.fontset"] = "custom" +matplotlib.rcParams["mathtext.rm"] = "Bitstream Vera Sans" +matplotlib.rcParams["mathtext.it"] = "Bitstream Vera Sans:italic" +matplotlib.rcParams["mathtext.bf"] = "Bitstream Vera Sans:bold" +matplotlib.rcParams["mathtext.fontset"] = "stix" +matplotlib.rcParams["font.family"] = "STIXGeneral" +matplotlib.rcParams["font.size"] = 12 diff --git a/build/lib/nn_adapt/solving.py b/build/lib/nn_adapt/solving.py new file mode 100644 index 0000000..1ba4ee9 --- /dev/null +++ b/build/lib/nn_adapt/solving.py @@ -0,0 +1,227 @@ +""" +Functions for solving problems defined by configuration +files and performing goal-oriented error estimation. +""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. + """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def get_solutions( + mesh, + config, + solve_adjoint=True, + refined_mesh=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? 
+ :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + # Solve forward problem in base space + V = config.get_function_space(mesh) + out = {"times": {"forward": -perf_counter()}} + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.Solver(mesh, ic, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + J = config.get_qoi(mesh)(q) + qoi = assemble(J) + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + if convergence_checker.check_qoi(qoi): + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + q_star = Function(V) + F = solver_obj.form + dFdq = derivative(F, q, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) + out["adjoint"] = q_star + out["times"]["adjoint"] += perf_counter() + + if refined_mesh is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_mesh) + q_plus = Function(V) + solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) + q_plus = solver_obj.solution + J = config.get_qoi(refined_mesh)(q_plus) + F = solver_obj.form + tm.prolong(q, q_plus) + q_star_plus = Function(V) + dFdq = derivative(F, q_plus, TrialFunction(V)) + dFdq_transpose = adjoint(dFdq) + dJdq = derivative(J, q_plus, TestFunction(V)) + solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) + out["enriched_adjoint"] = q_star_plus + out["times"]["estimator"] += perf_counter() + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed :class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? 
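The adjoint solve here follows the standard discrete pattern: assemble :math:`(\partial F/\partial q)^T q^* = \partial J/\partial q` and solve for :math:`q^*`. A minimal self-contained Poisson analogue (toy residual and QoI, assuming Firedrake; not the solver used by the configurations):

```python
from firedrake import *

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)
u = Function(V)
v = TestFunction(V)
x, y = SpatialCoordinate(mesh)
F = inner(grad(u), grad(v)) * dx - x * v * dx  # toy residual F(u; v) = 0
bc = DirichletBC(V, 0.0, 1)                    # hold the left boundary
solve(F == 0, u, bcs=bc)                       # forward solve

J = u * ds(2)                                  # toy QoI: right-boundary integral
u_star = Function(V)
dFdu = derivative(F, u, TrialFunction(V))
dJdu = derivative(J, u, TestFunction(V))
solve(adjoint(dFdu) == dJdu, u_star, bcs=bc)   # adjoint solve (bc already homogeneous)
```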
+ :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + with PETSc.Log.Event("Enrichment"): + mesh, ref_mesh = MeshHierarchy(mesh, 1) + + # Solve the forward and adjoint problems + out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + + # Prolong + V_plus = adj_sol_plus.function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(adj_sol_plus) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. + + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr diff --git a/build/lib/nn_adapt/solving_n2n.py b/build/lib/nn_adapt/solving_n2n.py new file mode 100644 index 0000000..39143ef --- /dev/null +++ b/build/lib/nn_adapt/solving_n2n.py @@ -0,0 +1,253 @@ +""" +Time dependent goal-oriented error estimation +""" +""" +Functions for solving problems defined by configuration +files and performing goal-oriented error estimation. +""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from firedrake_adjoint import * +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. + """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def get_solutions_n2n( + meshes, + config, + solve_adjoint=True, + refined_meshes=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. 
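[Editorial sketch: `indicate_errors` follows the standard dual-weighted residual recipe — solve on a uniformly refined mesh ("h" enrichment), prolong the coarse forward and adjoint solutions, weight the residual by the adjoint error, and project back to P0 on the base mesh. A usage sketch under the same assumptions as above.]

    eta = indicate_errors(mesh, config)       # P0 field of elementwise indicators
    estimator = eta.vector().gather().sum()   # scalar error estimate
    print(f"estimated QoI error: {estimator:.4e}")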
+ Trying to work it out. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? + :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + tt_steps = config.parameters.tt_steps + + # Solve forward problem in base space + V = config.get_function_space(meshes[-1]) + out = {"times": {"forward": -perf_counter()}} + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.Solver_n2n(meshes, ic=0, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + # Calculate QoI + qoi = 0 + for step in range(tt_steps): + J = config.get_qoi(V)(q[-1]) + qoi += assemble(J) + qoi = qoi / tt_steps + + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + if not convergence_checker.check_qoi(qoi): + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + with PETSc.Log.Event("Adjoint solve"): + sp = config.parameters.adjoint_solver_parameters + adj_solution = [] + dJdu, solve_blocks = solver_obj.adjoint_setup() + + for step in range(tt_steps-1): + adjoint_solution = solve_blocks[step].adj_sol + adj_solution.append(adjoint_solution) + + # initial condition for adjoint solution + adj_solution.append(dJdu) + out["adjoint"] = adj_solution + out["times"]["adjoint"] += perf_counter() + if refined_meshes is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_meshes[-1]) + q_plus = Function(V) + solver_obj_plus = config.Solver_n2n(refined_meshes, q_plus, **kwargs) + solver_obj_plus.iterate() + q_plus = solver_obj_plus.solution + # J = config.get_qoi(refined_mesh[-1])(q_plus[-1]) + adj_solution_plus = [] + dJdu_plus, solve_blocks_plus = solver_obj_plus.adjoint_setup() + + for step in range(tt_steps-1): + adjoint_solution_plus = solve_blocks_plus[step].adj_sol + adj_solution_plus.append(adjoint_solution_plus) + + adj_solution_plus.append(dJdu_plus) + out["enriched_adjoint"] = adj_solution_plus + out["times"]["estimator"] += perf_counter() + + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed :class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors_n2n(meshes, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? 
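[Editorial sketch: unlike the steady variant, `get_solutions_n2n` recovers adjoint data from pyadjoint's tape — `adjoint_setup()` is expected to return the terminal condition `dJdu` plus the recorded solve blocks, whose `adj_sol` attributes are only populated after a reverse sweep. A sketch of that pattern, mirroring examples/a_text.py later in this series; `solver`, `qoi_form` and the control `nu` are hypothetical.]

    from firedrake import *
    from firedrake_adjoint import *

    tape = get_working_tape()
    tape.clear_tape()
    solver.iterate()                  # annotated forward solve
    stop_annotating()
    J = assemble(qoi_form)
    compute_gradient(J, Control(nu))  # reverse sweep fills in adj_sol
    solve_blocks = get_solve_blocks()
    q_star = [block.adj_sol for block in solve_blocks]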
+ :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + # with PETSc.Log.Event("Enrichment"): + mesh_list = [] + ref_mesh_list = [] + tt_steps = len(meshes) + for i in range(tt_steps): + mesh, ref_mesh = MeshHierarchy(meshes[i], 1) + mesh_list.append(mesh) + ref_mesh_list.append(ref_mesh) + + # Solve the forward and adjoint problems + out = get_solutions_n2n(meshes=mesh_list, config=config, refined_meshes=ref_mesh_list, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + # with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + dwr_list = [] + + for step in range(tt_steps): + # Prolong + V_plus = out["enriched_adjoint"][step].function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"][step], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"][step], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(out["enriched_adjoint"][step]) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + dwr_list.append(dwr_indicator(config, mesh, fwd_sol_plg, adj_error)) + out["dwr"] = dwr_list + + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. + + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr diff --git a/build/lib/nn_adapt/solving_one2n.py b/build/lib/nn_adapt/solving_one2n.py new file mode 100644 index 0000000..2eb5801 --- /dev/null +++ b/build/lib/nn_adapt/solving_one2n.py @@ -0,0 +1,246 @@ +""" +Time dependent goal-oriented error estimation +""" +""" +Functions for solving problems defined by configuration +files and performing goal-oriented error estimation. +""" +from firedrake import * +from firedrake.petsc import PETSc +from firedrake.mg.embedded import TransferManager +from firedrake_adjoint import * +from pyroteus.error_estimation import get_dwr_indicator +import abc +from time import perf_counter + + +tm = TransferManager() + + +class Solver(abc.ABC): + """ + Base class that defines the API for solver objects. + """ + + @abc.abstractmethod + def __init__(self, mesh, ic, **kwargs): + """ + Setup the solver. + + :arg mesh: the mesh to define the solver on + :arg ic: the initial condition + """ + pass + + @property + @abc.abstractmethod + def function_space(self): + """ + The function space that the PDE is solved in. + """ + pass + + @property + @abc.abstractmethod + def form(self): + """ + Return the weak form. + """ + pass + + @abc.abstractmethod + def iterate(self, **kwargs): + """ + Solve the PDE. 
+ """ + pass + + @property + @abc.abstractmethod + def solution(self): + """ + Return the solution field. + """ + pass + + +def time_integrate(list_like): + length = len(list_like) + result = 0 + for step in range(length): + result += list_like[step] + return product((result, 1/length)) + + +def get_solutions_one2n( + mesh, + config, + solve_adjoint=True, + refined_mesh=None, + init=None, + convergence_checker=None, + **kwargs, +): + """ + Solve forward and adjoint equations on a + given mesh. + + This works only for steady-state problems. + Trying to work it out. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg solve_adjoint: should we solve the + adjoint problem? + :kwarg refined_mesh: refined mesh to compute + enriched adjoint solution on + :kwarg init: custom initial condition function + :kwarg convergence_checker: :class:`ConvergenceTracer` + instance + :return: forward solution, adjoint solution + and enriched adjoint solution (if requested) + """ + + tt_steps = config.parameters.tt_steps + + # Solve forward problem in base space + V = config.get_function_space(mesh) + out = {"times": {"forward": -perf_counter()}} + with PETSc.Log.Event("Forward solve"): + if init is None: + ic = config.get_initial_condition(V) + else: + ic = init(V) + solver_obj = config.Solver_one2n(mesh, ic, **kwargs) + solver_obj.iterate() + q = solver_obj.solution + # Calculate QoI + qoi_list = [] + for step in range(tt_steps): + J = config.get_qoi(V)(q[step]) + qoi_list.append(assemble(J)) + qoi = time_integrate(qoi_list) + + out["times"]["forward"] += perf_counter() + out["qoi"] = qoi + out["forward"] = q + if convergence_checker is not None: + if convergence_checker.check_qoi(qoi): + return out + if not solve_adjoint: + return out + + # Solve adjoint problem in base space + out["times"]["adjoint"] = -perf_counter() + with PETSc.Log.Event("Adjoint solve"): + solver_obj.adjoint_iteration() + out["adjoint"] = solver_obj.adj_solution + + out["times"]["adjoint"] += perf_counter() + if refined_mesh is None: + return out + + # Solve adjoint problem in enriched space + out["times"]["estimator"] = -perf_counter() + with PETSc.Log.Event("Enrichment"): + V = config.get_function_space(refined_mesh) + q_plus = Function(V) + solver_obj_plus = config.Solver_one2n(refined_mesh, q_plus, **kwargs) + solver_obj_plus.iterate() + q_plus = solver_obj_plus.solution + adj_solution_plus = [] + solver_obj_plus.adjoint_iteration() + adj_solution_plus = solver_obj_plus.adj_solution + out["enriched_adjoint"] = adj_solution_plus + + out["times"]["estimator"] += perf_counter() + + return out + + +def split_into_components(f): + r""" + Extend the :attr:`split` method to apply + to non-mixed :class:`Function`\s. + """ + return [f] if f.function_space().value_size == 1 else f.split() + + +def indicate_errors_one2n(mesh, config, enrichment_method="h", retall=False, **kwargs): + """ + Indicate errors according to ``dwr_indicator``, + using the solver given in the configuration file. + + :arg mesh: input mesh + :arg config: configuration file, which + specifies the PDE and QoI + :kwarg enrichment_method: how to enrich the + finite element space? 
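[Editorial note: `time_integrate` above is a uniform-weight time average — written via `product` so that the same code path also scales Firedrake objects. Assuming those semantics, the scalar case reduces to a plain mean, and the QoI returned by `get_solutions_one2n` is the mean of the per-timestep QoI values.]

    qois = [0.2, 0.4, 0.6]   # hypothetical per-step QoI values
    assert abs(time_integrate(qois) - sum(qois) / len(qois)) < 1e-12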
+ :kwarg retall: if ``True``, return the forward + solution and adjoint solution in addition + to the dual-weighted residual error indicator + """ + if not enrichment_method == "h": + raise NotImplementedError # TODO + with PETSc.Log.Event("Enrichment"): + tt_steps = config.parameters.tt_steps + mesh, ref_mesh = MeshHierarchy(mesh, 1) + + # Solve the forward and adjoint problems + out = get_solutions_one2n(mesh=mesh, config=config, refined_mesh=ref_mesh, **kwargs) + if retall and "adjoint" not in out: + return out + + out["times"]["estimator"] -= perf_counter() + + with PETSc.Log.Event("Enrichment"): + adj_sol_plus = out["enriched_adjoint"] + dwr_list = [] + + for step in range(tt_steps): + # Prolong + V_plus = adj_sol_plus[step].function_space() + fwd_sol_plg = Function(V_plus) + tm.prolong(out["forward"][step], fwd_sol_plg) + adj_sol_plg = Function(V_plus) + tm.prolong(out["adjoint"][step], adj_sol_plg) + + # Subtract prolonged adjoint solution from enriched version + adj_error = Function(V_plus) + adj_sols_plus = split_into_components(adj_sol_plus[step]) + adj_sols_plg = split_into_components(adj_sol_plg) + for i, err in enumerate(split_into_components(adj_error)): + err += adj_sols_plus[i] - adj_sols_plg[i] + + # Evaluate errors + dwr_list.append(dwr_indicator(config, mesh, fwd_sol_plg, adj_error)) + + out["dwr"] = time_integrate(dwr_list) + + out["times"]["estimator"] += perf_counter() + + return out if retall else out["dwr"] + + +def dwr_indicator(config, mesh, q, q_star): + r""" + Evaluate the DWR error indicator as a :math:`\mathbb P0` field. + + :arg mesh: the current mesh + :arg q: the forward solution, transferred into enriched space + :arg q_star: the adjoint solution in enriched space + """ + mesh_plus = q.function_space().mesh() + + # Extract indicator in enriched space + solver_obj = config.Solver(mesh_plus, q) + F = solver_obj.form + V = solver_obj.function_space + dwr_plus = get_dwr_indicator(F, q_star, test_space=V) + + # Project down to base space + P0 = FunctionSpace(mesh, "DG", 0) + dwr = project(dwr_plus, P0) + dwr.interpolate(abs(dwr)) + return dwr diff --git a/build/lib/nn_adapt/utility.py b/build/lib/nn_adapt/utility.py new file mode 100644 index 0000000..229de16 --- /dev/null +++ b/build/lib/nn_adapt/utility.py @@ -0,0 +1,66 @@ +__all__ = ["ConvergenceTracker"] + + +class ConvergenceTracker: + """ + Class for checking convergence of fixed point + iteration loops. + """ + + def __init__(self, mesh, parsed_args): + self.qoi_old = None + self.elements_old = mesh.num_cells() + self.estimator_old = None + self.converged_reason = None + self.qoi_rtol = parsed_args.qoi_rtol + self.element_rtol = parsed_args.element_rtol + self.estimator_rtol = parsed_args.estimator_rtol + self.fp_iteration = 0 + self.miniter = parsed_args.miniter + self.maxiter = parsed_args.maxiter + assert self.maxiter >= self.miniter + + def check_maxiter(self): + """ + Check for reaching maximum number of iterations. + """ + converged = False + if self.fp_iteration >= self.maxiter: + self.converged_reason = "reaching maximum iteration count" + converged = True + return converged + + def _chk(self, val, old, rtol, reason): + converged = False + if old is not None and self.fp_iteration >= self.miniter: + if abs(val - old) < rtol * abs(old): + self.converged_reason = reason + converged = True + return converged + + def check_qoi(self, val): + """ + Check for QoI convergence. 
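[Editorial note: the `_chk` helper above declares convergence once at least `miniter` iterations have run and the relative change drops below tolerance, i.e. |val - old| < rtol * |old|. For illustration:]

    # Relative-change test used by ConvergenceTracker._chk (illustration only)
    def rel_change(val, old):
        return abs(val - old) / abs(old)

    assert rel_change(1.0005, 1.0) < 0.001   # 0.05% change: converged at qoi_rtol = 0.001
    assert rel_change(1.01, 1.0) > 0.001     # 1% change: keep iterating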
+ """ + r = "QoI convergence" + converged = self._chk(val, self.qoi_old, self.qoi_rtol, r) + self.qoi_old = val + return converged + + def check_estimator(self, val): + """ + Check for error estimator convergence. + """ + r = "error estimator convergence" + converged = self._chk(val, self.estimator_old, self.estimator_rtol, r) + self.estimator_old = val + return converged + + def check_elements(self, val): + """ + Check for mesh element count convergence. + """ + r = "element count convergence" + converged = self._chk(val, self.elements_old, self.element_rtol, r) + self.elements_old = val + return converged diff --git a/examples/a_text.py b/examples/a_text.py new file mode 100644 index 0000000..cab171a --- /dev/null +++ b/examples/a_text.py @@ -0,0 +1,117 @@ +from copy import deepcopy +from thetis import * +from firedrake.adjoint import * +from firedrake import * +from firedrake_adjoint import * + +import numpy as np + +lx = 40e3 +ly = 2e3 +nx = 25 +ny = 20 +mesh2d = RectangleMesh(nx, ny, lx, ly) + + +def get_function_space(mesh): + """ + Construct the (mixed) finite element space used for the + prognostic solution. + """ + P1v_2d = get_functionspace(mesh, "DG", 1, vector=True) + P2_2d = get_functionspace(mesh, "CG", 2) + return P1v_2d * P2_2d + + +def get_qoi(mesh): + """ + Extract the quantity of interest function from the :class:`Parameters` + object. + + It should have one argument - the prognostic solution. + """ + def qoi(sol): + return inner(sol, sol) * ds(2) + + return qoi + + +P1_2d = FunctionSpace(mesh2d, 'CG', 1) +bathymetry_2d = Function(P1_2d, name='Bathymetry') +depth = 20.0 +bathymetry_2d.assign(depth) + +# total duration in seconds +t_end = 50 +# export interval in seconds +t_export = 10 + +solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d) +options = solver_obj.options +options.simulation_export_time = t_export +options.simulation_end_time = t_end +options.quadratic_drag_coefficient = Constant(0.0025) + +options.swe_timestepper_type = 'CrankNicolson' +options.timestep = 10.0 + +elev_init = Function(P1_2d, name='initial elevation') + +xy = SpatialCoordinate(mesh2d) +gauss_width = 4000. 
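+# NOTE: the expression assembled below is a Gaussian hump centred at x = lx/2,
+#   eta0(x) = gauss_ampl * exp(-((x - lx/2) / gauss_width)**2),
+# i.e. a 2 m high, 4 km wide free-surface disturbance in the 40 km channel.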
+gauss_ampl = 2.0 +gauss_expr = gauss_ampl * exp(-((xy[0]-lx/2)/gauss_width)**2) + +elev_init.interpolate(gauss_expr) + +tape = get_working_tape() +tape.clear_tape() + +# Setup forward solution +solver_obj.assign_initial_conditions(elev=elev_init) +solver_obj.iterate() +fwd_sol = solver_obj.fields.solution_2d + +stop_annotating(); +solve_blocks = get_solve_blocks() +J_form = inner(fwd_sol, fwd_sol)*ds(2) +J = assemble(J_form) +drag_func = Control(solver_obj.options.quadratic_drag_coefficient) +g = compute_gradient(J, drag_func) + +q_star = solve_blocks[0].adj_sol +print(q_star) + +# # Adjoint solver +# sp = { +# "mat_type": "aij", +# "snes_type": "newtonls", +# "snes_linesearch_type": "bt", +# "snes_rtol": 1.0e-08, +# "snes_max_it": 100, +# "ksp_type": "preonly", +# "pc_type": "lu", +# "pc_factor_mat_solver_type": "mumps", +# } + +# V = fwd_sol.function_space() +# q_star = Function(V) +# F = solver_obj.timestepper.F +# sol_temp = Function(V) +# sol_temp.assign(fwd_sol) +# J = get_qoi(mesh2d)(fwd_sol) +# dJdq = derivative(J, fwd_sol, TestFunction(V)) +# q_star = [] +# for i in range(10): +# dFdq = derivative(F, sol_temp, TrialFunction(V)) +# dFdq_transpose = adjoint(dFdq) +# print("this step") +# solve(dFdq_transpose == dJdq, sol_temp, solver_parameters=sp) +# q_star.append(sol_temp) + + +ee_file = File(f"out/adjoint.pvd") +ee_file.write(*q_star.split()) + +# for i in range(len(q_star)): +# ee_file.write(*q_star[i].split()) diff --git a/examples/burgers/plot_pipe.py b/examples/burgers/plot_pipe.py new file mode 100644 index 0000000..894b2c4 --- /dev/null +++ b/examples/burgers/plot_pipe.py @@ -0,0 +1,32 @@ +from firedrake import * +from nn_adapt.plotting import * + + +matplotlib.rcParams["font.size"] = 12 + +# Plot mesh +fig, axes = plt.subplots(figsize=(6, 3)) +mesh = Mesh("meshes/pipe.msh") +triplot( + mesh, + axes=axes, + boundary_kw={"color": "dodgerblue"}, + interior_kw={"edgecolor": "w"}, +) + +# Add turbine footprints +P0 = FunctionSpace(mesh, "DG", 0) +footprints = assemble(sum(TestFunction(P0) * dx(tag) for tag in (2, 3))) +footprints.interpolate(conditional(footprints > 0, 0, 1)) +tricontourf(footprints, axes=axes, cmap="Blues", levels=[0, 1]) + +# Annotate with physical parameters +txt = r"""$\nu = 100.0$ +$b = 50.0$ +$u_{\mathrm{in}} = \widetilde{y}^2(1-\widetilde{y})^2$""" +xy = (940, 10) +axes.annotate(txt, xy=xy, bbox={"fc": "w"}) + +axes.axis(False) +plt.tight_layout() +plt.savefig("plots/pipe.pdf") diff --git a/examples/burgers/plotting.py b/examples/burgers/plotting.py new file mode 100644 index 0000000..295f10b --- /dev/null +++ b/examples/burgers/plotting.py @@ -0,0 +1,83 @@ +from firedrake import * +import matplotlib +import numpy as np + + +matplotlib.rcParams["font.size"] = 12 + + +def plot_config(config, mesh, axes): + """ + Plot a given configuration of a problem on a given + mesh and axes. 
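[Editorial sketch: a usage example for this helper, following the plotting conventions of plot_pipe.py above; the config module and mesh file names are hypothetical.]

    import importlib
    import matplotlib.pyplot as plt
    from firedrake import Mesh

    config = importlib.import_module("steady_turbine.config")  # hypothetical
    mesh = Mesh("meshes/channel.msh")                          # hypothetical mesh
    fig, axes = plt.subplots(figsize=(6, 3))
    plot_config(config, mesh, axes)
    plt.tight_layout()
    plt.savefig("plots/config.pdf")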
+ """ + tags = config.parameters.turbine_ids + P0 = FunctionSpace(mesh, "DG", 0) + footprints = assemble(sum(TestFunction(P0) * dx(tag) for tag in tags)) + footprints.interpolate(conditional(footprints > 0, 0, 1)) + triplot( + mesh, + axes=axes, + boundary_kw={"color": "dodgerblue"}, + interior_kw={"edgecolor": "w"}, + ) + tricontourf(footprints, axes=axes, cmap="Blues", levels=[0, 1]) + + # Bounding box + xmin = 0 + xmax = 1200 + ymin = 0 + ymax = 500 + eps = 5 + + # Adjust axes + W = assemble(Constant(1.0, domain=mesh) * ds(1)) + L = 0.5 * assemble( + Constant(1.0, domain=mesh) * ds(3) + ) # NOTE: both top and bottom are tagged as 3 + dL = 0.5 * (xmax - L) + dW = 0.5 * (ymax - W) + axes.axis(False) + axes.set_xlim([xmin - dL - eps, xmax - dL + eps]) + axes.set_ylim([ymin - dW - eps, ymax - dW + eps]) + + # Annotate with viscosity coefficient and bathymetry + nu = config.parameters.viscosity_coefficient + b = config.parameters.depth + u_in = config.parameters.inflow_speed + txt = r"$\nu$ = %.3f, $b$ = %.2f, $u_{\mathrm{in}}$ = %.2f" % (nu, b, u_in) + axes.annotate( + txt, xy=(0.025 * L, -0.25 * W), bbox={"fc": "w"}, annotation_clip=False + ) + + +def process_sensitivities(data, layout): + """ + Separate sensitivity experiment data by variable. + + :arg data: the output of `compute_importance.py` + :arg layout: the :class:`NetLayout` instance + """ + i = 0 + sensitivities = {} + dofs = {"u": 3, "v": 3, r"\eta": 6} + for label in ("estimator", "physics", "mesh", "forward", "adjoint"): + n = layout.count_inputs(label) + if n == 0: + continue + if label in ("forward", "adjoint"): + assert n == sum(dofs.values()) + for key, dof in dofs.items(): + S = np.zeros(6) + for j in range(dof): + S[j] = data[i + j] + l = (r"$%s$" if label == "forward" else r"$%s^*$") % key + sensitivities[l] = S + i += dof + else: + S = np.zeros(6) + for j in range(n): + S[j] = data[i + j] + i += n + sensitivities[label.capitalize()] = S + return sensitivities diff --git a/examples/makefile b/examples/makefile index d26d52a..940718c 100644 --- a/examples/makefile +++ b/examples/makefile @@ -3,8 +3,8 @@ all: setup network test # --- Configurable parameters APPROACHES = anisotropic -MODEL = burgers_one2n -NUM_TRAINING_CASES = 1 +MODEL = steady_turbine +NUM_TRAINING_CASES = 100 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all @@ -89,6 +89,8 @@ clean: # --- Construct the neural network network: features train plot_progress plot_importance +train_one: train plot_progress plot_importance +test_one: snapshot_ml ml # Generate feature data # ===================== diff --git a/examples/models/burgers.py b/examples/models/burgers.py index bbf01d6..9debd1b 100644 --- a/examples/models/burgers.py +++ b/examples/models/burgers.py @@ -172,5 +172,5 @@ def qoi(sol): return qoi -# Initial mesh for all test cases -initial_mesh = UnitSquareMesh(30, 30) +# # Initial mesh for all test cases +# initial_mesh = UnitSquareMesh(30, 30) diff --git a/examples/run_adaptation_loop.py b/examples/run_adaptation_loop.py index 126616a..2f78353 100644 --- a/examples/run_adaptation_loop.py +++ b/examples/run_adaptation_loop.py @@ -53,6 +53,7 @@ times["all"] = [] print(f"Test case {test_case}") for i in range(num_refinements + 1): + print(f"\t{i} / {num_refinements}") try: target_complexity = 100.0 * 2 ** (f * i) kwargs = { diff --git a/examples/run_adaptation_loop_ml.py b/examples/run_adaptation_loop_ml.py index 018329b..7c2422f 100644 --- a/examples/run_adaptation_loop_ml.py +++ 
b/examples/run_adaptation_loop_ml.py @@ -116,7 +116,7 @@ def proj(V): # Extract features out["times"]["estimator"] = -perf_counter() features = extract_features(setup, fwd_sol, adj_sol) - features = collect_features(features, layout) + features = collect_features(features) # Run model test_targets = np.array([]) diff --git a/examples/turbine/.DS_Store b/examples/turbine/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..f823c1fdaf801e254bff610ce47bbbd79077c8af GIT binary patch literal 6148 zcmeHK!AiqG5S^`6Q;N`oipK@71tUd^cnMYU;6;q+L8UgPXfVx|CT)>Y$WedDPw{)4 z+1*M@^(G=^24>&x?7W113A;N0AR7H}7oY+F4mx3>gv~cb^W<|@u#qC7(9cLuFV5Y5 zn)T*#Iq*Lfpl`PfU5G(J2GifK2xkyM0^w{%Fig8atOAU_2i-VIGuQnPg{9*1%Br*K zl$|^ONe%t1mG#p`t9L`AOQqsq(r*PEjcU+aDdVQ}d_Yd~Qocce2zPbdNo1|{3ErkS)9Nd zcBWQ5@Ow$Dl1GdZV|pgd;_Ri{euz7R37L@O;61z3UKRDkve ziB9NQ%nj Date: Sat, 20 Aug 2022 04:15:42 +0100 Subject: [PATCH 08/13] using pyroteus --- adaptation_n2n/models/burgers_n2n.py | 2 +- adaptation_one2n/a_test.py | 138 +++--- adaptation_one2n/burgers_one2n/.DS_Store | Bin 0 -> 6148 bytes adaptation_one2n/makefile | 2 +- adaptation_one2n/models/burgers_one2n.py | 524 ++++++++++++---------- build/lib/nn_adapt/__init__.py | 0 build/lib/nn_adapt/ann.py | 144 ------ build/lib/nn_adapt/features.py | 259 ----------- build/lib/nn_adapt/layout.py | 61 --- build/lib/nn_adapt/metric.py | 107 ----- build/lib/nn_adapt/metric_one2n.py | 112 ----- build/lib/nn_adapt/model.py | 43 -- build/lib/nn_adapt/parse.py | 142 ------ build/lib/nn_adapt/plotting.py | 15 - build/lib/nn_adapt/solving.py | 227 ---------- build/lib/nn_adapt/solving_n2n.py | 253 ----------- build/lib/nn_adapt/solving_one2n.py | 246 ---------- build/lib/nn_adapt/utility.py | 66 --- examples/a_text.py | 117 ----- examples/makefile | 10 +- examples/models/pyroteus_burgers.py | 230 ++++++++++ examples/models/pyroteus_turbine.py | 400 +++++++++++++++++ examples/models/steady_turbine.py | 19 +- examples/run_adapt.py | 3 + examples/steady_turbine/testing_cases.txt | 2 +- 25 files changed, 1022 insertions(+), 2100 deletions(-) create mode 100644 adaptation_one2n/burgers_one2n/.DS_Store delete mode 100644 build/lib/nn_adapt/__init__.py delete mode 100644 build/lib/nn_adapt/ann.py delete mode 100644 build/lib/nn_adapt/features.py delete mode 100644 build/lib/nn_adapt/layout.py delete mode 100644 build/lib/nn_adapt/metric.py delete mode 100644 build/lib/nn_adapt/metric_one2n.py delete mode 100644 build/lib/nn_adapt/model.py delete mode 100644 build/lib/nn_adapt/parse.py delete mode 100644 build/lib/nn_adapt/plotting.py delete mode 100644 build/lib/nn_adapt/solving.py delete mode 100644 build/lib/nn_adapt/solving_n2n.py delete mode 100644 build/lib/nn_adapt/solving_one2n.py delete mode 100644 build/lib/nn_adapt/utility.py delete mode 100644 examples/a_text.py create mode 100644 examples/models/pyroteus_burgers.py create mode 100644 examples/models/pyroteus_turbine.py diff --git a/adaptation_n2n/models/burgers_n2n.py b/adaptation_n2n/models/burgers_n2n.py index a21b6e7..bdd6cb3 100644 --- a/adaptation_n2n/models/burgers_n2n.py +++ b/adaptation_n2n/models/burgers_n2n.py @@ -29,7 +29,7 @@ class Parameters(nn_adapt.model.Parameters): # Timestepping parameters timestep = 0.05 - tt_steps = 10 + tt_steps = 20 solver_parameters = {} adjoint_solver_parameters = {} diff --git a/adaptation_one2n/a_test.py b/adaptation_one2n/a_test.py index f8d8872..f32ef2f 100644 --- a/adaptation_one2n/a_test.py +++ b/adaptation_one2n/a_test.py @@ -1,69 +1,101 @@ -from nn_adapt.features import * 
-from nn_adapt.features import extract_array -from nn_adapt.metric import * -from nn_adapt.parse import Parser -from nn_adapt.solving_one2n import * -from nn_adapt.solving_n2n import * -from nn_adapt.solving import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import adapt -from firedrake.petsc import PETSc +# from nn_adapt.features import * +# from nn_adapt.features import extract_array +# from nn_adapt.metric import * +# from nn_adapt.parse import Parser +# from nn_adapt.solving_one2n import * +# from nn_adapt.solving_n2n import * +# from nn_adapt.solving import * +# from nn_adapt.utility import ConvergenceTracker +# from firedrake.meshadapt import adapt +# from firedrake.petsc import PETSc -import importlib -import numpy as np +# import importlib +# import numpy as np -tt_steps = 10 +# tt_steps = 10 -# setup1 = importlib.import_module(f"burgers_n2n.config") -# meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] -# out1 = indicate_errors_n2n(meshes=meshes, config=setup1) -# print(out1) +# # setup1 = importlib.import_module(f"burgers_n2n.config") +# # meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] +# # out1 = indicate_errors_n2n(meshes=meshes, config=setup1) +# # print(out1) # mesh = UnitSquareMesh(20, 20) # setup2 = importlib.import_module(f"burgers_one2n.config") # out2 = indicate_errors_one2n(mesh=mesh, config=setup2) # print(out2) -mesh = UnitSquareMesh(20, 20) -setup2 = importlib.import_module(f"burgers_one2n.config") -out2 = get_solutions_one2n(mesh=mesh, config=setup2) -fwd_sol = out2["forward"] +# # mesh = UnitSquareMesh(20, 20) +# # setup2 = importlib.import_module(f"burgers_one2n.config") +# # out2 = get_solutions_one2n(mesh=mesh, config=setup2) +# # fwd_sol = out2["forward"] -# Adjoint solver -sp = { - "mat_type": "aij", - "snes_type": "newtonls", - "snes_linesearch_type": "bt", - "snes_rtol": 1.0e-08, - "snes_max_it": 100, - "ksp_type": "preonly", - "pc_type": "lu", - "pc_factor_mat_solver_type": "mumps", -} +# lines = [[1,6,8,5], [1,3,7,6,5], [2,8,5]] +# length = 3 -V = fwd_sol[-1].function_space() -q_star = Function(V) -F = setup2.Solver_one2n(mesh=mesh, ic=0, config=setup2).form -sol_temp = Function(V) -sol_temp.assign(fwd_sol[-1]) -J = setup2.get_qoi(mesh)(fwd_sol[-1]) -dJdq = derivative(J, fwd_sol[-1], TestFunction(V)) -q_star = [] -for i in range(1, 11): - V = fwd_sol[-i].function_space() - q_star = Function(V) - dFdq = derivative(F, fwd_sol[-i], TrialFunction(V)) - print(dFdq) - dFdq_transpose = adjoint(dFdq) - print("this step") - solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - q_star.append(sol_temp) +# end = 5 +# id_list = [0 for _ in range(length)] +# toend = 0 +# steps = 0 +# while not toend: +# steps += 1 +# forward = [1 for _ in range(length)] +# t = [lines[id][item] for id, item in enumerate(id_list)] +# toend = 1 +# for item in t: +# toend = 0 if item != end else 1 +# if toend == 1: +# break; + +# for id, item in enumerate(t): +# for line_id, line in enumerate(lines): +# if item in line[id_list[line_id]+1:]: +# forward[id] = 0 +# break; +# for i in range(length): +# id_list[i] += forward[i] + + +# print(steps) -ee_file = File(f"out/adjoint.pvd") -# ee_file.write(*q_star.split()) +# def dec2bin(input): +# return "{0:b}".format(input) -for i in range(len(q_star)): - ee_file.write(*q_star[i].split()) +# def bin2dec(input): +# length = len(input) +# output = 0 +# for id, item in enumerate(input): +# output += pow(2, length-1-id) * int(item) +# return output + +# def sp_add(input1, input2): +# min_len = 
min(len(input1), len(input2)) +# max_len = max(len(input1), len(input2)) +# input1 = input1[::-1] +# input2 = input2[::-1] + +# output = "" +# for i in range(max_len): +# if i < min_len: +# if input1[i] == input2[i]: +# output += "0" +# else: +# output += "1" +# else: +# try: +# output += input1[i] +# except: +# output += input2[i] + +# return output[::-1] + +# a = dec2bin(9) +# b = dec2bin(5) +# c = sp_add(a, b) +# print(a, b, c) + +a = [1,1,1,1] +for i, j in enumerate(a): + print(j) diff --git a/adaptation_one2n/burgers_one2n/.DS_Store b/adaptation_one2n/burgers_one2n/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 0 and torch.cuda.is_available(): - dev = torch.cuda.current_device() - print(f"Cuda installed. Running on GPU {dev}.") - device = torch.device(f"cuda:{dev}") - torch.backends.cudnn.benchmark = True - torch.backends.cudnn.enabled = True -else: - print("No GPU available.") - device = torch.device("cpu") - - -def set_seed(seed): - """ - Set all random seeds to a fixed value - - :arg seed: the seed value - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def sample_uniform(l, u): - """ - Sample from the continuous uniform - distribution :math:`U(l, u)`. - - :arg l: the lower bound - :arg u: the upper bound - """ - return l + (u - l) * np.random.rand() - - -class SingleLayerFCNN(nn.Module): - """ - Fully Connected Neural Network (FCNN) - for goal-oriented metric-based mesh - adaptation with a single hidden layer. - """ - - def __init__(self, layout, preproc="arctan"): - """ - :arg layout: class instance inherited from - :class:`NetLayoutBase`, with numbers of - inputs, hidden neurons and outputs - specified. - :kwarg preproc: pre-processing function to - apply to the input data - """ - super().__init__() - - # Define preprocessing function - if preproc == "none": - self.preproc1 = lambda x: x - if preproc == "arctan": - self.preproc1 = torch.arctan - elif preproc == "tanh": - self.preproc1 = torch.tanh - elif preproc == "logabs": - self.preproc1 = lambda x: torch.log(torch.abs(x)) - else: - raise ValueError(f'Preprocessor "{preproc}" not recognised.') - - # Define layers - self.linear1 = nn.Linear(layout.num_inputs, layout.num_hidden_neurons) - self.linear2 = nn.Linear(layout.num_hidden_neurons, 1) - - # Define activation functions - self.activate1 = nn.Sigmoid() - - def forward(self, x): - p = self.preproc1(x) - z1 = self.linear1(p) - a1 = self.activate1(z1) - z2 = self.linear2(a1) - return z2 - - -def propagate(data_loader, model, loss_fn, optimizer=None): - """ - Propagate data from a :class:`DataLoader` object - through the neural network. - - If ``optimizer`` is not ``None`` then training is - performed. Otherwise, validation is performed. 
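[Editorial sketch: the network above is a single hidden layer with sigmoid activation, preceded by an arctan preprocessing of the raw features. Instantiating it with a stand-in layout (the sizes are hypothetical):]

    import torch

    class DemoLayout:
        num_inputs = 30          # hypothetical feature count
        num_hidden_neurons = 60

    net = SingleLayerFCNN(DemoLayout())
    x = torch.randn(8, DemoLayout.num_inputs)   # batch of 8 feature vectors
    y = net(x)                                  # shape (8, 1): one value per element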
- - :arg data_loader: PyTorch :class:`DataLoader` instance - :arg model: PyTorch :class:`Module` instance - :arg loss_fn: PyTorch loss function instance - :arg optimizer: PyTorch optimizer instance - """ - num_batches = len(data_loader) - cumulative_loss = 0 - - for x, y in data_loader: - - # Compute prediction and loss - prediction = model(x.to(device)) - loss = loss_fn(prediction, y.to(device)) - cumulative_loss += loss.item() - - # Backpropagation - if optimizer is not None: - optimizer.zero_grad() - loss.backward() - optimizer.step() - - return cumulative_loss / num_batches - - -def collect_features(feature_dict): - """ - Given a dictionary of feature arrays, stack their - data appropriately to be fed into a neural network. - :arg feature_dict: dictionary containing feature data - """ - dofs = [feature for key, feature in feature_dict.items() if "dofs" in key] - nodofs = [feature for key, feature in feature_dict.items() if "dofs" not in key] - return np.hstack((np.vstack(nodofs).transpose(), np.hstack(dofs))) - - -def Loss(): - """ - Custom loss function. - - Needed when there is only one output value. - """ - - def mse(output, target): - target = target.reshape(*output.shape) - return torch.nn.MSELoss(reduction="sum")(output, target) - - return mse diff --git a/build/lib/nn_adapt/features.py b/build/lib/nn_adapt/features.py deleted file mode 100644 index 930861d..0000000 --- a/build/lib/nn_adapt/features.py +++ /dev/null @@ -1,259 +0,0 @@ -""" -Functions for extracting feature data from configuration -files, meshes and solution fields. -""" -import firedrake -from firedrake.petsc import PETSc -from firedrake import op2 -import numpy as np -from pyroteus.metric import * -from sympy import ProductSet -import ufl -from nn_adapt.solving import dwr_indicator -from collections import Iterable - - -__all__ = ["extract_features", "get_values_at_elements"] - - -@PETSc.Log.EventDecorator("Extract components") -def extract_components(matrix): - r""" - Extract components of a matrix that describe its - size, orientation and shape. - - The latter two components are combined in such - a way that we avoid errors relating to arguments - zero and :math:`2\pi` being equal. - """ - density, quotients, evecs = density_and_quotients(matrix, reorder=True) - fs = density.function_space() - ar = firedrake.interpolate(ufl.sqrt(quotients[1]), fs) - armin = ar.vector().gather().min() - assert armin >= 1.0, f"An element has aspect ratio is less than one ({armin})" - theta = firedrake.interpolate(ufl.atan(evecs[1, 1] / evecs[1, 0]), fs) - h1 = firedrake.interpolate(ufl.cos(theta) ** 2 / ar + ufl.sin(theta) ** 2 * ar, fs) - h2 = firedrake.interpolate((1 / ar - ar) * ufl.sin(theta) * ufl.cos(theta), fs) - return density, h1, h2 - - -@PETSc.Log.EventDecorator("Extract elementwise") -def get_values_at_elements(f): - """ - Extract the values for all degrees of freedom associated - with each element. 
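[Editorial sketch: together, `propagate` and `Loss` give a complete train/validate step — passing an optimizer performs backpropagation, omitting it just accumulates loss. The data below is hypothetical; `device` is the module-level device selected above and `DemoLayout` is the stand-in from the previous sketch.]

    import numpy as np
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    features = np.random.rand(1000, 30).astype(np.float32)   # hypothetical features
    targets = np.random.rand(1000).astype(np.float32)        # hypothetical targets
    loader = DataLoader(
        TensorDataset(torch.from_numpy(features), torch.from_numpy(targets)),
        batch_size=32,
        shuffle=True,
    )

    model = SingleLayerFCNN(DemoLayout()).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    loss_fn = Loss()
    for epoch in range(10):
        train_loss = propagate(loader, model, loss_fn, optimizer)  # training
        val_loss = propagate(loader, model, loss_fn)               # validation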
- - :arg f: some :class:`Function` - :return: a vector :class:`Function` holding all DoFs of `f` - """ - fs = f.function_space() - mesh = fs.mesh() - dim = mesh.topological_dimension() - if dim == 2: - assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" - elif dim == 3: - assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" - else: - raise ValueError(f"Dimension {dim} not supported") - el = fs.ufl_element() - if el.sub_elements() == []: - p = el.degree() - size = el.value_size() * (p + 1) * (p + 2) // 2 - else: - size = 0 - for sel in el.sub_elements(): - p = sel.degree() - size += sel.value_size() * (p + 1) * (p + 2) // 2 - P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size) - values = firedrake.Function(P0_vec) - kernel = "for (int i=0; i < vertexwise.dofs; i++) elementwise[i] += vertexwise[i];" - keys = {"vertexwise": (f, op2.READ), "elementwise": (values, op2.INC)} - firedrake.par_loop(kernel, ufl.dx, keys) - return values - - -@PETSc.Log.EventDecorator("Extract at centroids") -def get_values_at_centroids(f): - """ - Extract the values for the function at each element centroid, - along with all derivatives up to the :math:`p^{th}`, where - :math:`p` is the polynomial degree. - - :arg f: some :class:`Function` - :return: a vector :class:`Function` holding all DoFs of `f` - """ - fs = f.function_space() - mesh = fs.mesh() - dim = mesh.topological_dimension() - if dim == 2: - assert fs.ufl_element().cell() == ufl.triangle, "Simplex meshes only" - elif dim == 3: - assert fs.ufl_element().cell() == ufl.tetrahedron, "Simplex meshes only" - else: - raise ValueError(f"Dimension {dim} not supported") - el = fs.ufl_element() - if el.sub_elements() == []: - p = el.degree() - degrees = [p] - size = el.value_size() * (p + 1) * (p + 2) // 2 - funcs = [f] - else: - size = 0 - degrees = [sel.degree() for sel in el.sub_elements()] - for sel, p in zip(el.sub_elements(), degrees): - size += sel.value_size() * (p + 1) * (p + 2) // 2 - funcs = f - values = firedrake.Function(firedrake.VectorFunctionSpace(mesh, "DG", 0, dim=size)) - P0 = firedrake.FunctionSpace(mesh, "DG", 0) - P0_vec = firedrake.VectorFunctionSpace(mesh, "DG", 0) - P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) - i = 0 - for func, p in zip(funcs, degrees): - values.dat.data[:, i] = firedrake.project(func, P0).dat.data_ro - i += 1 - if p == 0: - continue - g = firedrake.project(ufl.grad(func), P0_vec) - values.dat.data[:, i] = g.dat.data_ro[:, 0] - values.dat.data[:, i + 1] = g.dat.data_ro[:, 1] - i += 2 - if p == 1: - continue - H = firedrake.project(ufl.grad(ufl.grad(func)), P0_ten) - values.dat.data[:, i] = H.dat.data_ro[:, 0, 0] - values.dat.data[:, i + 1] = 0.5 * ( - H.dat.data_ro[:, 0, 1] + H.dat.data_ro[:, 1, 0] - ) - values.dat.data[:, i + 2] = H.dat.data_ro[:, 1, 1] - i += 3 - if p > 2: - raise NotImplementedError( - "Polynomial degrees greater than 2 not yet considered" - ) - return values - - -# def time_integrate(list_like): -# length = len(list_like) -# result = 0 -# for step in range(length): -# result += list_like[step] -# return firedrake.product((result, 1/length)) - - -def split_into_scalars(f): - """ - Given a :class:`Function`, split it into - components from its constituent scalar - spaces. - - If it is not mixed then no splitting is - required. 
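[Editorial note: on triangles, each scalar sub-element of degree p contributes (p+1)(p+2)/2 values per cell, so a 2D P2 vector field packs 2 x 6 = 12 values per element and a mixed P1(vector) x P2 space likewise packs 2 x 3 + 6 = 12. A quick check of the arithmetic used above:]

    def local_dofs(value_size, degree):
        # per-element values packed by get_values_at_elements on triangles
        return value_size * (degree + 1) * (degree + 2) // 2

    assert local_dofs(2, 2) == 12                       # P2 velocity in 2D
    assert local_dofs(2, 1) + local_dofs(1, 2) == 12    # P1(vector) x P2 mixed space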
- - :arg f: the mixed :class:`Function` - :return: a dictionary containing the - nested structure of the mixed function - """ - V = f.function_space() - if V.value_size > 1: - if not isinstance(V.node_count, Iterable): - assert len(V.shape) == 1, "Tensor spaces not supported" - el = V.ufl_element() - fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) - return {0: [firedrake.interpolate(f[i], fs) for i in range(V.shape[0])]} - subspaces = [V.sub(i) for i in range(len(V.node_count))] - ret = {} - for i, (Vi, fi) in enumerate(zip(subspaces, f.split())): - if len(Vi.shape) == 0: - ret[i] = [fi] - else: - assert len(Vi.shape) == 1, "Tensor spaces not supported" - el = Vi.ufl_element() - fs = firedrake.FunctionSpace(V.mesh(), el.family(), el.degree()) - ret[i] = [firedrake.interpolate(fi[j], fs) for j in range(Vi.shape[0])] - return ret - else: - return {0: [f]} - - -def extract_array(f, mesh=None, centroid=False, project=False): - r""" - Extract a cell-wise data array from a :class:`Constant` or - :class:`Function`. - - For constants and scalar fields, this will be an :math:`n\times 1` - array, where :math:`n` is the number of mesh elements. For a mixed - field with :math:`m` components, it will be :math:`n\times m`. - - :arg f: the :class:`Constant` or :class:`Function` - :kwarg mesh: the underlying :class:`MeshGeometry` - :kwarg project: if ``True``, project the field into - :math:`\mathbb P0` space - """ - mesh = mesh or f.ufl_domain() - if isinstance(f, firedrake.Constant): - ones = np.ones(mesh.num_cells()) - assert len(f.values()) == 1 - return f.values()[0] * ones - elif not isinstance(f, firedrake.Function): - raise ValueError(f"Unexpected input type {type(f)}") - if project: - if len(f.function_space().shape) > 0: - raise NotImplementedError("Can currently only project scalar fields") # TODO - element = f.ufl_element() - if (element.family(), element.degree()) != ("Discontinuous Lagrange", 0): - P0 = firedrake.FunctionSpace(mesh, "DG", 0) - f = firedrake.project(f, P0) - s = sum([fi for i, fi in split_into_scalars(f).items()], start=[]) - get = get_values_at_centroids if centroid else get_values_at_elements - if len(s) == 1: - return get(s[0]).dat.data - else: - return np.hstack([get(si).dat.data for si in s]) - - -@PETSc.Log.EventDecorator("Extract features") -def extract_features(config, fwd_sol, adj_sol): - """ - Extract features from the outputs of a run. - - :arg config: the configuration file - :arg fwd_sol: the forward solution - :arg adj_sol: the adjoint solution - :return: a list of feature arrays - """ - mesh = fwd_sol.function_space().mesh() - - # Coarse-grained DWR estimator - with PETSc.Log.Event("Extract estimator"): - dwr = dwr_indicator(config, mesh, fwd_sol, adj_sol) - - # Features describing the mesh element - with PETSc.Log.Event("Analyse element"): - P0_ten = firedrake.TensorFunctionSpace(mesh, "DG", 0) - - # Element size, orientation and shape - J = ufl.Jacobian(mesh) - JTJ = firedrake.interpolate(ufl.dot(ufl.transpose(J), J), P0_ten) - d, h1, h2 = (extract_array(p) for p in extract_components(JTJ)) - - # Is the element on the boundary? 
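[Editorial sketch: `extract_array` always yields one row per mesh cell, broadcasting constants and stacking mixed-space components column-wise. A small sanity check of the constant branch:]

    from firedrake import Constant, UnitSquareMesh

    mesh = UnitSquareMesh(4, 4)                    # 32 triangular cells
    arr = extract_array(Constant(0.5), mesh=mesh)
    assert arr.shape == (32,) and (arr == 0.5).all()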
- p0test = firedrake.TestFunction(dwr.function_space()) - bnd = firedrake.assemble(p0test * ufl.ds).dat.data - - # Combine the features together - features = { - "estimator_coarse": extract_array(dwr), - "physics_drag": extract_array(config.parameters.drag(mesh)), - "physics_viscosity": extract_array(config.parameters.viscosity(mesh), project=True), - "physics_bathymetry": extract_array(config.parameters.bathymetry(mesh), project=True), - "mesh_d": d, - "mesh_h1": h1, - "mesh_h2": h2, - "mesh_bnd": bnd, - "forward_dofs": extract_array(fwd_sol, centroid=True), - "adjoint_dofs": extract_array(adj_sol, centroid=True), - } - for key, value in features.items(): - assert not np.isnan(value).any() - return features diff --git a/build/lib/nn_adapt/layout.py b/build/lib/nn_adapt/layout.py deleted file mode 100644 index 060502f..0000000 --- a/build/lib/nn_adapt/layout.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Classes for defining the layout of a neural network. -""" - - -class NetLayoutBase(object): - """ - Base class for specifying the number - of inputs, hidden neurons and outputs - in a neural network. - - The derived class should give values - for each of these parameters. - """ - - # TODO: Allow more general networks - - colours = { - "estimator": "b", - "physics": "C0", - "mesh": "deepskyblue", - "forward": "mediumturquoise", - "adjoint": "mediumseagreen", - } - - def __init__(self): - if not hasattr(self, "inputs"): - raise ValueError("Need to set self.inputs") - colours = set(self.colours.keys()) - for i in self.inputs: - okay = False - for c in colours: - if i.startswith(c): - okay = True - break - if not okay: - raise ValueError("Input names must begin with one of {colours}") - if not hasattr(self, "num_hidden_neurons"): - raise ValueError("Need to set self.num_hidden_neurons") - if not hasattr(self, "dofs_per_element"): - raise ValueError("Need to set self.dofs_per_element") - - def count_inputs(self, prefix): - """ - Count all scalar inputs that start with a given `prefix`. - """ - cnt = 0 - for i in self.inputs: - if i.startswith(prefix): - if i in ("forward_dofs", "adjoint_dofs"): - cnt += self.dofs_per_element - else: - cnt += 1 - return cnt - - @property - def num_inputs(self): - """ - The total number of scalar inputs. - """ - return self.count_inputs("") diff --git a/build/lib/nn_adapt/metric.py b/build/lib/nn_adapt/metric.py deleted file mode 100644 index 22ed97e..0000000 --- a/build/lib/nn_adapt/metric.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Functions for generating Riemannian metrics from solution -fields. -""" -from pyroteus import * -from nn_adapt.features import split_into_scalars -from nn_adapt.solving import * -from firedrake.meshadapt import RiemannianMetric -from time import perf_counter - - -def get_hessians(f, **kwargs): - """ - Compute Hessians for each component of - a :class:`Function`. - - Any keyword arguments are passed to - ``recover_hessian``. - - :arg f: the function - :return: list of Hessians of each - component - """ - kwargs.setdefault("method", "Clement") - return [ - space_normalise(hessian_metric(recover_hessian(fij, **kwargs)), 4000.0, "inf") - for i, fi in split_into_scalars(f).items() - for fij in fi - ] - - -def go_metric( - mesh, - config, - enrichment_method="h", - target_complexity=4000.0, - average=True, - interpolant="Clement", - anisotropic=False, - retall=False, - convergence_checker=None, - **kwargs, -): - """ - Compute an anisotropic goal-oriented - metric field, based on a mesh and - a configuration file. 
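[Editorial sketch: in `count_inputs`, every input name contributes one scalar except the DoF vectors, which expand to `dofs_per_element` entries each. For example, with a hypothetical layout:]

    class DemoNetLayout(NetLayoutBase):
        inputs = ("estimator_coarse", "mesh_d", "forward_dofs", "adjoint_dofs")
        num_hidden_neurons = 60
        dofs_per_element = 12

    layout = DemoNetLayout()
    assert layout.count_inputs("mesh") == 1
    assert layout.num_inputs == 1 + 1 + 12 + 12   # 26 scalar inputs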
- - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? - :kwarg target_complexity: target complexity - of the goal-oriented metric - :kwarg average: should the Hessian components - be combined using averaging (or intersection)? - :kwarg interpolant: which method to use to - interpolate into the target space? - :kwarg anisotropic: toggle isotropic vs. - anisotropic metric - :kwarg h_min: minimum magnitude - :kwarg h_max: maximum magnitude - :kwarg a_max: maximum anisotropy - :kwarg retall: if ``True``, the error indicator, - forward solution and adjoint solution - are returned, in addition to the metric - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - """ - h_min = kwargs.pop("h_min", 1.0e-30) - h_max = kwargs.pop("h_max", 1.0e+30) - a_max = kwargs.pop("a_max", 1.0e+30) - out = indicate_errors( - mesh, - config, - enrichment_method=enrichment_method, - retall=True, - convergence_checker=convergence_checker, - **kwargs, - ) - if retall and "adjoint" not in out: - return out - out["estimator"] = out["dwr"].vector().gather().sum() - if convergence_checker is not None: - if convergence_checker.check_estimator(out["estimator"]): - return out - - out["times"]["metric"] = -perf_counter() - with PETSc.Log.Event("Metric construction"): - if anisotropic: - hessian = combine_metrics(*get_hessians(out["forward"]), average=average) - else: - hessian = None - metric = anisotropic_metric( - out["dwr"], - hessian=hessian, - target_complexity=target_complexity, - target_space=TensorFunctionSpace(mesh, "CG", 1), - interpolant=interpolant, - ) - space_normalise(metric, target_complexity, "inf") - enforce_element_constraints(metric, h_min, h_max, a_max) - out["metric"] = RiemannianMetric(mesh) - out["metric"].assign(metric) - out["times"]["metric"] += perf_counter() - return out if retall else out["metric"] diff --git a/build/lib/nn_adapt/metric_one2n.py b/build/lib/nn_adapt/metric_one2n.py deleted file mode 100644 index ba25a04..0000000 --- a/build/lib/nn_adapt/metric_one2n.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -Functions for generating Riemannian metrics from solution -fields. -""" -from pyroteus import * -from nn_adapt.features import split_into_scalars -from nn_adapt.solving import * -from nn_adapt.solving_one2n import * -from firedrake.meshadapt import RiemannianMetric -from time import perf_counter - - -def get_hessians(f, **kwargs): - """ - Compute Hessians for each component of - a :class:`Function`. - - Any keyword arguments are passed to - ``recover_hessian``. - - :arg f: the function - :return: list of Hessians of each - component - """ - kwargs.setdefault("method", "Clement") - return [ - space_normalise(hessian_metric(recover_hessian(fij, **kwargs)), 4000.0, "inf") - for i, fi in split_into_scalars(f).items() - for fij in fi - ] - - -def go_metric_one2n( - mesh, - config, - enrichment_method="h", - target_complexity=4000.0, - average=True, - interpolant="Clement", - anisotropic=False, - retall=False, - convergence_checker=None, - **kwargs, -): - """ - Compute an anisotropic goal-oriented - metric field, based on a mesh and - a configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? 
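[Editorial sketch: `go_metric` packages the whole estimate-then-metric step, so a goal-oriented fixed-point loop reduces to a few lines. The iteration budget and convergence test below are hypothetical; `adapt` is the firedrake.meshadapt entry point used elsewhere in this series.]

    from firedrake.meshadapt import adapt

    for _ in range(35):                       # hypothetical maxiter
        out = go_metric(mesh, config, target_complexity=4000.0, retall=True)
        mesh = adapt(mesh, out["metric"])
        if qoi_converged(out["qoi"]):         # hypothetical convergence test
            break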
- :kwarg target_complexity: target complexity - of the goal-oriented metric - :kwarg average: should the Hessian components - be combined using averaging (or intersection)? - :kwarg interpolant: which method to use to - interpolate into the target space? - :kwarg anisotropic: toggle isotropic vs. - anisotropic metric - :kwarg h_min: minimum magnitude - :kwarg h_max: maximum magnitude - :kwarg a_max: maximum anisotropy - :kwarg retall: if ``True``, the error indicator, - forward solution and adjoint solution - are returned, in addition to the metric - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - """ - h_min = kwargs.pop("h_min", 1.0e-30) - h_max = kwargs.pop("h_max", 1.0e+30) - a_max = kwargs.pop("a_max", 1.0e+30) - out = indicate_errors_one2n( - mesh, - config, - enrichment_method=enrichment_method, - retall=True, - convergence_checker=convergence_checker, - **kwargs, - ) - if retall and "adjoint" not in out: - return out - out["estimator"] = out["dwr"].vector().gather().sum() - if convergence_checker is not None: - if convergence_checker.check_estimator(out["estimator"]): - return out - - tt_steps = len(out["forward"]) - out["times"]["metric"] = -perf_counter() - with PETSc.Log.Event("Metric construction"): - if anisotropic: - hessian_list = [] - for step in range(tt_steps): - hessian_list.append(combine_metrics(*get_hessians(out["forward"][step]), average=average)) - hessian = time_integrate(hessian_list) - else: - hessian = None - metric = anisotropic_metric( - out["dwr"], - hessian=hessian, - target_complexity=target_complexity, - target_space=TensorFunctionSpace(mesh, "CG", 1), - interpolant=interpolant, - ) - space_normalise(metric, target_complexity, "inf") - enforce_element_constraints(metric, h_min, h_max, a_max) - out["metric"] = RiemannianMetric(mesh) - out["metric"].assign(metric) - out["times"]["metric"] += perf_counter() - return out if retall else out["metric"] diff --git a/build/lib/nn_adapt/model.py b/build/lib/nn_adapt/model.py deleted file mode 100644 index f3587f9..0000000 --- a/build/lib/nn_adapt/model.py +++ /dev/null @@ -1,43 +0,0 @@ -import abc - - -class Parameters(abc.ABC): - """ - Abstract base class defining the API for parameter - classes that describe PDE models. - """ - - def __init__(self): - self.case = None - if not hasattr(self, "qoi_name"): - raise NotImplementedError("qoi_name attribute must be set") - if not hasattr(self, "qoi_unit"): - raise NotImplementedError("qoi_unit attribute must be set") - - @abc.abstractmethod - def bathymetry(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - """ - pass - - @abc.abstractmethod - def drag(self, mesh): - """ - Compute the drag coefficient on the current `mesh`. - """ - pass - - @abc.abstractmethod - def viscosity(self, mesh): - """ - Compute the viscosity coefficient on the current `mesh`. - """ - pass - - @abc.abstractmethod - def ic(self, mesh): - """ - Compute the initial condition on the current `mesh`. 
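[Editorial sketch: a minimal concrete `Parameters` subclass must set the QoI metadata and implement the four field constructors. All values below are hypothetical:]

    from firedrake import Constant, as_vector

    class DemoParameters(Parameters):
        qoi_name = "outflow integral"   # hypothetical
        qoi_unit = "m"                  # hypothetical

        def bathymetry(self, mesh):
            return Constant(20.0)

        def drag(self, mesh):
            return Constant(0.0025)

        def viscosity(self, mesh):
            return Constant(0.5)

        def ic(self, mesh):
            return as_vector([1.0, 0.0])   # hypothetical uniform initial velocity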
- """ - pass diff --git a/build/lib/nn_adapt/parse.py b/build/lib/nn_adapt/parse.py deleted file mode 100644 index 7565690..0000000 --- a/build/lib/nn_adapt/parse.py +++ /dev/null @@ -1,142 +0,0 @@ -import argparse -import git -import numpy as np - - -__all__ = ["Parser"] - - -def _check_in_range(value, typ, l, u): - tvalue = typ(value) - if not (tvalue >= l and tvalue <= u): - raise argparse.ArgumentTypeError(f"{value} is not in [{l}, {u}]") - return tvalue - - -def _check_strictly_in_range(value, typ, l, u): - tvalue = typ(value) - if not (tvalue >= l and tvalue <= u): - raise argparse.ArgumentTypeError(f"{value} is not in ({l}, {u})") - return tvalue - - -nonnegative_float = lambda value: _check_in_range(value, float, 0, np.inf) -nonnegative_int = lambda value: _check_in_range(value, int, 0, np.inf) -positive_float = lambda value: _check_strictly_in_range(value, float, 0, np.inf) -positive_int = lambda value: _check_strictly_in_range(value, int, 0, np.inf) - - -def bounded_float(l, u): - def chk(value): - return _check_in_range(value, float, l, u) - - return chk - - -def bounded_int(l, u): - def chk(value): - return _check_in_range(value, int, l, u) - - return chk - - -class Parser(argparse.ArgumentParser): - """ - Custom :class:`ArgumentParser` for `nn_adapt`. - """ - - def __init__(self, prog): - super().__init__( - self, prog, formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - self.add_argument("model", help="The model", type=str) - self.add_argument("test_case", help="The configuration file number or name") - self.add_argument( - "--optimise", - help="Turn off plotting and debugging", - action="store_true", - ) - - def parse_convergence_criteria(self): - self.add_argument( - "--miniter", - help="Minimum number of iterations", - type=positive_int, - default=3, - ) - self.add_argument( - "--maxiter", - help="Maximum number of iterations", - type=positive_int, - default=35, - ) - self.add_argument( - "--qoi_rtol", - help="Relative tolerance for QoI", - type=positive_float, - default=0.001, - ) - self.add_argument( - "--element_rtol", - help="Element count tolerance", - type=positive_float, - default=0.001, - ) - self.add_argument( - "--estimator_rtol", - help="Error estimator tolerance", - type=positive_float, - default=0.001, - ) - - def parse_num_refinements(self, default=4): - self.add_argument( - "--num_refinements", - help="Number of mesh refinements", - type=positive_int, - default=default, - ) - - def parse_approach(self): - self.add_argument( - "-a", - "--approach", - help="Adaptive approach to consider", - choices=["isotropic", "anisotropic"], - default="anisotropic", - ) - self.add_argument( - "--transfer", - help="Transfer the solution from the previous mesh as initial guess", - action="store_true", - ) - - def parse_target_complexity(self): - self.add_argument( - "--base_complexity", - help="Base metric complexity", - type=positive_float, - default=200.0, - ) - self.add_argument( - "--target_complexity", - help="Target metric complexity", - type=positive_float, - default=4000.0, - ) - - def parse_preproc(self): - self.add_argument( - "--preproc", - help="Data preprocess function", - type=str, - choices=["none", "arctan", "tanh", "logabs"], - default="arctan", - ) - - def parse_tag(self): - self.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=git.Repo(search_parent_directories=True).head.object.hexsha, - ) diff --git a/build/lib/nn_adapt/plotting.py b/build/lib/nn_adapt/plotting.py deleted file mode 100644 index 
3822c3a..0000000 --- a/build/lib/nn_adapt/plotting.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -Configuration for plotting. -""" -import matplotlib -import matplotlib.pyplot as plt # noqa - - -matplotlib.rc("text", usetex=True) -matplotlib.rcParams["mathtext.fontset"] = "custom" -matplotlib.rcParams["mathtext.rm"] = "Bitstream Vera Sans" -matplotlib.rcParams["mathtext.it"] = "Bitstream Vera Sans:italic" -matplotlib.rcParams["mathtext.bf"] = "Bitstream Vera Sans:bold" -matplotlib.rcParams["mathtext.fontset"] = "stix" -matplotlib.rcParams["font.family"] = "STIXGeneral" -matplotlib.rcParams["font.size"] = 12 diff --git a/build/lib/nn_adapt/solving.py b/build/lib/nn_adapt/solving.py deleted file mode 100644 index 1ba4ee9..0000000 --- a/build/lib/nn_adapt/solving.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -Functions for solving problems defined by configuration -files and performing goal-oriented error estimation. -""" -from firedrake import * -from firedrake.petsc import PETSc -from firedrake.mg.embedded import TransferManager -from pyroteus.error_estimation import get_dwr_indicator -import abc -from time import perf_counter - - -tm = TransferManager() - - -class Solver(abc.ABC): - """ - Base class that defines the API for solver objects. - """ - - @abc.abstractmethod - def __init__(self, mesh, ic, **kwargs): - """ - Setup the solver. - - :arg mesh: the mesh to define the solver on - :arg ic: the initial condition - """ - pass - - @property - @abc.abstractmethod - def function_space(self): - """ - The function space that the PDE is solved in. - """ - pass - - @property - @abc.abstractmethod - def form(self): - """ - Return the weak form. - """ - pass - - @abc.abstractmethod - def iterate(self, **kwargs): - """ - Solve the PDE. - """ - pass - - @property - @abc.abstractmethod - def solution(self): - """ - Return the solution field. - """ - pass - - -def get_solutions( - mesh, - config, - solve_adjoint=True, - refined_mesh=None, - init=None, - convergence_checker=None, - **kwargs, -): - """ - Solve forward and adjoint equations on a - given mesh. - - This works only for steady-state problems. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg solve_adjoint: should we solve the - adjoint problem? 
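        (Defaults to ``True``; when ``False`` the function returns
        after the forward solve, with only the forward solution, QoI
        and timings.)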
- :kwarg refined_mesh: refined mesh to compute - enriched adjoint solution on - :kwarg init: custom initial condition function - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - :return: forward solution, adjoint solution - and enriched adjoint solution (if requested) - """ - - # Solve forward problem in base space - V = config.get_function_space(mesh) - out = {"times": {"forward": -perf_counter()}} - with PETSc.Log.Event("Forward solve"): - if init is None: - ic = config.get_initial_condition(V) - else: - ic = init(V) - solver_obj = config.Solver(mesh, ic, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - J = config.get_qoi(mesh)(q) - qoi = assemble(J) - out["times"]["forward"] += perf_counter() - out["qoi"] = qoi - out["forward"] = q - if convergence_checker is not None: - if convergence_checker.check_qoi(qoi): - return out - if not solve_adjoint: - return out - - # Solve adjoint problem in base space - out["times"]["adjoint"] = -perf_counter() - with PETSc.Log.Event("Adjoint solve"): - sp = config.parameters.adjoint_solver_parameters - q_star = Function(V) - F = solver_obj.form - dFdq = derivative(F, q, TrialFunction(V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q, TestFunction(V)) - solve(dFdq_transpose == dJdq, q_star, solver_parameters=sp) - out["adjoint"] = q_star - out["times"]["adjoint"] += perf_counter() - - if refined_mesh is None: - return out - - # Solve adjoint problem in enriched space - out["times"]["estimator"] = -perf_counter() - with PETSc.Log.Event("Enrichment"): - V = config.get_function_space(refined_mesh) - q_plus = Function(V) - solver_obj = config.Solver(refined_mesh, q_plus, **kwargs) - q_plus = solver_obj.solution - J = config.get_qoi(refined_mesh)(q_plus) - F = solver_obj.form - tm.prolong(q, q_plus) - q_star_plus = Function(V) - dFdq = derivative(F, q_plus, TrialFunction(V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, q_plus, TestFunction(V)) - solve(dFdq_transpose == dJdq, q_star_plus, solver_parameters=sp) - out["enriched_adjoint"] = q_star_plus - out["times"]["estimator"] += perf_counter() - return out - - -def split_into_components(f): - r""" - Extend the :attr:`split` method to apply - to non-mixed :class:`Function`\s. - """ - return [f] if f.function_space().value_size == 1 else f.split() - - -def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs): - """ - Indicate errors according to ``dwr_indicator``, - using the solver given in the configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? 
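        (Only ``"h"`` is implemented, i.e. one level of uniform
        refinement via ``MeshHierarchy``; other values raise
        ``NotImplementedError``.)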
- :kwarg retall: if ``True``, return the forward - solution and adjoint solution in addition - to the dual-weighted residual error indicator - """ - if not enrichment_method == "h": - raise NotImplementedError # TODO - with PETSc.Log.Event("Enrichment"): - mesh, ref_mesh = MeshHierarchy(mesh, 1) - - # Solve the forward and adjoint problems - out = get_solutions(mesh, config, refined_mesh=ref_mesh, **kwargs) - if retall and "adjoint" not in out: - return out - - out["times"]["estimator"] -= perf_counter() - with PETSc.Log.Event("Enrichment"): - adj_sol_plus = out["enriched_adjoint"] - - # Prolong - V_plus = adj_sol_plus.function_space() - fwd_sol_plg = Function(V_plus) - tm.prolong(out["forward"], fwd_sol_plg) - adj_sol_plg = Function(V_plus) - tm.prolong(out["adjoint"], adj_sol_plg) - - # Subtract prolonged adjoint solution from enriched version - adj_error = Function(V_plus) - adj_sols_plus = split_into_components(adj_sol_plus) - adj_sols_plg = split_into_components(adj_sol_plg) - for i, err in enumerate(split_into_components(adj_error)): - err += adj_sols_plus[i] - adj_sols_plg[i] - - # Evaluate errors - out["dwr"] = dwr_indicator(config, mesh, fwd_sol_plg, adj_error) - out["times"]["estimator"] += perf_counter() - - return out if retall else out["dwr"] - - -def dwr_indicator(config, mesh, q, q_star): - r""" - Evaluate the DWR error indicator as a :math:`\mathbb P0` field. - - :arg mesh: the current mesh - :arg q: the forward solution, transferred into enriched space - :arg q_star: the adjoint solution in enriched space - """ - mesh_plus = q.function_space().mesh() - - # Extract indicator in enriched space - solver_obj = config.Solver(mesh_plus, q) - F = solver_obj.form - V = solver_obj.function_space - dwr_plus = get_dwr_indicator(F, q_star, test_space=V) - - # Project down to base space - P0 = FunctionSpace(mesh, "DG", 0) - dwr = project(dwr_plus, P0) - dwr.interpolate(abs(dwr)) - return dwr diff --git a/build/lib/nn_adapt/solving_n2n.py b/build/lib/nn_adapt/solving_n2n.py deleted file mode 100644 index 39143ef..0000000 --- a/build/lib/nn_adapt/solving_n2n.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -Time dependent goal-oriented error estimation -""" -""" -Functions for solving problems defined by configuration -files and performing goal-oriented error estimation. -""" -from firedrake import * -from firedrake.petsc import PETSc -from firedrake.mg.embedded import TransferManager -from firedrake_adjoint import * -from pyroteus.error_estimation import get_dwr_indicator -import abc -from time import perf_counter - -tm = TransferManager() - - -class Solver(abc.ABC): - """ - Base class that defines the API for solver objects. - """ - - @abc.abstractmethod - def __init__(self, mesh, ic, **kwargs): - """ - Setup the solver. - - :arg mesh: the mesh to define the solver on - :arg ic: the initial condition - """ - pass - - @property - @abc.abstractmethod - def function_space(self): - """ - The function space that the PDE is solved in. - """ - pass - - @property - @abc.abstractmethod - def form(self): - """ - Return the weak form. - """ - pass - - @abc.abstractmethod - def iterate(self, **kwargs): - """ - Solve the PDE. - """ - pass - - @property - @abc.abstractmethod - def solution(self): - """ - Return the solution field. - """ - pass - - -def get_solutions_n2n( - meshes, - config, - solve_adjoint=True, - refined_meshes=None, - init=None, - convergence_checker=None, - **kwargs, -): - """ - Solve forward and adjoint equations on a - given mesh. - - This works only for steady-state problems. 
- Trying to work it out. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg solve_adjoint: should we solve the - adjoint problem? - :kwarg refined_mesh: refined mesh to compute - enriched adjoint solution on - :kwarg init: custom initial condition function - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - :return: forward solution, adjoint solution - and enriched adjoint solution (if requested) - """ - - tt_steps = config.parameters.tt_steps - - # Solve forward problem in base space - V = config.get_function_space(meshes[-1]) - out = {"times": {"forward": -perf_counter()}} - with PETSc.Log.Event("Forward solve"): - if init is None: - ic = config.get_initial_condition(V) - else: - ic = init(V) - solver_obj = config.Solver_n2n(meshes, ic=0, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - # Calculate QoI - qoi = 0 - for step in range(tt_steps): - J = config.get_qoi(V)(q[-1]) - qoi += assemble(J) - qoi = qoi / tt_steps - - out["times"]["forward"] += perf_counter() - out["qoi"] = qoi - out["forward"] = q - if convergence_checker is not None: - if not convergence_checker.check_qoi(qoi): - return out - if not solve_adjoint: - return out - - # Solve adjoint problem in base space - out["times"]["adjoint"] = -perf_counter() - with PETSc.Log.Event("Adjoint solve"): - sp = config.parameters.adjoint_solver_parameters - adj_solution = [] - dJdu, solve_blocks = solver_obj.adjoint_setup() - - for step in range(tt_steps-1): - adjoint_solution = solve_blocks[step].adj_sol - adj_solution.append(adjoint_solution) - - # initial condition for adjoint solution - adj_solution.append(dJdu) - out["adjoint"] = adj_solution - out["times"]["adjoint"] += perf_counter() - if refined_meshes is None: - return out - - # Solve adjoint problem in enriched space - out["times"]["estimator"] = -perf_counter() - with PETSc.Log.Event("Enrichment"): - V = config.get_function_space(refined_meshes[-1]) - q_plus = Function(V) - solver_obj_plus = config.Solver_n2n(refined_meshes, q_plus, **kwargs) - solver_obj_plus.iterate() - q_plus = solver_obj_plus.solution - # J = config.get_qoi(refined_mesh[-1])(q_plus[-1]) - adj_solution_plus = [] - dJdu_plus, solve_blocks_plus = solver_obj_plus.adjoint_setup() - - for step in range(tt_steps-1): - adjoint_solution_plus = solve_blocks_plus[step].adj_sol - adj_solution_plus.append(adjoint_solution_plus) - - adj_solution_plus.append(dJdu_plus) - out["enriched_adjoint"] = adj_solution_plus - out["times"]["estimator"] += perf_counter() - - return out - - -def split_into_components(f): - r""" - Extend the :attr:`split` method to apply - to non-mixed :class:`Function`\s. - """ - return [f] if f.function_space().value_size == 1 else f.split() - - -def indicate_errors_n2n(meshes, config, enrichment_method="h", retall=False, **kwargs): - """ - Indicate errors according to ``dwr_indicator``, - using the solver given in the configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? 
- :kwarg retall: if ``True``, return the forward - solution and adjoint solution in addition - to the dual-weighted residual error indicator - """ - if not enrichment_method == "h": - raise NotImplementedError # TODO - # with PETSc.Log.Event("Enrichment"): - mesh_list = [] - ref_mesh_list = [] - tt_steps = len(meshes) - for i in range(tt_steps): - mesh, ref_mesh = MeshHierarchy(meshes[i], 1) - mesh_list.append(mesh) - ref_mesh_list.append(ref_mesh) - - # Solve the forward and adjoint problems - out = get_solutions_n2n(meshes=mesh_list, config=config, refined_meshes=ref_mesh_list, **kwargs) - if retall and "adjoint" not in out: - return out - - out["times"]["estimator"] -= perf_counter() - # with PETSc.Log.Event("Enrichment"): - adj_sol_plus = out["enriched_adjoint"] - dwr_list = [] - - for step in range(tt_steps): - # Prolong - V_plus = out["enriched_adjoint"][step].function_space() - fwd_sol_plg = Function(V_plus) - tm.prolong(out["forward"][step], fwd_sol_plg) - adj_sol_plg = Function(V_plus) - tm.prolong(out["adjoint"][step], adj_sol_plg) - - # Subtract prolonged adjoint solution from enriched version - adj_error = Function(V_plus) - adj_sols_plus = split_into_components(out["enriched_adjoint"][step]) - adj_sols_plg = split_into_components(adj_sol_plg) - for i, err in enumerate(split_into_components(adj_error)): - err += adj_sols_plus[i] - adj_sols_plg[i] - - # Evaluate errors - dwr_list.append(dwr_indicator(config, mesh, fwd_sol_plg, adj_error)) - out["dwr"] = dwr_list - - out["times"]["estimator"] += perf_counter() - - return out if retall else out["dwr"] - - -def dwr_indicator(config, mesh, q, q_star): - r""" - Evaluate the DWR error indicator as a :math:`\mathbb P0` field. - - :arg mesh: the current mesh - :arg q: the forward solution, transferred into enriched space - :arg q_star: the adjoint solution in enriched space - """ - mesh_plus = q.function_space().mesh() - - # Extract indicator in enriched space - solver_obj = config.Solver(mesh_plus, q) - F = solver_obj.form - V = solver_obj.function_space - dwr_plus = get_dwr_indicator(F, q_star, test_space=V) - - # Project down to base space - P0 = FunctionSpace(mesh, "DG", 0) - dwr = project(dwr_plus, P0) - dwr.interpolate(abs(dwr)) - return dwr diff --git a/build/lib/nn_adapt/solving_one2n.py b/build/lib/nn_adapt/solving_one2n.py deleted file mode 100644 index 2eb5801..0000000 --- a/build/lib/nn_adapt/solving_one2n.py +++ /dev/null @@ -1,246 +0,0 @@ -""" -Time dependent goal-oriented error estimation -""" -""" -Functions for solving problems defined by configuration -files and performing goal-oriented error estimation. -""" -from firedrake import * -from firedrake.petsc import PETSc -from firedrake.mg.embedded import TransferManager -from firedrake_adjoint import * -from pyroteus.error_estimation import get_dwr_indicator -import abc -from time import perf_counter - - -tm = TransferManager() - - -class Solver(abc.ABC): - """ - Base class that defines the API for solver objects. - """ - - @abc.abstractmethod - def __init__(self, mesh, ic, **kwargs): - """ - Setup the solver. - - :arg mesh: the mesh to define the solver on - :arg ic: the initial condition - """ - pass - - @property - @abc.abstractmethod - def function_space(self): - """ - The function space that the PDE is solved in. - """ - pass - - @property - @abc.abstractmethod - def form(self): - """ - Return the weak form. - """ - pass - - @abc.abstractmethod - def iterate(self, **kwargs): - """ - Solve the PDE. 
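        A typical one2n implementation advances the solution over all
        timesteps, e.g. (illustrative sketch only; ``F``, ``u``, the
        lagged ``u_``, ``dt`` and ``t_end`` are hypothetical attributes
        set up by the concrete solver):

            t = 0.0
            while t < t_end - 1.0e-05:
                solve(F == 0, u)
                u_.assign(u)
                t += dt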
- """ - pass - - @property - @abc.abstractmethod - def solution(self): - """ - Return the solution field. - """ - pass - - -def time_integrate(list_like): - length = len(list_like) - result = 0 - for step in range(length): - result += list_like[step] - return product((result, 1/length)) - - -def get_solutions_one2n( - mesh, - config, - solve_adjoint=True, - refined_mesh=None, - init=None, - convergence_checker=None, - **kwargs, -): - """ - Solve forward and adjoint equations on a - given mesh. - - This works only for steady-state problems. - Trying to work it out. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg solve_adjoint: should we solve the - adjoint problem? - :kwarg refined_mesh: refined mesh to compute - enriched adjoint solution on - :kwarg init: custom initial condition function - :kwarg convergence_checker: :class:`ConvergenceTracer` - instance - :return: forward solution, adjoint solution - and enriched adjoint solution (if requested) - """ - - tt_steps = config.parameters.tt_steps - - # Solve forward problem in base space - V = config.get_function_space(mesh) - out = {"times": {"forward": -perf_counter()}} - with PETSc.Log.Event("Forward solve"): - if init is None: - ic = config.get_initial_condition(V) - else: - ic = init(V) - solver_obj = config.Solver_one2n(mesh, ic, **kwargs) - solver_obj.iterate() - q = solver_obj.solution - # Calculate QoI - qoi_list = [] - for step in range(tt_steps): - J = config.get_qoi(V)(q[step]) - qoi_list.append(assemble(J)) - qoi = time_integrate(qoi_list) - - out["times"]["forward"] += perf_counter() - out["qoi"] = qoi - out["forward"] = q - if convergence_checker is not None: - if convergence_checker.check_qoi(qoi): - return out - if not solve_adjoint: - return out - - # Solve adjoint problem in base space - out["times"]["adjoint"] = -perf_counter() - with PETSc.Log.Event("Adjoint solve"): - solver_obj.adjoint_iteration() - out["adjoint"] = solver_obj.adj_solution - - out["times"]["adjoint"] += perf_counter() - if refined_mesh is None: - return out - - # Solve adjoint problem in enriched space - out["times"]["estimator"] = -perf_counter() - with PETSc.Log.Event("Enrichment"): - V = config.get_function_space(refined_mesh) - q_plus = Function(V) - solver_obj_plus = config.Solver_one2n(refined_mesh, q_plus, **kwargs) - solver_obj_plus.iterate() - q_plus = solver_obj_plus.solution - adj_solution_plus = [] - solver_obj_plus.adjoint_iteration() - adj_solution_plus = solver_obj_plus.adj_solution - out["enriched_adjoint"] = adj_solution_plus - - out["times"]["estimator"] += perf_counter() - - return out - - -def split_into_components(f): - r""" - Extend the :attr:`split` method to apply - to non-mixed :class:`Function`\s. - """ - return [f] if f.function_space().value_size == 1 else f.split() - - -def indicate_errors_one2n(mesh, config, enrichment_method="h", retall=False, **kwargs): - """ - Indicate errors according to ``dwr_indicator``, - using the solver given in the configuration file. - - :arg mesh: input mesh - :arg config: configuration file, which - specifies the PDE and QoI - :kwarg enrichment_method: how to enrich the - finite element space? 
- :kwarg retall: if ``True``, return the forward - solution and adjoint solution in addition - to the dual-weighted residual error indicator - """ - if not enrichment_method == "h": - raise NotImplementedError # TODO - with PETSc.Log.Event("Enrichment"): - tt_steps = config.parameters.tt_steps - mesh, ref_mesh = MeshHierarchy(mesh, 1) - - # Solve the forward and adjoint problems - out = get_solutions_one2n(mesh=mesh, config=config, refined_mesh=ref_mesh, **kwargs) - if retall and "adjoint" not in out: - return out - - out["times"]["estimator"] -= perf_counter() - - with PETSc.Log.Event("Enrichment"): - adj_sol_plus = out["enriched_adjoint"] - dwr_list = [] - - for step in range(tt_steps): - # Prolong - V_plus = adj_sol_plus[step].function_space() - fwd_sol_plg = Function(V_plus) - tm.prolong(out["forward"][step], fwd_sol_plg) - adj_sol_plg = Function(V_plus) - tm.prolong(out["adjoint"][step], adj_sol_plg) - - # Subtract prolonged adjoint solution from enriched version - adj_error = Function(V_plus) - adj_sols_plus = split_into_components(adj_sol_plus[step]) - adj_sols_plg = split_into_components(adj_sol_plg) - for i, err in enumerate(split_into_components(adj_error)): - err += adj_sols_plus[i] - adj_sols_plg[i] - - # Evaluate errors - dwr_list.append(dwr_indicator(config, mesh, fwd_sol_plg, adj_error)) - - out["dwr"] = time_integrate(dwr_list) - - out["times"]["estimator"] += perf_counter() - - return out if retall else out["dwr"] - - -def dwr_indicator(config, mesh, q, q_star): - r""" - Evaluate the DWR error indicator as a :math:`\mathbb P0` field. - - :arg mesh: the current mesh - :arg q: the forward solution, transferred into enriched space - :arg q_star: the adjoint solution in enriched space - """ - mesh_plus = q.function_space().mesh() - - # Extract indicator in enriched space - solver_obj = config.Solver(mesh_plus, q) - F = solver_obj.form - V = solver_obj.function_space - dwr_plus = get_dwr_indicator(F, q_star, test_space=V) - - # Project down to base space - P0 = FunctionSpace(mesh, "DG", 0) - dwr = project(dwr_plus, P0) - dwr.interpolate(abs(dwr)) - return dwr diff --git a/build/lib/nn_adapt/utility.py b/build/lib/nn_adapt/utility.py deleted file mode 100644 index 229de16..0000000 --- a/build/lib/nn_adapt/utility.py +++ /dev/null @@ -1,66 +0,0 @@ -__all__ = ["ConvergenceTracker"] - - -class ConvergenceTracker: - """ - Class for checking convergence of fixed point - iteration loops. - """ - - def __init__(self, mesh, parsed_args): - self.qoi_old = None - self.elements_old = mesh.num_cells() - self.estimator_old = None - self.converged_reason = None - self.qoi_rtol = parsed_args.qoi_rtol - self.element_rtol = parsed_args.element_rtol - self.estimator_rtol = parsed_args.estimator_rtol - self.fp_iteration = 0 - self.miniter = parsed_args.miniter - self.maxiter = parsed_args.maxiter - assert self.maxiter >= self.miniter - - def check_maxiter(self): - """ - Check for reaching maximum number of iterations. - """ - converged = False - if self.fp_iteration >= self.maxiter: - self.converged_reason = "reaching maximum iteration count" - converged = True - return converged - - def _chk(self, val, old, rtol, reason): - converged = False - if old is not None and self.fp_iteration >= self.miniter: - if abs(val - old) < rtol * abs(old): - self.converged_reason = reason - converged = True - return converged - - def check_qoi(self, val): - """ - Check for QoI convergence. 
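        Convergence is declared once at least ``miniter`` iterations
        have run and ``abs(val - qoi_old) < qoi_rtol * abs(qoi_old)``
        holds (see ``_chk`` above).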
- """ - r = "QoI convergence" - converged = self._chk(val, self.qoi_old, self.qoi_rtol, r) - self.qoi_old = val - return converged - - def check_estimator(self, val): - """ - Check for error estimator convergence. - """ - r = "error estimator convergence" - converged = self._chk(val, self.estimator_old, self.estimator_rtol, r) - self.estimator_old = val - return converged - - def check_elements(self, val): - """ - Check for mesh element count convergence. - """ - r = "element count convergence" - converged = self._chk(val, self.elements_old, self.element_rtol, r) - self.elements_old = val - return converged diff --git a/examples/a_text.py b/examples/a_text.py deleted file mode 100644 index cab171a..0000000 --- a/examples/a_text.py +++ /dev/null @@ -1,117 +0,0 @@ -from copy import deepcopy -from thetis import * -from firedrake.adjoint import * -from firedrake import * -from firedrake_adjoint import * - -import numpy as np - -lx = 40e3 -ly = 2e3 -nx = 25 -ny = 20 -mesh2d = RectangleMesh(nx, ny, lx, ly) - - -def get_function_space(mesh): - """ - Construct the (mixed) finite element space used for the - prognostic solution. - """ - P1v_2d = get_functionspace(mesh, "DG", 1, vector=True) - P2_2d = get_functionspace(mesh, "CG", 2) - return P1v_2d * P2_2d - - -def get_qoi(mesh): - """ - Extract the quantity of interest function from the :class:`Parameters` - object. - - It should have one argument - the prognostic solution. - """ - def qoi(sol): - return inner(sol, sol) * ds(2) - - return qoi - - -P1_2d = FunctionSpace(mesh2d, 'CG', 1) -bathymetry_2d = Function(P1_2d, name='Bathymetry') -depth = 20.0 -bathymetry_2d.assign(depth) - -# total duration in seconds -t_end = 50 -# export interval in seconds -t_export = 10 - -solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d) -options = solver_obj.options -options.simulation_export_time = t_export -options.simulation_end_time = t_end -options.quadratic_drag_coefficient = Constant(0.0025) - -options.swe_timestepper_type = 'CrankNicolson' -options.timestep = 10.0 - -elev_init = Function(P1_2d, name='initial elevation') - -xy = SpatialCoordinate(mesh2d) -gauss_width = 4000. 
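# Initial elevation: a Gaussian bump of amplitude gauss_ampl centred
# at x = lx/2 (units assumed to be metres, consistent with the
# 40 km x 2 km domain above)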
-gauss_ampl = 2.0 -gauss_expr = gauss_ampl * exp(-((xy[0]-lx/2)/gauss_width)**2) - -elev_init.interpolate(gauss_expr) - -tape = get_working_tape() -tape.clear_tape() - -# Setup forward solution -solver_obj.assign_initial_conditions(elev=elev_init) -solver_obj.iterate() -fwd_sol = solver_obj.fields.solution_2d - -stop_annotating(); -solve_blocks = get_solve_blocks() -J_form = inner(fwd_sol, fwd_sol)*ds(2) -J = assemble(J_form) -drag_func = Control(solver_obj.options.quadratic_drag_coefficient) -g = compute_gradient(J, drag_func) - -q_star = solve_blocks[0].adj_sol -print(q_star) - -# # Adjoint solver -# sp = { -# "mat_type": "aij", -# "snes_type": "newtonls", -# "snes_linesearch_type": "bt", -# "snes_rtol": 1.0e-08, -# "snes_max_it": 100, -# "ksp_type": "preonly", -# "pc_type": "lu", -# "pc_factor_mat_solver_type": "mumps", -# } - -# V = fwd_sol.function_space() -# q_star = Function(V) -# F = solver_obj.timestepper.F -# sol_temp = Function(V) -# sol_temp.assign(fwd_sol) -# J = get_qoi(mesh2d)(fwd_sol) -# dJdq = derivative(J, fwd_sol, TestFunction(V)) -# q_star = [] -# for i in range(10): -# dFdq = derivative(F, sol_temp, TrialFunction(V)) -# dFdq_transpose = adjoint(dFdq) -# print("this step") -# solve(dFdq_transpose == dJdq, sol_temp, solver_parameters=sp) -# q_star.append(sol_temp) - - -ee_file = File(f"out/adjoint.pvd") -ee_file.write(*q_star.split()) - -# for i in range(len(q_star)): -# ee_file.write(*q_star[i].split()) diff --git a/examples/makefile b/examples/makefile index 940718c..2ae9714 100644 --- a/examples/makefile +++ b/examples/makefile @@ -4,7 +4,7 @@ all: setup network test APPROACHES = anisotropic MODEL = steady_turbine -NUM_TRAINING_CASES = 100 +NUM_TRAINING_CASES = 1 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all @@ -169,6 +169,14 @@ snapshot_go: echo "Goal-oriented snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log echo "" >> timing.log +tessst: + for case in $(TESTING_CASES); do \ + for approach in $(APPROACHES); do \ + python3 a_tessst.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ + done; \ + done && \ + + # Apply data-driven adaptation to the test cases # ============================================== # diff --git a/examples/models/pyroteus_burgers.py b/examples/models/pyroteus_burgers.py new file mode 100644 index 0000000..2a8862d --- /dev/null +++ b/examples/models/pyroteus_burgers.py @@ -0,0 +1,230 @@ +from firedrake import * +from pyroteus_adjoint import * +from firedrake.petsc import PETSc +import nn_adapt.model + +''' +A memory hungry method solving time dependent PDE. +''' +class Parameters(nn_adapt.model.Parameters): + """ + Class encapsulating all parameters required for a simple + Burgers equation test case. + """ + + qoi_name = "right boundary integral" + qoi_unit = r"m\,s^{-1}" + + # Adaptation parameters + h_min = 1.0e-10 # Minimum metric magnitude + h_max = 1.0 # Maximum metric magnitude + + # Physical parameters + viscosity_coefficient = 0.0001 + initial_speed = 1.0 + + # Timestepping parameters + timestep = 0.05 + + solver_parameters = {} + adjoint_solver_parameters = {} + + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. 
+ """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def drag(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. + Note that there isn't really a concept of bathymetry + for Burgers equation. It is kept constant and should + be ignored by the network. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(1.0) + + def viscosity(self, mesh): + """ + Compute the viscosity coefficient on the current `mesh`. + """ + P0_2d = FunctionSpace(mesh, "DG", 0) + return Function(P0_2d).assign(self.viscosity_coefficient) + + def ic(self, mesh): + """ + Initial condition + """ + x, y = SpatialCoordinate(mesh) + expr = self.initial_speed * sin(pi * x) + return as_vector([expr, 0]) + + +def get_function_spaces(mesh): + return {"u": VectorFunctionSpace(mesh, "CG", 2)} + + +def get_form(mesh_seq): + def form(index, solutions): + u, u_ = solutions["u"] + P = mesh_seq.time_partition + dt = Constant(P.timesteps[index]) + + # Specify viscosity coefficient + nu = Constant(0.0001) + + # Setup variational problem + v = TestFunction(u.function_space()) + F = ( + inner((u - u_) / dt, v) * dx + + inner(dot(u, nabla_grad(u)), v) * dx + + nu * inner(grad(u), grad(v)) * dx + ) + return F + + return form + + +def get_solver(mesh_seq): + def solver(index, ic): + function_space = mesh_seq.function_spaces["u"][index] + u = Function(function_space) + + # Initialise 'lagged' solution + u_ = Function(function_space, name="u_old") + u_.assign(ic["u"]) + + # Define form + F = mesh_seq.form(index, {"u": (u, u_)}) + + # Time integrate from t_start to t_end + P = mesh_seq.time_partition + t_start, t_end = P.subintervals[index] + dt = P.timesteps[index] + t = t_start + step = 0 + while t < t_end - 1.0e-05: + step += 1 + print(step) + solve(F == 0, u, ad_block_tag="u") + u_.assign(u) + t += dt + return {"u": u} + + return solver + + +def get_initial_condition(mesh_seq): + fs = mesh_seq.function_spaces["u"][0] + x, y = SpatialCoordinate(mesh_seq[0]) + return {"u": interpolate(as_vector([sin(pi * x), 0]), fs)} + +def get_qoi(mesh_seq, solutions, index): + def end_time_qoi(): + u = solutions["u"] + return inner(u, u) * ds(2) + + def time_integrated_qoi(t): + dt = Constant(mesh_seq.time_partition[index].timestep) + u = solutions["u"] + return dt * inner(u, u) * ds(2) + + if mesh_seq.qoi_type == "end_time": + return end_time_qoi + else: + return time_integrated_qoi + + +PETSc.Sys.popErrorHandler() +parameters = Parameters() + +class pyroteus_burgers(): + + def __init__(self, meshes, ic, **kwargs): + + self.meshes = meshes + self.kwargs = kwargs + try: + self.nu = [parameters.viscosity(mesh) for mesh in meshes] + self.num_subintervals = len(meshes) + except: + self.nu = parameters.viscosity(meshes) + self.num_subintervals = 1 + + def setups(self): + + fields = ["u"] + + dt = 0.1 + steps_subintervals = 3 + end_time = self.num_subintervals * steps_subintervals * dt + + timesteps_per_export = 1 + + time_partition = TimePartition( + end_time, + self.num_subintervals, + dt, + fields, + timesteps_per_export=timesteps_per_export, + ) + + self._mesh_seq = GoalOrientedMeshSeq( + time_partition, + self.meshes, + get_function_spaces=get_function_spaces, + get_initial_condition=get_initial_condition, + get_form=get_form, + get_solver=get_solver, + get_qoi=get_qoi, + qoi_type="end_time", + ) + + + def iterate(self): + self.setups() + self._solutions, self._indicators = self._mesh_seq.indicate_errors( + enrichment_kwargs={"enrichment_method": "h"} + ) + + + def integrate(self, 
item): + result = [0 for _ in range(self._mesh_seq.num_subintervals)] + steps = self._mesh_seq.time_partition.timesteps_per_subinterval + + for id, list in enumerate(item): + for element in list: + result[id] += element + result[id] = product((result[id], 1/steps[id])) + + return result + + @property + def fwd_sol(self): + return self.integrate(self._solutions["u"]["forward"]) + + @property + def adj_sol(self): + return self.integrate(self._solutions["u"]["adjoint"]) + + @property + def qoi(self): + return self._mesh_seq.J + + @property + def indicators(self): + return self.integrate(self._indicators) + + +mesh = [UnitSquareMesh(15, 15), UnitSquareMesh(12, 17)] +ic = 0 +demo = Solver_n4one(mesh, ic) + +demo.iterate() + + \ No newline at end of file diff --git a/examples/models/pyroteus_turbine.py b/examples/models/pyroteus_turbine.py new file mode 100644 index 0000000..28fa169 --- /dev/null +++ b/examples/models/pyroteus_turbine.py @@ -0,0 +1,400 @@ +from thetis import * +from pyroteus import * +from pyroteus_adjoint import * +import nn_adapt.model + +class Parameters(nn_adapt.model.Parameters): + """ + Class encapsulating all parameters required for the tidal + farm modelling test case. + """ + + discrete = False + + qoi_name = "power output" + qoi_unit = "MW" + + # Adaptation parameters + h_min = 1.0e-08 + h_max = 500.0 + + # time dependent parameters + end_time = 100. + time_steps = 10. + + # Physical parameters + viscosity_coefficient = 0.5 + depth = 40.0 + drag_coefficient = Constant(0.0025) + inflow_speed = 5.0 + density = Constant(1030.0 * 1.0e-06) + + # Turbine parameters + turbine_diameter = 18.0 + turbine_width = None + turbine_coords = [] + thrust_coefficient = 0.8 + correct_thrust = True + + # Solver parameters + solver_parameters = { + "mat_type": "aij", + "snes_type": "newtonls", + "snes_linesearch_type": "bt", + "snes_rtol": 1.0e-08, + "snes_max_it": 100, + "ksp_type": "preonly", + "pc_type": "lu", + "pc_factor_mat_solver_type": "mumps", + } + adjoint_solver_parameters = solver_parameters + + @property + def num_turbines(self): + """ + Count the number of turbines based on the number + of coordinates. + """ + return len(self.turbine_coords) + + @property + def turbine_ids(self): + """ + Generate the list of turbine IDs, i.e. cell tags used + in the gmsh geometry file. + """ + if self.discrete: + return list(2 + np.arange(self.num_turbines, dtype=np.int32)) + else: + return ["everywhere"] + + @property + def footprint_area(self): + """ + Calculate the area of the turbine footprint in the horizontal. + """ + d = self.turbine_diameter + w = self.turbine_width or d + return d * w + + @property + def swept_area(self): + """ + Calculate the area swept by the turbine in the vertical. + """ + return pi * (0.5 * self.turbine_diameter) ** 2 + + @property + def cross_sectional_area(self): + """ + Calculate the cross-sectional area of the turbine footprint + in the vertical. + """ + return self.depth * self.turbine_diameter + + @property + def corrected_thrust_coefficient(self): + """ + Correct the thrust coefficient to account for the + fact that we use the velocity at the turbine, rather + than an upstream veloicity. + + See [Kramer and Piggott 2016] for details. + """ + Ct = self.thrust_coefficient + if not self.correct_thrust: + return Ct + At = self.swept_area + corr = 4.0 / (1.0 + sqrt(1.0 - Ct * At / self.cross_sectional_area)) ** 2 + return Ct * corr + + def bathymetry(self, mesh): + """ + Compute the bathymetry field on the current `mesh`. 
+ """ + # NOTE: We assume a constant bathymetry field + P0_2d = get_functionspace(mesh, "DG", 0) + return Function(P0_2d).assign(parameters.depth) + + def u_inflow(self, mesh): + """ + Compute the inflow velocity based on the current `mesh`. + """ + # NOTE: We assume a constant inflow + return as_vector([self.inflow_speed, 0]) + + def ic(self, mesh): + """ + Initial condition. + """ + return self.u_inflow(mesh) + + def turbine_density(self, mesh): + """ + Compute the turbine density function on the current `mesh`. + """ + if self.discrete: + return Constant(1.0 / self.footprint_area, domain=mesh) + x, y = SpatialCoordinate(mesh) + r2 = self.turbine_diameter / 2 + r1 = r2 if self.turbine_width is None else self.turbine_width / 2 + + def bump(x0, y0, scale=1.0): + qx = ((x - x0) / r1) ** 2 + qy = ((y - y0) / r2) ** 2 + cond = And(qx < 1, qy < 1) + b = exp(1 - 1 / (1 - qx)) * exp(1 - 1 / (1 - qy)) + return conditional(cond, Constant(scale) * b, 0) + + bumps = 0 + for xy in self.turbine_coords: + bumps += bump(*xy, scale=1 / assemble(bump(*xy) * dx)) + return bumps + + def farm(self, mesh): + """ + Construct a dictionary of :class:`TidalTurbineFarmOptions` + objects based on the current `mesh`. + """ + Ct = self.corrected_thrust_coefficient + farm_options = TidalTurbineFarmOptions() + farm_options.turbine_density = self.turbine_density(mesh) + farm_options.turbine_options.diameter = self.turbine_diameter + farm_options.turbine_options.thrust_coefficient = Ct + return {farm_id: farm_options for farm_id in self.turbine_ids} + + def turbine_drag(self, mesh): + """ + Compute the contribution to the drag coefficient due to the + tidal turbine parametrisation on the current `mesh`. + """ + P0_2d = get_functionspace(mesh, "DG", 0) + p0test = TestFunction(P0_2d) + Ct = self.corrected_thrust_coefficient + At = self.swept_area + Cd = 0.5 * Ct * At * self.turbine_density(mesh) + return sum([p0test * Cd * dx(tag, domain=mesh) for tag in self.turbine_ids]) + + def drag(self, mesh, background=False): + r""" + Create a :math:`\mathbb P0` field for the drag on the current + `mesh`. + + :kwarg background: should we consider the background drag + alone, or should the turbine drag be included? + """ + P0_2d = get_functionspace(mesh, "DG", 0) + ret = Function(P0_2d) + + # Background drag + Cb = self.drag_coefficient + if background: + return ret.assign(Cb) + p0test = TestFunction(P0_2d) + expr = p0test * Cb * dx(domain=mesh) + + # Turbine drag + assemble(expr + self.turbine_drag(mesh), tensor=ret) + return ret + + def viscosity(self, mesh): + r""" + Create a :math:`\mathbb P0` field for the viscosity coefficient + on the current `mesh`. 
+ """ + # NOTE: We assume a constant viscosity coefficient + P0_2d = get_functionspace(mesh, "DG", 0) + return Function(P0_2d).assign(self.viscosity_coefficient) + + +PETSc.Sys.popErrorHandler() +parameters = Parameters() +kwargs = {} + + +class Solver_one4n(): + + def __init__(self, mesh, **kwargs): + + fields = ["q"] + self.time_partition = TimeInterval(parameters.end_time, + parameters.time_steps, + fields, timesteps_per_export=1) + self.mesh = mesh + self.kwargs = kwargs + + + def setup(self): + def get_solver(mesh_seq): + + def solver(index, ic): + V = mesh_seq.function_spaces["q"][index] + q = ic["q"] + mesh_seq.form(index, {"q": (q, q)}) + u_init, eta_init = q.split() + mesh_seq._thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) + mesh_seq._thetis_solver.iterate(**self.kwargs) + + return {"q": mesh_seq._thetis_solver.fields.solution_2d} + + return solver + + + def get_form(mesh_seq): + def form(index, ic): + P = mesh_seq.time_partition + + bathymetry = parameters.bathymetry(mesh_seq[index]) + Cd = parameters.drag_coefficient + sp = self.kwargs.pop("solver_parameters", None) + + # Create solver object + mesh_seq._thetis_solver = solver2d.FlowSolver2d(mesh_seq[index], bathymetry) + options = mesh_seq._thetis_solver.options + options.element_family = "dg-cg" + options.timestep = P.timestep + options.simulation_export_time = P.timestep * P.timesteps_per_export[index] + options.simulation_end_time = P.end_time + options.swe_timestepper_type = "SteadyState" + options.swe_timestepper_options.solver_parameters = ( + sp or parameters.solver_parameters + ) + options.use_grad_div_viscosity_term = False + options.horizontal_viscosity = parameters.viscosity(mesh_seq[index]) + options.quadratic_drag_coefficient = Cd + options.use_lax_friedrichs_velocity = True + options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) + options.use_grad_depth_viscosity_term = False + options.no_exports = True + options.update(self.kwargs) + + # Apply boundary conditions + mesh_seq._thetis_solver.create_function_spaces() + P1v_2d = mesh_seq._thetis_solver.function_spaces.P1v_2d + u_inflow = interpolate(parameters.u_inflow(mesh_seq[index]), P1v_2d) + mesh_seq._thetis_solver.bnd_functions["shallow_water"] = { + 1: {"uv": u_inflow}, # inflow + 2: {"elev": Constant(0.0)}, # outflow + 3: {"un": Constant(0.0)}, # free-slip + 4: {"uv": Constant(as_vector([0.0, 0.0]))}, # no-slip + 5: {"elev": Constant(0.0), "un": Constant(0.0)} # weakly reflective + } + + # # Create tidal farm + options.tidal_turbine_farms = parameters.farm(mesh_seq[index]) + mesh_seq._thetis_solver.create_timestepper() + + # # Apply initial guess + # u_init, eta_init = ic["q"].split() + # thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) + + return mesh_seq._thetis_solver.timestepper.F + + return form + + + def get_function_space(mesh): + """ + Construct the (mixed) finite element space used for the + prognostic solution. + """ + P1v_2d = get_functionspace(mesh, "DG", 1, vector=True) + P2_2d = get_functionspace(mesh, "CG", 2) + return {"q": P1v_2d * P2_2d} + + + def get_initial_condition(mesh_seq): + """ + Compute an initial condition based on the inflow velocity + and zero free surface elevation. + """ + V = mesh_seq.function_spaces["q"][0] + q = Function(V) + u, eta = q.split() + u.interpolate(parameters.ic(mesh_seq)) + return {"q": q} + + + def get_qoi(mesh_seq, solutions, index): + """ + Extract the quantity of interest function from the :class:`Parameters` + object. 
+ + It should have one argument - the prognostic solution. + """ + dt = Constant(mesh_seq.time_partition[index].timestep) + rho = parameters.density + Ct = parameters.corrected_thrust_coefficient + At = parameters.swept_area + Cd = 0.5 * Ct * At * parameters.turbine_density(mesh_seq[index]) + tags = parameters.turbine_ids + sol = solutions["q"] + + def qoi(): + u, eta = split(sol) + return sum([rho * Cd * pow(dot(u, u), 1.5) * dx(tag) for tag in tags]) + + return qoi + + + self._mesh_seq = GoalOrientedMeshSeq( + self.time_partition, + self.mesh, + get_function_spaces=get_function_space, + get_initial_condition=get_initial_condition, + get_form=get_form, + get_solver=get_solver, + get_qoi=get_qoi, + qoi_type="end_time") + + + def iterate(self): + self.setup() + self._solutions, self._indicators = self._mesh_seq.indicate_errors( + enrichment_kwargs={"enrichment_method": "h"}) + + + def integrate(self, item): + result = [0 for _ in range(self._mesh_seq.num_subintervals)] + steps = self._mesh_seq.time_partition.timesteps_per_subinterval + + for id, list in enumerate(item): + for element in list: + result[id] += element + result[id] = product((result[id], 1/steps[id])) + + return result + + @property + def fwd_sol(self): + return self.integrate(self._solutions["u"]["forward"]) + + @property + def adj_sol(self): + return self.integrate(self._solutions["u"]["adjoint"]) + + @property + def qoi(self): + return self._mesh_seq.J + + @property + def indicators(self): + return self.integrate(self._indicators) + + + + +# print(solutions["q"].keys()) + +# fwd_file = File("./out/forward.pvd") +# for i in range(len(solutions["u"]["forward"][0])): +# fwd_file.write(*solutions["q"]["forward"][0][i].split()) + +# adj_file = File("./out/adjoint.pvd") +# for i in range(len(solutions["u"]["adjoint"][0])): +# adj_file.write(*solutions["q"]["adjoint"][0][i].split()) + +# adj_next_file = File("./out/adjoint_next.pvd") +# for i in range(len(solutions["u"]["adjoint_next"][0])): +# adj_next_file.write(*solutions["q"]["adjoint_next"][0][i].split()) + diff --git a/examples/models/steady_turbine.py b/examples/models/steady_turbine.py index b2ade51..e44ad17 100644 --- a/examples/models/steady_turbine.py +++ b/examples/models/steady_turbine.py @@ -18,6 +18,8 @@ class Parameters(nn_adapt.model.Parameters): # Adaptation parameters h_min = 1.0e-08 h_max = 500.0 + + tt_steps = 10 # Physical parameters viscosity_coefficient = 0.5 @@ -229,6 +231,8 @@ def __init__(self, mesh, ic, **kwargs): :arg mesh: the mesh to define the solver on :arg ic: the initial condition """ + self.mesh = mesh + bathymetry = parameters.bathymetry(mesh) Cd = parameters.drag_coefficient sp = kwargs.pop("solver_parameters", None) @@ -237,9 +241,10 @@ def __init__(self, mesh, ic, **kwargs): self._thetis_solver = solver2d.FlowSolver2d(mesh, bathymetry) options = self._thetis_solver.options options.element_family = "dg-cg" - options.timestep = 20.0 - options.simulation_export_time = 20.0 - options.simulation_end_time = 18.0 + options.timestep = 100.0 + options.simulation_export_time = 100.0 + options.simulation_end_time = options.timestep * parameters.tt_steps + options.simulation_end_time = 80.0 options.swe_timestepper_type = "SteadyState" options.swe_timestepper_options.solver_parameters = ( sp or parameters.solver_parameters @@ -285,6 +290,14 @@ def form(self): The weak form of the shallow water equations. 
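        (Thetis exposes this as ``timestepper.F``; it is the residual
        from which ``dwr_indicator`` assembles the error indicator.)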
""" return self._thetis_solver.timestepper.F + + def save2file(self, items, file): + ee_file = File(file) + try: + for i in range(len(items)): + ee_file.write(*items[i].split()) + except: + ee_file.write(*items.split()) def iterate(self, **kwargs): """ diff --git a/examples/run_adapt.py b/examples/run_adapt.py index 69d9cd4..4dc84a7 100644 --- a/examples/run_adapt.py +++ b/examples/run_adapt.py @@ -82,6 +82,9 @@ kwargs["target_complexity"] = ramp_complexity( base_complexity, target_complexity, ct.fp_iteration ) + + # out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) + # print(out) # Compute goal-oriented metric out = go_metric(mesh, setup, convergence_checker=ct, **kwargs) diff --git a/examples/steady_turbine/testing_cases.txt b/examples/steady_turbine/testing_cases.txt index 320c621..72c4630 100644 --- a/examples/steady_turbine/testing_cases.txt +++ b/examples/steady_turbine/testing_cases.txt @@ -1 +1 @@ -aligned offset aligned_reversed trench +aligned From 2ea4c76d940dc5296c673f7c5030e7bbab739682 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Sat, 20 Aug 2022 08:30:22 +0100 Subject: [PATCH 09/13] try(not work): adapt multiple meshes --- .DS_Store | Bin 6148 -> 6148 bytes adaptation_n2n/a_test.py | 36 -- adaptation_n2n/burgers_n2n/config.py | 38 -- adaptation_n2n/burgers_n2n/meshgen.py | 2 - adaptation_n2n/compute_importance.py | 102 ---- adaptation_n2n/makefile | 319 ----------- adaptation_n2n/meshgen.py | 34 -- adaptation_n2n/models/burgers_n2n.py | 276 --------- adaptation_n2n/plot_config.py | 65 --- adaptation_n2n/plot_convergence.py | 179 ------ adaptation_n2n/plot_importance.py | 75 --- adaptation_n2n/plot_progress.py | 48 -- adaptation_n2n/plot_timings.py | 72 --- adaptation_n2n/run_adapt.py | 153 ----- adaptation_n2n/run_adapt_ml.py | 167 ------ adaptation_n2n/run_adaptation_loop.py | 154 ------ adaptation_n2n/run_adaptation_loop_ml.py | 195 ------- adaptation_n2n/run_fixed_mesh.py | 48 -- adaptation_n2n/run_uniform_refinement.py | 76 --- adaptation_n2n/test_and_train.py | 266 --------- adaptation_one2n/a_test.py | 101 ---- adaptation_one2n/burgers_one2n/config.py | 160 ------ adaptation_one2n/burgers_one2n/network.py | 43 -- .../burgers_one2n/testing_cases.txt | 1 - adaptation_one2n/compute_importance.py | 102 ---- adaptation_one2n/makefile | 319 ----------- adaptation_one2n/meshgen.py | 34 -- adaptation_one2n/models/burgers_one2n.py | 522 ------------------ adaptation_one2n/plot_config.py | 65 --- adaptation_one2n/plot_convergence.py | 179 ------ adaptation_one2n/plot_importance.py | 75 --- adaptation_one2n/plot_progress.py | 48 -- adaptation_one2n/plot_timings.py | 72 --- adaptation_one2n/run_adapt.py | 148 ----- adaptation_one2n/run_adapt_ml.py | 167 ------ adaptation_one2n/run_adaptation_loop.py | 155 ------ adaptation_one2n/run_adaptation_loop_ml.py | 197 ------- adaptation_one2n/run_fixed_mesh.py | 48 -- adaptation_one2n/run_uniform_refinement.py | 76 --- adaptation_one2n/test_and_train.py | 266 --------- examples/.DS_Store | Bin 6148 -> 6148 bytes examples/burgers/network.py | 43 -- examples/burgers/testing_cases.txt | 1 - examples/makefile | 12 +- examples/models/burgers.py | 176 ------ examples/models/pyroteus_burgers.py | 114 ++-- examples/models/pyroteus_turbine.py | 292 +++++----- examples/models/steady_turbine.py | 341 ------------ .../pyroteus_burgers}/.DS_Store | Bin 6148 -> 6148 bytes .../{burgers => pyroteus_burgers}/config.py | 2 +- .../pyroteus_burgers}/meshgen.py | 0 .../pyroteus_burgers}/network.py | 0 .../pyroteus_burgers}/testing_cases.txt 
| 0 .../pyroteus_turbine}/.DS_Store | Bin 6148 -> 6148 bytes .../config.py | 2 +- .../{burgers => pyroteus_turbine}/meshgen.py | 0 .../network.py | 0 .../plot_pipe.py | 0 .../{burgers => pyroteus_turbine}/plotting.py | 0 examples/pyroteus_turbine/testing_cases.txt | 1 + examples/run_adapt.py | 40 +- examples/run_adapt_ml.py | 40 +- examples/run_adaptation_loop.py | 38 +- examples/run_adaptation_loop_ml.py | 52 +- examples/run_fixed_mesh.py | 14 +- examples/steady_turbine/meshes/headland.geo | 56 -- examples/steady_turbine/meshes/pipe.geo | 72 --- examples/steady_turbine/meshgen.py | 99 ---- examples/steady_turbine/plot_pipe.py | 32 -- examples/steady_turbine/plotting.py | 83 --- examples/steady_turbine/testing_cases.txt | 1 - examples/test_and_train.py | 2 +- examples/turbine/.DS_Store | Bin 6148 -> 0 bytes .../burgers_one2n => nn_adapt}/.DS_Store | Bin nn_adapt/ann.py | 13 +- nn_adapt/features.py | 21 +- nn_adapt/layout.py | 2 +- nn_adapt/metric.py | 93 +++- nn_adapt/metric_one2n.py | 112 ---- nn_adapt/parse.py | 5 - nn_adapt/solving.py | 190 ++----- nn_adapt/solving_n2n.py | 253 --------- nn_adapt/solving_one2n.py | 246 --------- 83 files changed, 325 insertions(+), 7206 deletions(-) delete mode 100644 adaptation_n2n/a_test.py delete mode 100644 adaptation_n2n/burgers_n2n/config.py delete mode 100644 adaptation_n2n/burgers_n2n/meshgen.py delete mode 100644 adaptation_n2n/compute_importance.py delete mode 100644 adaptation_n2n/makefile delete mode 100644 adaptation_n2n/meshgen.py delete mode 100644 adaptation_n2n/models/burgers_n2n.py delete mode 100644 adaptation_n2n/plot_config.py delete mode 100644 adaptation_n2n/plot_convergence.py delete mode 100644 adaptation_n2n/plot_importance.py delete mode 100644 adaptation_n2n/plot_progress.py delete mode 100644 adaptation_n2n/plot_timings.py delete mode 100644 adaptation_n2n/run_adapt.py delete mode 100644 adaptation_n2n/run_adapt_ml.py delete mode 100644 adaptation_n2n/run_adaptation_loop.py delete mode 100644 adaptation_n2n/run_adaptation_loop_ml.py delete mode 100644 adaptation_n2n/run_fixed_mesh.py delete mode 100644 adaptation_n2n/run_uniform_refinement.py delete mode 100644 adaptation_n2n/test_and_train.py delete mode 100644 adaptation_one2n/a_test.py delete mode 100644 adaptation_one2n/burgers_one2n/config.py delete mode 100644 adaptation_one2n/burgers_one2n/network.py delete mode 100644 adaptation_one2n/burgers_one2n/testing_cases.txt delete mode 100644 adaptation_one2n/compute_importance.py delete mode 100644 adaptation_one2n/makefile delete mode 100644 adaptation_one2n/meshgen.py delete mode 100644 adaptation_one2n/models/burgers_one2n.py delete mode 100644 adaptation_one2n/plot_config.py delete mode 100644 adaptation_one2n/plot_convergence.py delete mode 100644 adaptation_one2n/plot_importance.py delete mode 100644 adaptation_one2n/plot_progress.py delete mode 100644 adaptation_one2n/plot_timings.py delete mode 100644 adaptation_one2n/run_adapt.py delete mode 100644 adaptation_one2n/run_adapt_ml.py delete mode 100644 adaptation_one2n/run_adaptation_loop.py delete mode 100644 adaptation_one2n/run_adaptation_loop_ml.py delete mode 100644 adaptation_one2n/run_fixed_mesh.py delete mode 100644 adaptation_one2n/run_uniform_refinement.py delete mode 100644 adaptation_one2n/test_and_train.py delete mode 100644 examples/burgers/network.py delete mode 100644 examples/burgers/testing_cases.txt delete mode 100644 examples/models/burgers.py delete mode 100644 examples/models/steady_turbine.py rename {adaptation_n2n => 
examples/pyroteus_burgers}/.DS_Store (95%) rename examples/{burgers => pyroteus_burgers}/config.py (96%) rename {adaptation_one2n/burgers_one2n => examples/pyroteus_burgers}/meshgen.py (100%) rename {adaptation_n2n/burgers_n2n => examples/pyroteus_burgers}/network.py (100%) rename {adaptation_n2n/burgers_n2n => examples/pyroteus_burgers}/testing_cases.txt (100%) rename {adaptation_one2n => examples/pyroteus_turbine}/.DS_Store (95%) rename examples/{steady_turbine => pyroteus_turbine}/config.py (99%) rename examples/{burgers => pyroteus_turbine}/meshgen.py (100%) rename examples/{steady_turbine => pyroteus_turbine}/network.py (100%) rename examples/{burgers => pyroteus_turbine}/plot_pipe.py (100%) rename examples/{burgers => pyroteus_turbine}/plotting.py (100%) create mode 100644 examples/pyroteus_turbine/testing_cases.txt delete mode 100644 examples/steady_turbine/meshes/headland.geo delete mode 100644 examples/steady_turbine/meshes/pipe.geo delete mode 100644 examples/steady_turbine/meshgen.py delete mode 100644 examples/steady_turbine/plot_pipe.py delete mode 100644 examples/steady_turbine/plotting.py delete mode 100644 examples/steady_turbine/testing_cases.txt delete mode 100644 examples/turbine/.DS_Store rename {adaptation_one2n/burgers_one2n => nn_adapt}/.DS_Store (100%) delete mode 100644 nn_adapt/metric_one2n.py delete mode 100644 nn_adapt/solving_n2n.py delete mode 100644 nn_adapt/solving_one2n.py diff --git a/.DS_Store b/.DS_Store index 47e942be35914e107650bf0d163d4d66d15d1344..5f7d455a55f7f2f7d3438786865ed5ee0539ddca 100644 GIT binary patch delta 123 zcmZoMXffDe!NkHclXuDF0w$%&aZFrn^Uui5a}S=J$E1f~DhU*17zQWj=N2#k0n1_m zhRF`hDwBUO@vyK@2zw6Xs7=;m=3<+mGVdQyuN|`yf~hun4l@rk``&=f8=1pcCN{8b JX6N|J4*)ldCXWCB delta 119 zcmZoMXffDe!NkG<1d|JxlqSb9aj|`$yl8uj-{d?dJp@xppdiCAI5|JJ0HhQICOa^z xO#Z>d1JUcitTtJXnTyTIeer6bUOQ$Z1XFGD9A=))TbYAcCN}VHX6N|J4*&-cB5VKv diff --git a/adaptation_n2n/a_test.py b/adaptation_n2n/a_test.py deleted file mode 100644 index f3281b2..0000000 --- a/adaptation_n2n/a_test.py +++ /dev/null @@ -1,36 +0,0 @@ -from nn_adapt.features import * -from nn_adapt.features import extract_array -from nn_adapt.metric import * -from nn_adapt.parse import Parser -from nn_adapt.solving_one2n import * -from nn_adapt.solving_n2n import * -from nn_adapt.solving import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import adapt -from firedrake.petsc import PETSc - -import importlib -import numpy as np - -tt_steps = 10 - -# setup1 = importlib.import_module(f"burgers_n2n.config") -# meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] -# out1 = indicate_errors_n2n(meshes=meshes, config=setup1) -# print(out1) - -# mesh = UnitSquareMesh(20, 20) -# setup2 = importlib.import_module(f"burgers_one2n.config") -# out2 = indicate_errors_one2n(mesh=mesh, config=setup2) -# print(out2) - -# mesh = UnitSquareMesh(20, 20) -# setup2 = importlib.import_module(f"burgers_one2n.config") -# out2 = get_solutions_one2n(mesh=mesh, config=setup2) -# test_array = time_integrate(out2["forward"]) -# print(extract_array(test_array, centroid=True)) - -a = None -b = 1 -print(a or b) - diff --git a/adaptation_n2n/burgers_n2n/config.py b/adaptation_n2n/burgers_n2n/config.py deleted file mode 100644 index cff2483..0000000 --- a/adaptation_n2n/burgers_n2n/config.py +++ /dev/null @@ -1,38 +0,0 @@ -from models.burgers_n2n import * -from nn_adapt.ann import sample_uniform -import numpy as np - - -testing_cases = ["demo"] - - -def initialise(case, discrete=False): - """ - Given some training case 
(for which ``case`` - is an integer) or testing case (for which - ``case`` is a string), set up the physical - problems defining the Burgers problem. - - For training data, these values are chosen - randomly. - """ - parameters.case = case - parameters.discrete = discrete - if isinstance(case, int): - parameters.turbine_coords = [] - np.random.seed(100 * case) - - # Random initial speed from 0.01 m/s to 6 m/s - parameters.initial_speed = sample_uniform(0.01, 6.0) - - # Random viscosity from 0.00001 m^2/s to 1 m^2/s - parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1) - return - elif "demo" in case: - parameters.viscosity_coefficient = 0.0001 - parameters.initial_speed = 1.0 - else: - raise ValueError(f"Test case {case} not recognised") - - if "reversed" in case: - parameters.initial_speed *= -1 diff --git a/adaptation_n2n/burgers_n2n/meshgen.py b/adaptation_n2n/burgers_n2n/meshgen.py deleted file mode 100644 index 3467cea..0000000 --- a/adaptation_n2n/burgers_n2n/meshgen.py +++ /dev/null @@ -1,2 +0,0 @@ -def generate_geo(config, reverse=False): - return diff --git a/adaptation_n2n/compute_importance.py b/adaptation_n2n/compute_importance.py deleted file mode 100644 index 3e2bf4c..0000000 --- a/adaptation_n2n/compute_importance.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Compute the sensitivities of a network trained on a -particular ``model`` to its input parameters. -""" -from nn_adapt.ann import * -from nn_adapt.parse import argparse, positive_int -from nn_adapt.plotting import * - -import git -import importlib -import numpy as np - - -# Parse model -parser = argparse.ArgumentParser( - prog="compute_importance.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "model", - help="The model", - type=str, - choices=["steady_turbine"], -) -parser.add_argument( - "num_training_cases", - help="The number of training cases", - type=positive_int, -) -parser.add_argument( - "-a", - "--approaches", - nargs="+", - help="Adaptive approaches to consider", - choices=["isotropic", "anisotropic"], - default=["anisotropic"], -) -parser.add_argument( - "--adaptation_steps", - help="Steps to learn from", - type=positive_int, - default=3, -) -parser.add_argument( - "--preproc", - help="Data preprocess function", - type=str, - choices=["none", "arctan", "tanh", "logabs"], - default="arctan", -) -parser.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=git.Repo(search_parent_directories=True).head.object.hexsha, -) -parsed_args = parser.parse_args() -model = parsed_args.model -preproc = parsed_args.preproc -tag = parsed_args.tag - -# Load the model -layout = importlib.import_module(f"{model}.network").NetLayout() -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) -nn.eval() -loss_fn = Loss() - -# Compute (averaged) sensitivities of the network to the inputs -dJdm = torch.zeros(layout.num_inputs) -data_dir = f"{model}/data" -approaches = parsed_args.approaches -values = np.zeros((0, layout.num_inputs)) -for step in range(parsed_args.adaptation_steps): - for approach in approaches: - for test_case in range(1, parsed_args.num_training_cases + 1): - if test_case == 1 and approach != approaches[0]: - continue - suffix = f"{test_case}_GO{approach}_{step}" - - # Load some data and mark inputs as independent - data = { - key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") - for key in layout.inputs - } - features = 
collect_features(data, layout) - values = np.vstack((values, features)) - features = torch.from_numpy(features).type(torch.float32) - features.requires_grad_(True) - - # Run the model and sum the outputs - out = nn(features).sum(axis=0) - - # Backpropagate to get the gradient of the outputs w.r.t. the inputs - out.backward() - dJdm += features.grad.mean(axis=0) - -# Compute representative values for each parameter -dm = np.abs(np.mean(values, axis=0)) - -# Multiply by the variability -sensitivity = dJdm.abs().detach().numpy() * dm -np.save(f"{model}/data/sensitivities_{tag}.npy", sensitivity) diff --git a/adaptation_n2n/makefile b/adaptation_n2n/makefile deleted file mode 100644 index d26d52a..0000000 --- a/adaptation_n2n/makefile +++ /dev/null @@ -1,319 +0,0 @@ -all: setup network test - -# --- Configurable parameters - -APPROACHES = anisotropic -MODEL = burgers_one2n -NUM_TRAINING_CASES = 1 -TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) -PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 -TAG = all - -# --- Parameters that should not need modifying - -TRAINING_CASES = $(shell seq 1 ${NUM_TRAINING_CASES}) -CASES = ${TRAINING_CASES} ${TESTING_CASES} - -# --- Setup directories and meshes - -setup: dir mesh plot_config - -# Create the directory structure -# ============================== -# -# $(MODEL) -#    ├── data -#    ├── outputs -# │    └── $(TESTING_CASES) -#    └── plots -dir: - mkdir -p $(MODEL)/data - mkdir -p $(MODEL)/outputs - mkdir -p $(MODEL)/plots - for case in $(TESTING_CASES); do \ - mkdir -p $(MODEL)/outputs/$$case; \ - done - -# Generate meshes -# =============== -# -# Meshes are generated for all training and testing cases. -# * First, a gmsh geometry file is generated using the -# `meshgen.py` script. The definitions of these cases -# are based on the contents of $(MODEL)/config.py. -# For the `turbine` case, the training data is generated -# randomly. -# * Then the geometry files are used to construct meshes -# in the .msh format. -# -# Gmsh is set to use the "pack" algorithm, which means that -# the initial meshes are quasi-uniform. That is, they are as -# close to uniform as they can be, given that the turbines -# are to be explicitly meshed. -mesh: - touch timing.log - d=$$(date +%s) && \ - for case in $(CASES); do \ - python3 meshgen.py $(MODEL) $$case; \ - if [ -e $(MODEL)/meshes/$$case.geo ]; then \ - gmsh -2 -algo pack $(MODEL)/meshes/$$case.geo -o $(MODEL)/meshes/$$case.msh; \ - fi; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Meshes built in $$(($$(date +%s)-d)) seconds" >> timing.log - -# Plot configurations -# =================== -# -# Plot the configurations for a subset of the training cases -# and the testing cases that are listed in $(MODEL)/config.py. -# The domain geometry and turbine locations are shown, along -# with the physical parameters used. -plot_config: - python3 plot_config.py $(MODEL) 'train' - python3 plot_config.py $(MODEL) 'test' - -# Clean the model directory -# ========================= -# -# Delete all logs, data, outputs, plots and compiled code associated -# with the model. Note that this is a very destructive thing to do! 
-clean: - rm -rf timing.log - rm -rf $(MODEL)/data - rm -rf $(MODEL)/outputs - rm -rf $(MODEL)/plots - rm -rf $(MODEL)/__pycache__ - -# --- Construct the neural network - -network: features train plot_progress plot_importance - -# Generate feature data -# ===================== -# -# This involves applying mesh adaptation to all of the cases in the -# training data. In each case, feature data and "target" error indicator -# data are extracted and saved to file. -features: - touch timing.log - d=$$(date +%s) && \ - for case in $(TRAINING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adapt.py $(MODEL) $$case -a $$approach --no_outputs $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Features generated in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Train the network -# ================= -# -# Train a neural network based on the feature and target data that has -# been saved to file, for a specified number of training cases. The -# network is tagged (using the environment variable $(TAG)) to distinguish -# the model and its outputs. -train: - touch timing.log - d=$$(date +%s) && \ - python3 test_and_train.py -m $(MODEL) -n $(NUM_TRAINING_CASES) --tag $(TAG) && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Training completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ - echo "" >> timing.log - -# Plot loss functions -# =================== -# -# Once the network has been trained, plot the training and validation loss -# curves against iteration count. -plot_progress: - python3 plot_progress.py $(MODEL) --tag $(TAG) - -# Feature importance experiment -# ============================= -# -# Perform an experiment that tests how sensitive the trained network is to -# each of its inputs (i.e. the features). If it is particularly sensitive to -# one of the features then we deduce that the feature is in some sense -# "important" to the network. -plot_importance: - python3 compute_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) - python3 plot_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) - -# --- Test the neural network - -test: snapshot_go snapshot_ml uniform go ml plot_convergence - -# Apply goal-oriented adaptation to the test cases -# ================================================ -# -# Apply goal-oriented mesh adaptation to the testing cases, thereby -# generating lots of output data in Paraview format. These include -# the meshes, solution fields, error indicators and metrics. -snapshot_go: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adapt.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Goal-oriented snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Apply data-driven adaptation to the test cases -# ============================================== -# -# Apply data-driven adaptation based on the trained network to the testing -# cases, thereby generating lots of output data in Paraview format. These -# include the meshes, solution fields, error indicators and metrics. 
-snapshot_ml:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		for approach in $(APPROACHES); do \
-			python3 run_adapt_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \
-		done; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Data-driven snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log
-	echo "" >> timing.log
-
-# Convergence analysis for uniform refinement
-# ===========================================
-#
-# Run the model on a sequence of uniformly refined meshes.
-uniform:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		python3 run_uniform_refinement.py $(MODEL) $$case; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Uniform refinement completed in $$(($$(date +%s)-d)) seconds" >> timing.log
-	echo "" >> timing.log
-
-# Convergence analysis for goal-oriented adaptation
-# =================================================
-#
-# Run the model with the standard goal-oriented approach for
-# a range of target metric complexities.
-go:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		for approach in $(APPROACHES); do \
-			python3 run_adaptation_loop.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \
-		done; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Goal-oriented adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log
-	echo "" >> timing.log
-
-# Convergence analysis for data-driven adaptation
-# ===============================================
-#
-# Run the model with the data-driven approach based on the
-# trained network for a range of target metric complexities.
-ml:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		for approach in $(APPROACHES); do \
-			python3 run_adaptation_loop_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \
-		done; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Data-driven adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log
-	echo "" >> timing.log
-
-# Plot convergence curves
-# =======================
-#
-# Plot the data points generated during the `uniform`, `go` and
-# `ml` recipes and annotate with lines of best fit, where appropriate.
-plot_convergence:
-	for case in $(TESTING_CASES); do \
-		python3 plot_convergence.py $(MODEL) $$case --tag $(TAG); \
-	done
-
-# --- Profiling experiments
-
-# NOTE: The following recipes are somewhat redundant. Similar information
-#       can be obtained from the outputs of the `uniform`, `go` and `ml`
-#       recipes by running `plot_timings.py` with the appropriate input
-#       parameters.
-
-# Profiling for uniform refinement
-# ================================
-#
-# Run the model on a fine fixed mesh generated by refining the initial
-# mesh four times and output the PETSc logging information in a format
-# that can then be turned into a flamegraph.
-profile_uni:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		python3 run_fixed_mesh.py $(MODEL) $$case --optimise --num_refinements 4 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Uniform refinement profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \
-	echo "" >> timing.log
-	for case in $(TESTING_CASES); do \
-		flamegraph.pl --title "Uniform refinement ($$case)" logview.txt > $(MODEL)/outputs/$$case/uni.svg && \
-		rm logview.txt; \
-	done
-
-# Profiling for goal-oriented adaptation
-# ======================================
-#
-# Run the model using the standard goal-oriented approach with a fairly
-# high target metric complexity of 64,000 and output the PETSc logging
-# information in a format that can then be turned into a flamegraph.
-profile_go:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		python3 run_adapt.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Goal-oriented adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log
-	echo "" >> timing.log
-	for case in $(TESTING_CASES); do \
-		flamegraph.pl --title "Goal-oriented adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/go.svg && \
-		rm logview.txt; \
-	done
-
-# Profiling for data-driven adaptation
-# ====================================
-#
-# Run the model using the data-driven adaptation approach based on the
-# trained network with a fairly high target metric complexity of 64,000
-# and output the PETSc logging information in a format that can then be
-# turned into a flamegraph.
-profile_ml:
-	touch timing.log
-	d=$$(date +%s) && \
-	for case in $(TESTING_CASES); do \
-		python3 run_adapt_ml.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) --tag all -log_view :logview.txt:ascii_flamegraph; \
-	done && \
-	date >> timing.log && \
-	git log -n 1 --oneline >> timing.log && \
-	echo "Data-driven adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log
-	echo "" >> timing.log
-	for case in $(TESTING_CASES); do \
-		flamegraph.pl --title "Data-driven adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/ml.svg && \
-		rm logview.txt; \
-	done
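The profiling recipes above combine two ingredients: named PETSc log events recorded inside the Python scripts, and the `-log_view :logview.txt:ascii_flamegraph` option, whose output `flamegraph.pl` renders as an SVG. As a minimal sketch (not part of the patch; the event name is illustrative, matching the one used in `run_adapt.py` below), such an event is recorded with petsc4py as follows:

    # Record a named PETSc log event around a region of interest so that
    # it appears as a distinct frame in the flamegraph produced above.
    from firedrake.petsc import PETSc

    with PETSc.Log.Event("Mesh adaptation"):
        pass  # the timed work goes here, e.g. mesh = adapt(mesh, metric)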
-""" -import argparse -import importlib -import sys - - -# Parse for test case -parser = argparse.ArgumentParser(prog="meshgen.py") -parser.add_argument("model", help="The model") -parser.add_argument("case", help="The configuration file name") -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -reverse = False -try: - case = int(parsed_args.case) - assert case > 0 -except ValueError: - case = parsed_args.case - reverse = "reversed" in case - -# Load setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(case) -meshgen = importlib.import_module(f"{model}.meshgen") - -# Write geometry file -code = meshgen.generate_geo(setup, reverse=reverse) -if code is None: - sys.exit(0) -with open(f"{model}/meshes/{case}.geo", "w+") as meshfile: - meshfile.write(code) diff --git a/adaptation_n2n/models/burgers_n2n.py b/adaptation_n2n/models/burgers_n2n.py deleted file mode 100644 index bdd6cb3..0000000 --- a/adaptation_n2n/models/burgers_n2n.py +++ /dev/null @@ -1,276 +0,0 @@ -from copy import deepcopy -from firedrake import * -from firedrake.petsc import PETSc -from firedrake_adjoint import * -from firedrake.adjoint import get_solve_blocks -import nn_adapt.model -import nn_adapt.solving - -''' -A memory hungry method solving time dependent PDE. -''' - -class Parameters(nn_adapt.model.Parameters): - """ - Class encapsulating all parameters required for a simple - Burgers equation test case. - """ - - qoi_name = "right boundary integral" - qoi_unit = r"m\,s^{-1}" - - # Adaptation parameters - h_min = 1.0e-10 # Minimum metric magnitude - h_max = 1.0 # Maximum metric magnitude - - # Physical parameters - viscosity_coefficient = 0.0001 - initial_speed = 1.0 - - # Timestepping parameters - timestep = 0.05 - tt_steps = 20 - - solver_parameters = {} - adjoint_solver_parameters = {} - - def bathymetry(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) - - def drag(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) - - def viscosity(self, mesh): - """ - Compute the viscosity coefficient on the current `mesh`. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(self.viscosity_coefficient) - - def ic(self, mesh): - """ - Initial condition - """ - x, y = SpatialCoordinate(mesh) - expr = self.initial_speed * sin(pi * x) - yside = self.initial_speed * sin(pi * y) - yside = 0 - return as_vector([expr, yside]) - - -PETSc.Sys.popErrorHandler() -parameters = Parameters() - - -def get_function_space(mesh): - r""" - Construct the :math:`\mathbb P2` finite element space - used for the prognostic solution. - """ - return VectorFunctionSpace(mesh, "CG", 2) - - -class Solver(nn_adapt.solving.Solver): - """ - Solver object based on current mesh and state. 
diff --git a/adaptation_n2n/models/burgers_n2n.py b/adaptation_n2n/models/burgers_n2n.py
deleted file mode 100644
index bdd6cb3..0000000
--- a/adaptation_n2n/models/burgers_n2n.py
+++ /dev/null
@@ -1,276 +0,0 @@
-from copy import deepcopy
-from firedrake import *
-from firedrake.petsc import PETSc
-from firedrake_adjoint import *
-from firedrake.adjoint import get_solve_blocks
-import nn_adapt.model
-import nn_adapt.solving
-
-'''
-A memory-hungry method for solving a time-dependent PDE.
-'''
-
-class Parameters(nn_adapt.model.Parameters):
-    """
-    Class encapsulating all parameters required for a simple
-    Burgers equation test case.
-    """
-
-    qoi_name = "right boundary integral"
-    qoi_unit = r"m\,s^{-1}"
-
-    # Adaptation parameters
-    h_min = 1.0e-10  # Minimum metric magnitude
-    h_max = 1.0  # Maximum metric magnitude
-
-    # Physical parameters
-    viscosity_coefficient = 0.0001
-    initial_speed = 1.0
-
-    # Timestepping parameters
-    timestep = 0.05
-    tt_steps = 20
-
-    solver_parameters = {}
-    adjoint_solver_parameters = {}
-
-    def bathymetry(self, mesh):
-        """
-        Compute the bathymetry field on the current `mesh`.
-
-        Note that there isn't really a concept of bathymetry
-        for Burgers equation. It is kept constant and should
-        be ignored by the network.
-        """
-        P0_2d = FunctionSpace(mesh, "DG", 0)
-        return Function(P0_2d).assign(1.0)
-
-    def drag(self, mesh):
-        """
-        Compute the drag field on the current `mesh`.
-
-        Note that there isn't really a concept of drag
-        for Burgers equation. It is kept constant and should
-        be ignored by the network.
-        """
-        P0_2d = FunctionSpace(mesh, "DG", 0)
-        return Function(P0_2d).assign(1.0)
-
-    def viscosity(self, mesh):
-        """
-        Compute the viscosity coefficient on the current `mesh`.
-        """
-        P0_2d = FunctionSpace(mesh, "DG", 0)
-        return Function(P0_2d).assign(self.viscosity_coefficient)
-
-    def ic(self, mesh):
-        """
-        Initial condition based on the initial speed parameter.
-        """
-        x, y = SpatialCoordinate(mesh)
-        expr = self.initial_speed * sin(pi * x)
-        yside = self.initial_speed * sin(pi * y)
-        # The y-component of the initial velocity is switched off
-        yside = 0
-        return as_vector([expr, yside])
-
-
-PETSc.Sys.popErrorHandler()
-parameters = Parameters()
-
-
-def get_function_space(mesh):
-    r"""
-    Construct the :math:`\mathbb P2` finite element space
-    used for the prognostic solution.
-    """
-    return VectorFunctionSpace(mesh, "CG", 2)
-
-
-class Solver(nn_adapt.solving.Solver):
-    """
-    Solver object based on the current mesh and state.
-    """
-
-    def __init__(self, mesh, ic, **kwargs):
-        """
-        :arg mesh: the mesh to define the solver on
-        :arg ic: the current state / initial condition
-        """
-        self.mesh = mesh
-
-        # Collect parameters
-        dt = Constant(parameters.timestep)
-        nu = parameters.viscosity(mesh)
-
-        # Define variational formulation
-        V = self.function_space
-        u = Function(V)
-        u_ = Function(V)
-        v = TestFunction(V)
-        self._form = (
-            inner((u - u_) / dt, v) * dx
-            + inner(dot(u, nabla_grad(u)), v) * dx
-            + nu * inner(grad(u), grad(v)) * dx
-        )
-        problem = NonlinearVariationalProblem(self._form, u)
-
-        # Set initial condition
-        u_.project(parameters.ic(mesh))
-
-        # Create solver
-        self._solver = NonlinearVariationalSolver(problem)
-        self._solution = u
-
-    @property
-    def function_space(self):
-        r"""
-        The :math:`\mathbb P2` finite element space.
-        """
-        return get_function_space(self.mesh)
-
-    @property
-    def form(self):
-        """
-        The weak form of Burgers equation
-        """
-        return self._form
-
-    @property
-    def solution(self):
-        return self._solution
-
-    def iterate(self, **kwargs):
-        """
-        Take a single timestep of Burgers equation
-        """
-        self._solver.solve()
-
-
-class Solver_n2n(nn_adapt.solving.Solver):
-    """
-    Solver object based on a sequence of meshes, one per
-    timestep, and the current state.
-    """
-
-    def __init__(self, meshes, ic, **kwargs):
-        """
-        :arg meshes: the meshes to define the solver on, one per timestep
-        :arg ic: the current state / initial condition
-        """
-        self.meshes = meshes
-
-        # Collect parameters
-        self.tt_steps = parameters.tt_steps
-        self.dt = Constant(parameters.timestep)
-        assert self.tt_steps == len(self.meshes)
-
-        # Physical parameters
-        self.nu = Constant(parameters.viscosity_coefficient)
-
-    @property
-    def function_space(self):
-        r"""
-        The :math:`\mathbb P2` finite element space.
-        """
-        return get_function_space(self.meshes)
-
-    @property
-    def form(self):
-        """
-        The weak form of Burgers equation
-        """
-        return self._form
-
-    @property
-    def solution(self):
-        return self._solutions
-
-    def adjoint_setup(self):
-        J_form = inner(self._u, self._u)*ds(2)
-        J = assemble(J_form)
-
-        # Computing the gradient propagates adjoint data through the tape
-        g = compute_gradient(J, Control(self.nu))
-
-        solve_blocks = get_solve_blocks()
-
-        # 'Initial condition' seeding the adjoint solves
-        dJdu = assemble(derivative(J_form, self._u))
-
-        return dJdu, solve_blocks
-
-    def iterate(self, **kwargs):
-        """
-        Solve Burgers equation over all timesteps, using one
-        mesh per timestep and annotating the tape.
-        """
-
-        # Assign initial condition
-        V = get_function_space(self.meshes[0])
-        ic = parameters.ic(self.meshes[0])
-        u = Function(V)
-        u.project(ic)
-
-        _solutions = []
-
-        tape = get_working_tape()
-        tape.clear_tape()
-
-        # Solve forward
-        for step in range(self.tt_steps):
-            # Define P2 function space and corresponding test function
-            V = get_function_space(self.meshes[step])
-            v = TestFunction(V)
-
-            # Create Functions for the solution and time-lagged solution
-            u_ = Function(V)
-            u_.project(u)
-
-            u = Function(V, name="Velocity")
-
-            # Define nonlinear form
-            F = (inner((u - u_)/self.dt, v) + inner(dot(u, nabla_grad(u)), v) + self.nu*inner(grad(u), grad(v)))*dx
-
-            solve(F == 0, u)
-
-            # Store the forward solution at each step so we can plot it again later
-            _solutions.append(u.copy(deepcopy=True))
-
-        self._form = F
-        self._solutions = _solutions
-        self._u = u
-
-        stop_annotating()
-
-
-def get_initial_condition(function_space):
-    """
-    Compute an initial condition based on the initial
-    speed parameter.
-    """
-    u = Function(function_space)
-    u.interpolate(parameters.ic(function_space.mesh()))
-    return u
-
-
-def get_qoi(mesh):
-    """
-    Extract the quantity of interest function from the :class:`Parameters`
-    object.
-
-    It should have one argument - the prognostic solution.
-    """
-
-    def qoi(sol):
-        return inner(sol, sol) * ds(2)
-
-    return qoi
-
-
-# Initial mesh for all test cases
-initial_mesh = [UnitSquareMesh(30, 30) for i in range(parameters.tt_steps)]
-
-
-# # A simple pretest
-# a = time_dependent_Solver(meshes = initial_mesh, ic = 0, kwargs='0')
-# a.iterate()
-# b = a.solution
-
-# print(b[0].function_space())
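`Solver_n2n` above leans on pyadjoint: the forward loop is recorded on the tape, `compute_gradient` propagates adjoint data back through it, and `get_solve_blocks` then exposes the per-solve records. A self-contained sketch of that pattern under the same imports (the Helmholtz-style problem here is purely illustrative, not the model's equation):

    from firedrake import *
    from firedrake_adjoint import *

    mesh = UnitSquareMesh(4, 4)
    V = FunctionSpace(mesh, "CG", 1)
    nu = Constant(1.0)
    u = Function(V)
    v = TestFunction(V)
    # Forward solve, recorded on the pyadjoint tape
    solve((nu * inner(grad(u), grad(v)) + u * v - v) * dx == 0, u)
    # QoI over the right boundary (id 2) and its gradient w.r.t. nu;
    # computing the gradient sweeps the adjoint through the tape
    J = assemble(u * u * ds(2))
    dJdnu = compute_gradient(J, Control(nu))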
diff --git a/adaptation_n2n/plot_config.py b/adaptation_n2n/plot_config.py
deleted file mode 100644
index 159b4d7..0000000
--- a/adaptation_n2n/plot_config.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Plot the problem configurations for a given ``model``.
-The ``mode`` is chosen from 'train' and 'test'.
-"""
-from firedrake import Mesh
-from nn_adapt.parse import argparse, positive_int
-from nn_adapt.plotting import *
-
-import importlib
-
-
-# Parse model
-parser = argparse.ArgumentParser(
-    prog="plot_config.py",
-    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-)
-parser.add_argument(
-    "model",
-    help="The model",
-    type=str,
-    choices=["steady_turbine"],
-)
-parser.add_argument(
-    "mode",
-    help="Training or testing?",
-    type=str,
-    choices=["train", "test"],
-)
-parser.add_argument(
-    "--num_cols",
-    help="Number of columns in the plot",
-    type=positive_int,
-    default=4,
-)
-parser.add_argument(
-    "--num_rows",
-    help="Number of rows in the plot",
-    type=positive_int,
-    default=4,
-)
-parsed_args = parser.parse_args()
-model = parsed_args.model
-mode = parsed_args.mode
-setup = importlib.import_module(f"{model}.config")
-cases = setup.testing_cases
-ncols = parsed_args.num_cols
-if mode == "test":
-    ncols = len(cases)
-nrows = parsed_args.num_rows
-if mode == "test":
-    nrows = 1
-N = ncols * nrows
-if mode == "train":
-    cases = range(1, N + 1)
-p = importlib.import_module(f"{model}.plotting")
-
-# Plot all configurations
-fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(3 * ncols, 1.5 * nrows))
-for i, case in enumerate(cases):
-    ax = axes[i] if nrows == 1 else axes[i // ncols, i % ncols]  # row-major grid
-    setup.initialise(case, discrete=True)
-    mesh = Mesh(f"{model}/meshes/{case}.msh")
-    p.plot_config(setup, mesh, ax)
-plt.tight_layout()
-plt.savefig(f"{model}/plots/{mode}_config.pdf")
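A note on the subplot indexing fixed above: when both `nrows` and `ncols` exceed one, `plt.subplots` returns a 2D array of axes indexed row-major, so the flat loop index must be split using `ncols` in both the row and the column calculation. A minimal illustration:

    # Row-major mapping of a flat index onto an nrows x ncols grid of axes
    ncols, nrows = 4, 2
    for i in range(nrows * ncols):
        row, col = divmod(i, ncols)  # equivalent to (i // ncols, i % ncols)
        assert row < nrows and col < ncols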
-""" -from nn_adapt.parse import Parser -from nn_adapt.plotting import * - -import importlib -from matplotlib.ticker import FormatStrFormatter -import numpy as np -import os -import sys - - -# Parse user input -parser = Parser("plot_convergence.py") -parser.parse_tag() -parsed_args = parser.parse_args() -model = parsed_args.model -test_case = parsed_args.test_case -tag = parsed_args.tag - -# Formatting -matplotlib.rcParams["font.size"] = 20 -approaches = { - "uniform": { - "label": "Uniform refinement", - "color": "cornflowerblue", - "marker": "x", - "linestyle": "-", - }, - "GOanisotropic": { - "label": "Goal-oriented adaptation", - "color": "orange", - "marker": "o", - "linestyle": "-", - }, - "MLanisotropic": { - "label": "Data-driven adaptation", - "color": "g", - "marker": "^", - "linestyle": "-", - }, -} -xlim = { - "dofs": [3.0e03, 3.0e06], - "times": [1.0e0, 2.0e03], -} - -# Load configuration -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -qoi_name = setup.parameters.qoi_name.capitalize() - -# Load outputs -dofs, qois, times, niter = {}, {}, {}, {} -for approach in approaches.copy(): - ext = f"_{tag}" if approach[:2] == "ML" else "" - try: - dofs[approach] = np.load(f"{model}/data/dofs_{approach}_{test_case}{ext}.npy") - qois[approach] = np.load(f"{model}/data/qois_{approach}_{test_case}{ext}.npy") - times[approach] = np.load(f"{model}/data/times_all_{approach}_{test_case}{ext}.npy") - niter[approach] = np.load(f"{model}/data/niter_{approach}_{test_case}{ext}.npy") - print(f"Iteration count for {approach}: {niter[approach]}") - except IOError: - print(f"Cannot load {approach} data for test case {test_case}") - approaches.pop(approach) - continue -if len(approaches.keys()) == 0: - print("Nothing to plot.") - sys.exit(0) - -# Drop first iteration because timings include compilation # FIXME: Why? 
-dofs["uniform"] = dofs["uniform"][1:] -qois["uniform"] = qois["uniform"][1:] -times["uniform"] = times["uniform"][1:] -niter["uniform"] = niter["uniform"][1:] - -# Plot QoI curves against DoF count -fig, axes = plt.subplots() -start = max(np.load(f"{model}/data/qois_uniform_{test_case}.npy")) -conv = np.load(f"{model}/data/qois_uniform_{test_case}.npy")[-1] -axes.hlines(conv, *xlim["dofs"], "k", label="Converged QoI") -for approach, metadata in approaches.items(): - axes.semilogx(dofs[approach], qois[approach], **metadata) -axes.set_xlim(xlim["dofs"]) -if test_case in ["aligned", "offset"]: - axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) -axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) -axes.set_xlabel("DoF count") -axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + r"}$)") -axes.grid(True) -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_vs_dofs_{test_case}_{tag}.pdf") - -# Plot QoI curves against CPU time -fig, axes = plt.subplots() -axes.hlines(conv, *xlim["times"], "k", label="Converged QoI") -for approach, metadata in approaches.items(): - axes.semilogx(times[approach], qois[approach], **metadata) - for n, t, q in zip(niter[approach], times[approach], qois[approach]): - axes.annotate(str(n), (1.1 * t, q), color=metadata["color"], fontsize=14) -axes.set_xlim(xlim["times"]) -if test_case in ["aligned", "offset"]: - axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) -axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) -axes.set_xlabel(r"CPU time ($\mathrm{s}$)") -axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + "}$)") -axes.grid(True) -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_vs_cputime_{test_case}_{tag}.pdf") -plt.close() - -# Plot CPU time curves against DoF count -fig, axes = plt.subplots() -for approach, metadata in approaches.items(): - axes.loglog(dofs[approach], times[approach], **metadata) - for n, t, d in zip(niter[approach], times[approach], dofs[approach]): - axes.annotate(str(n), (1.1 * d, t), color=metadata["color"], fontsize=14) -axes.set_xlabel("DoF count") -axes.set_ylabel(r"CPU time ($\mathrm{s}$)") -axes.set_xlim(xlim["dofs"]) -axes.set_ylim(xlim["times"]) -axes.grid(True, which="both") -plt.tight_layout() -plt.savefig(f"{model}/plots/cputime_vs_dofs_{test_case}_{tag}.pdf") -plt.close() - -qois["uniform"] = qois["uniform"][:-1] -dofs["uniform"] = dofs["uniform"][:-1] -times["uniform"] = times["uniform"][:-1] - -# Plot QoI error curves against DoF count -errors = {} -fig, axes = plt.subplots() -for approach, metadata in approaches.items(): - errors[approach] = np.abs((qois[approach] - conv) / conv) - x, y = dofs[approach], errors[approach] - a, b = np.polyfit(np.log(x), np.log(y), 1) - print(f"QoI error vs. 
DoFs {approach}: gradient {a:.2f}") - axes.scatter(x, y, **metadata) - axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) -axes.set_xlabel("DoF count") -axes.set_ylabel(r"QoI error ($\%$)") -axes.grid(True, which="both") -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_error_vs_dofs_{test_case}_{tag}.pdf") -plt.close() - -# Plot legend -fname = f"{model}/plots/legend.pdf" -if not os.path.exists(fname): - fig2, axes2 = plt.subplots() - lines, labels = axes.get_legend_handles_labels() - legend = axes2.legend(lines, labels, frameon=False, ncol=3) - fig2.canvas.draw() - axes2.set_axis_off() - bbox = legend.get_window_extent().transformed(fig2.dpi_scale_trans.inverted()) - plt.savefig(fname, bbox_inches=bbox) - -# Plot QoI error curves against CPU time -fig, axes = plt.subplots() -for approach, metadata in approaches.items(): - x, y = times[approach], errors[approach] - if approach == "uniform": - a, b = np.polyfit(np.log(x), np.log(y), 1) - print(f"QoI error vs. time {approach}: gradient {a:.2f}") - axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) - axes.scatter(x, y, **metadata) - for n, t, e in zip(niter[approach], x, errors[approach]): - axes.annotate(str(n), (1.1 * t, e), color=metadata["color"], fontsize=14) -axes.set_xlabel(r"CPU time ($\mathrm{s}$)") -axes.set_ylabel(r"QoI error ($\%$)") -axes.grid(True, which="both") -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_error_vs_cputime_{test_case}_{tag}.pdf") -plt.close() diff --git a/adaptation_n2n/plot_importance.py b/adaptation_n2n/plot_importance.py deleted file mode 100644 index 596d80f..0000000 --- a/adaptation_n2n/plot_importance.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Plot the sensitivities of a network trained on a -particular ``model`` to its input parameters. -""" -from nn_adapt.parse import argparse, positive_int -from nn_adapt.plotting import * - -import git -import importlib -import numpy as np - - -# Parse model -parser = argparse.ArgumentParser( - prog="plot_importance.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "model", - help="The model", - type=str, - choices=["steady_turbine"], -) -parser.add_argument( - "num_training_cases", - help="The number of training cases", - type=positive_int, -) -parser.add_argument( - "-a", - "--approaches", - nargs="+", - help="Adaptive approaches to consider", - choices=["isotropic", "anisotropic"], - default=["anisotropic"], -) -parser.add_argument( - "--adaptation_steps", - help="Steps to learn from", - type=positive_int, - default=3, -) -parser.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=None, -) -parsed_args = parser.parse_args() -model = parsed_args.model -tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha - -# Separate sensitivity information by variable -data = np.load(f"{model}/data/sensitivities_{tag}.npy") -layout = importlib.import_module(f"{model}.network").NetLayout() -p = importlib.import_module(f"{model}.plotting") -sensitivities = p.process_sensitivities(data, layout) - -# Plot increases as a stacked bar chart -colours = ("b", "C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "0.3") -deriv = ("", "_x", "_y", "_{xx}", "_{xy}", "_{yy}") -N = len(sensitivities.keys()) -bottom = np.zeros(N) -fig, axes = plt.subplots(figsize=(1.5 * N, 4)) -for i, colour in enumerate(colours): - arr = np.array([S[i] for S in sensitivities.values()]) - label = r"$f%s(\mathbf x_K)$" % deriv[i] - axes.bar(sensitivities.keys(), arr, 
bottom=bottom, color=colour, label=label) - bottom += arr -xlim = axes.get_xlim() -axes.set_xlabel("Input parameters") -axes.set_ylabel("Network sensitivity") -axes.legend(ncol=2) -axes.grid(True) -plt.tight_layout() -plt.savefig(f"{model}/plots/importance_{tag}.pdf") diff --git a/adaptation_n2n/plot_progress.py b/adaptation_n2n/plot_progress.py deleted file mode 100644 index 09482a3..0000000 --- a/adaptation_n2n/plot_progress.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Plot the training and validation loss curves for a network -trained on a particular ``model``. -""" -from nn_adapt.parse import argparse -from nn_adapt.plotting import * - -import git -import numpy as np - - -# Parse model -parser = argparse.ArgumentParser( - prog="plot_progress.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "model", - help="The model", - type=str, - choices=["steady_turbine", "burgers"], -) -parser.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=None, -) -parsed_args = parser.parse_args() -model = parsed_args.model -tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha - -# Load data -train_losses = np.load(f"{model}/data/train_losses_{tag}.npy") -validation_losses = np.load(f"{model}/data/validation_losses_{tag}.npy") -epochs = np.arange(len(train_losses)) + 1 - -# Plot losses -fig, axes = plt.subplots() -kw = dict(linewidth=0.5) -axes.loglog(epochs, train_losses, label="Training", color="deepskyblue", **kw) -axes.loglog(epochs, validation_losses, label="Validation", color="darkgreen", **kw) -axes.set_xlabel("Number of epochs") -axes.set_ylabel("Average loss") -axes.legend() -axes.grid(True) -axes.set_xlim([1, epochs[-1]]) -plt.tight_layout() -plt.savefig(f"{model}/plots/losses_{tag}.pdf") diff --git a/adaptation_n2n/plot_timings.py b/adaptation_n2n/plot_timings.py deleted file mode 100644 index d482a6e..0000000 --- a/adaptation_n2n/plot_timings.py +++ /dev/null @@ -1,72 +0,0 @@ -from nn_adapt.parse import Parser, nonnegative_int -from nn_adapt.plotting import * -import numpy as np - - -def get_times(model, approach, case, it, tag=None): - """ - Gather the timing data for some approach applied - to a given test case. 
- - :arg model: the PDE being solved - :arg approach: the mesh adaptation approach - :arg case: the test case name or number - :arg it: the run - :kwarg tag: the tag for the network - """ - ext = f"_{tag}" if approach[:2] == "ML" else "" - qoi = np.load(f"{model}/data/qois_{approach}_{case}{ext}.npy")[it] - conv = np.load(f"{model}/data/qois_uniform_{case}.npy")[-1] - print(f"{approach} QoI error: {abs((qoi-conv)/conv)*100:.3f} %") - split = { - "Forward solve": np.load(f"{model}/data/times_forward_{approach}_{case}{ext}.npy")[it], - "Adjoint solve": np.load(f"{model}/data/times_adjoint_{approach}_{case}{ext}.npy")[it], - "Error estimation": np.load(f"{model}/data/times_estimator_{approach}_{case}{ext}.npy")[it], - "Metric construction": np.load(f"{model}/data/times_metric_{approach}_{case}{ext}.npy")[it], - "Mesh adaptation": np.load(f"{model}/data/times_adapt_{approach}_{case}{ext}.npy")[it], - } - total = sum(split.values()) - for key, value in split.items(): - print(f"{approach} {key}: {value/total*100:.3f} %") - niter = np.load(f"{model}/data/niter_{approach}_{case}{ext}.npy")[it] - print(f"niter = {niter}") - return split - - -# Parse user input -parser = Parser(prog="plot_timings.py") -parser.parse_tag() -parser.add_argument( - "--iter", - help="Iteration", - type=nonnegative_int, - default=21, -) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -tag = parsed_args.tag -it = parsed_args.iter -approaches = ["GOanisotropic", "MLanisotropic"] - -# Plot bar chart -fig, axes = plt.subplots(figsize=(6, 4.5)) -colours = ["C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "darkgreen", "0.3"] -data = { - "Goal-oriented": get_times(model, "GOanisotropic", test_case, it, tag=tag), - "Data-driven": get_times(model, "MLanisotropic", test_case, it, tag=tag), -} -bottom = np.zeros(len(data.keys())) -for i, key in enumerate(data["Goal-oriented"].keys()): - arr = np.array([d[key] for d in data.values()]) - axes.bar(data.keys(), arr, bottom=bottom, label=key, color=colours[i]) - bottom += arr -axes.bar_label(axes.containers[-1]) -axes.legend(loc="upper right") -axes.set_ylabel("Runtime [seconds]") -plt.tight_layout() -plt.savefig(f"{model}/plots/timings_{test_case}_{it}_{tag}.pdf") diff --git a/adaptation_n2n/run_adapt.py b/adaptation_n2n/run_adapt.py deleted file mode 100644 index 9055cf3..0000000 --- a/adaptation_n2n/run_adapt.py +++ /dev/null @@ -1,153 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using goal-oriented -mesh adaptation in a fixed point iteration loop. - -This is the script where feature data is harvested to train -the neural network on. 
-""" -from nn_adapt.features import * -from nn_adapt.metric_one2n import * -from nn_adapt.parse import Parser -from nn_adapt.solving import * -from nn_adapt.solving_one2n import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import adapt -from firedrake.petsc import PETSc - -import importlib -import numpy as np -from time import perf_counter -import matplotlib.pyplot as plt - - -set_log_level(ERROR) - -# Parse for test case and number of refinements -parser = Parser("run_adapt.py") -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_target_complexity() -parser.add_argument("--no_outputs", help="Turn off file outputs", action="store_true") -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -base_complexity = parsed_args.base_complexity -target_complexity = parsed_args.target_complexity -optimise = parsed_args.optimise -no_outputs = parsed_args.no_outputs or optimise -if not no_outputs: - from pyroteus.utility import File - -# Setup -start_time = perf_counter() -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh -else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - -# Run adaptation loop -kwargs = { - "interpolant": "Clement", - "enrichment_method": "h", - "average": True, - "anisotropic": approach == "anisotropic", - "retall": True, - "h_min": setup.parameters.h_min, - "h_max": setup.parameters.h_max, - "a_max": 1.0e5, -} -ct = ConvergenceTracker(mesh, parsed_args) -tt_steps = setup.parameters.tt_steps -if not no_outputs: - output_dir = f"{model}/outputs/{test_case}/GO/{approach}" - fwd_file = [File(f"{output_dir}/forward{step}.pvd") for step in range(tt_steps)] - adj_file = [File(f"{output_dir}/adjoint{step}.pvd") for step in range(tt_steps)] - ee_file = File(f"{output_dir}/estimator.pvd") - metric_file = File(f"{output_dir}/metric.pvd") - mesh_file = File(f"{output_dir}/mesh.pvd") - mesh_file.write(mesh.coordinates) -print(f"Test case {test_case}") -print(" Mesh 0") -print(f" Element count = {ct.elements_old}") -data_dir = f"{model}/data" -for ct.fp_iteration in range(ct.maxiter + 1): - suffix = f"{test_case}_GO{approach}_{ct.fp_iteration}" - - # Ramp up the target complexity - kwargs["target_complexity"] = ramp_complexity( - base_complexity, target_complexity, ct.fp_iteration - ) - - # Compute goal-oriented metric - out = go_metric_one2n(mesh, setup, convergence_checker=ct, **kwargs) - qoi, fwd_sol = out["qoi"], out["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - if "adjoint" not in out: - break - estimator = out["estimator"] - print(f" Error estimator = {estimator}") - if "metric" not in out: - break - adj_sol, dwr, metric = out["adjoint"], out["dwr"], out["metric"] - - # fig, axes = plt.subplots(1,2) - # tricontourf(fwd_sol, axes=axes[0]) - # tricontourf(adj_sol, axes=axes[1]) - # plt.savefig("out.jpg") - - if not no_outputs: - for step in range(tt_steps): - fwd_file[step].write(*fwd_sol[step].split()) - adj_file[step].write(*adj_sol[step].split()) - ee_file.write(dwr) - metric_file.write(metric.function) - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial 
guess. - """ - ic = Function(V) - try: - ic.project(fwd_sol[-1]) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol[-1].split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # # Extract features - # if not optimise: - # features = extract_features(setup, fwd_sol, adj_sol) - # target = dwr.dat.data.flatten() - # assert not np.isnan(target).any() - # for key, value in features.items(): - # np.save(f"{data_dir}/feature_{key}_{suffix}", value) - # np.save(f"{data_dir}/target_{suffix}", target) - - # Adapt the mesh and check for element count convergence - with PETSc.Log.Event("Mesh adaptation"): - mesh = adapt(mesh, metric) - if not no_outputs: - mesh_file.write(mesh.coordinates) - elements = mesh.num_cells() - print(f" Mesh {ct.fp_iteration+1}") - print(f" Element count = {elements}") - if ct.check_elements(elements): - break - ct.check_maxiter() -print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") -print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_n2n/run_adapt_ml.py b/adaptation_n2n/run_adapt_ml.py deleted file mode 100644 index 9c79d29..0000000 --- a/adaptation_n2n/run_adapt_ml.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using data-driven -mesh adaptation in a fixed point iteration loop. -""" -from nn_adapt.ann import * -from nn_adapt.features import * -from nn_adapt.parse import Parser -from nn_adapt.metric import * -from nn_adapt.solving import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import * - -import importlib -from time import perf_counter - - -# Parse user input -parser = Parser("run_adapt_ml.py") -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_preproc() -parser.parse_target_complexity() -parser.parse_tag() -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -base_complexity = parsed_args.base_complexity -target_complexity = parsed_args.target_complexity -preproc = parsed_args.preproc -optimise = parsed_args.optimise -tag = parsed_args.tag -if not optimise: - from pyroteus.utility import File - -# Setup -start_time = perf_counter() -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh -else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - -# Load the model -layout = importlib.import_module(f"{model}.network").NetLayout() -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) -nn.eval() - -# Run adaptation loop -ct = ConvergenceTracker(mesh, parsed_args) -if not optimise: - output_dir = f"{model}/outputs/{test_case}/ML/{approach}/{tag}" - fwd_file = File(f"{output_dir}/forward.pvd") - adj_file = File(f"{output_dir}/adjoint.pvd") - ee_file = File(f"{output_dir}/estimator.pvd") - metric_file = File(f"{output_dir}/metric.pvd") - mesh_file = File(f"{output_dir}/mesh.pvd") - mesh_file.write(mesh.coordinates) -kwargs = {} -print(f"Test case {test_case}") -print(" Mesh 0") -print(f" Element count = {ct.elements_old}") -for ct.fp_iteration in range(ct.maxiter + 1): - - # Ramp up the target complexity - target_ramp = 
ramp_complexity(base_complexity, target_complexity, ct.fp_iteration) - - # Solve forward and adjoint and compute Hessians - out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) - qoi, fwd_sol = out["qoi"], out["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - if "adjoint" not in out: - break - adj_sol = out["adjoint"] - if not optimise: - fwd_file.write(*fwd_sol.split()) - adj_file.write(*adj_sol.split()) - P0 = FunctionSpace(mesh, "DG", 0) - P1_ten = TensorFunctionSpace(mesh, "CG", 1) - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. - """ - ic = Function(V) - try: - ic.project(fwd_sol) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol.split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Extract features - with PETSc.Log.Event("Network"): - features = collect_features(extract_features(setup, fwd_sol, adj_sol), layout) - - # Run model - with PETSc.Log.Event("Propagate"): - test_targets = np.array([]) - with torch.no_grad(): - for i in range(features.shape[0]): - test_x = torch.Tensor(features[i]).to(device) - test_prediction = nn(test_x) - test_targets = np.concatenate( - (test_targets, np.array(test_prediction.cpu())) - ) - dwr = Function(P0) - dwr.dat.data[:] = np.abs(test_targets) - - # Check for error estimator convergence - with PETSc.Log.Event("Error estimation"): - estimator = dwr.vector().gather().sum() - print(f" Error estimator = {estimator}") - if ct.check_estimator(estimator): - break - if not optimise: - ee_file.write(dwr) - - # Construct metric - with PETSc.Log.Event("Metric construction"): - if approach == "anisotropic": - hessian = combine_metrics(*get_hessians(fwd_sol), average=True) - else: - hessian = None - M = anisotropic_metric( - dwr, - hessian=hessian, - target_complexity=target_ramp, - target_space=P1_ten, - interpolant="Clement", - ) - space_normalise(M, target_ramp, "inf") - enforce_element_constraints( - M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 - ) - metric = RiemannianMetric(mesh) - metric.assign(M) - if not optimise: - metric_file.write(M) - - # Adapt the mesh and check for element count convergence - with PETSc.Log.Event("Mesh adaptation"): - mesh = adapt(mesh, metric) - if not optimise: - mesh_file.write(mesh.coordinates) - elements = mesh.num_cells() - print(f" Mesh {ct.fp_iteration+1}") - print(f" Element count = {elements}") - if ct.check_elements(elements): - break - ct.check_maxiter() -print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") -print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_n2n/run_adaptation_loop.py b/adaptation_n2n/run_adaptation_loop.py deleted file mode 100644 index 126616a..0000000 --- a/adaptation_n2n/run_adaptation_loop.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using goal-oriented -mesh adaptation in a fixed point iteration loop, for a sequence -of increasing target metric complexities, -""" -from nn_adapt.features import * -from nn_adapt.parse import Parser, positive_float -from nn_adapt.metric import * -from nn_adapt.solving import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import adapt - -import importlib -import numpy as np -from time import perf_counter - - 
-set_log_level(ERROR) - -# Parse user input -parser = Parser("run_adaptation_loop.py") -parser.parse_num_refinements(default=24) -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_target_complexity() -parser.add_argument( - "--factor", - help="Power by which to increase target metric complexity", - type=positive_float, - default=0.25, -) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -num_refinements = parsed_args.num_refinements -base_complexity = parsed_args.base_complexity -f = parsed_args.factor - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit - -# Run adaptation loop -qois, dofs, elements, estimators, niter = [], [], [], [], [] -components = ("forward", "adjoint", "estimator", "metric", "adapt") -times = {c: [] for c in components} -times["all"] = [] -print(f"Test case {test_case}") -for i in range(num_refinements + 1): - try: - target_complexity = 100.0 * 2 ** (f * i) - kwargs = { - "enrichment_method": "h", - "interpolant": "Clement", - "average": True, - "anisotropic": approach == "anisotropic", - "retall": True, - "h_min": setup.parameters.h_min, - "h_max": setup.parameters.h_max, - "a_max": 1.0e5, - } - if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh - else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - ct = ConvergenceTracker(mesh, parsed_args) - print(f" Target {target_complexity}\n Mesh 0") - print(f" Element count = {ct.elements_old}") - times["all"].append(-perf_counter()) - for c in components: - times[c].append(0.0) - for ct.fp_iteration in range(ct.maxiter + 1): - - # Ramp up the target complexity - kwargs["target_complexity"] = ramp_complexity( - base_complexity, target_complexity, ct.fp_iteration - ) - - # Compute goal-oriented metric - out = go_metric(mesh, setup, convergence_checker=ct, **kwargs) - qoi = out["qoi"] - times["forward"][-1] += out["times"]["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - if "adjoint" not in out: - break - estimator = out["estimator"] - times["adjoint"][-1] += out["times"]["adjoint"] - times["estimator"][-1] += out["times"]["estimator"] - print(f" Error estimator = {estimator}") - if "metric" not in out: - break - times["metric"][-1] += out["times"]["metric"] - fwd_sol, adj_sol = ( - out["forward"], - out["adjoint"], - ) - dwr, metric = out["dwr"], out["metric"] - dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. 
- """ - ic = Function(V) - try: - ic.project(fwd_sol) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol.split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Adapt the mesh - out["times"]["adapt"] = -perf_counter() - mesh = adapt(mesh, metric) - out["times"]["adapt"] += perf_counter() - times["adapt"][-1] += out["times"]["adapt"] - print(f" Mesh {ct.fp_iteration+1}") - cells = mesh.num_cells() - print(f" Element count = {cells}") - if ct.check_elements(cells): - break - ct.check_maxiter() - print( - f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" - ) - times["all"][-1] += perf_counter() - qois.append(qoi) - dofs.append(dof) - elements.append(cells) - estimators.append(estimator) - niter.append(ct.fp_iteration + 1) - np.save(f"{model}/data/qois_GO{approach}_{test_case}", qois) - np.save(f"{model}/data/dofs_GO{approach}_{test_case}", dofs) - np.save(f"{model}/data/elements_GO{approach}_{test_case}", elements) - np.save(f"{model}/data/estimators_GO{approach}_{test_case}", estimators) - np.save(f"{model}/data/niter_GO{approach}_{test_case}", niter) - np.save(f"{model}/data/times_all_GO{approach}_{test_case}", times["all"]) - for c in components: - np.save(f"{model}/data/times_{c}_GO{approach}_{test_case}", times[c]) - except ConvergenceError: - print("Skipping due to convergence error") - continue diff --git a/adaptation_n2n/run_adaptation_loop_ml.py b/adaptation_n2n/run_adaptation_loop_ml.py deleted file mode 100644 index 018329b..0000000 --- a/adaptation_n2n/run_adaptation_loop_ml.py +++ /dev/null @@ -1,195 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using data-driven -mesh adaptation in a fixed point iteration loop, for a sequence -of increasing target metric complexities, -""" -from nn_adapt.ann import * -from nn_adapt.features import * -from nn_adapt.parse import Parser, positive_float -from nn_adapt.metric import * -from nn_adapt.solving import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import * - -import importlib -import numpy as np -from time import perf_counter - - -set_log_level(ERROR) - -# Parse user input -parser = Parser("run_adaptation_loop_ml.py") -parser.parse_num_refinements(default=24) -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_preproc() -parser.parse_tag() -parser.parse_target_complexity() -parser.add_argument( - "--factor", - help="Power by which to increase target metric complexity", - type=positive_float, - default=0.25, -) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -num_refinements = parsed_args.num_refinements -preproc = parsed_args.preproc -tag = parsed_args.tag -base_complexity = parsed_args.base_complexity -f = parsed_args.factor - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit - -# Load the model -layout = importlib.import_module(f"{model}.network").NetLayout() -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) -nn.eval() - -# Run adaptation loop -qois, dofs, elements, estimators, niter = [], [], [], [], [] -components = ("forward", "adjoint", "estimator", "metric", "adapt") -times = {c: [] for c in 
components} -times["all"] = [] -print(f"Test case {test_case}") -for i in range(num_refinements + 1): - try: - target_complexity = 100.0 * 2 ** (f * i) - if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh - else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - ct = ConvergenceTracker(mesh, parsed_args) - kwargs = {} - print(f" Target {target_complexity}\n Mesh 0") - print(f" Element count = {ct.elements_old}") - times["all"].append(-perf_counter()) - for c in components: - times[c].append(0.0) - for ct.fp_iteration in range(ct.maxiter + 1): - - # Ramp up the target complexity - target_ramp = ramp_complexity( - base_complexity, target_complexity, ct.fp_iteration - ) - - # Solve forward and adjoint and compute Hessians - out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) - qoi = out["qoi"] - times["forward"][-1] += out["times"]["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - if "adjoint" not in out: - break - times["adjoint"][-1] += out["times"]["adjoint"] - fwd_sol, adj_sol = out["forward"], out["adjoint"] - dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. - """ - ic = Function(V) - try: - ic.project(fwd_sol) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol.split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Extract features - out["times"]["estimator"] = -perf_counter() - features = extract_features(setup, fwd_sol, adj_sol) - features = collect_features(features, layout) - - # Run model - test_targets = np.array([]) - with torch.no_grad(): - for i in range(features.shape[0]): - test_x = torch.Tensor(features[i]).to(device) - test_prediction = nn(test_x) - test_targets = np.concatenate( - (test_targets, np.array(test_prediction.cpu())) - ) - P0 = FunctionSpace(mesh, "DG", 0) - dwr = Function(P0) - dwr.dat.data[:] = np.abs(test_targets) - - # Check for error estimator convergence - estimator = dwr.vector().gather().sum() - out["times"]["estimator"] += perf_counter() - times["estimator"][-1] += out["times"]["estimator"] - print(f" Error estimator = {estimator}") - if ct.check_estimator(estimator): - break - - # Construct metric - out["times"]["metric"] = -perf_counter() - if approach == "anisotropic": - hessian = combine_metrics(*get_hessians(fwd_sol), average=True) - else: - hessian = None - P1_ten = TensorFunctionSpace(mesh, "CG", 1) - M = anisotropic_metric( - dwr, - hessian=hessian, - target_complexity=target_ramp, - target_space=P1_ten, - interpolant="Clement", - ) - space_normalise(M, target_ramp, "inf") - enforce_element_constraints( - M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 - ) - metric = RiemannianMetric(mesh) - metric.assign(M) - out["times"]["metric"] += perf_counter() - times["metric"][-1] += out["times"]["metric"] - - # Adapt the mesh and check for element count convergence - out["times"]["adapt"] = -perf_counter() - mesh = adapt(mesh, metric) - out["times"]["adapt"] += perf_counter() - times["adapt"][-1] += out["times"]["adapt"] - print(f" Mesh {ct.fp_iteration+1}") - cells = mesh.num_cells() - print(f" Element count = {cells}") - if ct.check_elements(cells): - break - ct.check_maxiter() - print( - f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" - ) - times["all"][-1] += perf_counter() - qois.append(qoi) - 
dofs.append(dof) - elements.append(cells) - estimators.append(estimator) - niter.append(ct.fp_iteration + 1) - np.save(f"{model}/data/qois_ML{approach}_{test_case}_{tag}", qois) - np.save(f"{model}/data/dofs_ML{approach}_{test_case}_{tag}", dofs) - np.save(f"{model}/data/elements_ML{approach}_{test_case}_{tag}", elements) - np.save(f"{model}/data/estimators_ML{approach}_{test_case}_{tag}", estimators) - np.save(f"{model}/data/niter_ML{approach}_{test_case}_{tag}", niter) - np.save(f"{model}/data/times_all_ML{approach}_{test_case}_{tag}", times["all"]) - for c in components: - np.save(f"{model}/data/times_{c}_ML{approach}_{test_case}_{tag}", times[c]) - except ConvergenceError: - print("Skipping due to convergence error") - continue diff --git a/adaptation_n2n/run_fixed_mesh.py b/adaptation_n2n/run_fixed_mesh.py deleted file mode 100644 index f8ad698..0000000 --- a/adaptation_n2n/run_fixed_mesh.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` on the initial mesh alone. -""" -from nn_adapt.parse import Parser -from nn_adapt.solving import * -from thetis import print_output -from firedrake.petsc import PETSc -import importlib -from time import perf_counter - - -start_time = perf_counter() - -# Parse user input -parser = Parser("run_fixed_mesh.py") -parser.parse_num_refinements(default=0) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh -else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") -if parsed_args.num_refinements > 0: - with PETSc.Log.Event("Hierarchy"): - mesh = MeshHierarchy(mesh, parsed_args.num_refinements)[-1] - -# Solve and evaluate QoI -out = get_solutions(mesh, setup, solve_adjoint=not parsed_args.optimise) -qoi = out["qoi"] -print_output(f"QoI for test case {test_case} = {qoi:.8f} {unit}") -if not parsed_args.optimise: - File(f"{model}/outputs/{test_case}/fixed/forward.pvd").write( - *out["forward"].split() - ) - File(f"{model}/outputs/{test_case}/fixed/adjoint.pvd").write( - *out["adjoint"].split() - ) -print_output(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_n2n/run_uniform_refinement.py b/adaptation_n2n/run_uniform_refinement.py deleted file mode 100644 index be3b3a8..0000000 --- a/adaptation_n2n/run_uniform_refinement.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` on a sequence of -uniformly refined meshes generated from the initial mesh. 
-""" -from nn_adapt.parse import Parser -from nn_adapt.solving import * -from thetis import print_output -import importlib -import numpy as np -from time import perf_counter - - -start_time = perf_counter() - -# Parse user input -parser = Parser("run_uniform_refinement.py") -parser.parse_num_refinements(default=3) -parser.add_argument( - "--prolong", help="Use previous solution as initial guess", action="store_true" -) -parsed_args = parser.parse_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -num_refinements = parsed_args.num_refinements - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -mesh = Mesh(f"{model}/meshes/{test_case}.msh") -mh = [mesh] + list(MeshHierarchy(mesh, num_refinements)) -tm = TransferManager() -kwargs = {} - -# Run uniform refinement -qois, dofs, elements, times, niter = [], [], [], [], [] -setup_time = perf_counter() - start_time -print_output(f"Test case {test_case}") -print_output(f"Setup time: {setup_time:.2f} seconds") -for i, mesh in enumerate(mh): - start_time = perf_counter() - print_output(f" Mesh {i}") - print_output(f" Element count = {mesh.num_cells()}") - out = get_solutions(mesh, setup, solve_adjoint=False, **kwargs) - qoi, fwd_sol = out["qoi"], out["forward"] - - def prolong(V): - """ - After the first iteration, prolong the previous - solution as the initial guess. - """ - ic = Function(V) - tm.prolong(fwd_sol, ic) - return ic - - if parsed_args.prolong: - kwargs["init"] = prolong - fs = fwd_sol.function_space() - time = perf_counter() - start_time - print_output(f" Quantity of Interest = {qoi} {unit}") - print_output(f" Runtime: {time:.2f} seconds") - qois.append(qoi) - dofs.append(sum(fs.dof_count)) - times.append(time) - elements.append(mesh.num_cells()) - niter.append(1) - np.save(f"{model}/data/qois_uniform_{test_case}", qois) - np.save(f"{model}/data/dofs_uniform_{test_case}", dofs) - np.save(f"{model}/data/elements_uniform_{test_case}", elements) - np.save(f"{model}/data/times_all_uniform_{test_case}", times) - np.save(f"{model}/data/niter_uniform_{test_case}", niter) -print_output(f"Setup time: {setup_time:.2f} seconds") diff --git a/adaptation_n2n/test_and_train.py b/adaptation_n2n/test_and_train.py deleted file mode 100644 index e3cfd9b..0000000 --- a/adaptation_n2n/test_and_train.py +++ /dev/null @@ -1,266 +0,0 @@ -""" -Train a network on ``num_training_cases`` problem -specifications of a given ``model``. 
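-
-Features and targets are read from the ``.npy`` files written during
-feature generation and split into training and validation sets; the
-network is optimised with Adam, with optional learning rate scheduling
-and early stopping.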
-""" -from nn_adapt.ann import * -from nn_adapt.parse import argparse, bounded_float, nonnegative_int, positive_float, positive_int - -import git -import importlib -import numpy as np -import os -from sklearn import model_selection -from time import perf_counter -import torch.optim.lr_scheduler as lr_scheduler - - -# Configuration -pwd = os.path.abspath(os.path.dirname(__file__)) -models = [ - name for name in os.listdir(pwd) - if os.path.isdir(name) and name not in ("__pycache__", "models") -] -parser = argparse.ArgumentParser( - prog="test_and_train.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "-m", - "--model", - help="The equation set being solved", - type=str, - choices=models, - default="steady_turbine", -) -parser.add_argument( - "-n", - "--num_training_cases", - help="The number of test cases to train on", - type=positive_int, - default=100, -) -parser.add_argument( - "-a", - "--approaches", - nargs="+", - help="Adaptive approaches to consider", - choices=["isotropic", "anisotropic"], - default=["anisotropic"], -) -parser.add_argument( - "--adaptation_steps", - help="Steps to learn from", - type=positive_int, - default=3, -) -parser.add_argument( - "--lr", - help="Initial learning rate", - type=positive_float, - default=1.0e-03, -) -parser.add_argument( - "--lr_adapt_num_steps", - help="Frequency of learning rate adaptation", - type=nonnegative_int, - default=0, -) -parser.add_argument( - "--lr_adapt_factor", - help="Learning rate reduction factor", - type=bounded_float(0, 1), - default=0.99, -) -parser.add_argument( - "--lr_adapt_threshold", - help="Learning rate threshold", - type=bounded_float(0, 1), - default=1.0e-04, -) -parser.add_argument( - "--lr_adapt_patience", - help="The number of iterations before early adapting the learning rate", - type=positive_int, - default=np.inf, -) -parser.add_argument( - "--num_epochs", - help="The number of iterations", - type=positive_int, - default=2000, -) -parser.add_argument( - "--stopping_patience", - help="The number of iterations before early stopping", - type=positive_int, - default=np.inf, -) -parser.add_argument( - "--preproc", - help="Data preprocess function", - type=str, - choices=["none", "arctan", "tanh", "logabs"], - default="arctan", -) -parser.add_argument( - "--batch_size", - help="Data points per training iteration", - type=positive_int, - default=500, -) -parser.add_argument( - "--test_batch_size", - help="Data points per validation iteration", - type=positive_int, - default=500, -) -parser.add_argument( - "--test_size", - help="Data proportion for validation", - type=bounded_float(0, 1), - default=0.3, -) -parser.add_argument( - "--seed", - help="Seed for random number generator", - type=positive_int, - default=42, -) -parser.add_argument( - "--tag", - help="Tag for labelling the model (defaults to current git sha)", - type=str, - default=git.Repo(search_parent_directories=True).head.object.hexsha, -) -parsed_args = parser.parse_args() -model = parsed_args.model -approaches = parsed_args.approaches -preproc = parsed_args.preproc -num_epochs = parsed_args.num_epochs -lr = parsed_args.lr -lr_adapt_num_steps = parsed_args.lr_adapt_num_steps -lr_adapt_factor = parsed_args.lr_adapt_factor -lr_adapt_threshold = parsed_args.lr_adapt_threshold -lr_adapt_patience = parsed_args.lr_adapt_patience -stopping_patience = parsed_args.stopping_patience -test_size = parsed_args.test_size -batch_size = parsed_args.batch_size -test_batch_size = parsed_args.test_batch_size -seed = 
parsed_args.seed -tag = parsed_args.tag - -# Load network layout -layout = importlib.import_module(f"{model}.network").NetLayout() - -# Setup model -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -optimizer = torch.optim.Adam(nn.parameters(), lr=lr) -scheduler1 = lr_scheduler.ReduceLROnPlateau( - optimizer, - factor=lr_adapt_factor, - threshold=lr_adapt_threshold, - patience=lr_adapt_patience, - verbose=True, -) -if lr_adapt_num_steps > 0: - scheduler2 = lr_scheduler.StepLR( - optimizer, - lr_adapt_num_steps, - gamma=lr_adapt_factor - ) -else: - scheduler2 = None -criterion = Loss() - -# Increase batch size if running on GPU -cuda = all(p.is_cuda for p in nn.parameters()) -print(f"Model parameters are{'' if cuda else ' not'} using GPU cores.") -if cuda: - dtype = torch.float32 - batch_size *= 4 - test_batch_size *= 4 -else: - dtype = torch.float - -# Load data -concat = lambda a, b: b if a is None else np.concatenate((a, b), axis=0) -features = None -targets = None -data_dir = f"{model}/data" -for step in range(parsed_args.adaptation_steps): - for approach in approaches: - for case in range(1, parsed_args.num_training_cases + 1): - if case == 1 and approach != approaches[0]: - continue - suffix = f"{case}_GO{approach}_{step}" - feature = { - key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") - for key in layout.inputs - } - features = concat(features, collect_features(feature)) - target = np.load(f"{data_dir}/target_{suffix}.npy") - targets = concat(targets, target) -print(f"Total number of features: {len(features.flatten())}") -print(f"Total number of targets: {len(targets)}") -features = torch.from_numpy(features).type(dtype) -targets = torch.from_numpy(targets).type(dtype) - -# Get train and validation datasets -xtrain, xval, ytrain, yval = model_selection.train_test_split( - features, targets, test_size=test_size, random_state=seed -) -train_data = torch.utils.data.TensorDataset(torch.Tensor(xtrain), torch.Tensor(ytrain)) -train_loader = torch.utils.data.DataLoader( - train_data, batch_size=batch_size, shuffle=True, num_workers=0 -) -validate_data = torch.utils.data.TensorDataset(torch.Tensor(xval), torch.Tensor(yval)) -validate_loader = torch.utils.data.DataLoader( - validate_data, batch_size=test_batch_size, shuffle=False, num_workers=0 -) - -# Train -train_losses, validation_losses, lr_adapt_steps = [], [], [] -set_seed(seed) -previous_loss = np.inf -trigger_times = 0 -for epoch in range(num_epochs): - - # Training step - start_time = perf_counter() - train = propagate(train_loader, nn, criterion, optimizer) - mid_time = perf_counter() - train_time = mid_time - start_time - - # Validation step - val = propagate(validate_loader, nn, criterion) - validation_time = perf_counter() - mid_time - - # Adapt learning rate - scheduler1.step(val) - if scheduler2 is not None: - scheduler2.step() - if epoch % lr_adapt_num_steps == 0: - lr_adapt_steps.append(epoch) - np.save(f"{model}/data/lr_adapt_steps_{tag}", lr_adapt_steps) - - # Stash progress - print( - f"Epoch {epoch:4d}/{num_epochs:d}" - f" avg loss: {train:.4e} / {val:.4e}" - f" wallclock: {train_time:.2f}s / {validation_time:.2f}s" - ) - train_losses.append(train) - validation_losses.append(val) - np.save(f"{model}/data/train_losses_{tag}", train_losses) - np.save(f"{model}/data/validation_losses_{tag}", validation_losses) - torch.save(nn.state_dict(), f"{model}/model_{tag}.pt") - - # Test for convergence - if val > previous_loss: - trigger_times += 1 - if trigger_times >= stopping_patience: - print("Early stopping") - 
break - else: - trigger_times = 0 - previous_loss = val diff --git a/adaptation_one2n/a_test.py b/adaptation_one2n/a_test.py deleted file mode 100644 index f32ef2f..0000000 --- a/adaptation_one2n/a_test.py +++ /dev/null @@ -1,101 +0,0 @@ -# from nn_adapt.features import * -# from nn_adapt.features import extract_array -# from nn_adapt.metric import * -# from nn_adapt.parse import Parser -# from nn_adapt.solving_one2n import * -# from nn_adapt.solving_n2n import * -# from nn_adapt.solving import * -# from nn_adapt.utility import ConvergenceTracker -# from firedrake.meshadapt import adapt -# from firedrake.petsc import PETSc - -# import importlib -# import numpy as np - -# tt_steps = 10 - -# # setup1 = importlib.import_module(f"burgers_n2n.config") -# # meshes = [UnitSquareMesh(20, 20) for _ in range(tt_steps)] -# # out1 = indicate_errors_n2n(meshes=meshes, config=setup1) -# # print(out1) - -# mesh = UnitSquareMesh(20, 20) -# setup2 = importlib.import_module(f"burgers_one2n.config") -# out2 = indicate_errors_one2n(mesh=mesh, config=setup2) -# print(out2) - -# # mesh = UnitSquareMesh(20, 20) -# # setup2 = importlib.import_module(f"burgers_one2n.config") -# # out2 = get_solutions_one2n(mesh=mesh, config=setup2) -# # fwd_sol = out2["forward"] - - -# lines = [[1,6,8,5], [1,3,7,6,5], [2,8,5]] -# length = 3 - -# end = 5 -# id_list = [0 for _ in range(length)] -# toend = 0 -# steps = 0 -# while not toend: -# steps += 1 -# forward = [1 for _ in range(length)] -# t = [lines[id][item] for id, item in enumerate(id_list)] -# toend = 1 -# for item in t: -# toend = 0 if item != end else 1 -# if toend == 1: -# break; - -# for id, item in enumerate(t): -# for line_id, line in enumerate(lines): -# if item in line[id_list[line_id]+1:]: -# forward[id] = 0 -# break; -# for i in range(length): -# id_list[i] += forward[i] - - -# print(steps) - - -# def dec2bin(input): -# return "{0:b}".format(input) - -# def bin2dec(input): -# length = len(input) -# output = 0 -# for id, item in enumerate(input): -# output += pow(2, length-1-id) * int(item) -# return output - -# def sp_add(input1, input2): -# min_len = min(len(input1), len(input2)) -# max_len = max(len(input1), len(input2)) -# input1 = input1[::-1] -# input2 = input2[::-1] - -# output = "" -# for i in range(max_len): -# if i < min_len: -# if input1[i] == input2[i]: -# output += "0" -# else: -# output += "1" -# else: -# try: -# output += input1[i] -# except: -# output += input2[i] - -# return output[::-1] - -# a = dec2bin(9) -# b = dec2bin(5) -# c = sp_add(a, b) -# print(a, b, c) - -a = [1,1,1,1] -for i, j in enumerate(a): - print(j) - diff --git a/adaptation_one2n/burgers_one2n/config.py b/adaptation_one2n/burgers_one2n/config.py deleted file mode 100644 index 344f728..0000000 --- a/adaptation_one2n/burgers_one2n/config.py +++ /dev/null @@ -1,160 +0,0 @@ -from models.burgers_one2n import * -from nn_adapt.ann import sample_uniform -import numpy as np - - -testing_cases = ["demo"] - - -def initialise(case, discrete=False): - """ - Given some training case (for which ``case`` - is an integer) or testing case (for which - ``case`` is a string), set up the physical - problems defining the Burgers problem. - - For training data, these values are chosen - randomly. 
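-
-    The random seed is fixed as a function of the case number, so each
-    training case is reproducible.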
-    """
-    parameters.case = case
-    parameters.discrete = discrete
-    if isinstance(case, int):
-        parameters.turbine_coords = []
-        np.random.seed(100 * case)
-
-        # Random initial speed from 0.01 m/s to 6 m/s
-        parameters.initial_speed = sample_uniform(0.01, 6.0)
-
-        # Random viscosity from 0.0001 m^2/s to 1 m^2/s
-        parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1)
-        return
-    elif "demo" in case:
-        parameters.viscosity_coefficient = 0.0001
-        parameters.initial_speed = 1.0
-    else:
-        raise ValueError(f"Test case {case} not recognised")
-
-    if "reversed" in case:
-        parameters.initial_speed *= -1
-
-
-# def l2dist(xy, xyt):
-#     r"""
-#     Usual :math:`\ell_2` distance between
-#     two points in Euclidean space.
-#     """
-#     diff = np.array(xy) - np.array(xyt)
-#     return np.sqrt(np.dot(diff, diff))
-
-
-# def initialise(case, discrete=False):
-#     """
-#     Given some training case (for which ``case``
-#     is an integer) or testing case (for which
-#     ``case`` is a string), set up the physical
-#     problems and turbine locations defining the
-#     tidal farm modelling problem.
-
-#     For training data, these values are chosen
-#     randomly.
-#     """
-#     parameters.case = case
-#     parameters.discrete = discrete
-#     if isinstance(case, int):
-#         parameters.turbine_coords = []
-#         np.random.seed(100 * case)
-
-#         # Random depth from 20m to 100m
-#         parameters.depth = sample_uniform(20.0, 100.0)
-
-#         # Random inflow speed from 0.5 m/s to 6 m/s
-#         parameters.inflow_speed = sample_uniform(0.5, 6.0)
-
-#         # Random viscosity from 0.1 m^2/s to 1 m^2/s
-#         parameters.viscosity_coefficient = sample_uniform(0.1, 1.0)
-
-#         # Randomise turbine configuration such that all
-#         # turbines are at least 50m from the domain
-#         # boundaries and each other
-#         num_turbines = np.random.randint(1, 8)
-#         tc = parameters.turbine_coords
-#         i = 0
-#         while i < num_turbines:
-#             x = 50.0 + 1100.0 * np.random.rand()
-#             y = 50.0 + 400.0 * np.random.rand()
-#             valid = True
-#             for xyt in tc:
-#                 if l2dist((x, y), xyt) < 50.0:
-#                     valid = False
-#             if valid:
-#                 tc.append((x, y))
-#                 i += 1
-#         return
-#     elif "aligned" in case:
-#         parameters.viscosity_coefficient = 0.5
-#         parameters.depth = 40.0
-#         parameters.inflow_speed = 5.0
-#         parameters.turbine_coords = [(456, 250), (744, 250)]
-#     elif "offset" in case:
-#         parameters.viscosity_coefficient = 0.5
-#         parameters.depth = 40.0
-#         parameters.inflow_speed = 5.0
-#         parameters.turbine_coords = [(456, 232), (744, 268)]
-#     elif "trench" in case:
-#         bmin, bmax = Constant(160.0), Constant(200.0)
-#         w = Constant(500.0)
-
-#         def bathy(mesh):
-#             y = SpatialCoordinate(mesh)[1] / w
-#             P0 = FunctionSpace(mesh, "DG", 0)
-#             b = Function(P0)
-#             b.interpolate(bmin + (bmax - bmin) * y * (1 - y))
-#             return b
-
-#         parameters.viscosity_coefficient = 2.0
-#         parameters.bathymetry = bathy
-#         parameters.inflow_speed = 10.0
-#         parameters.turbine_coords = [(456, 232), (744, 268)]
-#     elif "headland" in case:
-#         parameters.viscosity_coefficient = 100.0
-#         parameters.depth = 40.0
-#         parameters.inflow_speed = 5.0
-#         parameters.turbine_diameter = 80.0
-#         parameters.turbine_width = 100.0
-#         parameters.turbine_coords = [(600, 250)]
-#         parameters.correct_thrust = False
-#         parameters.solver_parameters = {
-#             "mat_type": "aij",
-#             "snes_type": "newtonls",
-#             "snes_linesearch_type": "bt",
-#             "snes_rtol": 1.0e-08,
-#             "snes_max_it": 100,
-#             "snes_monitor": None,
-#             "ksp_type": "preonly",
-#             "ksp_converged_reason": None,
-#             "pc_type": "lu",
-#             "pc_factor_mat_solver_type": "mumps",
-#         }
-#     elif "pipe" in case:
-#         u_in = 
Constant(5.0) -# parameters.inflow_speed = u_in -# w = Constant(200.0) - -# def inflow(mesh): -# y = SpatialCoordinate(mesh)[1] / w -# yy = ((y - 0.5) / 0.5) ** 2 -# u_expr = conditional(yy < 1, exp(1 - 1 / (1 - yy)), 0) -# return as_vector([u_expr, 0]) - -# parameters.viscosity_coefficient = 20.0 -# parameters.depth = 40.0 -# parameters.u_inflow = inflow -# parameters.ic = lambda mesh: as_vector([u_in, 0.0]) -# parameters.turbine_coords = [(550, 300), (620, 390)] -# parameters.qoi_unit = "kW" -# parameters.density = Constant(1030.0 * 1.0e-03) -# else: -# raise ValueError(f"Test case {test_case} not recognised") - -# if "reversed" in case: -# parameters.inflow_speed *= -1 diff --git a/adaptation_one2n/burgers_one2n/network.py b/adaptation_one2n/burgers_one2n/network.py deleted file mode 100644 index b7f9907..0000000 --- a/adaptation_one2n/burgers_one2n/network.py +++ /dev/null @@ -1,43 +0,0 @@ -from nn_adapt.layout import NetLayoutBase - - -class NetLayout(NetLayoutBase): - """ - Default configuration - ===================== - - Input layer: - ------------ - [coarse-grained DWR] - + [viscosity coefficient] - + [element size] - + [element orientation] - + [element shape] - + [boundary element?] - + [12 forward DoFs per element] - + [12 adjoint DoFs per element] - = 30 - - Hidden layer: - ------------- - - 60 neurons - - Output layer: - ------------- - - [1 error indicator value] - """ - - inputs = ( - "estimator_coarse", - "physics_viscosity", - "mesh_d", - "mesh_h1", - "mesh_h2", - "mesh_bnd", - "forward_dofs", - "adjoint_dofs", - ) - num_hidden_neurons = 60 - dofs_per_element = 12 diff --git a/adaptation_one2n/burgers_one2n/testing_cases.txt b/adaptation_one2n/burgers_one2n/testing_cases.txt deleted file mode 100644 index 1549b67..0000000 --- a/adaptation_one2n/burgers_one2n/testing_cases.txt +++ /dev/null @@ -1 +0,0 @@ -demo diff --git a/adaptation_one2n/compute_importance.py b/adaptation_one2n/compute_importance.py deleted file mode 100644 index 3e2bf4c..0000000 --- a/adaptation_one2n/compute_importance.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Compute the sensitivities of a network trained on a -particular ``model`` to its input parameters. 
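-
-For each input, the gradient of the summed network output with respect to
-that input is averaged over the data and scaled by the magnitude of a
-representative input value, giving a single sensitivity score per feature.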
-""" -from nn_adapt.ann import * -from nn_adapt.parse import argparse, positive_int -from nn_adapt.plotting import * - -import git -import importlib -import numpy as np - - -# Parse model -parser = argparse.ArgumentParser( - prog="compute_importance.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "model", - help="The model", - type=str, - choices=["steady_turbine"], -) -parser.add_argument( - "num_training_cases", - help="The number of training cases", - type=positive_int, -) -parser.add_argument( - "-a", - "--approaches", - nargs="+", - help="Adaptive approaches to consider", - choices=["isotropic", "anisotropic"], - default=["anisotropic"], -) -parser.add_argument( - "--adaptation_steps", - help="Steps to learn from", - type=positive_int, - default=3, -) -parser.add_argument( - "--preproc", - help="Data preprocess function", - type=str, - choices=["none", "arctan", "tanh", "logabs"], - default="arctan", -) -parser.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=git.Repo(search_parent_directories=True).head.object.hexsha, -) -parsed_args = parser.parse_args() -model = parsed_args.model -preproc = parsed_args.preproc -tag = parsed_args.tag - -# Load the model -layout = importlib.import_module(f"{model}.network").NetLayout() -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) -nn.eval() -loss_fn = Loss() - -# Compute (averaged) sensitivities of the network to the inputs -dJdm = torch.zeros(layout.num_inputs) -data_dir = f"{model}/data" -approaches = parsed_args.approaches -values = np.zeros((0, layout.num_inputs)) -for step in range(parsed_args.adaptation_steps): - for approach in approaches: - for test_case in range(1, parsed_args.num_training_cases + 1): - if test_case == 1 and approach != approaches[0]: - continue - suffix = f"{test_case}_GO{approach}_{step}" - - # Load some data and mark inputs as independent - data = { - key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") - for key in layout.inputs - } - features = collect_features(data, layout) - values = np.vstack((values, features)) - features = torch.from_numpy(features).type(torch.float32) - features.requires_grad_(True) - - # Run the model and sum the outputs - out = nn(features).sum(axis=0) - - # Backpropagate to get the gradient of the outputs w.r.t. 
the inputs - out.backward() - dJdm += features.grad.mean(axis=0) - -# Compute representative values for each parameter -dm = np.abs(np.mean(values, axis=0)) - -# Multiply by the variability -sensitivity = dJdm.abs().detach().numpy() * dm -np.save(f"{model}/data/sensitivities_{tag}.npy", sensitivity) diff --git a/adaptation_one2n/makefile b/adaptation_one2n/makefile deleted file mode 100644 index d26d52a..0000000 --- a/adaptation_one2n/makefile +++ /dev/null @@ -1,319 +0,0 @@ -all: setup network test - -# --- Configurable parameters - -APPROACHES = anisotropic -MODEL = burgers_one2n -NUM_TRAINING_CASES = 1 -TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) -PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 -TAG = all - -# --- Parameters that should not need modifying - -TRAINING_CASES = $(shell seq 1 ${NUM_TRAINING_CASES}) -CASES = ${TRAINING_CASES} ${TESTING_CASES} - -# --- Setup directories and meshes - -setup: dir mesh plot_config - -# Create the directory structure -# ============================== -# -# $(MODEL) -#    ├── data -#    ├── outputs -# │    └── $(TESTING_CASES) -#    └── plots -dir: - mkdir -p $(MODEL)/data - mkdir -p $(MODEL)/outputs - mkdir -p $(MODEL)/plots - for case in $(TESTING_CASES); do \ - mkdir -p $(MODEL)/outputs/$$case; \ - done - -# Generate meshes -# =============== -# -# Meshes are generated for all training and testing cases. -# * First, a gmsh geometry file is generated using the -# `meshgen.py` script. The definitions of these cases -# are based on the contents of $(MODEL)/config.py. -# For the `turbine` case, the training data is generated -# randomly. -# * Then the geometry files are used to construct meshes -# in the .msh format. -# -# Gmsh is set to use the "pack" algorithm, which means that -# the initial meshes are quasi-uniform. That is, they are as -# close to uniform as they can be, given that the turbines -# are to be explicitly meshed. -mesh: - touch timing.log - d=$$(date +%s) && \ - for case in $(CASES); do \ - python3 meshgen.py $(MODEL) $$case; \ - if [ -e $(MODEL)/meshes/$$case.geo ]; then \ - gmsh -2 -algo pack $(MODEL)/meshes/$$case.geo -o $(MODEL)/meshes/$$case.msh; \ - fi; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Meshes built in $$(($$(date +%s)-d)) seconds" >> timing.log - -# Plot configurations -# =================== -# -# Plot the configurations for a subset of the training cases -# and the testing cases that are listed in $(MODEL)/config.py. -# The domain geometry and turbine locations are shown, along -# with the physical parameters used. -plot_config: - python3 plot_config.py $(MODEL) 'train' - python3 plot_config.py $(MODEL) 'test' - -# Clean the model directory -# ========================= -# -# Delete all logs, data, outputs, plots and compiled code associated -# with the model. Note that this is a very destructive thing to do! -clean: - rm -rf timing.log - rm -rf $(MODEL)/data - rm -rf $(MODEL)/outputs - rm -rf $(MODEL)/plots - rm -rf $(MODEL)/__pycache__ - -# --- Construct the neural network - -network: features train plot_progress plot_importance - -# Generate feature data -# ===================== -# -# This involves applying mesh adaptation to all of the cases in the -# training data. In each case, feature data and "target" error indicator -# data are extracted and saved to file. 
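-#
-# The saved files are what ``test_and_train.py`` later loads: features
-# follow the pattern ``feature_<key>_<case>_GO<approach>_<step>.npy`` and
-# targets ``target_<case>_GO<approach>_<step>.npy``.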
-features: - touch timing.log - d=$$(date +%s) && \ - for case in $(TRAINING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adapt.py $(MODEL) $$case -a $$approach --no_outputs $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Features generated in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Train the network -# ================= -# -# Train a neural network based on the feature and target data that has -# been saved to file, for a specified number of training cases. The -# network is tagged (using the environment variable $(TAG)) to distinguish -# the model and its outputs. -train: - touch timing.log - d=$$(date +%s) && \ - python3 test_and_train.py -m $(MODEL) -n $(NUM_TRAINING_CASES) --tag $(TAG) && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Training completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ - echo "" >> timing.log - -# Plot loss functions -# =================== -# -# Once the network has been trained, plot the training and validation loss -# curves against iteration count. -plot_progress: - python3 plot_progress.py $(MODEL) --tag $(TAG) - -# Feature importance experiment -# ============================= -# -# Perform an experiment that tests how sensitive the trained network is to -# each of its inputs (i.e. the features). If it is particularly sensitive to -# one of the features then we deduce that the feature is in some sense -# "important" to the network. -plot_importance: - python3 compute_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) - python3 plot_importance.py $(MODEL) $(NUM_TRAINING_CASES) --tag $(TAG) - -# --- Test the neural network - -test: snapshot_go snapshot_ml uniform go ml plot_convergence - -# Apply goal-oriented adaptation to the test cases -# ================================================ -# -# Apply goal-oriented mesh adaptation to the testing cases, thereby -# generating lots of output data in Paraview format. These include -# the meshes, solution fields, error indicators and metrics. -snapshot_go: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adapt.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Goal-oriented snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Apply data-driven adaptation to the test cases -# ============================================== -# -# Apply data-driven adaptation based on the trained network to the testing -# cases, thereby generating lots of output data in Paraview format. These -# include the meshes, solution fields, error indicators and metrics. -snapshot_ml: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adapt_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Data-driven snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Convergence analysis for uniform refinement -# =========================================== -# -# Run the model on a sequence of uniformly refined meshes. 
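-#
-# The resulting QoI sequence doubles as the reference data for the
-# adaptive runs, so this should be run before `plot_convergence`.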
-uniform: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - python3 run_uniform_refinement.py $(MODEL) $$case; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Uniform refinement completed in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Convergence analysis for goal-oriented adaptation -# ================================================= -# -# Run the model with the standard goal-oriented approach for -# a range of target metric complexities. -go: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adaptation_loop.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Goal-oriented adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Convergence analysis for data-driven adaptation -# =============================================== -# -# Run the model with the data-driven approach based on the -# trained network for a range of target metric complexities. -ml: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - for approach in $(APPROACHES); do \ - python3 run_adaptation_loop_ml.py $(MODEL) $$case -a $$approach --tag $(TAG) $(PETSC_OPTIONS); \ - done; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Data-driven adaptation completed in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - -# Plot convergence curves -# ======================= -# -# Plot the data points generated during the `uniform`, `go` and -# `ml` recipes and annotate with lines of best fit, where appropriate. -plot_convergence: - for case in $(TESTING_CASES); do \ - python3 plot_convergence.py $(MODEL) $$case --tag $(TAG); \ - done - -# --- Profiling experiments - -# NOTE: The following recipes are somewhat redundant. Similar information -# can be obtained from the outputs of the `uniform`, `go` and `ml` -# recipes by running `plot_timings.py` with the appropriate input -# parameters. - -# Profiling for uniform refinement -# ================================ -# -# Run the model on a fine fixed mesh generated by refining the initial -# mesh four times and output the PETSc logging information in a format -# that can be then turned into a flamegraph. -profile_uni: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - python3 run_fixed_mesh.py $(MODEL) $$case --optimise --num_refinements 4 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Uniform refinement profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log && \ - echo "" >> timing.log - for case in $(TESTING_CASES); do \ - flamegraph.pl --title "Uniform refinement ($$case)" logview.txt > $(MODEL)/outputs/$$case/uni.svg && \ - rm logview.txt; \ - done - -# Profiling for goal-oriented adaptation -# ====================================== -# -# Run the model using the standard goal-oriented approach with a fairly -# high target metric complexity of 64,000 and output the PETSc logging -# information in a format that can be then turned into a flamegraph. 
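-#
-# NOTE: ``flamegraph.pl`` (from Brendan Gregg's FlameGraph project) is
-# assumed to be available on the PATH for these profiling recipes.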
-profile_go: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - python3 run_adapt.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) -log_view :logview.txt:ascii_flamegraph; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Goal-oriented adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - for case in $(TESTING_CASES); do \ - flamegraph.pl --title "Goal-oriented adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/go.svg && \ - rm logview.txt; \ - done - -# Profiling for data-driven adaptation -# ==================================== -# -# Run the model using the data-driven adaptation approach based on the -# trained network with a fairly high target metric complexity of 64,000 -# and output the PETSc logging information in a format that can be then -# turned into a flamegraph. -profile_ml: - touch timing.log - d=$$(date +%s) && \ - for case in $(TESTING_CASES); do \ - python3 run_adapt_ml.py $(MODEL) $$case -a anisotropic --optimise --target_complexity 64000 $(PETSC_OPTIONS) --tag all -log_view :logview.txt:ascii_flamegraph; \ - done && \ - date >> timing.log && \ - git log -n 1 --oneline >> timing.log && \ - echo "Data-driven adaptation profiling run completed in $$(($$(date +%s)-d)) seconds" >> timing.log - echo "" >> timing.log - for case in $(TESTING_CASES); do \ - flamegraph.pl --title "Data-driven adaptation ($$case)" logview.txt > $(MODEL)/outputs/$$case/ml.svg && \ - rm logview.txt; \ - done diff --git a/adaptation_one2n/meshgen.py b/adaptation_one2n/meshgen.py deleted file mode 100644 index 1067e7c..0000000 --- a/adaptation_one2n/meshgen.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Generate the mesh for configuration ``case`` -of a given ``model``. -""" -import argparse -import importlib -import sys - - -# Parse for test case -parser = argparse.ArgumentParser(prog="meshgen.py") -parser.add_argument("model", help="The model") -parser.add_argument("case", help="The configuration file name") -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -reverse = False -try: - case = int(parsed_args.case) - assert case > 0 -except ValueError: - case = parsed_args.case - reverse = "reversed" in case - -# Load setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(case) -meshgen = importlib.import_module(f"{model}.meshgen") - -# Write geometry file -code = meshgen.generate_geo(setup, reverse=reverse) -if code is None: - sys.exit(0) -with open(f"{model}/meshes/{case}.geo", "w+") as meshfile: - meshfile.write(code) diff --git a/adaptation_one2n/models/burgers_one2n.py b/adaptation_one2n/models/burgers_one2n.py deleted file mode 100644 index 6f2c7ed..0000000 --- a/adaptation_one2n/models/burgers_one2n.py +++ /dev/null @@ -1,522 +0,0 @@ -from copy import deepcopy -from firedrake import * -from firedrake.petsc import PETSc -from firedrake_adjoint import * -from firedrake.adjoint import get_solve_blocks -import nn_adapt.model -import nn_adapt.solving -from thetis import * - - -''' -A memory hungry method solving time dependent PDE. -''' -# class Parameters(nn_adapt.model.Parameters): -# """ -# Class encapsulating all parameters required for the tidal -# farm modelling test case. 
-# """ - -# discrete = False - -# qoi_name = "power output" -# qoi_unit = "MW" - -# # Adaptation parameters -# h_min = 1.0e-08 -# h_max = 500.0 - -# # time steps -# tt_steps = 10 -# timestep = 0.1 - -# # Physical parameters -# viscosity_coefficient = 0.5 -# depth = 40.0 -# drag_coefficient = Constant(0.0025) -# inflow_speed = 5.0 -# density = Constant(1030.0 * 1.0e-06) - -# # Additional setup -# viscosity_coefficient = 0.0001 -# initial_speed = 1.0 - -# # Turbine parameters -# turbine_diameter = 18.0 -# turbine_width = None -# turbine_coords = [] -# thrust_coefficient = 0.8 -# correct_thrust = True - -# # Solver parameters -# solver_parameters = { -# "mat_type": "aij", -# "snes_type": "newtonls", -# "snes_linesearch_type": "bt", -# "snes_rtol": 1.0e-08, -# "snes_max_it": 100, -# "ksp_type": "preonly", -# "pc_type": "lu", -# "pc_factor_mat_solver_type": "mumps", -# } -# adjoint_solver_parameters = solver_parameters - -# @property -# def num_turbines(self): -# """ -# Count the number of turbines based on the number -# of coordinates. -# """ -# return len(self.turbine_coords) - -# @property -# def turbine_ids(self): -# """ -# Generate the list of turbine IDs, i.e. cell tags used -# in the gmsh geometry file. -# """ -# if self.discrete: -# return list(2 + np.arange(self.num_turbines, dtype=np.int32)) -# else: -# return ["everywhere"] - -# @property -# def footprint_area(self): -# """ -# Calculate the area of the turbine footprint in the horizontal. -# """ -# d = self.turbine_diameter -# w = self.turbine_width or d -# return d * w - -# @property -# def swept_area(self): -# """ -# Calculate the area swept by the turbine in the vertical. -# """ -# return pi * (0.5 * self.turbine_diameter) ** 2 - -# @property -# def cross_sectional_area(self): -# """ -# Calculate the cross-sectional area of the turbine footprint -# in the vertical. -# """ -# return self.depth * self.turbine_diameter - -# @property -# def corrected_thrust_coefficient(self): -# """ -# Correct the thrust coefficient to account for the -# fact that we use the velocity at the turbine, rather -# than an upstream veloicity. - -# See [Kramer and Piggott 2016] for details. -# """ -# Ct = self.thrust_coefficient -# if not self.correct_thrust: -# return Ct -# At = self.swept_area -# corr = 4.0 / (1.0 + sqrt(1.0 - Ct * At / self.cross_sectional_area)) ** 2 -# return Ct * corr - -# def bathymetry(self, mesh): -# """ -# Compute the bathymetry field on the current `mesh`. -# """ -# # NOTE: We assume a constant bathymetry field -# P0_2d = get_functionspace(mesh, "DG", 0) -# return Function(P0_2d).assign(parameters.depth) - -# def u_inflow(self, mesh): -# """ -# Compute the inflow velocity based on the current `mesh`. -# """ -# # NOTE: We assume a constant inflow -# return as_vector([self.inflow_speed, 0]) - -# # def ic(self, mesh): -# # """ -# # Initial condition. -# # """ -# # return self.u_inflow(mesh) -# def ic(self, mesh): -# """ -# Initial condition -# """ -# x, y = SpatialCoordinate(mesh) -# expr = self.initial_speed * sin(pi * x) -# yside = self.initial_speed * sin(pi * y) -# yside = 0 -# return as_vector([expr, yside]) - -# def turbine_density(self, mesh): -# """ -# Compute the turbine density function on the current `mesh`. 
-# """ -# if self.discrete: -# return Constant(1.0 / self.footprint_area, domain=mesh) -# x, y = SpatialCoordinate(mesh) -# r2 = self.turbine_diameter / 2 -# r1 = r2 if self.turbine_width is None else self.turbine_width / 2 - -# def bump(x0, y0, scale=1.0): -# qx = ((x - x0) / r1) ** 2 -# qy = ((y - y0) / r2) ** 2 -# cond = And(qx < 1, qy < 1) -# b = exp(1 - 1 / (1 - qx)) * exp(1 - 1 / (1 - qy)) -# return conditional(cond, Constant(scale) * b, 0) - -# bumps = 0 -# for xy in self.turbine_coords: -# bumps += bump(*xy, scale=1 / assemble(bump(*xy) * dx)) -# return bumps - -# def farm(self, mesh): -# """ -# Construct a dictionary of :class:`TidalTurbineFarmOptions` -# objects based on the current `mesh`. -# """ -# Ct = self.corrected_thrust_coefficient -# farm_options = TidalTurbineFarmOptions() -# farm_options.turbine_density = self.turbine_density(mesh) -# farm_options.turbine_options.diameter = self.turbine_diameter -# farm_options.turbine_options.thrust_coefficient = Ct -# return {farm_id: farm_options for farm_id in self.turbine_ids} - -# def turbine_drag(self, mesh): -# """ -# Compute the contribution to the drag coefficient due to the -# tidal turbine parametrisation on the current `mesh`. -# """ -# P0_2d = get_functionspace(mesh, "DG", 0) -# p0test = TestFunction(P0_2d) -# Ct = self.corrected_thrust_coefficient -# At = self.swept_area -# Cd = 0.5 * Ct * At * self.turbine_density(mesh) -# return sum([p0test * Cd * dx(tag, domain=mesh) for tag in self.turbine_ids]) - -# def drag(self, mesh, background=False): -# r""" -# Create a :math:`\mathbb P0` field for the drag on the current -# `mesh`. - -# :kwarg background: should we consider the background drag -# alone, or should the turbine drag be included? -# """ -# P0_2d = get_functionspace(mesh, "DG", 0) -# ret = Function(P0_2d) - -# # Background drag -# Cb = self.drag_coefficient -# if background: -# return ret.assign(Cb) -# p0test = TestFunction(P0_2d) -# expr = p0test * Cb * dx(domain=mesh) - -# # Turbine drag -# assemble(expr + self.turbine_drag(mesh), tensor=ret) -# return ret - -# def viscosity(self, mesh): -# r""" -# Create a :math:`\mathbb P0` field for the viscosity coefficient -# on the current `mesh`. -# """ -# # NOTE: We assume a constant viscosity coefficient -# P0_2d = get_functionspace(mesh, "DG", 0) -# return Function(P0_2d).assign(self.viscosity_coefficient) - - -class Parameters(nn_adapt.model.Parameters): - """ - Class encapsulating all parameters required for a simple - Burgers equation test case. - """ - - qoi_name = "right boundary integral" - qoi_unit = r"m\,s^{-1}" - - # Adaptation parameters - h_min = 1.0e-10 # Minimum metric magnitude - h_max = 1.0 # Maximum metric magnitude - - # Physical parameters - viscosity_coefficient = 0.1 - initial_speed = 2.0 - - # Timestepping parameters - timestep = 0.01 - tt_steps = 10 - - solver_parameters = {} - adjoint_solver_parameters = {} - - def bathymetry(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) - - def drag(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. 
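-
-        The returned field is the (unit) drag coefficient; like bathymetry,
-        drag has no real meaning for Burgers equation and is kept constant.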
- """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) - - def viscosity(self, mesh): - """ - Compute the viscosity coefficient on the current `mesh`. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(self.viscosity_coefficient) - - def ic(self, mesh): - """ - Initial condition - """ - x, y = SpatialCoordinate(mesh) - expr = self.initial_speed * sin(pi * x) - yside = self.initial_speed * sin(pi * y) - yside = 0 - return as_vector([expr, yside]) - - -PETSc.Sys.popErrorHandler() -parameters = Parameters() - - -def get_function_space(mesh): - r""" - Construct the :math:`\mathbb P2` finite element space - used for the prognostic solution. - """ - return VectorFunctionSpace(mesh, "CG", 2) - - -class Solver(nn_adapt.solving.Solver): - """ - Solver object based on current mesh and state. - """ - - def __init__(self, mesh, ic, **kwargs): - """ - :arg mesh: the mesh to define the solver on - :arg ic: the current state / initial condition - """ - self.mesh = mesh - - # Collect parameters - dt = Constant(parameters.timestep) - nu = parameters.viscosity(mesh) - - # Define variational formulation - V = self.function_space - u = Function(V) - u_ = Function(V) - v = TestFunction(V) - self._form = ( - inner((u - u_) / dt, v) * dx - + inner(dot(u, nabla_grad(u)), v) * dx - + nu * inner(grad(u), grad(v)) * dx - ) - problem = NonlinearVariationalProblem(self._form, u) - - # Set initial condition - u_.project(parameters.ic(mesh)) - - # Create solver - self._solver = NonlinearVariationalSolver(problem) - self._solution = u - - @property - def function_space(self): - r""" - The :math:`\mathbb P2` finite element space. - """ - return get_function_space(self.mesh) - - @property - def form(self): - """ - The weak form of Burgers equation - """ - return self._form - - @property - def solution(self): - return self._solution - - def iterate(self, **kwargs): - """ - Take a single timestep of Burgers equation - """ - self._solver.solve() - - -class Solver_one2n(nn_adapt.solving.Solver): - """ - Solver object based on current mesh and state. - """ - - def __init__(self, mesh, ic, **kwargs): - """ - :arg mesh: the mesh to define the solver on - :arg ic: the current state / initial condition - """ - self.mesh = mesh - - # Collect parameters - self.tt_steps = parameters.tt_steps - dt = Constant(parameters.timestep) - - # Physical parameters - nu = parameters.viscosity(mesh) - self.nu = nu - - # Define variational formulation - V = self.function_space - self.V = V - self.u = Function(V) - self.u_ = Function(V) - self.v = TestFunction(V) - - self._form = ( - inner((self.u - self.u_) / dt, self.v) * dx - + inner(dot(self.u, nabla_grad(self.u)), self.v) * dx - + nu * inner(grad(self.u), grad(self.v)) * dx - ) - - # Define initial conditions - ic = parameters.ic(self.mesh) - self.u.project(ic) - - # Set solutions - self._solutions = [] - - @property - def function_space(self): - r""" - The :math:`\mathbb P2` finite element space. 
- """ - return get_function_space(self.mesh) - - @property - def form(self): - """ - The weak form of Burgers equation - """ - return self._form - - @property - def solution(self): - return self._solutions - - @property - def adj_solution(self): - return self._adj_solution - - def adjoint_iteration(self): - """ - Get the adjoint solutions of Burgers equation - """ - J_form = inner(self.u, self.u)*ds(2) - J = assemble(J_form) - - g = compute_gradient(J, Control(self.nu)) - - solve_blocks = get_solve_blocks() - - # 'Initial condition' for both adjoint - dJdu = assemble(derivative(J_form, self.u)) - - self._adj_solution = [] - for step in range(self.tt_steps-1): - adj_sol = solve_blocks[step].adj_sol - self._adj_solution.append(adj_sol) - self._adj_solution.append(dJdu) - - def test(self): - - fwd_sol = self._solutions - F = self.form - q_star = Function(self.V) - adj_sol = [] - - J = get_qoi(self.mesh)(fwd_sol[-1]) - dFdq = derivative(F, fwd_sol[-1], TrialFunction(self.V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, fwd_sol[-1], TestFunction(self.V)) - solve(dFdq_transpose == dJdq, q_star) - adj_sol.append(q_star) - - for i in range(2, self.tt_steps): - remainder = product(dFdq_transpose, q_star) - - J = get_qoi(self.mesh)(fwd_sol[-i]) - dFdq = derivative(F, fwd_sol[-i], TrialFunction(self.V)) - dFdq_transpose = adjoint(dFdq) - dJdq = derivative(J, fwd_sol[-i], TestFunction(self.V)) - dJdq -= remainder - solve(dFdq_transpose == dJdq, q_star) - adj_sol.append(q_star) - - return adj_sol - - def iterate(self, **kwargs): - """ - Get the forward solutions of Burgers equation - """ - tape = get_working_tape() - tape.clear_tape() - - # solve forward - for _ in range(self.tt_steps): - - # Create Functions for the solution and time-lagged solution - self.u_.project(self.u) - - solve(self._form == 0, self.u) - - # Store forward solution at exports so we can plot again later - self._solutions.append(self.u.copy(deepcopy=True)) - - stop_annotating(); - - -def get_initial_condition(function_space): - """ - Compute an initial condition based on the initial - speed parameter. - """ - u = Function(function_space) - u.interpolate(parameters.ic(function_space.mesh())) - return u - - -def get_qoi(mesh): - """ - Extract the quantity of interest function from the :class:`Parameters` - object. - - It should have one argument - the prognostic solution. - """ - - def qoi(sol): - return inner(sol, sol) * ds(1) - - return qoi - - -# # Initial mesh for all test cases -# initial_mesh = UnitSquareMesh(30, 30) diff --git a/adaptation_one2n/plot_config.py b/adaptation_one2n/plot_config.py deleted file mode 100644 index 159b4d7..0000000 --- a/adaptation_one2n/plot_config.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Plot the problem configurations for a given ``model``. -The ``mode`` is chosen from 'train' and 'test'. 
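-
-In 'train' mode the first ``num_cols * num_rows`` training cases are drawn
-on a grid; in 'test' mode all testing cases are drawn in a single row.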
-"""
-from firedrake import Mesh
-from nn_adapt.parse import argparse, positive_int
-from nn_adapt.plotting import *
-
-import importlib
-
-
-# Parse model
-parser = argparse.ArgumentParser(
-    prog="plot_config.py",
-    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-)
-parser.add_argument(
-    "model",
-    help="The model",
-    type=str,
-    choices=["steady_turbine"],
-)
-parser.add_argument(
-    "mode",
-    help="Training or testing?",
-    type=str,
-    choices=["train", "test"],
-)
-parser.add_argument(
-    "--num_cols",
-    help="Number of columns in the plot",
-    type=positive_int,
-    default=4,
-)
-parser.add_argument(
-    "--num_rows",
-    help="Number of rows in the plot",
-    type=positive_int,
-    default=4,
-)
-parsed_args = parser.parse_args()
-model = parsed_args.model
-mode = parsed_args.mode
-setup = importlib.import_module(f"{model}.config")
-cases = setup.testing_cases
-ncols = parsed_args.num_cols
-if mode == "test":
-    ncols = len(cases)
-nrows = parsed_args.num_rows
-if mode == "test":
-    nrows = 1
-N = ncols * nrows
-if mode == "train":
-    cases = range(1, N + 1)
-p = importlib.import_module(f"{model}.plotting")
-
-# Plot all configurations
-fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(3 * ncols, 1.5 * nrows))
-for i, case in enumerate(cases):
-    # Index the axes grid by (row, column); the column index is i % ncols
-    ax = axes[i] if nrows == 1 else axes[i // ncols, i % ncols]
-    setup.initialise(case, discrete=True)
-    mesh = Mesh(f"{model}/meshes/{case}.msh")
-    p.plot_config(setup, mesh, ax)
-plt.tight_layout()
-plt.savefig(f"{model}/plots/{mode}_config.pdf")
diff --git a/adaptation_one2n/plot_convergence.py b/adaptation_one2n/plot_convergence.py
deleted file mode 100644
index d8e2255..0000000
--- a/adaptation_one2n/plot_convergence.py
+++ /dev/null
@@ -1,179 +0,0 @@
-"""
-Plot QoI convergence curves under uniform refinement,
-goal-oriented mesh adaptation and data-driven mesh
-adaptation, for a given ``test_case`` and ``model``.
-""" -from nn_adapt.parse import Parser -from nn_adapt.plotting import * - -import importlib -from matplotlib.ticker import FormatStrFormatter -import numpy as np -import os -import sys - - -# Parse user input -parser = Parser("plot_convergence.py") -parser.parse_tag() -parsed_args = parser.parse_args() -model = parsed_args.model -test_case = parsed_args.test_case -tag = parsed_args.tag - -# Formatting -matplotlib.rcParams["font.size"] = 20 -approaches = { - "uniform": { - "label": "Uniform refinement", - "color": "cornflowerblue", - "marker": "x", - "linestyle": "-", - }, - "GOanisotropic": { - "label": "Goal-oriented adaptation", - "color": "orange", - "marker": "o", - "linestyle": "-", - }, - "MLanisotropic": { - "label": "Data-driven adaptation", - "color": "g", - "marker": "^", - "linestyle": "-", - }, -} -xlim = { - "dofs": [3.0e03, 3.0e06], - "times": [1.0e0, 2.0e03], -} - -# Load configuration -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -qoi_name = setup.parameters.qoi_name.capitalize() - -# Load outputs -dofs, qois, times, niter = {}, {}, {}, {} -for approach in approaches.copy(): - ext = f"_{tag}" if approach[:2] == "ML" else "" - try: - dofs[approach] = np.load(f"{model}/data/dofs_{approach}_{test_case}{ext}.npy") - qois[approach] = np.load(f"{model}/data/qois_{approach}_{test_case}{ext}.npy") - times[approach] = np.load(f"{model}/data/times_all_{approach}_{test_case}{ext}.npy") - niter[approach] = np.load(f"{model}/data/niter_{approach}_{test_case}{ext}.npy") - print(f"Iteration count for {approach}: {niter[approach]}") - except IOError: - print(f"Cannot load {approach} data for test case {test_case}") - approaches.pop(approach) - continue -if len(approaches.keys()) == 0: - print("Nothing to plot.") - sys.exit(0) - -# Drop first iteration because timings include compilation # FIXME: Why? 
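-# (Presumably because the first run triggers just-in-time compilation of
-# the finite element kernels, so its timing is an outlier.)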
-dofs["uniform"] = dofs["uniform"][1:] -qois["uniform"] = qois["uniform"][1:] -times["uniform"] = times["uniform"][1:] -niter["uniform"] = niter["uniform"][1:] - -# Plot QoI curves against DoF count -fig, axes = plt.subplots() -start = max(np.load(f"{model}/data/qois_uniform_{test_case}.npy")) -conv = np.load(f"{model}/data/qois_uniform_{test_case}.npy")[-1] -axes.hlines(conv, *xlim["dofs"], "k", label="Converged QoI") -for approach, metadata in approaches.items(): - axes.semilogx(dofs[approach], qois[approach], **metadata) -axes.set_xlim(xlim["dofs"]) -if test_case in ["aligned", "offset"]: - axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) -axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) -axes.set_xlabel("DoF count") -axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + r"}$)") -axes.grid(True) -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_vs_dofs_{test_case}_{tag}.pdf") - -# Plot QoI curves against CPU time -fig, axes = plt.subplots() -axes.hlines(conv, *xlim["times"], "k", label="Converged QoI") -for approach, metadata in approaches.items(): - axes.semilogx(times[approach], qois[approach], **metadata) - for n, t, q in zip(niter[approach], times[approach], qois[approach]): - axes.annotate(str(n), (1.1 * t, q), color=metadata["color"], fontsize=14) -axes.set_xlim(xlim["times"]) -if test_case in ["aligned", "offset"]: - axes.set_ylim([conv - 0.05 * (start - conv), start + 0.05 * (start - conv)]) -axes.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) -axes.set_xlabel(r"CPU time ($\mathrm{s}$)") -axes.set_ylabel(qoi_name + r" ($\mathrm{" + unit + "}$)") -axes.grid(True) -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_vs_cputime_{test_case}_{tag}.pdf") -plt.close() - -# Plot CPU time curves against DoF count -fig, axes = plt.subplots() -for approach, metadata in approaches.items(): - axes.loglog(dofs[approach], times[approach], **metadata) - for n, t, d in zip(niter[approach], times[approach], dofs[approach]): - axes.annotate(str(n), (1.1 * d, t), color=metadata["color"], fontsize=14) -axes.set_xlabel("DoF count") -axes.set_ylabel(r"CPU time ($\mathrm{s}$)") -axes.set_xlim(xlim["dofs"]) -axes.set_ylim(xlim["times"]) -axes.grid(True, which="both") -plt.tight_layout() -plt.savefig(f"{model}/plots/cputime_vs_dofs_{test_case}_{tag}.pdf") -plt.close() - -qois["uniform"] = qois["uniform"][:-1] -dofs["uniform"] = dofs["uniform"][:-1] -times["uniform"] = times["uniform"][:-1] - -# Plot QoI error curves against DoF count -errors = {} -fig, axes = plt.subplots() -for approach, metadata in approaches.items(): - errors[approach] = np.abs((qois[approach] - conv) / conv) - x, y = dofs[approach], errors[approach] - a, b = np.polyfit(np.log(x), np.log(y), 1) - print(f"QoI error vs. 
DoFs {approach}: gradient {a:.2f}") - axes.scatter(x, y, **metadata) - axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) -axes.set_xlabel("DoF count") -axes.set_ylabel(r"QoI error ($\%$)") -axes.grid(True, which="both") -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_error_vs_dofs_{test_case}_{tag}.pdf") -plt.close() - -# Plot legend -fname = f"{model}/plots/legend.pdf" -if not os.path.exists(fname): - fig2, axes2 = plt.subplots() - lines, labels = axes.get_legend_handles_labels() - legend = axes2.legend(lines, labels, frameon=False, ncol=3) - fig2.canvas.draw() - axes2.set_axis_off() - bbox = legend.get_window_extent().transformed(fig2.dpi_scale_trans.inverted()) - plt.savefig(fname, bbox_inches=bbox) - -# Plot QoI error curves against CPU time -fig, axes = plt.subplots() -for approach, metadata in approaches.items(): - x, y = times[approach], errors[approach] - if approach == "uniform": - a, b = np.polyfit(np.log(x), np.log(y), 1) - print(f"QoI error vs. time {approach}: gradient {a:.2f}") - axes.loglog(x, x ** a * np.exp(b), color=metadata["color"]) - axes.scatter(x, y, **metadata) - for n, t, e in zip(niter[approach], x, errors[approach]): - axes.annotate(str(n), (1.1 * t, e), color=metadata["color"], fontsize=14) -axes.set_xlabel(r"CPU time ($\mathrm{s}$)") -axes.set_ylabel(r"QoI error ($\%$)") -axes.grid(True, which="both") -plt.tight_layout() -plt.savefig(f"{model}/plots/qoi_error_vs_cputime_{test_case}_{tag}.pdf") -plt.close() diff --git a/adaptation_one2n/plot_importance.py b/adaptation_one2n/plot_importance.py deleted file mode 100644 index 596d80f..0000000 --- a/adaptation_one2n/plot_importance.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Plot the sensitivities of a network trained on a -particular ``model`` to its input parameters. 
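-
-The scores are drawn as a stacked bar chart, with one segment per
-derivative order of each input feature.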
-""" -from nn_adapt.parse import argparse, positive_int -from nn_adapt.plotting import * - -import git -import importlib -import numpy as np - - -# Parse model -parser = argparse.ArgumentParser( - prog="plot_importance.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "model", - help="The model", - type=str, - choices=["steady_turbine"], -) -parser.add_argument( - "num_training_cases", - help="The number of training cases", - type=positive_int, -) -parser.add_argument( - "-a", - "--approaches", - nargs="+", - help="Adaptive approaches to consider", - choices=["isotropic", "anisotropic"], - default=["anisotropic"], -) -parser.add_argument( - "--adaptation_steps", - help="Steps to learn from", - type=positive_int, - default=3, -) -parser.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=None, -) -parsed_args = parser.parse_args() -model = parsed_args.model -tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha - -# Separate sensitivity information by variable -data = np.load(f"{model}/data/sensitivities_{tag}.npy") -layout = importlib.import_module(f"{model}.network").NetLayout() -p = importlib.import_module(f"{model}.plotting") -sensitivities = p.process_sensitivities(data, layout) - -# Plot increases as a stacked bar chart -colours = ("b", "C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "0.3") -deriv = ("", "_x", "_y", "_{xx}", "_{xy}", "_{yy}") -N = len(sensitivities.keys()) -bottom = np.zeros(N) -fig, axes = plt.subplots(figsize=(1.5 * N, 4)) -for i, colour in enumerate(colours): - arr = np.array([S[i] for S in sensitivities.values()]) - label = r"$f%s(\mathbf x_K)$" % deriv[i] - axes.bar(sensitivities.keys(), arr, bottom=bottom, color=colour, label=label) - bottom += arr -xlim = axes.get_xlim() -axes.set_xlabel("Input parameters") -axes.set_ylabel("Network sensitivity") -axes.legend(ncol=2) -axes.grid(True) -plt.tight_layout() -plt.savefig(f"{model}/plots/importance_{tag}.pdf") diff --git a/adaptation_one2n/plot_progress.py b/adaptation_one2n/plot_progress.py deleted file mode 100644 index 09482a3..0000000 --- a/adaptation_one2n/plot_progress.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Plot the training and validation loss curves for a network -trained on a particular ``model``. 
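-
-Both curves are drawn on log-log axes against the epoch number.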
-""" -from nn_adapt.parse import argparse -from nn_adapt.plotting import * - -import git -import numpy as np - - -# Parse model -parser = argparse.ArgumentParser( - prog="plot_progress.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "model", - help="The model", - type=str, - choices=["steady_turbine", "burgers"], -) -parser.add_argument( - "--tag", - help="Model tag (defaults to current git commit sha)", - default=None, -) -parsed_args = parser.parse_args() -model = parsed_args.model -tag = parsed_args.tag or git.Repo(search_parent_directories=True).head.object.hexsha - -# Load data -train_losses = np.load(f"{model}/data/train_losses_{tag}.npy") -validation_losses = np.load(f"{model}/data/validation_losses_{tag}.npy") -epochs = np.arange(len(train_losses)) + 1 - -# Plot losses -fig, axes = plt.subplots() -kw = dict(linewidth=0.5) -axes.loglog(epochs, train_losses, label="Training", color="deepskyblue", **kw) -axes.loglog(epochs, validation_losses, label="Validation", color="darkgreen", **kw) -axes.set_xlabel("Number of epochs") -axes.set_ylabel("Average loss") -axes.legend() -axes.grid(True) -axes.set_xlim([1, epochs[-1]]) -plt.tight_layout() -plt.savefig(f"{model}/plots/losses_{tag}.pdf") diff --git a/adaptation_one2n/plot_timings.py b/adaptation_one2n/plot_timings.py deleted file mode 100644 index d482a6e..0000000 --- a/adaptation_one2n/plot_timings.py +++ /dev/null @@ -1,72 +0,0 @@ -from nn_adapt.parse import Parser, nonnegative_int -from nn_adapt.plotting import * -import numpy as np - - -def get_times(model, approach, case, it, tag=None): - """ - Gather the timing data for some approach applied - to a given test case. - - :arg model: the PDE being solved - :arg approach: the mesh adaptation approach - :arg case: the test case name or number - :arg it: the run - :kwarg tag: the tag for the network - """ - ext = f"_{tag}" if approach[:2] == "ML" else "" - qoi = np.load(f"{model}/data/qois_{approach}_{case}{ext}.npy")[it] - conv = np.load(f"{model}/data/qois_uniform_{case}.npy")[-1] - print(f"{approach} QoI error: {abs((qoi-conv)/conv)*100:.3f} %") - split = { - "Forward solve": np.load(f"{model}/data/times_forward_{approach}_{case}{ext}.npy")[it], - "Adjoint solve": np.load(f"{model}/data/times_adjoint_{approach}_{case}{ext}.npy")[it], - "Error estimation": np.load(f"{model}/data/times_estimator_{approach}_{case}{ext}.npy")[it], - "Metric construction": np.load(f"{model}/data/times_metric_{approach}_{case}{ext}.npy")[it], - "Mesh adaptation": np.load(f"{model}/data/times_adapt_{approach}_{case}{ext}.npy")[it], - } - total = sum(split.values()) - for key, value in split.items(): - print(f"{approach} {key}: {value/total*100:.3f} %") - niter = np.load(f"{model}/data/niter_{approach}_{case}{ext}.npy")[it] - print(f"niter = {niter}") - return split - - -# Parse user input -parser = Parser(prog="plot_timings.py") -parser.parse_tag() -parser.add_argument( - "--iter", - help="Iteration", - type=nonnegative_int, - default=21, -) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -tag = parsed_args.tag -it = parsed_args.iter -approaches = ["GOanisotropic", "MLanisotropic"] - -# Plot bar chart -fig, axes = plt.subplots(figsize=(6, 4.5)) -colours = ["C0", "deepskyblue", "mediumturquoise", "mediumseagreen", "darkgreen", "0.3"] -data = { - "Goal-oriented": get_times(model, "GOanisotropic", test_case, it, 
tag=tag), - "Data-driven": get_times(model, "MLanisotropic", test_case, it, tag=tag), -} -bottom = np.zeros(len(data.keys())) -for i, key in enumerate(data["Goal-oriented"].keys()): - arr = np.array([d[key] for d in data.values()]) - axes.bar(data.keys(), arr, bottom=bottom, label=key, color=colours[i]) - bottom += arr -axes.bar_label(axes.containers[-1]) -axes.legend(loc="upper right") -axes.set_ylabel("Runtime [seconds]") -plt.tight_layout() -plt.savefig(f"{model}/plots/timings_{test_case}_{it}_{tag}.pdf") diff --git a/adaptation_one2n/run_adapt.py b/adaptation_one2n/run_adapt.py deleted file mode 100644 index 0b0ea21..0000000 --- a/adaptation_one2n/run_adapt.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using goal-oriented -mesh adaptation in a fixed point iteration loop. - -This is the script where feature data is harvested to train -the neural network on. -""" -from nn_adapt.features import * -from nn_adapt.metric_one2n import * -from nn_adapt.parse import Parser -from nn_adapt.solving_one2n import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import adapt -from firedrake.petsc import PETSc - -import importlib -import numpy as np -from time import perf_counter -import matplotlib.pyplot as plt - - -set_log_level(ERROR) - -# Parse for test case and number of refinements -parser = Parser("run_adapt.py") -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_target_complexity() -parser.add_argument("--no_outputs", help="Turn off file outputs", action="store_true") -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -base_complexity = parsed_args.base_complexity -target_complexity = parsed_args.target_complexity -optimise = parsed_args.optimise -no_outputs = parsed_args.no_outputs or optimise -if not no_outputs: - from pyroteus.utility import File - -# Setup -start_time = perf_counter() -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh -else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - -# Run adaptation loop -kwargs = { - "interpolant": "Clement", - "enrichment_method": "h", - "average": True, - "anisotropic": approach == "anisotropic", - "retall": True, - "h_min": setup.parameters.h_min, - "h_max": setup.parameters.h_max, - "a_max": 1.0e5, -} -ct = ConvergenceTracker(mesh, parsed_args) -tt_steps = setup.parameters.tt_steps -if not no_outputs: - output_dir = f"{model}/outputs/{test_case}/GO/{approach}" - fwd_file = [File(f"{output_dir}/forward{step}.pvd") for step in range(tt_steps)] - adj_file = [File(f"{output_dir}/adjoint{step}.pvd") for step in range(tt_steps)] - ee_file = File(f"{output_dir}/estimator.pvd") - metric_file = File(f"{output_dir}/metric.pvd") - mesh_file = File(f"{output_dir}/mesh.pvd") - mesh_file.write(mesh.coordinates) -print(f"Test case {test_case}") -print(" Mesh 0") -print(f" Element count = {ct.elements_old}") -data_dir = f"{model}/data" -for ct.fp_iteration in range(ct.maxiter + 1): - suffix = f"{test_case}_GO{approach}_{ct.fp_iteration}" - - # Ramp up the target complexity - kwargs["target_complexity"] = ramp_complexity( - base_complexity, target_complexity, ct.fp_iteration - ) - - # Compute goal-oriented metric - out = go_metric_one2n(mesh, setup, 
convergence_checker=ct, **kwargs) - qoi, fwd_sol = out["qoi"], out["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - if "adjoint" not in out: - break - estimator = out["estimator"] - print(f" Error estimator = {estimator}") - if "metric" not in out: - break - adj_sol, dwr, metric = out["adjoint"], out["dwr"], out["metric"] - if not no_outputs: - for step in range(tt_steps): - fwd_file[step].write(*fwd_sol[step].split()) - adj_file[step].write(*adj_sol[step].split()) - ee_file.write(dwr) - metric_file.write(metric.function) - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. - """ - ic = Function(V) - try: - ic.project(fwd_sol[-1]) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol[-1].split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Extract features - if not optimise: - fwd_sol_integrate = time_integrate(fwd_sol) - adj_sol_integrate = time_integrate(adj_sol) - features = extract_features(setup, fwd_sol_integrate, adj_sol_integrate) - target = dwr.dat.data.flatten() - assert not np.isnan(target).any() - for key, value in features.items(): - np.save(f"{data_dir}/feature_{key}_{suffix}", value) - np.save(f"{data_dir}/target_{suffix}", target) - - # Adapt the mesh and check for element count convergence - with PETSc.Log.Event("Mesh adaptation"): - mesh = adapt(mesh, metric) - if not no_outputs: - mesh_file.write(mesh.coordinates) - elements = mesh.num_cells() - print(f" Mesh {ct.fp_iteration+1}") - print(f" Element count = {elements}") - if ct.check_elements(elements): - break - ct.check_maxiter() -print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") -print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_one2n/run_adapt_ml.py b/adaptation_one2n/run_adapt_ml.py deleted file mode 100644 index 9c79d29..0000000 --- a/adaptation_one2n/run_adapt_ml.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using data-driven -mesh adaptation in a fixed point iteration loop. 
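Both the goal-oriented driver above and the data-driven driver below call ramp_complexity(base, target, iteration) so that early fixed-point iterations work with a reduced target metric complexity and therefore cheap meshes. The real implementation is imported from the metric utilities; the sketch below is only a plausible linear ramp with the same call signature (the num_iterations cutoff is an assumption):

    def ramp_complexity(base, target, iteration, num_iterations=3):
        # Hypothetical linear ramp from `base` up to `target`, reaching
        # `target` after `num_iterations` fixed point iterations
        if iteration >= num_iterations:
            return target
        return base + (target - base) * iteration / num_iterations

    print([ramp_complexity(200.0, 4000.0, i) for i in range(5)])
    # approximately [200.0, 1466.67, 2733.33, 4000.0, 4000.0]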
-""" -from nn_adapt.ann import * -from nn_adapt.features import * -from nn_adapt.parse import Parser -from nn_adapt.metric import * -from nn_adapt.solving import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import * - -import importlib -from time import perf_counter - - -# Parse user input -parser = Parser("run_adapt_ml.py") -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_preproc() -parser.parse_target_complexity() -parser.parse_tag() -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -base_complexity = parsed_args.base_complexity -target_complexity = parsed_args.target_complexity -preproc = parsed_args.preproc -optimise = parsed_args.optimise -tag = parsed_args.tag -if not optimise: - from pyroteus.utility import File - -# Setup -start_time = perf_counter() -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh -else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - -# Load the model -layout = importlib.import_module(f"{model}.network").NetLayout() -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) -nn.eval() - -# Run adaptation loop -ct = ConvergenceTracker(mesh, parsed_args) -if not optimise: - output_dir = f"{model}/outputs/{test_case}/ML/{approach}/{tag}" - fwd_file = File(f"{output_dir}/forward.pvd") - adj_file = File(f"{output_dir}/adjoint.pvd") - ee_file = File(f"{output_dir}/estimator.pvd") - metric_file = File(f"{output_dir}/metric.pvd") - mesh_file = File(f"{output_dir}/mesh.pvd") - mesh_file.write(mesh.coordinates) -kwargs = {} -print(f"Test case {test_case}") -print(" Mesh 0") -print(f" Element count = {ct.elements_old}") -for ct.fp_iteration in range(ct.maxiter + 1): - - # Ramp up the target complexity - target_ramp = ramp_complexity(base_complexity, target_complexity, ct.fp_iteration) - - # Solve forward and adjoint and compute Hessians - out = get_solutions(mesh, setup, convergence_checker=ct, **kwargs) - qoi, fwd_sol = out["qoi"], out["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - dof = sum(np.array([fwd_sol.function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - if "adjoint" not in out: - break - adj_sol = out["adjoint"] - if not optimise: - fwd_file.write(*fwd_sol.split()) - adj_file.write(*adj_sol.split()) - P0 = FunctionSpace(mesh, "DG", 0) - P1_ten = TensorFunctionSpace(mesh, "CG", 1) - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. 
- """ - ic = Function(V) - try: - ic.project(fwd_sol) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol.split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Extract features - with PETSc.Log.Event("Network"): - features = collect_features(extract_features(setup, fwd_sol, adj_sol), layout) - - # Run model - with PETSc.Log.Event("Propagate"): - test_targets = np.array([]) - with torch.no_grad(): - for i in range(features.shape[0]): - test_x = torch.Tensor(features[i]).to(device) - test_prediction = nn(test_x) - test_targets = np.concatenate( - (test_targets, np.array(test_prediction.cpu())) - ) - dwr = Function(P0) - dwr.dat.data[:] = np.abs(test_targets) - - # Check for error estimator convergence - with PETSc.Log.Event("Error estimation"): - estimator = dwr.vector().gather().sum() - print(f" Error estimator = {estimator}") - if ct.check_estimator(estimator): - break - if not optimise: - ee_file.write(dwr) - - # Construct metric - with PETSc.Log.Event("Metric construction"): - if approach == "anisotropic": - hessian = combine_metrics(*get_hessians(fwd_sol), average=True) - else: - hessian = None - M = anisotropic_metric( - dwr, - hessian=hessian, - target_complexity=target_ramp, - target_space=P1_ten, - interpolant="Clement", - ) - space_normalise(M, target_ramp, "inf") - enforce_element_constraints( - M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 - ) - metric = RiemannianMetric(mesh) - metric.assign(M) - if not optimise: - metric_file.write(M) - - # Adapt the mesh and check for element count convergence - with PETSc.Log.Event("Mesh adaptation"): - mesh = adapt(mesh, metric) - if not optimise: - mesh_file.write(mesh.coordinates) - elements = mesh.num_cells() - print(f" Mesh {ct.fp_iteration+1}") - print(f" Element count = {elements}") - if ct.check_elements(elements): - break - ct.check_maxiter() -print(f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}") -print(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_one2n/run_adaptation_loop.py b/adaptation_one2n/run_adaptation_loop.py deleted file mode 100644 index 1c9f0e5..0000000 --- a/adaptation_one2n/run_adaptation_loop.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using goal-oriented -mesh adaptation in a fixed point iteration loop, for a sequence -of increasing target metric complexities, -""" -from nn_adapt.features import * -from nn_adapt.parse import Parser, positive_float -from nn_adapt.metric_one2n import * -from nn_adapt.solving_one2n import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import adapt - -import importlib -import numpy as np -from time import perf_counter - - -set_log_level(ERROR) - -# Parse user input -parser = Parser("run_adaptation_loop.py") -parser.parse_num_refinements(default=24) -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_target_complexity() -parser.add_argument( - "--factor", - help="Power by which to increase target metric complexity", - type=positive_float, - default=0.25, -) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -num_refinements = parsed_args.num_refinements -base_complexity = parsed_args.base_complexity -f = 
parsed_args.factor - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit - -# Run adaptation loop -qois, dofs, elements, estimators, niter = [], [], [], [], [] -components = ("forward", "adjoint", "estimator", "metric", "adapt") -times = {c: [] for c in components} -times["all"] = [] -print(f"Test case {test_case}") -for i in range(num_refinements + 1): - print(f"\t{i} / {num_refinements}") - try: - target_complexity = 100.0 * 2 ** (f * i) - kwargs = { - "enrichment_method": "h", - "interpolant": "Clement", - "average": True, - "anisotropic": approach == "anisotropic", - "retall": True, - "h_min": setup.parameters.h_min, - "h_max": setup.parameters.h_max, - "a_max": 1.0e5, - } - if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh - else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - ct = ConvergenceTracker(mesh, parsed_args) - print(f" Target {target_complexity}\n Mesh 0") - print(f" Element count = {ct.elements_old}") - times["all"].append(-perf_counter()) - for c in components: - times[c].append(0.0) - for ct.fp_iteration in range(ct.maxiter + 1): - - # Ramp up the target complexity - kwargs["target_complexity"] = ramp_complexity( - base_complexity, target_complexity, ct.fp_iteration - ) - - # Compute goal-oriented metric - out = go_metric_one2n(mesh, setup, convergence_checker=ct, **kwargs) - qoi = out["qoi"] - times["forward"][-1] += out["times"]["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - if "adjoint" not in out: - break - estimator = out["estimator"] - times["adjoint"][-1] += out["times"]["adjoint"] - times["estimator"][-1] += out["times"]["estimator"] - print(f" Error estimator = {estimator}") - if "metric" not in out: - break - times["metric"][-1] += out["times"]["metric"] - fwd_sol, adj_sol = ( - out["forward"], - out["adjoint"], - ) - dwr, metric = out["dwr"], out["metric"] - dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. 
- """ - ic = Function(V) - try: - ic.project(fwd_sol[-1]) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol[-1].split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Adapt the mesh - out["times"]["adapt"] = -perf_counter() - mesh = adapt(mesh, metric) - out["times"]["adapt"] += perf_counter() - times["adapt"][-1] += out["times"]["adapt"] - print(f" Mesh {ct.fp_iteration+1}") - cells = mesh.num_cells() - print(f" Element count = {cells}") - if ct.check_elements(cells): - break - ct.check_maxiter() - print( - f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" - ) - times["all"][-1] += perf_counter() - qois.append(qoi) - dofs.append(dof) - elements.append(cells) - estimators.append(estimator) - niter.append(ct.fp_iteration + 1) - np.save(f"{model}/data/qois_GO{approach}_{test_case}", qois) - np.save(f"{model}/data/dofs_GO{approach}_{test_case}", dofs) - np.save(f"{model}/data/elements_GO{approach}_{test_case}", elements) - np.save(f"{model}/data/estimators_GO{approach}_{test_case}", estimators) - np.save(f"{model}/data/niter_GO{approach}_{test_case}", niter) - np.save(f"{model}/data/times_all_GO{approach}_{test_case}", times["all"]) - for c in components: - np.save(f"{model}/data/times_{c}_GO{approach}_{test_case}", times[c]) - except ConvergenceError: - print("Skipping due to convergence error") - continue diff --git a/adaptation_one2n/run_adaptation_loop_ml.py b/adaptation_one2n/run_adaptation_loop_ml.py deleted file mode 100644 index fb8a1a6..0000000 --- a/adaptation_one2n/run_adaptation_loop_ml.py +++ /dev/null @@ -1,197 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` using data-driven -mesh adaptation in a fixed point iteration loop, for a sequence -of increasing target metric complexities, -""" -from nn_adapt.ann import * -from nn_adapt.features import * -from nn_adapt.parse import Parser, positive_float -from nn_adapt.metric_one2n import * -from nn_adapt.solving_one2n import * -from nn_adapt.utility import ConvergenceTracker -from firedrake.meshadapt import * - -import importlib -import numpy as np -from time import perf_counter - - -set_log_level(ERROR) - -# Parse user input -parser = Parser("run_adaptation_loop_ml.py") -parser.parse_num_refinements(default=24) -parser.parse_approach() -parser.parse_convergence_criteria() -parser.parse_preproc() -parser.parse_tag() -parser.parse_target_complexity() -parser.add_argument( - "--factor", - help="Power by which to increase target metric complexity", - type=positive_float, - default=0.25, -) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -approach = parsed_args.approach -num_refinements = parsed_args.num_refinements -preproc = parsed_args.preproc -tag = parsed_args.tag -base_complexity = parsed_args.base_complexity -f = parsed_args.factor - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit - -# Load the model -layout = importlib.import_module(f"{model}.network").NetLayout() -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -nn.load_state_dict(torch.load(f"{model}/model_{tag}.pt")) -nn.eval() - -# Run adaptation loop -qois, dofs, elements, estimators, niter = [], [], [], [], [] -components = ("forward", "adjoint", "estimator", "metric", "adapt") -times = 
{c: [] for c in components} -times["all"] = [] -print(f"Test case {test_case}") -for i in range(num_refinements + 1): - try: - target_complexity = 100.0 * 2 ** (f * i) - if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh - else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") - ct = ConvergenceTracker(mesh, parsed_args) - kwargs = {} - print(f" Target {target_complexity}\n Mesh 0") - print(f" Element count = {ct.elements_old}") - times["all"].append(-perf_counter()) - for c in components: - times[c].append(0.0) - for ct.fp_iteration in range(ct.maxiter + 1): - - # Ramp up the target complexity - target_ramp = ramp_complexity( - base_complexity, target_complexity, ct.fp_iteration - ) - - # Solve forward and adjoint and compute Hessians - out = get_solutions_one2n(mesh, setup, convergence_checker=ct, **kwargs) - qoi = out["qoi"] - times["forward"][-1] += out["times"]["forward"] - print(f" Quantity of Interest = {qoi} {unit}") - if "adjoint" not in out: - break - times["adjoint"][-1] += out["times"]["adjoint"] - fwd_sol, adj_sol = out["forward"], out["adjoint"] - dof = sum(np.array([fwd_sol[0].function_space().dof_count]).flatten()) - print(f" DoF count = {dof}") - - def proj(V): - """ - After the first iteration, project the previous - solution as the initial guess. - """ - ic = Function(V) - try: - ic.project(fwd_sol) - except NotImplementedError: - for c_init, c in zip(ic.split(), fwd_sol.split()): - c_init.project(c) - return ic - - # Use previous solution for initial guess - if parsed_args.transfer: - kwargs["init"] = proj - - # Extract features - out["times"]["estimator"] = -perf_counter() - fwd_sol_integrate = time_integrate(fwd_sol) - adj_sol_integrate = time_integrate(adj_sol) - features = extract_features(setup, fwd_sol_integrate, adj_sol_integrate) - features = collect_features_sample(features, layout) - - # Run model - test_targets = np.array([]) - with torch.no_grad(): - for i in range(features.shape[0]): - test_x = torch.Tensor(features[i]).to(device) - test_prediction = nn(test_x) - test_targets = np.concatenate( - (test_targets, np.array(test_prediction.cpu())) - ) - P0 = FunctionSpace(mesh, "DG", 0) - dwr = Function(P0) - dwr.dat.data[:] = np.abs(test_targets) - - # Check for error estimator convergence - estimator = dwr.vector().gather().sum() - out["times"]["estimator"] += perf_counter() - times["estimator"][-1] += out["times"]["estimator"] - print(f" Error estimator = {estimator}") - if ct.check_estimator(estimator): - break - - # Construct metric - out["times"]["metric"] = -perf_counter() - if approach == "anisotropic": - hessian = combine_metrics(*get_hessians(fwd_sol_integrate), average=True) - else: - hessian = None - P1_ten = TensorFunctionSpace(mesh, "CG", 1) - M = anisotropic_metric( - dwr, - hessian=hessian, - target_complexity=target_ramp, - target_space=P1_ten, - interpolant="Clement", - ) - space_normalise(M, target_ramp, "inf") - enforce_element_constraints( - M, setup.parameters.h_min, setup.parameters.h_max, 1.0e05 - ) - metric = RiemannianMetric(mesh) - metric.assign(M) - out["times"]["metric"] += perf_counter() - times["metric"][-1] += out["times"]["metric"] - - # Adapt the mesh and check for element count convergence - out["times"]["adapt"] = -perf_counter() - mesh = adapt(mesh, metric) - out["times"]["adapt"] += perf_counter() - times["adapt"][-1] += out["times"]["adapt"] - print(f" Mesh {ct.fp_iteration+1}") - cells = mesh.num_cells() - print(f" Element count = {cells}") - if ct.check_elements(cells): - break - ct.check_maxiter() - print( - 
f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" - ) - times["all"][-1] += perf_counter() - qois.append(qoi) - dofs.append(dof) - elements.append(cells) - estimators.append(estimator) - niter.append(ct.fp_iteration + 1) - np.save(f"{model}/data/qois_ML{approach}_{test_case}_{tag}", qois) - np.save(f"{model}/data/dofs_ML{approach}_{test_case}_{tag}", dofs) - np.save(f"{model}/data/elements_ML{approach}_{test_case}_{tag}", elements) - np.save(f"{model}/data/estimators_ML{approach}_{test_case}_{tag}", estimators) - np.save(f"{model}/data/niter_ML{approach}_{test_case}_{tag}", niter) - np.save(f"{model}/data/times_all_ML{approach}_{test_case}_{tag}", times["all"]) - for c in components: - np.save(f"{model}/data/times_{c}_ML{approach}_{test_case}_{tag}", times[c]) - except ConvergenceError: - print("Skipping due to convergence error") - continue diff --git a/adaptation_one2n/run_fixed_mesh.py b/adaptation_one2n/run_fixed_mesh.py deleted file mode 100644 index f8ad698..0000000 --- a/adaptation_one2n/run_fixed_mesh.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` on the initial mesh alone. -""" -from nn_adapt.parse import Parser -from nn_adapt.solving import * -from thetis import print_output -from firedrake.petsc import PETSc -import importlib -from time import perf_counter - - -start_time = perf_counter() - -# Parse user input -parser = Parser("run_fixed_mesh.py") -parser.parse_num_refinements(default=0) -parsed_args, unknown_args = parser.parse_known_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh -else: - mesh = Mesh(f"{model}/meshes/{test_case}.msh") -if parsed_args.num_refinements > 0: - with PETSc.Log.Event("Hierarchy"): - mesh = MeshHierarchy(mesh, parsed_args.num_refinements)[-1] - -# Solve and evaluate QoI -out = get_solutions(mesh, setup, solve_adjoint=not parsed_args.optimise) -qoi = out["qoi"] -print_output(f"QoI for test case {test_case} = {qoi:.8f} {unit}") -if not parsed_args.optimise: - File(f"{model}/outputs/{test_case}/fixed/forward.pvd").write( - *out["forward"].split() - ) - File(f"{model}/outputs/{test_case}/fixed/adjoint.pvd").write( - *out["adjoint"].split() - ) -print_output(f" Total time taken: {perf_counter() - start_time:.2f} seconds") diff --git a/adaptation_one2n/run_uniform_refinement.py b/adaptation_one2n/run_uniform_refinement.py deleted file mode 100644 index be3b3a8..0000000 --- a/adaptation_one2n/run_uniform_refinement.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Run a given ``test_case`` of a ``model`` on a sequence of -uniformly refined meshes generated from the initial mesh. 
-""" -from nn_adapt.parse import Parser -from nn_adapt.solving import * -from thetis import print_output -import importlib -import numpy as np -from time import perf_counter - - -start_time = perf_counter() - -# Parse user input -parser = Parser("run_uniform_refinement.py") -parser.parse_num_refinements(default=3) -parser.add_argument( - "--prolong", help="Use previous solution as initial guess", action="store_true" -) -parsed_args = parser.parse_args() -model = parsed_args.model -try: - test_case = int(parsed_args.test_case) - assert test_case > 0 -except ValueError: - test_case = parsed_args.test_case -num_refinements = parsed_args.num_refinements - -# Setup -setup = importlib.import_module(f"{model}.config") -setup.initialise(test_case) -unit = setup.parameters.qoi_unit -mesh = Mesh(f"{model}/meshes/{test_case}.msh") -mh = [mesh] + list(MeshHierarchy(mesh, num_refinements)) -tm = TransferManager() -kwargs = {} - -# Run uniform refinement -qois, dofs, elements, times, niter = [], [], [], [], [] -setup_time = perf_counter() - start_time -print_output(f"Test case {test_case}") -print_output(f"Setup time: {setup_time:.2f} seconds") -for i, mesh in enumerate(mh): - start_time = perf_counter() - print_output(f" Mesh {i}") - print_output(f" Element count = {mesh.num_cells()}") - out = get_solutions(mesh, setup, solve_adjoint=False, **kwargs) - qoi, fwd_sol = out["qoi"], out["forward"] - - def prolong(V): - """ - After the first iteration, prolong the previous - solution as the initial guess. - """ - ic = Function(V) - tm.prolong(fwd_sol, ic) - return ic - - if parsed_args.prolong: - kwargs["init"] = prolong - fs = fwd_sol.function_space() - time = perf_counter() - start_time - print_output(f" Quantity of Interest = {qoi} {unit}") - print_output(f" Runtime: {time:.2f} seconds") - qois.append(qoi) - dofs.append(sum(fs.dof_count)) - times.append(time) - elements.append(mesh.num_cells()) - niter.append(1) - np.save(f"{model}/data/qois_uniform_{test_case}", qois) - np.save(f"{model}/data/dofs_uniform_{test_case}", dofs) - np.save(f"{model}/data/elements_uniform_{test_case}", elements) - np.save(f"{model}/data/times_all_uniform_{test_case}", times) - np.save(f"{model}/data/niter_uniform_{test_case}", niter) -print_output(f"Setup time: {setup_time:.2f} seconds") diff --git a/adaptation_one2n/test_and_train.py b/adaptation_one2n/test_and_train.py deleted file mode 100644 index e3cfd9b..0000000 --- a/adaptation_one2n/test_and_train.py +++ /dev/null @@ -1,266 +0,0 @@ -""" -Train a network on ``num_training_cases`` problem -specifications of a given ``model``. 
-""" -from nn_adapt.ann import * -from nn_adapt.parse import argparse, bounded_float, nonnegative_int, positive_float, positive_int - -import git -import importlib -import numpy as np -import os -from sklearn import model_selection -from time import perf_counter -import torch.optim.lr_scheduler as lr_scheduler - - -# Configuration -pwd = os.path.abspath(os.path.dirname(__file__)) -models = [ - name for name in os.listdir(pwd) - if os.path.isdir(name) and name not in ("__pycache__", "models") -] -parser = argparse.ArgumentParser( - prog="test_and_train.py", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) -parser.add_argument( - "-m", - "--model", - help="The equation set being solved", - type=str, - choices=models, - default="steady_turbine", -) -parser.add_argument( - "-n", - "--num_training_cases", - help="The number of test cases to train on", - type=positive_int, - default=100, -) -parser.add_argument( - "-a", - "--approaches", - nargs="+", - help="Adaptive approaches to consider", - choices=["isotropic", "anisotropic"], - default=["anisotropic"], -) -parser.add_argument( - "--adaptation_steps", - help="Steps to learn from", - type=positive_int, - default=3, -) -parser.add_argument( - "--lr", - help="Initial learning rate", - type=positive_float, - default=1.0e-03, -) -parser.add_argument( - "--lr_adapt_num_steps", - help="Frequency of learning rate adaptation", - type=nonnegative_int, - default=0, -) -parser.add_argument( - "--lr_adapt_factor", - help="Learning rate reduction factor", - type=bounded_float(0, 1), - default=0.99, -) -parser.add_argument( - "--lr_adapt_threshold", - help="Learning rate threshold", - type=bounded_float(0, 1), - default=1.0e-04, -) -parser.add_argument( - "--lr_adapt_patience", - help="The number of iterations before early adapting the learning rate", - type=positive_int, - default=np.inf, -) -parser.add_argument( - "--num_epochs", - help="The number of iterations", - type=positive_int, - default=2000, -) -parser.add_argument( - "--stopping_patience", - help="The number of iterations before early stopping", - type=positive_int, - default=np.inf, -) -parser.add_argument( - "--preproc", - help="Data preprocess function", - type=str, - choices=["none", "arctan", "tanh", "logabs"], - default="arctan", -) -parser.add_argument( - "--batch_size", - help="Data points per training iteration", - type=positive_int, - default=500, -) -parser.add_argument( - "--test_batch_size", - help="Data points per validation iteration", - type=positive_int, - default=500, -) -parser.add_argument( - "--test_size", - help="Data proportion for validation", - type=bounded_float(0, 1), - default=0.3, -) -parser.add_argument( - "--seed", - help="Seed for random number generator", - type=positive_int, - default=42, -) -parser.add_argument( - "--tag", - help="Tag for labelling the model (defaults to current git sha)", - type=str, - default=git.Repo(search_parent_directories=True).head.object.hexsha, -) -parsed_args = parser.parse_args() -model = parsed_args.model -approaches = parsed_args.approaches -preproc = parsed_args.preproc -num_epochs = parsed_args.num_epochs -lr = parsed_args.lr -lr_adapt_num_steps = parsed_args.lr_adapt_num_steps -lr_adapt_factor = parsed_args.lr_adapt_factor -lr_adapt_threshold = parsed_args.lr_adapt_threshold -lr_adapt_patience = parsed_args.lr_adapt_patience -stopping_patience = parsed_args.stopping_patience -test_size = parsed_args.test_size -batch_size = parsed_args.batch_size -test_batch_size = parsed_args.test_batch_size -seed = 
parsed_args.seed -tag = parsed_args.tag - -# Load network layout -layout = importlib.import_module(f"{model}.network").NetLayout() - -# Setup model -nn = SingleLayerFCNN(layout, preproc=preproc).to(device) -optimizer = torch.optim.Adam(nn.parameters(), lr=lr) -scheduler1 = lr_scheduler.ReduceLROnPlateau( - optimizer, - factor=lr_adapt_factor, - threshold=lr_adapt_threshold, - patience=lr_adapt_patience, - verbose=True, -) -if lr_adapt_num_steps > 0: - scheduler2 = lr_scheduler.StepLR( - optimizer, - lr_adapt_num_steps, - gamma=lr_adapt_factor - ) -else: - scheduler2 = None -criterion = Loss() - -# Increase batch size if running on GPU -cuda = all(p.is_cuda for p in nn.parameters()) -print(f"Model parameters are{'' if cuda else ' not'} using GPU cores.") -if cuda: - dtype = torch.float32 - batch_size *= 4 - test_batch_size *= 4 -else: - dtype = torch.float - -# Load data -concat = lambda a, b: b if a is None else np.concatenate((a, b), axis=0) -features = None -targets = None -data_dir = f"{model}/data" -for step in range(parsed_args.adaptation_steps): - for approach in approaches: - for case in range(1, parsed_args.num_training_cases + 1): - if case == 1 and approach != approaches[0]: - continue - suffix = f"{case}_GO{approach}_{step}" - feature = { - key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") - for key in layout.inputs - } - features = concat(features, collect_features(feature)) - target = np.load(f"{data_dir}/target_{suffix}.npy") - targets = concat(targets, target) -print(f"Total number of features: {len(features.flatten())}") -print(f"Total number of targets: {len(targets)}") -features = torch.from_numpy(features).type(dtype) -targets = torch.from_numpy(targets).type(dtype) - -# Get train and validation datasets -xtrain, xval, ytrain, yval = model_selection.train_test_split( - features, targets, test_size=test_size, random_state=seed -) -train_data = torch.utils.data.TensorDataset(torch.Tensor(xtrain), torch.Tensor(ytrain)) -train_loader = torch.utils.data.DataLoader( - train_data, batch_size=batch_size, shuffle=True, num_workers=0 -) -validate_data = torch.utils.data.TensorDataset(torch.Tensor(xval), torch.Tensor(yval)) -validate_loader = torch.utils.data.DataLoader( - validate_data, batch_size=test_batch_size, shuffle=False, num_workers=0 -) - -# Train -train_losses, validation_losses, lr_adapt_steps = [], [], [] -set_seed(seed) -previous_loss = np.inf -trigger_times = 0 -for epoch in range(num_epochs): - - # Training step - start_time = perf_counter() - train = propagate(train_loader, nn, criterion, optimizer) - mid_time = perf_counter() - train_time = mid_time - start_time - - # Validation step - val = propagate(validate_loader, nn, criterion) - validation_time = perf_counter() - mid_time - - # Adapt learning rate - scheduler1.step(val) - if scheduler2 is not None: - scheduler2.step() - if epoch % lr_adapt_num_steps == 0: - lr_adapt_steps.append(epoch) - np.save(f"{model}/data/lr_adapt_steps_{tag}", lr_adapt_steps) - - # Stash progress - print( - f"Epoch {epoch:4d}/{num_epochs:d}" - f" avg loss: {train:.4e} / {val:.4e}" - f" wallclock: {train_time:.2f}s / {validation_time:.2f}s" - ) - train_losses.append(train) - validation_losses.append(val) - np.save(f"{model}/data/train_losses_{tag}", train_losses) - np.save(f"{model}/data/validation_losses_{tag}", validation_losses) - torch.save(nn.state_dict(), f"{model}/model_{tag}.pt") - - # Test for convergence - if val > previous_loss: - trigger_times += 1 - if trigger_times >= stopping_patience: - print("Early stopping") - 
break
-    else:
-        trigger_times = 0
-    previous_loss = val
diff --git a/examples/.DS_Store b/examples/.DS_Store
index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0140f22951c2c1047468c65c7da5fa4c533fa75c 100644
Binary files a/examples/.DS_Store and b/examples/.DS_Store differ
diff --git a/examples/burgers/network.py b/examples/burgers/network.py
deleted file mode 100644
index b7f9907..0000000
--- a/examples/burgers/network.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from nn_adapt.layout import NetLayoutBase
-
-
-class NetLayout(NetLayoutBase):
-    """
-    Default configuration
-    =====================
-
-    Input layer:
-    ------------
-    [coarse-grained DWR]
-    + [viscosity coefficient]
-    + [element size]
-    + [element orientation]
-    + [element shape]
-    + [boundary element?]
-    + [12 forward DoFs per element]
-    + [12 adjoint DoFs per element]
-    = 30
-
-    Hidden layer:
-    -------------
-
-    60 neurons
-
-    Output layer:
-    -------------
-
-    [1 error indicator value]
-    """
-
-    inputs = (
-        "estimator_coarse",
-        "physics_viscosity",
-        "mesh_d",
-        "mesh_h1",
-        "mesh_h2",
-        "mesh_bnd",
-        "forward_dofs",
-        "adjoint_dofs",
-    )
-    num_hidden_neurons = 60
-    dofs_per_element = 12
diff --git a/examples/burgers/testing_cases.txt b/examples/burgers/testing_cases.txt
deleted file mode 100644
index 1549b67..0000000
--- a/examples/burgers/testing_cases.txt
+++ /dev/null
@@ -1 +0,0 @@
-demo
diff --git a/examples/makefile b/examples/makefile
index 2ae9714..000cac7 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -3,7 +3,7 @@ all: setup network test
 # --- Configurable parameters
 APPROACHES = anisotropic
-MODEL = steady_turbine
+MODEL = pyroteus_turbine
 NUM_TRAINING_CASES = 1
 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt)
 PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1
@@ -89,8 +89,6 @@ clean:
 # --- Construct the neural network
 network: features train plot_progress plot_importance
-train_one: train plot_progress plot_importance
-test_one: snapshot_ml ml
 # Generate feature data
 # =====================
@@ -169,14 +167,6 @@ snapshot_go:
	echo "Goal-oriented snapshots generated in $$(($$(date +%s)-d)) seconds" >> timing.log
	echo "" >> timing.log
-tessst:
-	for case in $(TESTING_CASES); do \
-		for approach in $(APPROACHES); do \
-			python3 a_tessst.py $(MODEL) $$case -a $$approach $(PETSC_OPTIONS); \
-		done; \
-	done && \
-
-
 # Apply data-driven adaptation to the test cases
 # ==============================================
 #
diff --git a/examples/models/burgers.py b/examples/models/burgers.py
deleted file mode 100644
index 9debd1b..0000000
--- a/examples/models/burgers.py
+++ /dev/null
@@ -1,176 +0,0 @@
-from firedrake import *
-from firedrake.petsc import PETSc
-import nn_adapt.model
-import nn_adapt.solving
-
-from firedrake_adjoint import *
-from firedrake.adjoint import get_solve_blocks
-
-
-class Parameters(nn_adapt.model.Parameters):
-    """
-    Class encapsulating all parameters required for a simple
-    Burgers equation test case.
- """ - - qoi_name = "right boundary integral" - qoi_unit = r"m\,s^{-1}" - - # Adaptation parameters - h_min = 1.0e-10 # Minimum metric magnitude - h_max = 1.0 # Maximum metric magnitude - - # Physical parameters - viscosity_coefficient = 0.0001 - initial_speed = 1.0 - - # Turbine parameters - turbine_diameter = 18.0 - turbine_width = None - turbine_coords = [] - thrust_coefficient = 0.8 - correct_thrust = True - - # Timestepping parameters - timestep = 0.05 - - solver_parameters = {} - adjoint_solver_parameters = {} - - def bathymetry(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) - - def drag(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - - Note that there isn't really a concept of bathymetry - for Burgers equation. It is kept constant and should - be ignored by the network. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(1.0) - - def viscosity(self, mesh): - """ - Compute the viscosity coefficient on the current `mesh`. - """ - P0_2d = FunctionSpace(mesh, "DG", 0) - return Function(P0_2d).assign(self.viscosity_coefficient) - - def ic(self, mesh): - """ - Initial condition - """ - x, y = SpatialCoordinate(mesh) - expr = self.initial_speed * sin(pi * x) - expy = self.initial_speed * cos(pi * y) + self.initial_speed * exp(x * y) - return as_vector([expr, 0]) - - -PETSc.Sys.popErrorHandler() -parameters = Parameters() - - -def get_function_space(mesh): - r""" - Construct the :math:`\mathbb P2` finite element space - used for the prognostic solution. - """ - return VectorFunctionSpace(mesh, "CG", 2) - - -class Solver(nn_adapt.solving.Solver): - """ - Solver object based on current mesh and state. - """ - - def __init__(self, mesh, ic, **kwargs): - """ - :arg mesh: the mesh to define the solver on - :arg ic: the current state / initial condition - """ - self.mesh = mesh - - # Collect parameters - dt = Constant(parameters.timestep) - nu = parameters.viscosity(mesh) - - # Define variational formulation - V = self.function_space - u = Function(V) - u_ = Function(V) - v = TestFunction(V) - self._form = ( - inner((u - u_) / dt, v) * dx - + inner(dot(u, nabla_grad(u)), v) * dx - + nu * inner(grad(u), grad(v)) * dx - ) - problem = NonlinearVariationalProblem(self._form, u) - - # Set initial condition - u_.project(parameters.ic(mesh)) - - # Create solver - self._solver = NonlinearVariationalSolver(problem) - self._solution = u - - @property - def function_space(self): - r""" - The :math:`\mathbb P2` finite element space. - """ - return get_function_space(self.mesh) - - @property - def form(self): - """ - The weak form of Burgers equation - """ - return self._form - - @property - def solution(self): - return self._solution - - def iterate(self, **kwargs): - """ - Take a single timestep of Burgers equation - """ - self._solver.solve() - - -def get_initial_condition(function_space): - """ - Compute an initial condition based on the initial - speed parameter. - """ - u = Function(function_space) - u.interpolate(parameters.ic(function_space.mesh())) - return u - - -def get_qoi(mesh): - """ - Extract the quantity of interest function from the :class:`Parameters` - object. - - It should have one argument - the prognostic solution. 
- """ - - def qoi(sol): - return inner(sol, sol) * ds(2) - - return qoi - - -# # Initial mesh for all test cases -# initial_mesh = UnitSquareMesh(30, 30) diff --git a/examples/models/pyroteus_burgers.py b/examples/models/pyroteus_burgers.py index 2a8862d..0b4fd14 100644 --- a/examples/models/pyroteus_burgers.py +++ b/examples/models/pyroteus_burgers.py @@ -1,5 +1,6 @@ from firedrake import * -from pyroteus_adjoint import * +from pyroteus import * +import pyroteus.go_mesh_seq from firedrake.petsc import PETSc import nn_adapt.model @@ -110,7 +111,6 @@ def solver(index, ic): step = 0 while t < t_end - 1.0e-05: step += 1 - print(step) solve(F == 0, u, ad_block_tag="u") u_.assign(u) t += dt @@ -143,88 +143,40 @@ def time_integrated_qoi(t): PETSc.Sys.popErrorHandler() parameters = Parameters() -class pyroteus_burgers(): +def GoalOrientedMeshSeq(mesh, **kwargs): + fields = ["u"] - def __init__(self, meshes, ic, **kwargs): - - self.meshes = meshes - self.kwargs = kwargs - try: - self.nu = [parameters.viscosity(mesh) for mesh in meshes] - self.num_subintervals = len(meshes) - except: - self.nu = parameters.viscosity(meshes) - self.num_subintervals = 1 + try: + num_subintervals = len(mesh) + except: + num_subintervals = 1 + + # setup time steps and export steps + dt = 0.1 + steps_subintervals = 10 + end_time = num_subintervals * steps_subintervals * dt + timesteps_per_export = 1 - def setups(self): - - fields = ["u"] - - dt = 0.1 - steps_subintervals = 3 - end_time = self.num_subintervals * steps_subintervals * dt - - timesteps_per_export = 1 - - time_partition = TimePartition( - end_time, - self.num_subintervals, - dt, - fields, - timesteps_per_export=timesteps_per_export, + # setup pyroteus time_partition + time_partition = TimePartition( + end_time, + num_subintervals, + dt, + fields, + timesteps_per_export=timesteps_per_export, ) - - self._mesh_seq = GoalOrientedMeshSeq( - time_partition, - self.meshes, - get_function_spaces=get_function_spaces, - get_initial_condition=get_initial_condition, - get_form=get_form, - get_solver=get_solver, - get_qoi=get_qoi, - qoi_type="end_time", - ) - - - def iterate(self): - self.setups() - self._solutions, self._indicators = self._mesh_seq.indicate_errors( - enrichment_kwargs={"enrichment_method": "h"} - ) - - - def integrate(self, item): - result = [0 for _ in range(self._mesh_seq.num_subintervals)] - steps = self._mesh_seq.time_partition.timesteps_per_subinterval - - for id, list in enumerate(item): - for element in list: - result[id] += element - result[id] = product((result[id], 1/steps[id])) - - return result - - @property - def fwd_sol(self): - return self.integrate(self._solutions["u"]["forward"]) - - @property - def adj_sol(self): - return self.integrate(self._solutions["u"]["adjoint"]) - @property - def qoi(self): - return self._mesh_seq.J + mesh_seq = pyroteus.go_mesh_seq.GoalOrientedMeshSeq( + time_partition, + mesh, + get_function_spaces=get_function_spaces, + get_initial_condition=get_initial_condition, + get_form=get_form, + get_solver=get_solver, + get_qoi=get_qoi, + qoi_type="end_time", + ) + return mesh_seq - @property - def indicators(self): - return self.integrate(self._indicators) - - -mesh = [UnitSquareMesh(15, 15), UnitSquareMesh(12, 17)] -ic = 0 -demo = Solver_n4one(mesh, ic) - -demo.iterate() - +initial_mesh = UnitSquareMesh(30, 30) \ No newline at end of file diff --git a/examples/models/pyroteus_turbine.py b/examples/models/pyroteus_turbine.py index 28fa169..bc26513 100644 --- a/examples/models/pyroteus_turbine.py +++ 
b/examples/models/pyroteus_turbine.py @@ -1,6 +1,6 @@ from thetis import * from pyroteus import * -from pyroteus_adjoint import * +import pyroteus.go_mesh_seq import nn_adapt.model class Parameters(nn_adapt.model.Parameters): @@ -210,176 +210,138 @@ def viscosity(self, mesh): PETSc.Sys.popErrorHandler() parameters = Parameters() kwargs = {} - - -class Solver_one4n(): - - def __init__(self, mesh, **kwargs): - - fields = ["q"] - self.time_partition = TimeInterval(parameters.end_time, - parameters.time_steps, - fields, timesteps_per_export=1) - self.mesh = mesh - self.kwargs = kwargs - - - def setup(self): - def get_solver(mesh_seq): - - def solver(index, ic): - V = mesh_seq.function_spaces["q"][index] - q = ic["q"] - mesh_seq.form(index, {"q": (q, q)}) - u_init, eta_init = q.split() - mesh_seq._thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) - mesh_seq._thetis_solver.iterate(**self.kwargs) - - return {"q": mesh_seq._thetis_solver.fields.solution_2d} - - return solver - - - def get_form(mesh_seq): - def form(index, ic): - P = mesh_seq.time_partition - - bathymetry = parameters.bathymetry(mesh_seq[index]) - Cd = parameters.drag_coefficient - sp = self.kwargs.pop("solver_parameters", None) - - # Create solver object - mesh_seq._thetis_solver = solver2d.FlowSolver2d(mesh_seq[index], bathymetry) - options = mesh_seq._thetis_solver.options - options.element_family = "dg-cg" - options.timestep = P.timestep - options.simulation_export_time = P.timestep * P.timesteps_per_export[index] - options.simulation_end_time = P.end_time - options.swe_timestepper_type = "SteadyState" - options.swe_timestepper_options.solver_parameters = ( - sp or parameters.solver_parameters - ) - options.use_grad_div_viscosity_term = False - options.horizontal_viscosity = parameters.viscosity(mesh_seq[index]) - options.quadratic_drag_coefficient = Cd - options.use_lax_friedrichs_velocity = True - options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) - options.use_grad_depth_viscosity_term = False - options.no_exports = True - options.update(self.kwargs) - - # Apply boundary conditions - mesh_seq._thetis_solver.create_function_spaces() - P1v_2d = mesh_seq._thetis_solver.function_spaces.P1v_2d - u_inflow = interpolate(parameters.u_inflow(mesh_seq[index]), P1v_2d) - mesh_seq._thetis_solver.bnd_functions["shallow_water"] = { - 1: {"uv": u_inflow}, # inflow - 2: {"elev": Constant(0.0)}, # outflow - 3: {"un": Constant(0.0)}, # free-slip - 4: {"uv": Constant(as_vector([0.0, 0.0]))}, # no-slip - 5: {"elev": Constant(0.0), "un": Constant(0.0)} # weakly reflective - } - - # # Create tidal farm - options.tidal_turbine_farms = parameters.farm(mesh_seq[index]) - mesh_seq._thetis_solver.create_timestepper() - - # # Apply initial guess - # u_init, eta_init = ic["q"].split() - # thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) - - return mesh_seq._thetis_solver.timestepper.F - - return form - - - def get_function_space(mesh): - """ - Construct the (mixed) finite element space used for the - prognostic solution. - """ - P1v_2d = get_functionspace(mesh, "DG", 1, vector=True) - P2_2d = get_functionspace(mesh, "CG", 2) - return {"q": P1v_2d * P2_2d} - - - def get_initial_condition(mesh_seq): - """ - Compute an initial condition based on the inflow velocity - and zero free surface elevation. 
- """ - V = mesh_seq.function_spaces["q"][0] - q = Function(V) - u, eta = q.split() - u.interpolate(parameters.ic(mesh_seq)) - return {"q": q} - - - def get_qoi(mesh_seq, solutions, index): - """ - Extract the quantity of interest function from the :class:`Parameters` - object. - - It should have one argument - the prognostic solution. - """ - dt = Constant(mesh_seq.time_partition[index].timestep) - rho = parameters.density - Ct = parameters.corrected_thrust_coefficient - At = parameters.swept_area - Cd = 0.5 * Ct * At * parameters.turbine_density(mesh_seq[index]) - tags = parameters.turbine_ids - sol = solutions["q"] - - def qoi(): - u, eta = split(sol) - return sum([rho * Cd * pow(dot(u, u), 1.5) * dx(tag) for tag in tags]) - - return qoi - - - self._mesh_seq = GoalOrientedMeshSeq( - self.time_partition, - self.mesh, - get_function_spaces=get_function_space, - get_initial_condition=get_initial_condition, - get_form=get_form, - get_solver=get_solver, - get_qoi=get_qoi, - qoi_type="end_time") - - - def iterate(self): - self.setup() - self._solutions, self._indicators = self._mesh_seq.indicate_errors( - enrichment_kwargs={"enrichment_method": "h"}) +def GoalOrientedMeshSeq(mesh, **kwargs): - def integrate(self, item): - result = [0 for _ in range(self._mesh_seq.num_subintervals)] - steps = self._mesh_seq.time_partition.timesteps_per_subinterval - - for id, list in enumerate(item): - for element in list: - result[id] += element - result[id] = product((result[id], 1/steps[id])) + fields = ["q"] + time_partition = TimeInterval(parameters.end_time, + parameters.time_steps, + fields, timesteps_per_export=1) + + + def get_solver(mesh_seq): + + def solver(index, ic): + V = mesh_seq.function_spaces["q"][index] + q = ic["q"] + mesh_seq.form(index, {"q": (q, q)}) + u_init, eta_init = q.split() + mesh_seq._thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) + mesh_seq._thetis_solver.iterate(**kwargs) - return result - - @property - def fwd_sol(self): - return self.integrate(self._solutions["u"]["forward"]) + return {"q": mesh_seq._thetis_solver.fields.solution_2d} + + return solver + + + def get_form(mesh_seq): + def form(index, ic): + P = mesh_seq.time_partition + + bathymetry = parameters.bathymetry(mesh_seq[index]) + Cd = parameters.drag_coefficient + sp = kwargs.pop("solver_parameters", None) + + # Create solver object + mesh_seq._thetis_solver = solver2d.FlowSolver2d(mesh_seq[index], bathymetry) + options = mesh_seq._thetis_solver.options + options.element_family = "dg-cg" + options.timestep = P.timestep + options.simulation_export_time = P.timestep * P.timesteps_per_export[index] + options.simulation_end_time = P.end_time + options.swe_timestepper_type = "SteadyState" + options.swe_timestepper_options.solver_parameters = ( + sp or parameters.solver_parameters + ) + options.use_grad_div_viscosity_term = False + options.horizontal_viscosity = parameters.viscosity(mesh_seq[index]) + options.quadratic_drag_coefficient = Cd + options.use_lax_friedrichs_velocity = True + options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) + options.use_grad_depth_viscosity_term = False + options.no_exports = True + options.update(kwargs) + + # Apply boundary conditions + mesh_seq._thetis_solver.create_function_spaces() + P1v_2d = mesh_seq._thetis_solver.function_spaces.P1v_2d + u_inflow = interpolate(parameters.u_inflow(mesh_seq[index]), P1v_2d) + mesh_seq._thetis_solver.bnd_functions["shallow_water"] = { + 1: {"uv": u_inflow}, # inflow + 2: {"elev": Constant(0.0)}, # outflow + 3: {"un": 
Constant(0.0)}, # free-slip + 4: {"uv": Constant(as_vector([0.0, 0.0]))}, # no-slip + 5: {"elev": Constant(0.0), "un": Constant(0.0)} # weakly reflective + } + + # # Create tidal farm + options.tidal_turbine_farms = parameters.farm(mesh_seq[index]) + mesh_seq._thetis_solver.create_timestepper() + + # # Apply initial guess + # u_init, eta_init = ic["q"].split() + # thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) + + return mesh_seq._thetis_solver.timestepper.F + + return form + + + def get_function_space(mesh): + """ + Construct the (mixed) finite element space used for the + prognostic solution. + """ + P1v_2d = get_functionspace(mesh, "DG", 1, vector=True) + P2_2d = get_functionspace(mesh, "CG", 2) + return {"q": P1v_2d * P2_2d} + + + def get_initial_condition(mesh_seq): + """ + Compute an initial condition based on the inflow velocity + and zero free surface elevation. + """ + V = mesh_seq.function_spaces["q"][0] + q = Function(V) + u, eta = q.split() + u.interpolate(parameters.ic(mesh_seq)) + return {"q": q} + + + def get_qoi(mesh_seq, solutions, index): + """ + Extract the quantity of interest function from the :class:`Parameters` + object. + + It should have one argument - the prognostic solution. + """ + dt = Constant(mesh_seq.time_partition[index].timestep) + rho = parameters.density + Ct = parameters.corrected_thrust_coefficient + At = parameters.swept_area + Cd = 0.5 * Ct * At * parameters.turbine_density(mesh_seq[index]) + tags = parameters.turbine_ids + sol = solutions["q"] + + def qoi(): + u, eta = split(sol) + return sum([rho * Cd * pow(dot(u, u), 1.5) * dx(tag) for tag in tags]) + + return qoi - @property - def adj_sol(self): - return self.integrate(self._solutions["u"]["adjoint"]) - @property - def qoi(self): - return self._mesh_seq.J + mesh_seq = pyroteus.go_mesh_seq.GoalOrientedMeshSeq( + time_partition, + mesh, + get_function_spaces=get_function_space, + get_initial_condition=get_initial_condition, + get_form=get_form, + get_solver=get_solver, + get_qoi=get_qoi, + qoi_type="end_time") - @property - def indicators(self): - return self.integrate(self._indicators) + return mesh_seq diff --git a/examples/models/steady_turbine.py b/examples/models/steady_turbine.py deleted file mode 100644 index e44ad17..0000000 --- a/examples/models/steady_turbine.py +++ /dev/null @@ -1,341 +0,0 @@ -from thetis import * -import nn_adapt.model -import nn_adapt.solving -import numpy as np - - -class Parameters(nn_adapt.model.Parameters): - """ - Class encapsulating all parameters required for the tidal - farm modelling test case. - """ - - discrete = False - - qoi_name = "power output" - qoi_unit = "MW" - - # Adaptation parameters - h_min = 1.0e-08 - h_max = 500.0 - - tt_steps = 10 - - # Physical parameters - viscosity_coefficient = 0.5 - depth = 40.0 - drag_coefficient = Constant(0.0025) - inflow_speed = 5.0 - density = Constant(1030.0 * 1.0e-06) - - # Turbine parameters - turbine_diameter = 18.0 - turbine_width = None - turbine_coords = [] - thrust_coefficient = 0.8 - correct_thrust = True - - # Solver parameters - solver_parameters = { - "mat_type": "aij", - "snes_type": "newtonls", - "snes_linesearch_type": "bt", - "snes_rtol": 1.0e-08, - "snes_max_it": 100, - "ksp_type": "preonly", - "pc_type": "lu", - "pc_factor_mat_solver_type": "mumps", - } - adjoint_solver_parameters = solver_parameters - - @property - def num_turbines(self): - """ - Count the number of turbines based on the number - of coordinates. 
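The corrected_thrust_coefficient property that follows implements the thrust correction of [Kramer and Piggott 2016] cited in its docstring: with swept area A_T = \pi (d/2)^2 and vertical cross-section H d (depth times turbine diameter),

    C_T^{corr} = C_T \cdot \frac{4}{\left(1 + \sqrt{1 - C_T A_T / (H d)}\right)^2},

which compensates for using the velocity at the turbine rather than the upstream velocity.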
- """ - return len(self.turbine_coords) - - @property - def turbine_ids(self): - """ - Generate the list of turbine IDs, i.e. cell tags used - in the gmsh geometry file. - """ - if self.discrete: - return list(2 + np.arange(self.num_turbines, dtype=np.int32)) - else: - return ["everywhere"] - - @property - def footprint_area(self): - """ - Calculate the area of the turbine footprint in the horizontal. - """ - d = self.turbine_diameter - w = self.turbine_width or d - return d * w - - @property - def swept_area(self): - """ - Calculate the area swept by the turbine in the vertical. - """ - return pi * (0.5 * self.turbine_diameter) ** 2 - - @property - def cross_sectional_area(self): - """ - Calculate the cross-sectional area of the turbine footprint - in the vertical. - """ - return self.depth * self.turbine_diameter - - @property - def corrected_thrust_coefficient(self): - """ - Correct the thrust coefficient to account for the - fact that we use the velocity at the turbine, rather - than an upstream veloicity. - - See [Kramer and Piggott 2016] for details. - """ - Ct = self.thrust_coefficient - if not self.correct_thrust: - return Ct - At = self.swept_area - corr = 4.0 / (1.0 + sqrt(1.0 - Ct * At / self.cross_sectional_area)) ** 2 - return Ct * corr - - def bathymetry(self, mesh): - """ - Compute the bathymetry field on the current `mesh`. - """ - # NOTE: We assume a constant bathymetry field - P0_2d = get_functionspace(mesh, "DG", 0) - return Function(P0_2d).assign(parameters.depth) - - def u_inflow(self, mesh): - """ - Compute the inflow velocity based on the current `mesh`. - """ - # NOTE: We assume a constant inflow - return as_vector([self.inflow_speed, 0]) - - def ic(self, mesh): - """ - Initial condition. - """ - return self.u_inflow(mesh) - - def turbine_density(self, mesh): - """ - Compute the turbine density function on the current `mesh`. - """ - if self.discrete: - return Constant(1.0 / self.footprint_area, domain=mesh) - x, y = SpatialCoordinate(mesh) - r2 = self.turbine_diameter / 2 - r1 = r2 if self.turbine_width is None else self.turbine_width / 2 - - def bump(x0, y0, scale=1.0): - qx = ((x - x0) / r1) ** 2 - qy = ((y - y0) / r2) ** 2 - cond = And(qx < 1, qy < 1) - b = exp(1 - 1 / (1 - qx)) * exp(1 - 1 / (1 - qy)) - return conditional(cond, Constant(scale) * b, 0) - - bumps = 0 - for xy in self.turbine_coords: - bumps += bump(*xy, scale=1 / assemble(bump(*xy) * dx)) - return bumps - - def farm(self, mesh): - """ - Construct a dictionary of :class:`TidalTurbineFarmOptions` - objects based on the current `mesh`. - """ - Ct = self.corrected_thrust_coefficient - farm_options = TidalTurbineFarmOptions() - farm_options.turbine_density = self.turbine_density(mesh) - farm_options.turbine_options.diameter = self.turbine_diameter - farm_options.turbine_options.thrust_coefficient = Ct - return {farm_id: farm_options for farm_id in self.turbine_ids} - - def turbine_drag(self, mesh): - """ - Compute the contribution to the drag coefficient due to the - tidal turbine parametrisation on the current `mesh`. - """ - P0_2d = get_functionspace(mesh, "DG", 0) - p0test = TestFunction(P0_2d) - Ct = self.corrected_thrust_coefficient - At = self.swept_area - Cd = 0.5 * Ct * At * self.turbine_density(mesh) - return sum([p0test * Cd * dx(tag, domain=mesh) for tag in self.turbine_ids]) - - def drag(self, mesh, background=False): - r""" - Create a :math:`\mathbb P0` field for the drag on the current - `mesh`. 
- - :kwarg background: should we consider the background drag - alone, or should the turbine drag be included? - """ - P0_2d = get_functionspace(mesh, "DG", 0) - ret = Function(P0_2d) - - # Background drag - Cb = self.drag_coefficient - if background: - return ret.assign(Cb) - p0test = TestFunction(P0_2d) - expr = p0test * Cb * dx(domain=mesh) - - # Turbine drag - assemble(expr + self.turbine_drag(mesh), tensor=ret) - return ret - - def viscosity(self, mesh): - r""" - Create a :math:`\mathbb P0` field for the viscosity coefficient - on the current `mesh`. - """ - # NOTE: We assume a constant viscosity coefficient - P0_2d = get_functionspace(mesh, "DG", 0) - return Function(P0_2d).assign(self.viscosity_coefficient) - - -PETSc.Sys.popErrorHandler() -parameters = Parameters() - - -def get_function_space(mesh): - """ - Construct the (mixed) finite element space used for the - prognostic solution. - """ - P1v_2d = get_functionspace(mesh, "DG", 1, vector=True) - P2_2d = get_functionspace(mesh, "CG", 2) - return P1v_2d * P2_2d - - -class Solver(nn_adapt.solving.Solver): - """ - Set up a Thetis :class:`FlowSolver2d` object, based on - the current mesh and initial condition. - """ - - def __init__(self, mesh, ic, **kwargs): - """ - :arg mesh: the mesh to define the solver on - :arg ic: the initial condition - """ - self.mesh = mesh - - bathymetry = parameters.bathymetry(mesh) - Cd = parameters.drag_coefficient - sp = kwargs.pop("solver_parameters", None) - - # Create solver object - self._thetis_solver = solver2d.FlowSolver2d(mesh, bathymetry) - options = self._thetis_solver.options - options.element_family = "dg-cg" - options.timestep = 100.0 - options.simulation_export_time = 100.0 - options.simulation_end_time = options.timestep * parameters.tt_steps - options.simulation_end_time = 80.0 - options.swe_timestepper_type = "SteadyState" - options.swe_timestepper_options.solver_parameters = ( - sp or parameters.solver_parameters - ) - options.use_grad_div_viscosity_term = False - options.horizontal_viscosity = parameters.viscosity(mesh) - options.quadratic_drag_coefficient = Cd - options.use_lax_friedrichs_velocity = True - options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) - options.use_grad_depth_viscosity_term = False - options.no_exports = True - options.update(kwargs) - self._thetis_solver.create_equations() - - # Apply boundary conditions - P1v_2d = self._thetis_solver.function_spaces.P1v_2d - u_inflow = interpolate(parameters.u_inflow(mesh), P1v_2d) - self._thetis_solver.bnd_functions["shallow_water"] = { - 1: {"uv": u_inflow}, # inflow - 2: {"elev": Constant(0.0)}, # outflow - 3: {"un": Constant(0.0)}, # free-slip - 4: {"uv": Constant(as_vector([0.0, 0.0]))}, # no-slip - 5: {"elev": Constant(0.0), "un": Constant(0.0)} # weakly reflective - } - - # Create tidal farm - options.tidal_turbine_farms = parameters.farm(mesh) - - # Apply initial guess - u_init, eta_init = ic.split() - self._thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) - - @property - def function_space(self): - r""" - The :math:`\mathbb P1_{DG}-\mathbb P2` function space. - """ - return self._thetis_solver.function_spaces.V_2d - - @property - def form(self): - """ - The weak form of the shallow water equations. 
- """ - return self._thetis_solver.timestepper.F - - def save2file(self, items, file): - ee_file = File(file) - try: - for i in range(len(items)): - ee_file.write(*items[i].split()) - except: - ee_file.write(*items.split()) - - def iterate(self, **kwargs): - """ - Solve the nonlinear shallow water equations. - """ - self._thetis_solver.iterate(**kwargs) - - @property - def solution(self): - return self._thetis_solver.fields.solution_2d - - -def get_initial_condition(function_space): - """ - Compute an initial condition based on the inflow velocity - and zero free surface elevation. - """ - q = Function(function_space) - u, eta = q.split() - u.interpolate(parameters.ic(function_space.mesh())) - return q - - -def get_qoi(mesh): - """ - Extract the quantity of interest function from the :class:`Parameters` - object. - - It should have one argument - the prognostic solution. - """ - rho = parameters.density - Ct = parameters.corrected_thrust_coefficient - At = parameters.swept_area - Cd = 0.5 * Ct * At * parameters.turbine_density(mesh) - tags = parameters.turbine_ids - - def qoi(sol): - u, eta = split(sol) - return sum([rho * Cd * pow(dot(u, u), 1.5) * dx(tag) for tag in tags]) - - return qoi diff --git a/adaptation_n2n/.DS_Store b/examples/pyroteus_burgers/.DS_Store similarity index 95% rename from adaptation_n2n/.DS_Store rename to examples/pyroteus_burgers/.DS_Store index 6a931b9e14b9dd13623164089ed4b57dd30d02b5..416cd27cb6458b2ce8f7d2a7ee6310d096ec68bf 100644 GIT binary patch delta 164 zcmZoMXfc=&$(qVgoRdDej&W^03j+fK8$&JwP=p}^MCPO$1}Ep|768Q=7%qggqRZ#z vySOCf*a0}BHK11m!=Lq0H`v3^G!fov2NDFjp$WFTyqn6Y^C2F5$Ao7p-3@&f=Fs3Zaa diff --git a/examples/burgers/config.py b/examples/pyroteus_burgers/config.py similarity index 96% rename from examples/burgers/config.py rename to examples/pyroteus_burgers/config.py index 0bd51e3..9e84362 100644 --- a/examples/burgers/config.py +++ b/examples/pyroteus_burgers/config.py @@ -1,4 +1,4 @@ -from models.burgers import * +from models.pyroteus_burgers import * from nn_adapt.ann import sample_uniform import numpy as np diff --git a/adaptation_one2n/burgers_one2n/meshgen.py b/examples/pyroteus_burgers/meshgen.py similarity index 100% rename from adaptation_one2n/burgers_one2n/meshgen.py rename to examples/pyroteus_burgers/meshgen.py diff --git a/adaptation_n2n/burgers_n2n/network.py b/examples/pyroteus_burgers/network.py similarity index 100% rename from adaptation_n2n/burgers_n2n/network.py rename to examples/pyroteus_burgers/network.py diff --git a/adaptation_n2n/burgers_n2n/testing_cases.txt b/examples/pyroteus_burgers/testing_cases.txt similarity index 100% rename from adaptation_n2n/burgers_n2n/testing_cases.txt rename to examples/pyroteus_burgers/testing_cases.txt diff --git a/adaptation_one2n/.DS_Store b/examples/pyroteus_turbine/.DS_Store similarity index 95% rename from adaptation_one2n/.DS_Store rename to examples/pyroteus_turbine/.DS_Store index 5acd31712950cf5014acdc43701c916eb3dbb5d2..a90647e66c052593dad52702a4834eb068b0a487 100644 GIT binary patch delta 164 zcmZoMXfc=&$(qVgoRdDej&W^03j+fK8$&JwP=p}^MCPO$1}Ep|768Q=7&VRj(dBdV vU0jlK@{@q#9IP|XoOX{m;)tmxg`k>(43IsXP>b{(Hcw*a0}BHK11m!=Lq0 0, 0, 1)) -tricontourf(footprints, axes=axes, cmap="Blues", levels=[0, 1]) - -# Annotate with physical parameters -txt = r"""$\nu = 100.0$ -$b = 50.0$ -$u_{\mathrm{in}} = \widetilde{y}^2(1-\widetilde{y})^2$""" -xy = (940, 10) -axes.annotate(txt, xy=xy, bbox={"fc": "w"}) - -axes.axis(False) -plt.tight_layout() -plt.savefig("plots/pipe.pdf") diff --git 
a/examples/steady_turbine/plotting.py b/examples/steady_turbine/plotting.py deleted file mode 100644 index 295f10b..0000000 --- a/examples/steady_turbine/plotting.py +++ /dev/null @@ -1,83 +0,0 @@ -from firedrake import * -import matplotlib -import numpy as np - - -matplotlib.rcParams["font.size"] = 12 - - -def plot_config(config, mesh, axes): - """ - Plot a given configuration of a problem on a given - mesh and axes. - """ - tags = config.parameters.turbine_ids - P0 = FunctionSpace(mesh, "DG", 0) - footprints = assemble(sum(TestFunction(P0) * dx(tag) for tag in tags)) - footprints.interpolate(conditional(footprints > 0, 0, 1)) - triplot( - mesh, - axes=axes, - boundary_kw={"color": "dodgerblue"}, - interior_kw={"edgecolor": "w"}, - ) - tricontourf(footprints, axes=axes, cmap="Blues", levels=[0, 1]) - - # Bounding box - xmin = 0 - xmax = 1200 - ymin = 0 - ymax = 500 - eps = 5 - - # Adjust axes - W = assemble(Constant(1.0, domain=mesh) * ds(1)) - L = 0.5 * assemble( - Constant(1.0, domain=mesh) * ds(3) - ) # NOTE: both top and bottom are tagged as 3 - dL = 0.5 * (xmax - L) - dW = 0.5 * (ymax - W) - axes.axis(False) - axes.set_xlim([xmin - dL - eps, xmax - dL + eps]) - axes.set_ylim([ymin - dW - eps, ymax - dW + eps]) - - # Annotate with viscosity coefficient and bathymetry - nu = config.parameters.viscosity_coefficient - b = config.parameters.depth - u_in = config.parameters.inflow_speed - txt = r"$\nu$ = %.3f, $b$ = %.2f, $u_{\mathrm{in}}$ = %.2f" % (nu, b, u_in) - axes.annotate( - txt, xy=(0.025 * L, -0.25 * W), bbox={"fc": "w"}, annotation_clip=False - ) - - -def process_sensitivities(data, layout): - """ - Separate sensitivity experiment data by variable. - - :arg data: the output of `compute_importance.py` - :arg layout: the :class:`NetLayout` instance - """ - i = 0 - sensitivities = {} - dofs = {"u": 3, "v": 3, r"\eta": 6} - for label in ("estimator", "physics", "mesh", "forward", "adjoint"): - n = layout.count_inputs(label) - if n == 0: - continue - if label in ("forward", "adjoint"): - assert n == sum(dofs.values()) - for key, dof in dofs.items(): - S = np.zeros(6) - for j in range(dof): - S[j] = data[i + j] - l = (r"$%s$" if label == "forward" else r"$%s^*$") % key - sensitivities[l] = S - i += dof - else: - S = np.zeros(6) - for j in range(n): - S[j] = data[i + j] - i += n - sensitivities[label.capitalize()] = S - return sensitivities diff --git a/examples/steady_turbine/testing_cases.txt b/examples/steady_turbine/testing_cases.txt deleted file mode 100644 index 72c4630..0000000 --- a/examples/steady_turbine/testing_cases.txt +++ /dev/null @@ -1 +0,0 @@ -aligned diff --git a/examples/test_and_train.py b/examples/test_and_train.py index e3cfd9b..992bdcc 100644 --- a/examples/test_and_train.py +++ b/examples/test_and_train.py @@ -197,7 +197,7 @@ key: np.load(f"{data_dir}/feature_{key}_{suffix}.npy") for key in layout.inputs } - features = concat(features, collect_features(feature)) + features = concat(features, collect_features(feature, layout)) target = np.load(f"{data_dir}/target_{suffix}.npy") targets = concat(targets, target) print(f"Total number of features: {len(features.flatten())}") diff --git a/examples/turbine/.DS_Store b/examples/turbine/.DS_Store deleted file mode 100644 index f823c1fdaf801e254bff610ce47bbbd79077c8af..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK!AiqG5S^`6Q;N`oipK@71tUd^cnMYU;6;q+L8UgPXfVx|CT)>Y$WedDPw{)4 z+1*M@^(G=^24>&x?7W113A;N0AR7H}7oY+F4mx3>gv~cb^W<|@u#qC7(9cLuFV5Y5 
[remainder of GIT binary patch for examples/turbine/.DS_Store omitted -- no recoverable content]

From: acse-xt221
Date: Sat, 20 Aug 2022 09:16:55 +0100
Subject: [PATCH 10/13] solver: adapt multiple meshes, but could not make
 features or output

---
 examples/makefile                   |  2 +-
 examples/models/pyroteus_burgers.py |  4 ++--
 examples/run_adapt.py               | 29 +++++++++++++++++++++------
 nn_adapt/metric.py                  | 11 +++++------
 4 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/examples/makefile b/examples/makefile
index 000cac7..fe8503a 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -3,7 +3,7 @@ all: setup network test

 # --- Configurable parameters
 APPROACHES = anisotropic
-MODEL = pyroteus_turbine
+MODEL = pyroteus_burgers
 NUM_TRAINING_CASES = 1
 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt)
 PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1
 TAG = all
diff --git a/examples/models/pyroteus_burgers.py b/examples/models/pyroteus_burgers.py
index 0b4fd14..aa773ad 100644
--- a/examples/models/pyroteus_burgers.py
+++ b/examples/models/pyroteus_burgers.py
@@ -153,7 +153,7 @@ def GoalOrientedMeshSeq(mesh, **kwargs):

     # setup time steps and export steps
     dt = 0.1
-    steps_subintervals = 10
+    steps_subintervals = 3
     end_time = num_subintervals * steps_subintervals * dt
     timesteps_per_export = 1
@@ -178,5 +178,5 @@ def GoalOrientedMeshSeq(mesh, **kwargs):
     )
     return mesh_seq

-initial_mesh = UnitSquareMesh(30, 30)
+initial_mesh = [UnitSquareMesh(30, 30) for _ in range(2)]
\ No newline at end of file
diff --git a/examples/run_adapt.py b/examples/run_adapt.py
index 3d5e0ba..7e34c3d 100644
--- a/examples/run_adapt.py
+++ b/examples/run_adapt.py
@@ -38,6 +38,7 @@
 target_complexity = parsed_args.target_complexity
 optimise = parsed_args.optimise
 no_outputs = parsed_args.no_outputs or optimise
+no_outputs = 1

 if not no_outputs:
     from pyroteus.utility import File
@@ -50,6 +51,11 @@
     mesh = setup.initial_mesh
 else:
     mesh = Mesh(f"{model}/meshes/{test_case}.msh")
+
+# Distinguish a list of subinterval meshes from a single mesh
+try:
+    num_subinterval = len(mesh)
+except TypeError:
+    num_subinterval = 1

 # Run adaptation loop
 kwargs = {
@@ -62,7 +68,7 @@
     "h_max": setup.parameters.h_max,
     "a_max": 1.0e5,
 }
-ct = ConvergenceTracker(mesh, parsed_args)
+ct = ConvergenceTracker(mesh[0], parsed_args)
 if not no_outputs:
     output_dir = f"{model}/outputs/{test_case}/GO/{approach}"
     fwd_file = File(f"{output_dir}/forward.pvd")
@@ -70,7 +76,7 @@
     ee_file = File(f"{output_dir}/estimator.pvd")
     metric_file = File(f"{output_dir}/metric.pvd")
     mesh_file = File(f"{output_dir}/mesh.pvd")
-    mesh_file.write(mesh.coordinates)
+    # mesh_file.write(mesh.coordinates)
 print(f"Test case {test_case}")
 print("  Mesh 0")
 print(f"    Element count = {ct.elements_old}")
@@ -121,12 +127,23 @@

     # Adapt the mesh and check for element count convergence
     with PETSc.Log.Event("Mesh adaptation"):
-        mesh = adapt(mesh, metric)
+        if num_subinterval == 1:
+            mesh = adapt(mesh, metric)
+        else:
+            for id in range(num_subinterval):
+                mesh[id] = adapt(mesh[id], metric[id])
     if not no_outputs:
         mesh_file.write(mesh.coordinates)
-    elements = mesh.num_cells()
-    print(f"  Mesh {ct.fp_iteration+1}")
-    print(f"    Element count = {elements}")
+
+    if num_subinterval == 1:
+        elements = mesh.num_cells()
+        print(f"  Mesh {ct.fp_iteration+1}")
+        print(f"    Element count = {elements}")
+    else:
+        elements_list = np.array([mesh_i.num_cells() for mesh_i in mesh])
+        elements = elements_list.mean()
+        print(f"  Mesh {ct.fp_iteration+1}")
+        print(f"    Element list = {elements_list}")
    if
ct.check_elements(elements): break ct.check_maxiter() diff --git a/nn_adapt/metric.py b/nn_adapt/metric.py index 79b8cdf..634e147 100644 --- a/nn_adapt/metric.py +++ b/nn_adapt/metric.py @@ -6,7 +6,6 @@ from nn_adapt.features import split_into_scalars from nn_adapt.solving import * from firedrake.meshadapt import RiemannianMetric -import numpy as np def get_hessians(f, **kwargs): @@ -89,7 +88,7 @@ def go_metric( # single mesh for whole time interval if num_subintervals == 1: - out["estimator"] = out["dwr"][0].vector().gather().sum() # FIXME: Only uses 0th + out["estimator"] = out["dwr"][0].vector().gather().sum() if convergence_checker is not None: if convergence_checker.check_estimator(out["estimator"]): return out @@ -98,7 +97,7 @@ def go_metric( else: out["estimator"] = [0 for _ in range(num_subintervals)] for id in range(num_subintervals): - out["estimator"][id] = out["dwr"][id].vector().gather().sum() # FIXME: Only uses 0th + out["estimator"][id] = out["dwr"][id].vector().gather().sum() if convergence_checker is not None: max_estimator = np.array(out["estimator"]).mean() if convergence_checker.check_estimator(max_estimator): @@ -109,13 +108,13 @@ def go_metric( if num_subintervals == 1: if anisotropic: field = list(out["forward"].keys())[0] - fwd = out["forward"][field][0] # FIXME: Only uses 0th + fwd = out["forward"][field][0] hessians = sum([get_hessians(sol) for sol in fwd], start=()) hessian = combine_metrics(*hessians, average=average) else: hessian = None metric = anisotropic_metric( - out["dwr"][0], # FIXME: Only uses 0th + out["dwr"][0], hessian=hessian, target_complexity=target_complexity, target_space=TensorFunctionSpace(mesh[0], "CG", 1), @@ -138,7 +137,7 @@ def go_metric( else: hessian = None metric = anisotropic_metric( - out["dwr"][0], # FIXME: Only uses 0th + out["dwr"][id], hessian=hessian, target_complexity=target_complexity, target_space=TensorFunctionSpace(mesh[id], "CG", 1), From c42a74b53b9702a029cab9df593142ca20c4a9d0 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Wed, 24 Aug 2022 14:43:52 +0100 Subject: [PATCH 11/13] some error, could not run run_adaptation_loop.py --- examples/compute_importance.py | 2 +- examples/makefile | 2 +- examples/models/pyroteus_burgers.py | 21 ++++--- examples/models/pyroteus_turbine.py | 16 ++---- examples/plot_importance.py | 2 +- examples/plot_progress.py | 2 +- examples/pyroteus_burgers/config.py | 14 +++-- examples/pyroteus_burgers/network.py | 2 +- examples/pyroteus_burgers/plotting.py | 83 +++++++++++++++++++++++++++ examples/run_adapt.py | 8 ++- examples/run_uniform_refinement.py | 5 +- nn_adapt/layout.py | 2 + 12 files changed, 126 insertions(+), 33 deletions(-) create mode 100644 examples/pyroteus_burgers/plotting.py diff --git a/examples/compute_importance.py b/examples/compute_importance.py index 3e2bf4c..2751ad4 100644 --- a/examples/compute_importance.py +++ b/examples/compute_importance.py @@ -20,7 +20,7 @@ "model", help="The model", type=str, - choices=["steady_turbine"], + choices=["steady_turbine", "pyroteus_burgers"], ) parser.add_argument( "num_training_cases", diff --git a/examples/makefile b/examples/makefile index fe8503a..1820ad1 100644 --- a/examples/makefile +++ b/examples/makefile @@ -4,7 +4,7 @@ all: setup network test APPROACHES = anisotropic MODEL = pyroteus_burgers -NUM_TRAINING_CASES = 1 +NUM_TRAINING_CASES = 100 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all diff --git a/examples/models/pyroteus_burgers.py 
b/examples/models/pyroteus_burgers.py index aa773ad..83a9d14 100644 --- a/examples/models/pyroteus_burgers.py +++ b/examples/models/pyroteus_burgers.py @@ -23,6 +23,10 @@ class Parameters(nn_adapt.model.Parameters): # Physical parameters viscosity_coefficient = 0.0001 initial_speed = 1.0 + + # Offset for creating more initial conditions + x_offset = 0 + y_offset = 0 # Timestepping parameters timestep = 0.05 @@ -62,8 +66,11 @@ def ic(self, mesh): Initial condition """ x, y = SpatialCoordinate(mesh) - expr = self.initial_speed * sin(pi * x) - return as_vector([expr, 0]) + # x_expr = self.initial_speed * sin(pi * x + self.x_offset) + # y_expr = self.initial_speed * cos(pi * y + self.y_offset) + x_expr = self.initial_speed * sin(pi * x) + y_expr = 0 + return as_vector([x_expr, y_expr]) def get_function_spaces(mesh): @@ -77,7 +84,7 @@ def form(index, solutions): dt = Constant(P.timesteps[index]) # Specify viscosity coefficient - nu = Constant(0.0001) + nu = parameters.viscosity_coefficient # Setup variational problem v = TestFunction(u.function_space()) @@ -121,8 +128,7 @@ def solver(index, ic): def get_initial_condition(mesh_seq): fs = mesh_seq.function_spaces["u"][0] - x, y = SpatialCoordinate(mesh_seq[0]) - return {"u": interpolate(as_vector([sin(pi * x), 0]), fs)} + return {"u": interpolate(parameters.ic(fs), fs)} def get_qoi(mesh_seq, solutions, index): def end_time_qoi(): @@ -153,7 +159,7 @@ def GoalOrientedMeshSeq(mesh, **kwargs): # setup time steps and export steps dt = 0.1 - steps_subintervals = 3 + steps_subintervals = 10 end_time = num_subintervals * steps_subintervals * dt timesteps_per_export = 1 @@ -178,5 +184,6 @@ def GoalOrientedMeshSeq(mesh, **kwargs): ) return mesh_seq -initial_mesh = [UnitSquareMesh(30, 30) for _ in range(2)] + +initial_mesh = UnitSquareMesh(30, 30) \ No newline at end of file diff --git a/examples/models/pyroteus_turbine.py b/examples/models/pyroteus_turbine.py index bc26513..a56a820 100644 --- a/examples/models/pyroteus_turbine.py +++ b/examples/models/pyroteus_turbine.py @@ -209,9 +209,8 @@ def viscosity(self, mesh): PETSc.Sys.popErrorHandler() parameters = Parameters() -kwargs = {} -def GoalOrientedMeshSeq(mesh, **kwargs): +def GoalOrientedMeshSeq(mesh): fields = ["q"] time_partition = TimeInterval(parameters.end_time, @@ -227,7 +226,7 @@ def solver(index, ic): mesh_seq.form(index, {"q": (q, q)}) u_init, eta_init = q.split() mesh_seq._thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) - mesh_seq._thetis_solver.iterate(**kwargs) + mesh_seq._thetis_solver.iterate() return {"q": mesh_seq._thetis_solver.fields.solution_2d} @@ -240,7 +239,6 @@ def form(index, ic): bathymetry = parameters.bathymetry(mesh_seq[index]) Cd = parameters.drag_coefficient - sp = kwargs.pop("solver_parameters", None) # Create solver object mesh_seq._thetis_solver = solver2d.FlowSolver2d(mesh_seq[index], bathymetry) @@ -250,9 +248,7 @@ def form(index, ic): options.simulation_export_time = P.timestep * P.timesteps_per_export[index] options.simulation_end_time = P.end_time options.swe_timestepper_type = "SteadyState" - options.swe_timestepper_options.solver_parameters = ( - sp or parameters.solver_parameters - ) + options.swe_timestepper_options.solver_parameters = parameters.solver_parameters options.use_grad_div_viscosity_term = False options.horizontal_viscosity = parameters.viscosity(mesh_seq[index]) options.quadratic_drag_coefficient = Cd @@ -260,7 +256,7 @@ def form(index, ic): options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) 
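        # (Editorial note, not part of the original patch: the surrounding
        # options configure Thetis' stabilisation and viscous terms. As a
        # hedged summary: use_lax_friedrichs_velocity switches on interface
        # penalty stabilisation for the DG velocity space, with the scaling
        # factor above controlling its strength, while disabling the grad-div
        # and grad-depth viscosity terms below reduces the viscous operator to
        # the plain Laplacian form assumed elsewhere in these examples.)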
options.use_grad_depth_viscosity_term = False options.no_exports = True - options.update(kwargs) + options.update() # Apply boundary conditions mesh_seq._thetis_solver.create_function_spaces() @@ -277,10 +273,6 @@ def form(index, ic): # # Create tidal farm options.tidal_turbine_farms = parameters.farm(mesh_seq[index]) mesh_seq._thetis_solver.create_timestepper() - - # # Apply initial guess - # u_init, eta_init = ic["q"].split() - # thetis_solver.assign_initial_conditions(uv=u_init, elev=eta_init) return mesh_seq._thetis_solver.timestepper.F diff --git a/examples/plot_importance.py b/examples/plot_importance.py index 596d80f..d605068 100644 --- a/examples/plot_importance.py +++ b/examples/plot_importance.py @@ -19,7 +19,7 @@ "model", help="The model", type=str, - choices=["steady_turbine"], + choices=["steady_turbine", "pyroteus_burgers"], ) parser.add_argument( "num_training_cases", diff --git a/examples/plot_progress.py b/examples/plot_progress.py index 09482a3..ed8c5d2 100644 --- a/examples/plot_progress.py +++ b/examples/plot_progress.py @@ -18,7 +18,7 @@ "model", help="The model", type=str, - choices=["steady_turbine", "burgers"], + choices=["steady_turbine", "pyroteus_burgers"], ) parser.add_argument( "--tag", diff --git a/examples/pyroteus_burgers/config.py b/examples/pyroteus_burgers/config.py index 9e84362..a23c993 100644 --- a/examples/pyroteus_burgers/config.py +++ b/examples/pyroteus_burgers/config.py @@ -25,12 +25,18 @@ def initialise(case, discrete=False): # Random initial speed from 0.01 m/s to 6 m/s parameters.initial_speed = sample_uniform(0.01, 6.0) - # Random viscosity from 0.00001 m^2/s to 1 m^2/s - parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1) + # # Random viscosity from 0.00001 m^2/s to 1 m^2/s + # parameters.viscosity_coefficient = sample_uniform(0.1, 1.0) * 10 ** np.random.randint(-3, 1) + # Random viscosity from 0.001 m^2/s to 1 m^2/s + parameters.viscosity_coefficient = sample_uniform(0.01, 1.0) * 10 ** np.random.randint(-1, 1) + + # Random offset for initial conditions + parameters.x_offset = sample_uniform(0, 2*pi) + parameters.y_offset = sample_uniform(0, 2*pi) return elif "demo" in case: - parameters.viscosity_coefficient = 0.0001 - parameters.initial_speed = 1.0 + parameters.viscosity_coefficient = 0.001 + parameters.initial_speed = 1 else: raise ValueError(f"Test case {test_case} not recognised") diff --git a/examples/pyroteus_burgers/network.py b/examples/pyroteus_burgers/network.py index b7f9907..dcf3ba9 100644 --- a/examples/pyroteus_burgers/network.py +++ b/examples/pyroteus_burgers/network.py @@ -30,7 +30,7 @@ class NetLayout(NetLayoutBase): """ inputs = ( - "estimator_coarse", + # "estimator_coarse", "physics_viscosity", "mesh_d", "mesh_h1", diff --git a/examples/pyroteus_burgers/plotting.py b/examples/pyroteus_burgers/plotting.py new file mode 100644 index 0000000..b5b629a --- /dev/null +++ b/examples/pyroteus_burgers/plotting.py @@ -0,0 +1,83 @@ +from firedrake import * +import matplotlib +import numpy as np + + +matplotlib.rcParams["font.size"] = 12 + + +def plot_config(config, mesh, axes): + """ + Plot a given configuration of a problem on a given + mesh and axes. 
+ """ + tags = config.parameters.turbine_ids + P0 = FunctionSpace(mesh, "DG", 0) + footprints = assemble(sum(TestFunction(P0) * dx(tag) for tag in tags)) + footprints.interpolate(conditional(footprints > 0, 0, 1)) + triplot( + mesh, + axes=axes, + boundary_kw={"color": "dodgerblue"}, + interior_kw={"edgecolor": "w"}, + ) + tricontourf(footprints, axes=axes, cmap="Blues", levels=[0, 1]) + + # Bounding box + xmin = 0 + xmax = 1200 + ymin = 0 + ymax = 500 + eps = 5 + + # Adjust axes + W = assemble(Constant(1.0, domain=mesh) * ds(1)) + L = 0.5 * assemble( + Constant(1.0, domain=mesh) * ds(3) + ) # NOTE: both top and bottom are tagged as 3 + dL = 0.5 * (xmax - L) + dW = 0.5 * (ymax - W) + axes.axis(False) + axes.set_xlim([xmin - dL - eps, xmax - dL + eps]) + axes.set_ylim([ymin - dW - eps, ymax - dW + eps]) + + # Annotate with viscosity coefficient and bathymetry + nu = config.parameters.viscosity_coefficient + b = config.parameters.depth + u_in = config.parameters.inflow_speed + txt = r"$\nu$ = %.3f, $b$ = %.2f, $u_{\mathrm{in}}$ = %.2f" % (nu, b, u_in) + axes.annotate( + txt, xy=(0.025 * L, -0.25 * W), bbox={"fc": "w"}, annotation_clip=False + ) + + +def process_sensitivities(data, layout): + """ + Separate sensitivity experiment data by variable. + + :arg data: the output of `compute_importance.py` + :arg layout: the :class:`NetLayout` instance + """ + i = 0 + sensitivities = {} + dofs = {"u": 3, "v": 3, r"\eta": 6} + for label in ("physics", "mesh", "forward", "adjoint"): + n = layout.count_inputs(label) + if n == 0: + continue + if label in ("forward", "adjoint"): + assert n == sum(dofs.values()) + for key, dof in dofs.items(): + S = np.zeros(6) + for j in range(dof): + S[j] = data[i + j] + l = (r"$%s$" if label == "forward" else r"$%s^*$") % key + sensitivities[l] = S + i += dof + else: + S = np.zeros(6) + for j in range(n): + S[j] = data[i + j] + i += n + sensitivities[label.capitalize()] = S + return sensitivities diff --git a/examples/run_adapt.py b/examples/run_adapt.py index 7e34c3d..b863190 100644 --- a/examples/run_adapt.py +++ b/examples/run_adapt.py @@ -38,7 +38,6 @@ target_complexity = parsed_args.target_complexity optimise = parsed_args.optimise no_outputs = parsed_args.no_outputs or optimise -no_outputs = 1 if not no_outputs: from pyroteus.utility import File @@ -68,7 +67,10 @@ "h_max": setup.parameters.h_max, "a_max": 1.0e5, } -ct = ConvergenceTracker(mesh[0], parsed_args) +if num_subinterval == 1: + ct = ConvergenceTracker(mesh, parsed_args) +else: + ct = ConvergenceTracker(mesh[0], parsed_args) if not no_outputs: output_dir = f"{model}/outputs/{test_case}/GO/{approach}" fwd_file = File(f"{output_dir}/forward.pvd") @@ -76,7 +78,7 @@ ee_file = File(f"{output_dir}/estimator.pvd") metric_file = File(f"{output_dir}/metric.pvd") mesh_file = File(f"{output_dir}/mesh.pvd") - # mesh_file.write(mesh.coordinates) + mesh_file.write(mesh.coordinates) print(f"Test case {test_case}") print(" Mesh 0") print(f" Element count = {ct.elements_old}") diff --git a/examples/run_uniform_refinement.py b/examples/run_uniform_refinement.py index be3b3a8..9d95858 100644 --- a/examples/run_uniform_refinement.py +++ b/examples/run_uniform_refinement.py @@ -59,12 +59,13 @@ def prolong(V): if parsed_args.prolong: kwargs["init"] = prolong - fs = fwd_sol.function_space() + spaces = [sol[0][0].function_space() for sol in fwd_sol.values()] time = perf_counter() - start_time print_output(f" Quantity of Interest = {qoi} {unit}") print_output(f" Runtime: {time:.2f} seconds") qois.append(qoi) - 
dofs.append(sum(fs.dof_count)) + dof = sum(np.array([fs.dof_count for fs in spaces]).flatten()) + dofs.append(dof) times.append(time) elements.append(mesh.num_cells()) niter.append(1) diff --git a/nn_adapt/layout.py b/nn_adapt/layout.py index 2ce6909..15e0651 100644 --- a/nn_adapt/layout.py +++ b/nn_adapt/layout.py @@ -13,6 +13,8 @@ class NetLayoutBase(object): for each of these parameters. """ + inputs = None + num_hidden_neurons = None # TODO: Allow more general networks colours = { From e5c0bec6b971336873e41629bd261566f18f7250 Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Sun, 28 Aug 2022 16:23:05 +0100 Subject: [PATCH 12/13] some fixes --- examples/makefile | 4 ++-- examples/models/pyroteus_burgers.py | 2 +- examples/models/pyroteus_turbine.py | 1 - examples/plot_convergence.py | 1 + examples/run_adaptation_loop.py | 7 ++++++- examples/run_adaptation_loop_ml.py | 7 ++++++- nn_adapt/solving.py | 4 ++-- 7 files changed, 18 insertions(+), 8 deletions(-) diff --git a/examples/makefile b/examples/makefile index 1820ad1..000cac7 100644 --- a/examples/makefile +++ b/examples/makefile @@ -3,8 +3,8 @@ all: setup network test # --- Configurable parameters APPROACHES = anisotropic -MODEL = pyroteus_burgers -NUM_TRAINING_CASES = 100 +MODEL = pyroteus_turbine +NUM_TRAINING_CASES = 1 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all diff --git a/examples/models/pyroteus_burgers.py b/examples/models/pyroteus_burgers.py index 83a9d14..070f1d2 100644 --- a/examples/models/pyroteus_burgers.py +++ b/examples/models/pyroteus_burgers.py @@ -185,5 +185,5 @@ def GoalOrientedMeshSeq(mesh, **kwargs): return mesh_seq -initial_mesh = UnitSquareMesh(30, 30) +initial_mesh = lambda: UnitSquareMesh(30, 30) \ No newline at end of file diff --git a/examples/models/pyroteus_turbine.py b/examples/models/pyroteus_turbine.py index a56a820..00260e6 100644 --- a/examples/models/pyroteus_turbine.py +++ b/examples/models/pyroteus_turbine.py @@ -256,7 +256,6 @@ def form(index, ic): options.lax_friedrichs_velocity_scaling_factor = Constant(1.0) options.use_grad_depth_viscosity_term = False options.no_exports = True - options.update() # Apply boundary conditions mesh_seq._thetis_solver.create_function_spaces() diff --git a/examples/plot_convergence.py b/examples/plot_convergence.py index d8e2255..1df914f 100644 --- a/examples/plot_convergence.py +++ b/examples/plot_convergence.py @@ -71,6 +71,7 @@ if len(approaches.keys()) == 0: print("Nothing to plot.") sys.exit(0) +print(qois) # Drop first iteration because timings include compilation # FIXME: Why? 
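# (Editorial note answering the FIXME above: the first uniform run triggers
# Firedrake's just-in-time form compilation, so its wall-clock time includes
# one-off code-generation cost that later runs reuse from the disk cache.
# Dropping the first sample therefore keeps the timing comparison fair; the
# hypothetical alternative would be a warm-up solve on the coarsest mesh
# whose timing is simply discarded.)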
dofs["uniform"] = dofs["uniform"][1:] diff --git a/examples/run_adaptation_loop.py b/examples/run_adaptation_loop.py index bfe72d0..91c508e 100644 --- a/examples/run_adaptation_loop.py +++ b/examples/run_adaptation_loop.py @@ -12,6 +12,7 @@ import importlib import numpy as np +from time import perf_counter set_log_level(ERROR) @@ -48,6 +49,7 @@ # Run adaptation loop qois, dofs, elements, estimators, niter = [], [], [], [], [] components = ("forward", "adjoint", "estimator", "metric", "adapt") +times = [] print(f"Test case {test_case}") for i in range(num_refinements + 1): try: @@ -63,12 +65,13 @@ "a_max": 1.0e5, } if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh + mesh = setup.initial_mesh() else: mesh = Mesh(f"{model}/meshes/{test_case}.msh") ct = ConvergenceTracker(mesh, parsed_args) print(f" Target {target_complexity}\n Mesh 0") print(f" Element count = {ct.elements_old}") + times.append(-perf_counter()) for ct.fp_iteration in range(ct.maxiter + 1): # Ramp up the target complexity @@ -106,6 +109,7 @@ print( f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" ) + times[-1] += perf_counter() qois.append(qoi) dofs.append(dof) elements.append(cells) @@ -116,6 +120,7 @@ np.save(f"{model}/data/elements_GO{approach}_{test_case}", elements) np.save(f"{model}/data/estimators_GO{approach}_{test_case}", estimators) np.save(f"{model}/data/niter_GO{approach}_{test_case}", niter) + np.save(f"{model}/data/times_all_GO{approach}_{test_case}", times) except ConvergenceError: print("Skipping due to convergence error") continue diff --git a/examples/run_adaptation_loop_ml.py b/examples/run_adaptation_loop_ml.py index 1d58344..0011264 100644 --- a/examples/run_adaptation_loop_ml.py +++ b/examples/run_adaptation_loop_ml.py @@ -13,6 +13,7 @@ import importlib import numpy as np +from time import perf_counter set_log_level(ERROR) @@ -59,18 +60,20 @@ # Run adaptation loop qois, dofs, elements, estimators, niter = [], [], [], [], [] components = ("forward", "adjoint", "estimator", "metric", "adapt") +times = [] print(f"Test case {test_case}") for i in range(num_refinements + 1): try: target_complexity = 100.0 * 2 ** (f * i) if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh + mesh = setup.initial_mesh() else: mesh = Mesh(f"{model}/meshes/{test_case}.msh") ct = ConvergenceTracker(mesh, parsed_args) kwargs = {} print(f" Target {target_complexity}\n Mesh 0") print(f" Element count = {ct.elements_old}") + times.append(-perf_counter()) for ct.fp_iteration in range(ct.maxiter + 1): # Ramp up the target complexity @@ -148,6 +151,7 @@ print( f" Terminated after {ct.fp_iteration+1} iterations due to {ct.converged_reason}" ) + times[-1] += perf_counter() qois.append(qoi) dofs.append(dof) elements.append(cells) @@ -158,6 +162,7 @@ np.save(f"{model}/data/elements_ML{approach}_{test_case}_{tag}", elements) np.save(f"{model}/data/estimators_ML{approach}_{test_case}_{tag}", estimators) np.save(f"{model}/data/niter_ML{approach}_{test_case}_{tag}", niter) + np.save(f"{model}/data/times_all_ML{approach}_{test_case}_{tag}", times) except ConvergenceError: print("Skipping due to convergence error") continue diff --git a/nn_adapt/solving.py b/nn_adapt/solving.py index a05f645..5c2a2c3 100644 --- a/nn_adapt/solving.py +++ b/nn_adapt/solving.py @@ -36,7 +36,7 @@ def get_solutions( # NOTE: None of the timings will work! 
# Solve forward problem in base space - mesh_seq = config.GoalOrientedMeshSeq(mesh, **kwargs) + mesh_seq = config.GoalOrientedMeshSeq(mesh) solutions = mesh_seq.solve_adjoint() fields = mesh_seq.fields qoi = mesh_seq.J @@ -80,7 +80,7 @@ def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs) out = {} if not enrichment_method == "h": raise NotImplementedError # TODO - mesh_seq = config.GoalOrientedMeshSeq(mesh, **kwargs) + mesh_seq = config.GoalOrientedMeshSeq(mesh) fields = mesh_seq.fields kw = {"enrichment_method": enrichment_method} solutions, indicators = mesh_seq.indicate_errors(enrichment_kwargs=kw) From 0832cbd0e80aa4c986c722a68899ca6dc05c024f Mon Sep 17 00:00:00 2001 From: acse-xt221 Date: Tue, 6 Sep 2022 04:06:20 +0100 Subject: [PATCH 13/13] fix bugs --- examples/makefile | 4 ++-- examples/models/pyroteus_burgers.py | 19 +++++++++++++------ examples/pyroteus_burgers/config.py | 4 ++-- examples/pyroteus_burgers/network.py | 2 +- examples/run_adapt.py | 8 ++++---- examples/run_adapt_ml.py | 2 +- examples/run_uniform_refinement.py | 5 ++++- nn_adapt/solving.py | 11 ++++++++--- nn_adapt/utility.py | 10 ++++++++++ 9 files changed, 45 insertions(+), 20 deletions(-) diff --git a/examples/makefile b/examples/makefile index 000cac7..1820ad1 100644 --- a/examples/makefile +++ b/examples/makefile @@ -3,8 +3,8 @@ all: setup network test # --- Configurable parameters APPROACHES = anisotropic -MODEL = pyroteus_turbine -NUM_TRAINING_CASES = 1 +MODEL = pyroteus_burgers +NUM_TRAINING_CASES = 100 TESTING_CASES = $(shell cat $(MODEL)/testing_cases.txt) PETSC_OPTIONS = -dm_plex_metric_hausdorff_number 1 TAG = all diff --git a/examples/models/pyroteus_burgers.py b/examples/models/pyroteus_burgers.py index 070f1d2..45f66c0 100644 --- a/examples/models/pyroteus_burgers.py +++ b/examples/models/pyroteus_burgers.py @@ -29,7 +29,7 @@ class Parameters(nn_adapt.model.Parameters): y_offset = 0 # Timestepping parameters - timestep = 0.05 + timestep = 5 solver_parameters = {} adjoint_solver_parameters = {} @@ -67,7 +67,7 @@ def ic(self, mesh): """ x, y = SpatialCoordinate(mesh) # x_expr = self.initial_speed * sin(pi * x + self.x_offset) - # y_expr = self.initial_speed * cos(pi * y + self.y_offset) + # y_expr = self.initial_speed * sin(pi * y + self.y_offset) x_expr = self.initial_speed * sin(pi * x) y_expr = 0 return as_vector([x_expr, y_expr]) @@ -116,9 +116,10 @@ def solver(index, ic): dt = P.timesteps[index] t = t_start step = 0 + sp = {'snes_max_it': 100} while t < t_end - 1.0e-05: step += 1 - solve(F == 0, u, ad_block_tag="u") + solve(F == 0, u, ad_block_tag="u", solver_parameters=sp) u_.assign(u) t += dt return {"u": u} @@ -133,12 +134,18 @@ def get_initial_condition(mesh_seq): def get_qoi(mesh_seq, solutions, index): def end_time_qoi(): u = solutions["u"] - return inner(u, u) * ds(2) + fs = mesh_seq.function_spaces["u"][0] + x, y = SpatialCoordinate(fs) + partial = conditional(And(And(x > 0.1, x < 0.9), And(y > 0.45, y < 0.55)), 1, 0) + return inner(u, u) * dx def time_integrated_qoi(t): + fs = mesh_seq.function_spaces["u"][0] + x, y = SpatialCoordinate(fs) + partial = conditional(And(And(x > 0.7, x < 0.8), And(y > 0.45, y < 0.55)), 1, 0) dt = Constant(mesh_seq.time_partition[index].timestep) u = solutions["u"] - return dt * inner(u, u) * ds(2) + return dt * partial * inner(u, u) * dx if mesh_seq.qoi_type == "end_time": return end_time_qoi @@ -158,7 +165,7 @@ def GoalOrientedMeshSeq(mesh, **kwargs): num_subintervals = 1 # setup time steps and export steps - dt = 0.1 + dt = 1 
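    # (Editorial note: with the values chosen here the simulated interval is
    # end_time = num_subintervals * steps_subintervals * dt = 1 * 10 * 1 = 10
    # time units. This local dt, rather than Parameters.timestep, appears to
    # be what the time partition constructed below is given.)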
steps_subintervals = 10 end_time = num_subintervals * steps_subintervals * dt timesteps_per_export = 1 diff --git a/examples/pyroteus_burgers/config.py b/examples/pyroteus_burgers/config.py index a23c993..62c7004 100644 --- a/examples/pyroteus_burgers/config.py +++ b/examples/pyroteus_burgers/config.py @@ -31,8 +31,8 @@ def initialise(case, discrete=False): parameters.viscosity_coefficient = sample_uniform(0.01, 1.0) * 10 ** np.random.randint(-1, 1) # Random offset for initial conditions - parameters.x_offset = sample_uniform(0, 2*pi) - parameters.y_offset = sample_uniform(0, 2*pi) + parameters.x_offset = sample_uniform(0, 1/2*pi) + parameters.y_offset = sample_uniform(0, 1/2*pi) return elif "demo" in case: parameters.viscosity_coefficient = 0.001 diff --git a/examples/pyroteus_burgers/network.py b/examples/pyroteus_burgers/network.py index dcf3ba9..5f437fb 100644 --- a/examples/pyroteus_burgers/network.py +++ b/examples/pyroteus_burgers/network.py @@ -16,7 +16,7 @@ class NetLayout(NetLayoutBase): + [boundary element?] + [12 forward DoFs per element] + [12 adjoint DoFs per element] - = 30 + = 29 Hidden layer: ------------- diff --git a/examples/run_adapt.py b/examples/run_adapt.py index b863190..252bba5 100644 --- a/examples/run_adapt.py +++ b/examples/run_adapt.py @@ -47,7 +47,7 @@ setup.initialise(test_case) unit = setup.parameters.qoi_unit if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh + mesh = setup.initial_mesh() else: mesh = Mesh(f"{model}/meshes/{test_case}.msh") @@ -114,14 +114,14 @@ for sol in adj_sol.values(): fields += sol[0][0].split() # FIXME: Only uses 0th adj_file.write(*fields) - ee_file.write(dwr[0]) # FIXME: Only uses 0th + ee_file.write(dwr[-1]) # FIXME: Only uses 0th metric_file.write(metric.function) # Extract features if not optimise: field = list(fwd_sol.keys())[0] # FIXME: Only uses 0th field - features = extract_features(setup, fwd_sol[field][0][0], adj_sol[field][0][0]) # FIXME - target = dwr[0].dat.data.flatten() # FIXME: Only uses 0th + features = extract_features(setup, fwd_sol[field][0][-1], adj_sol[field][0][-1]) # FIXME + target = dwr[-1].dat.data.flatten() # FIXME: Only uses 0th assert not np.isnan(target).any() for key, value in features.items(): np.save(f"{data_dir}/feature_{key}_{suffix}", value) diff --git a/examples/run_adapt_ml.py b/examples/run_adapt_ml.py index ccde295..f15c166 100644 --- a/examples/run_adapt_ml.py +++ b/examples/run_adapt_ml.py @@ -43,7 +43,7 @@ setup.initialise(test_case) unit = setup.parameters.qoi_unit if hasattr(setup, "initial_mesh"): - mesh = setup.initial_mesh + mesh = setup.initial_mesh() else: mesh = Mesh(f"{model}/meshes/{test_case}.msh") diff --git a/examples/run_uniform_refinement.py b/examples/run_uniform_refinement.py index 9d95858..cc58ca6 100644 --- a/examples/run_uniform_refinement.py +++ b/examples/run_uniform_refinement.py @@ -31,7 +31,10 @@ setup = importlib.import_module(f"{model}.config") setup.initialise(test_case) unit = setup.parameters.qoi_unit -mesh = Mesh(f"{model}/meshes/{test_case}.msh") +if hasattr(setup, "initial_mesh"): + mesh = setup.initial_mesh() +else: + mesh = Mesh(f"{model}/meshes/{test_case}.msh") mh = [mesh] + list(MeshHierarchy(mesh, num_refinements)) tm = TransferManager() kwargs = {} diff --git a/nn_adapt/solving.py b/nn_adapt/solving.py index 5c2a2c3..ccb9876 100644 --- a/nn_adapt/solving.py +++ b/nn_adapt/solving.py @@ -36,7 +36,7 @@ def get_solutions( # NOTE: None of the timings will work! 
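    # (Editorial note: solve_adjoint() below returns a nested container that
    # the rest of the codebase indexes as
    #     solutions[field][solution_type][subinterval][export]
    # for example, fwd_sol[field][0][-1] in run_adapt.py above selects the
    # last exported forward solution on the first subinterval, matching the
    # dwr[-1] indexing used for the error indicators.)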
# Solve forward problem in base space
-    mesh_seq = config.GoalOrientedMeshSeq(mesh)
+    mesh_seq = config.GoalOrientedMeshSeq(mesh, **kwargs)
     solutions = mesh_seq.solve_adjoint()
     fields = mesh_seq.fields
     qoi = mesh_seq.J
@@ -63,7 +63,7 @@ def split_into_components(f):
     return [f] if f.function_space().value_size == 1 else f.split()


-def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs):
+def indicate_errors(mesh, config, enrichment_method="h", retall=False, convergence_checker=None, **kwargs):
     """
     Indicate errors according to the ``GoalOrientedMeshSeq``
     given in the configuration file.
@@ -80,7 +80,7 @@ def indicate_errors(mesh, config, enrichment_method="h", retall=False, **kwargs)
     out = {}
     if not enrichment_method == "h":
         raise NotImplementedError  # TODO
-    mesh_seq = config.GoalOrientedMeshSeq(mesh)
+    mesh_seq = config.GoalOrientedMeshSeq(mesh, **kwargs)
     fields = mesh_seq.fields
     kw = {"enrichment_method": enrichment_method}
     solutions, indicators = mesh_seq.indicate_errors(enrichment_kwargs=kw)
@@ -96,4 +96,9 @@
     out["forward"] = {f: solutions[f]["forward"] for f in fields}
     out["adjoint"] = {f: solutions[f]["adjoint"] for f in fields}
     out["dwr"] = integrated
+    if convergence_checker is not None:
+        if convergence_checker.check_qoi(qoi):
+            return out
+    else:
+        print("No convergence checker")
     return out if retall else out["dwr"]
diff --git a/nn_adapt/utility.py b/nn_adapt/utility.py
index 229de16..3668333 100644
--- a/nn_adapt/utility.py
+++ b/nn_adapt/utility.py
@@ -37,6 +37,17 @@ def _chk(self, val, old, rtol, reason):
             self.converged_reason = reason
             converged = True
         return converged
+
+    def qoi_abs_chk(self, val, old, reason):
+        """
+        Check convergence of the QoI in the absolute sense.
+        """
+        converged = False
+        if old is not None and self.fp_iteration >= self.miniter:
+            if abs(val - old) < 1e-8:
+                self.converged_reason = reason + " (absolute error)"
+                converged = True
+        return converged

     def check_qoi(self, val):
         """
@@ -44,6 +55,7 @@ def check_qoi(self, val):
         """
         r = "QoI convergence"
         converged = self._chk(val, self.qoi_old, self.qoi_rtol, r)
+        # converged = self.qoi_abs_chk(val, self.qoi_old, r)
         self.qoi_old = val
         return converged
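
Editorial note on the final hunk: the commented-out call to ``qoi_abs_chk``
in ``check_qoi`` suggests an absolute-difference test was tried alongside the
relative one. A minimal sketch of how the two criteria could be merged into a
single helper -- hypothetical name and tolerances, not part of the patch
series above:

    def qoi_converged(val, old, rtol=1.0e-03, atol=1.0e-08):
        """Return True if the QoI change is small relatively OR absolutely."""
        if old is None:
            return False
        # Mirrors the numpy.isclose convention: the absolute tolerance takes
        # over when the old value is close to zero, where a purely relative
        # test would almost never trigger.
        return abs(val - old) < atol + rtol * abs(old)

This would let ConvergenceTracker report a single converged_reason instead of
maintaining two nearly identical check methods.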