diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 37a4ab39ac..48fd5518c6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -58,6 +58,9 @@ jobs: - name: Build Firedrake run: | cd .. + # DO NOT MERGE: just until we rebuild the Docker container + sudo apt-get update + sudo apt-get -y install ninja-build # Linting should ignore unquoted shell variable $COMPLEX # shellcheck disable=SC2086 ./firedrake/scripts/firedrake-install \ @@ -84,6 +87,9 @@ jobs: --install defcon \ --install gadopt \ --install asQ \ + --package-branch fiat connorjward/numpy2 \ + --package-branch pyop2 connorjward/numpy2 \ + --package-branch tsfc connorjward/numpy2 \ || (cat firedrake-install.log && /bin/false) - name: Install test dependencies run: | diff --git a/docker/Dockerfile.env b/docker/Dockerfile.env index 6f3d17251f..a839d45f3e 100644 --- a/docker/Dockerfile.env +++ b/docker/Dockerfile.env @@ -13,7 +13,7 @@ RUN apt-get update \ cmake gfortran git libopenblas-serial-dev \ libtool python3-dev python3-pip python3-tk python3-venv \ python3-requests zlib1g-dev libboost-dev sudo gmsh \ - bison flex \ + bison flex ninja-build \ liboce-ocaf-dev \ swig graphviz \ libcurl4-openssl-dev libxml2-dev \ diff --git a/docs/source/download.rst b/docs/source/download.rst index 7037f49cc2..d10fa49900 100644 --- a/docs/source/download.rst +++ b/docs/source/download.rst @@ -135,6 +135,7 @@ they have the system dependencies: * CMake * zlib * flex, bison +* Ninja Firedrake has been successfully installed on Windows 10 using the Windows Subsystem for Linux. 
There are more detailed instructions for diff --git a/firedrake/checkpointing.py b/firedrake/checkpointing.py index e207b84a14..c52e6c7bc4 100644 --- a/firedrake/checkpointing.py +++ b/firedrake/checkpointing.py @@ -1359,7 +1359,7 @@ def _load_function_topology(self, tmesh, element, tf_name, idx=None): if element.family() == "Real": assert not isinstance(element, (finat.ufl.VectorElement, finat.ufl.TensorElement)) value = self.get_attr(path, "_".join([PREFIX, "value" if idx is None else "value_" + str(idx)])) - tf.dat.data.itemset(value) + tf.dat.data[...] = value else: if path in self.h5pyfile: timestepping = self.has_attr(os.path.join(path, tf.name()), "timestepping") diff --git a/firedrake/cython/dmcommon.pyx b/firedrake/cython/dmcommon.pyx index d3c68b5cc2..e03439f5ce 100644 --- a/firedrake/cython/dmcommon.pyx +++ b/firedrake/cython/dmcommon.pyx @@ -2,6 +2,7 @@ # Utility functions common to all DMs used in Firedrake import functools +import math import cython import numpy as np import firedrake @@ -675,7 +676,7 @@ def closure_ordering(PETSc.DM dm, incident = 1 break if incident == 0: - face_indices[nfaces] += v * 10**(1-fi) + face_indices[nfaces] += v * 10**(1-fi) fi += 1 nfaces += 1 @@ -970,7 +971,7 @@ cdef inline PetscInt _compute_orientation_simplex(PetscInt *fiat_cone, coneSize1 -= 1 assert n == coneSize for k in range(n): - o += np.math.factorial(n - 1 - k) * inds[k] + o += math.factorial(n - 1 - k) * inds[k] CHKERR(PetscFree(cone1)) CHKERR(PetscFree(inds)) return o @@ -1019,10 +1020,10 @@ cdef inline PetscInt _compute_orientation_interval_tensor_product(PetscInt *fiat # io += (2**(dim - 1 - i)) * 0 pass elif plex_cone_copy[2 * j + 1] == fiat_cone[2 * i] and plex_cone_copy[2 * j] == fiat_cone[2 * i + 1]: - io += (2**(dim - 1 - i)) * 1 + io += (2**(dim - 1 - i)) * 1 else: raise RuntimeError("Found inconsistent fiat_cone and plex_cone") - eo += np.math.factorial(dim - 1 - i) * j + eo += math.factorial(dim - 1 - i) * j for k in range(j, dim1 - 1): 
plex_cone_copy[2 * k] = plex_cone_copy[2 * k + 2] plex_cone_copy[2 * k + 1] = plex_cone_copy[2 * k + 3] @@ -1031,7 +1032,7 @@ cdef inline PetscInt _compute_orientation_interval_tensor_product(PetscInt *fiat else: raise RuntimeError("Found inconsistent fiat_cone and plex_cone") assert dim1 == 0 - return (2**dim) * eo + io + return (2**dim) * eo + io cdef inline PetscInt _compute_orientation(PETSc.DM dm, @@ -2438,7 +2439,7 @@ cdef struct CommFacet: PetscInt global_u, global_v PetscInt local_facet -cdef int CommFacet_cmp(const void *x_, const void *y_) nogil: +cdef int CommFacet_cmp(const void *x_, const void *y_) noexcept nogil: """Three-way comparison C function for CommFacet structs.""" cdef: CommFacet *x = x_ @@ -3215,7 +3216,7 @@ cdef int DMPlexGetAdjacency_Facet_Support(PETSc.PetscDM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], - void *ctx) nogil: + void *ctx) noexcept nogil: """Custom adjacency callback for halo growth. :arg dm: The DMPlex object. diff --git a/firedrake/cython/mgimpl.pyx b/firedrake/cython/mgimpl.pyx index bd7f9d13f8..2867b5e79f 100644 --- a/firedrake/cython/mgimpl.pyx +++ b/firedrake/cython/mgimpl.pyx @@ -254,7 +254,7 @@ def coarse_to_fine_cells(mc, mf, clgmaps, flgmaps): cdm = mc.topology_dm fdm = mf.topology_dm dim = cdm.getDimension() - nref = 2 ** dim + nref = 2 ** dim ncoarse = mc.cell_set.size nfine = mf.cell_set.size co2n, _ = get_entity_renumbering(cdm, mc._cell_numbering, "cell") diff --git a/firedrake/cython/petschdr.pxi b/firedrake/cython/petschdr.pxi index b64a5620f5..f5e4bb2d53 100644 --- a/firedrake/cython/petschdr.pxi +++ b/firedrake/cython/petschdr.pxi @@ -175,7 +175,7 @@ cdef inline int SETERR(int ierr) with gil: PyErr_SetObject(PyExc_RuntimeError, ierr) return ierr -cdef inline int CHKERR(int ierr) nogil except -1: +cdef inline int CHKERR(int ierr) except -1 nogil: if ierr == 0: return 0 # no error else: diff --git a/firedrake/mesh.py b/firedrake/mesh.py index a09092fe62..2a24ebd006 100644 --- a/firedrake/mesh.py +++ 
b/firedrake/mesh.py @@ -21,7 +21,7 @@ from pyop2.mpi import ( MPI, COMM_WORLD, internal_comm, is_pyop2_comm, temp_internal_comm ) -from pyop2.utils import as_tuple, tuplify +from pyop2.utils import as_tuple import firedrake.cython.dmcommon as dmcommon import firedrake.cython.extrusion_numbering as extnum @@ -1444,7 +1444,14 @@ def make_dofs_per_plex_entity(self, entity_dofs): dofs_per_entity = np.zeros((1 + self._base_mesh.cell_dimension(), 2), dtype=IntType) for (b, v), entities in entity_dofs.items(): dofs_per_entity[b, v] += len(entities[0]) - return tuplify(dofs_per_entity) + + # Convert to a tuple of tuples with int (not numpy.intXX) values. This is + # to give us a string representation like ((0, 1), (2, 3)) instead of + # ((numpy.int32(0), numpy.int32(1)), (numpy.int32(2), numpy.int32(3))). + return tuple( + tuple(int(d_) for d_ in d) + for d in dofs_per_entity + ) @PETSc.Log.EventDecorator() def node_classes(self, nodes_per_entity, real_tensorproduct=False): diff --git a/firedrake/preconditioners/patch.py b/firedrake/preconditioners/patch.py index e73c41b129..ff7bf4e01a 100644 --- a/firedrake/preconditioners/patch.py +++ b/firedrake/preconditioners/patch.py @@ -816,10 +816,13 @@ def initialize(self, obj): is_snes = False if len(bcs) > 0: - ghost_bc_nodes = numpy.unique(numpy.concatenate([bcdofs(bc, ghost=True) - for bc in bcs])) - global_bc_nodes = numpy.unique(numpy.concatenate([bcdofs(bc, ghost=False) - for bc in bcs])) + ghost_bc_nodes = numpy.unique( + numpy.concatenate([bcdofs(bc, ghost=True) for bc in bcs], + dtype=PETSc.IntType) + ) + global_bc_nodes = numpy.unique( + numpy.concatenate([bcdofs(bc, ghost=False) for bc in bcs], + dtype=PETSc.IntType)) else: ghost_bc_nodes = numpy.empty(0, dtype=PETSc.IntType) global_bc_nodes = numpy.empty(0, dtype=PETSc.IntType) diff --git a/scripts/firedrake-install b/scripts/firedrake-install index d17b3df2ff..f6083d7914 100755 --- a/scripts/firedrake-install +++ b/scripts/firedrake-install @@ -686,7 +686,6 @@ def 
check_output(args): raise -pyinstall = [python, "setup.py", "install"] if "PYTHONPATH" in os.environ and not args.honour_pythonpath: quit("""The PYTHONPATH environment variable is set. This is probably an error. If you really want to use your own Python packages, please run again with the @@ -993,14 +992,10 @@ def run_pip_install(pipargs): # subprocesses wrote out. # Particularly important for debugging petsc fails. with environment(**blas): - pipargs = ["-vvv"] + pipargs + pipargs = ["-v"] + pipargs check_call(pipinstall + pipargs) -def run_cmd(args): - check_call(args) - - def get_requirements(reqfname): with open(reqfname, "r") as f: reqs = f.readlines() @@ -1013,14 +1008,6 @@ def run_pip_install_wrap(reqs, parallel_compiler_env): if package_name in parallel_packages: with environment(**parallel_compiler_env): run_pip_install([req]) - elif package_name == "numpy": - # Downgrade setuptools and wheel for numpy - run_pip(["install", "-U", "setuptools==59.2.0"]) - run_pip(["install", "-U", "wheel==0.37.0"]) - run_pip_install(["numpy==1.24"]) - # Upgrade setuptools and wheel for everything else - run_pip(["install", "-U", "setuptools"]) - run_pip(["install", "-U", "wheel"]) else: run_pip_install([req]) @@ -1347,8 +1334,9 @@ def build_and_install_torch(): raise InstallError("CUDA installation is not available on MacOS.") if sys.version_info >= (3, 10): run_pip_install(["typing_extensions"]) - extra_index_url = ["--extra-index-url", "https://download.pytorch.org/whl/cpu"] if args.torch == "cpu" else [] - run_pip_install(["torch"] + extra_index_url) + # FIXME: use nightly to see if that makes numpy 2.0 happy + extra_index_url = ["--extra-index-url", "https://download.pytorch.org/whl/nightly/cpu"] if args.torch == "cpu" else [] + run_pip_install(["--pre", "torch"] + extra_index_url) def build_and_install_slepc(): @@ -1567,6 +1555,7 @@ if mode == "install" or not args.update_script: "make", "automake", "cmake", + "ninja", "libtool", "boost"] if args.with_blas is None and 
mode == "install": @@ -1585,6 +1574,7 @@ if mode == "install" or not args.update_script: "pkgconf", # for p4est "libtool", "libxml2-dev", + "ninja-build", # for meson/numpy "python3-dev", "python3-pip", "python3-tk", @@ -1724,14 +1714,19 @@ if mode == "install": os.environ["VIRTUAL_ENV"] = firedrake_env # Ensure pip, setuptools, hatchling and wheel are at the latest version. -# numpy requires setuptools==59.2.0 and wheel==0.37.0 until it moves to meson build -run_pip(["install", "-U", "setuptools==59.2.0"]) +run_pip(["install", "-U", "setuptools"]) run_pip(["install", "-U", "hatch"]) run_pip(["install", "-U", "pip"]) -run_pip(["install", "-U", "wheel==0.37.0"]) +run_pip(["install", "-U", "wheel"]) + +# Extra numpy dependencies, see +# https://github.com/numpy/numpy/blob/main/pyproject.toml +run_pip(["install", "-U", "meson-python>=0.15.0"]) +run_pip(["install", "-U", "Cython>=3.0.6"]) -# Pin Cython because it's causing multiple packages to fail to build -run_pip(["install", "-U", "Cython==0.29.36"]) +# 11/04/24 Install a 2.0 release candidate because 2.0 has not yet been +# officially released but we cannot build petsc4py without it. +run_pip(["install", "-U", "numpy==2.0.0rc1"]) # Loopy has additional build dependencies run_pip(["install", "-U", "scikit-build"]) @@ -1819,24 +1814,12 @@ if mode == "install": # recovery can be attempted if required. build_update_script() - # Force Cython to install first to work around pip dependency issues. 
- run_pip_install(["Cython>=0.22"]) - # Pre-install requested packages if args.pip_packages is not None: for package in args.pip_packages: log.info("Pip installing %s to venv" % package) run_pip_install_wrap(package.split(), {}) - # numpy requires old setuptools (+wheel) - log.info("Installing numpy using setuptools==59.2.0 and wheel==0.37.0 and Cython==0.29.36") - log.info("Installing numpy==1.24 due to performance regression") - # https://github.com/inducer/pytential/issues/211 - run_pip_install(["numpy==1.24"]) - log.info("Updating setuptools and wheel to latest versions") - run_pip(["install", "-U", "setuptools"]) - run_pip(["install", "-U", "wheel"]) - # Install VTK if not args.no_vtk: log.info("Pip installing VTK to venv") diff --git a/tests/output/test_io_function.py b/tests/output/test_io_function.py index d5dcd0f7f8..fc48a220b4 100644 --- a/tests/output/test_io_function.py +++ b/tests/output/test_io_function.py @@ -218,7 +218,7 @@ def test_io_function_real(cell_type, tmpdir): VA = FunctionSpace(meshA, "Real", 0) fA = Function(VA, name=func_name) valueA = 3.14 - fA.dat.data.itemset(valueA) + fA.dat.data[...] = valueA with CheckpointFile(filename, 'w', comm=COMM_WORLD) as afile: afile.save_function(fA) # Load -> View cycle @@ -281,7 +281,7 @@ def test_io_function_mixed_real(cell_family_degree_tuples, tmpdir): fA = Function(VA, name=func_name) fA0, fA1 = fA.subfunctions _initialise_function(fA0, _get_expr(VA[0]), method) - fA1.dat.data.itemset(3.14) + fA1.dat.data[...] = 3.14 with CheckpointFile(filename, 'w', comm=COMM_WORLD) as afile: afile.save_function(fA) # Load -> View cycle @@ -297,7 +297,7 @@ def test_io_function_mixed_real(cell_family_degree_tuples, tmpdir): fBe = Function(VB) fBe0, fBe1 = fBe.subfunctions _initialise_function(fBe0, _get_expr(VB[0]), method) - fBe1.dat.data.itemset(3.14) + fBe1.dat.data[...] 
= 3.14 assert assemble(inner(fB - fBe, fB - fBe) * dx) < 1.e-16 with CheckpointFile(filename, 'w', comm=comm) as afile: afile.save_function(fB) diff --git a/tests/output/test_io_timestepping.py b/tests/output/test_io_timestepping.py index f00d008285..c12c30d74a 100644 --- a/tests/output/test_io_timestepping.py +++ b/tests/output/test_io_timestepping.py @@ -29,7 +29,7 @@ def _get_expr(V, i): def _project(f, expr, method): if f.function_space().ufl_element().family() == "Real": - f.dat.data.itemset(expr) + f.dat.data[...] = expr elif method == "project": getattr(f, method)(expr, solver_parameters={"ksp_rtol": 1.e-16}) elif method == "interpolate":