6 changes: 6 additions & 0 deletions .github/workflows/build.yml
@@ -58,6 +58,9 @@ jobs:
- name: Build Firedrake
run: |
cd ..
# DO NOT MERGE: just until we rebuild the Docker container
sudo apt-get update
sudo apt-get -y install ninja-build
# Linting should ignore unquoted shell variable $COMPLEX
# shellcheck disable=SC2086
./firedrake/scripts/firedrake-install \
@@ -84,6 +87,9 @@ jobs:
--install defcon \
--install gadopt \
--install asQ \
--package-branch fiat connorjward/numpy2 \
--package-branch pyop2 connorjward/numpy2 \
--package-branch tsfc connorjward/numpy2 \
|| (cat firedrake-install.log && /bin/false)
- name: Install test dependencies
run: |
2 changes: 1 addition & 1 deletion docker/Dockerfile.env
@@ -13,7 +13,7 @@ RUN apt-get update \
cmake gfortran git libopenblas-serial-dev \
libtool python3-dev python3-pip python3-tk python3-venv \
python3-requests zlib1g-dev libboost-dev sudo gmsh \
bison flex \
bison flex ninja-build \
liboce-ocaf-dev \
swig graphviz \
libcurl4-openssl-dev libxml2-dev \
1 change: 1 addition & 0 deletions docs/source/download.rst
@@ -135,6 +135,7 @@ they have the system dependencies:
* CMake
* zlib
* flex, bison
* Ninja

Firedrake has been successfully installed on Windows 10 using the
Windows Subsystem for Linux. There are more detailed instructions for
2 changes: 1 addition & 1 deletion firedrake/checkpointing.py
@@ -1359,7 +1359,7 @@ def _load_function_topology(self, tmesh, element, tf_name, idx=None):
if element.family() == "Real":
assert not isinstance(element, (finat.ufl.VectorElement, finat.ufl.TensorElement))
value = self.get_attr(path, "_".join([PREFIX, "value" if idx is None else "value_" + str(idx)]))
tf.dat.data.itemset(value)
tf.dat.data[...] = value
else:
if path in self.h5pyfile:
timestepping = self.has_attr(os.path.join(path, tf.name()), "timestepping")
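For context (the same change recurs in the test files below): NumPy 2.0 removes ndarray.itemset, so the scalar value of a "Real"-space function is now written with ellipsis assignment. A minimal standalone sketch, with a plain array standing in for tf.dat.data:

    import numpy as np

    value = 3.14
    data = np.zeros(1)      # stand-in for tf.dat.data on a "Real" function space

    # data.itemset(value)   # AttributeError on NumPy 2.0: ndarray.itemset was removed
    data[...] = value       # equivalent in-place scalar assignment on NumPy 1.x and 2.x

    assert data[0] == value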
15 changes: 8 additions & 7 deletions firedrake/cython/dmcommon.pyx
@@ -2,6 +2,7 @@

# Utility functions common to all DMs used in Firedrake
import functools
import math
import cython
import numpy as np
import firedrake
@@ -675,7 +676,7 @@ def closure_ordering(PETSc.DM dm,
incident = 1
break
if incident == 0:
face_indices[nfaces] += v * 10**(1-fi)
face_indices[nfaces] += v * <PetscInt> 10**(1-fi)
fi += 1
nfaces += 1

@@ -970,7 +971,7 @@ cdef inline PetscInt _compute_orientation_simplex(PetscInt *fiat_cone,
coneSize1 -= 1
assert n == coneSize
for k in range(n):
o += np.math.factorial(n - 1 - k) * inds[k]
o += math.factorial(n - 1 - k) * inds[k]
CHKERR(PetscFree(cone1))
CHKERR(PetscFree(inds))
return o
@@ -1019,10 +1020,10 @@ cdef inline PetscInt _compute_orientation_interval_tensor_product(PetscInt *fiat
# io += (2**(dim - 1 - i)) * 0
pass
elif plex_cone_copy[2 * j + 1] == fiat_cone[2 * i] and plex_cone_copy[2 * j] == fiat_cone[2 * i + 1]:
io += (2**(dim - 1 - i)) * 1
io += <PetscInt> (2**(dim - 1 - i)) * 1
else:
raise RuntimeError("Found inconsistent fiat_cone and plex_cone")
eo += np.math.factorial(dim - 1 - i) * j
eo += math.factorial(dim - 1 - i) * j
for k in range(j, dim1 - 1):
plex_cone_copy[2 * k] = plex_cone_copy[2 * k + 2]
plex_cone_copy[2 * k + 1] = plex_cone_copy[2 * k + 3]
@@ -1031,7 +1032,7 @@
else:
raise RuntimeError("Found inconsistent fiat_cone and plex_cone")
assert dim1 == 0
return (2**dim) * eo + io
return <PetscInt> (2**dim) * eo + io


cdef inline PetscInt _compute_orientation(PETSc.DM dm,
@@ -2438,7 +2439,7 @@ cdef struct CommFacet:
PetscInt global_u, global_v
PetscInt local_facet

cdef int CommFacet_cmp(const void *x_, const void *y_) nogil:
cdef int CommFacet_cmp(const void *x_, const void *y_) noexcept nogil:
"""Three-way comparison C function for CommFacet structs."""
cdef:
CommFacet *x = <CommFacet *>x_
@@ -3215,7 +3216,7 @@ cdef int DMPlexGetAdjacency_Facet_Support(PETSc.PetscDM dm,
PetscInt p,
PetscInt *adjSize,
PetscInt adj[],
void *ctx) nogil:
void *ctx) noexcept nogil:
"""Custom adjacency callback for halo growth.

:arg dm: The DMPlex object.
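The factorial change in this file follows from NumPy 2.0 dropping the accidental np.math re-export of the standard-library math module; a minimal sketch of the drop-in replacement (the <PetscInt> casts and noexcept annotations are separate changes, not illustrated here):

    import math

    # np.math.factorial(n) relied on NumPy re-exporting the standard-library math
    # module; NumPy 2.0 drops that alias, so the factorial comes from math directly.
    n = 4
    assert math.factorial(n) == 24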
2 changes: 1 addition & 1 deletion firedrake/cython/mgimpl.pyx
@@ -254,7 +254,7 @@ def coarse_to_fine_cells(mc, mf, clgmaps, flgmaps):
cdm = mc.topology_dm
fdm = mf.topology_dm
dim = cdm.getDimension()
nref = 2 ** dim
nref = <PetscInt> 2 ** dim
ncoarse = mc.cell_set.size
nfine = mf.cell_set.size
co2n, _ = get_entity_renumbering(cdm, mc._cell_numbering, "cell")
2 changes: 1 addition & 1 deletion firedrake/cython/petschdr.pxi
@@ -175,7 +175,7 @@ cdef inline int SETERR(int ierr) with gil:
PyErr_SetObject(<object>PyExc_RuntimeError, <long>ierr)
return ierr

cdef inline int CHKERR(int ierr) nogil except -1:
cdef inline int CHKERR(int ierr) except -1 nogil:
if ierr == 0:
return 0 # no error
else:
11 changes: 9 additions & 2 deletions firedrake/mesh.py
@@ -21,7 +21,7 @@
from pyop2.mpi import (
MPI, COMM_WORLD, internal_comm, is_pyop2_comm, temp_internal_comm
)
from pyop2.utils import as_tuple, tuplify
from pyop2.utils import as_tuple

import firedrake.cython.dmcommon as dmcommon
import firedrake.cython.extrusion_numbering as extnum
@@ -1444,7 +1444,14 @@ def make_dofs_per_plex_entity(self, entity_dofs):
dofs_per_entity = np.zeros((1 + self._base_mesh.cell_dimension(), 2), dtype=IntType)
for (b, v), entities in entity_dofs.items():
dofs_per_entity[b, v] += len(entities[0])
return tuplify(dofs_per_entity)

# Convert to a tuple of tuples with int (not numpy.intXX) values. This is
# to give us a string representation like ((0, 1), (2, 3)) instead of
# ((numpy.int32(0), numpy.int32(1)), (numpy.int32(2), numpy.int32(3))).
return tuple(
tuple(int(d_) for d_ in d)
for d in dofs_per_entity
)

@PETSc.Log.EventDecorator()
def node_classes(self, nodes_per_entity, real_tensorproduct=False):
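The representation issue described in the comment above comes from NumPy 2.0's new scalar reprs (NEP 51); a minimal sketch of what the int() conversion changes:

    import numpy as np

    dofs_per_entity = np.array([[0, 1], [2, 3]], dtype=np.int32)

    # Tuples built straight from the array hold numpy scalars, which NumPy 2.0
    # reprs as np.int32(...) rather than plain integers.
    raw = tuple(tuple(d) for d in dofs_per_entity)
    print(repr(raw))    # ((np.int32(0), np.int32(1)), (np.int32(2), np.int32(3)))

    # Converting each entry to a builtin int restores the plain representation.
    clean = tuple(tuple(int(d_) for d_ in d) for d in dofs_per_entity)
    print(repr(clean))  # ((0, 1), (2, 3))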
11 changes: 7 additions & 4 deletions firedrake/preconditioners/patch.py
@@ -816,10 +816,13 @@ def initialize(self, obj):
is_snes = False

if len(bcs) > 0:
ghost_bc_nodes = numpy.unique(numpy.concatenate([bcdofs(bc, ghost=True)
for bc in bcs]))
global_bc_nodes = numpy.unique(numpy.concatenate([bcdofs(bc, ghost=False)
for bc in bcs]))
ghost_bc_nodes = numpy.unique(
numpy.concatenate([bcdofs(bc, ghost=True) for bc in bcs],
dtype=PETSc.IntType)
)
global_bc_nodes = numpy.unique(
numpy.concatenate([bcdofs(bc, ghost=False) for bc in bcs],
dtype=PETSc.IntType))
else:
ghost_bc_nodes = numpy.empty(0, dtype=PETSc.IntType)
global_bc_nodes = numpy.empty(0, dtype=PETSc.IntType)
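A plausible reason for pinning the dtype in the concatenate calls above (an assumption, not stated in the diff): if any of the concatenated dof arrays is an empty default-constructed array it is float64, and the result would otherwise be promoted to float. A minimal sketch, with np.int32 standing in for PETSc.IntType:

    import numpy as np

    int_dofs = np.array([3, 7, 11], dtype=np.int32)   # np.int32 stands in for PETSc.IntType
    empty = np.array([])                               # default-constructed arrays are float64

    promoted = np.concatenate([int_dofs, empty])
    print(promoted.dtype)                              # float64: the empty array promotes the result

    forced = np.concatenate([int_dofs, empty], dtype=np.int32)
    print(forced.dtype)                                # int32: dtype= pins the result type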
49 changes: 16 additions & 33 deletions scripts/firedrake-install
@@ -686,7 +686,6 @@ def check_output(args):
raise


pyinstall = [python, "setup.py", "install"]
if "PYTHONPATH" in os.environ and not args.honour_pythonpath:
quit("""The PYTHONPATH environment variable is set. This is probably an error.
If you really want to use your own Python packages, please run again with the
@@ -993,14 +992,10 @@ def run_pip_install(pipargs):
# subprocesses wrote out.
# Particularly important for debugging petsc fails.
with environment(**blas):
pipargs = ["-vvv"] + pipargs
pipargs = ["-v"] + pipargs
Member commented:

😂

check_call(pipinstall + pipargs)


def run_cmd(args):
check_call(args)


def get_requirements(reqfname):
with open(reqfname, "r") as f:
reqs = f.readlines()
@@ -1013,14 +1008,6 @@ def run_pip_install_wrap(reqs, parallel_compiler_env):
if package_name in parallel_packages:
with environment(**parallel_compiler_env):
run_pip_install([req])
elif package_name == "numpy":
# Downgrade setuptools and wheel for numpy
run_pip(["install", "-U", "setuptools==59.2.0"])
run_pip(["install", "-U", "wheel==0.37.0"])
run_pip_install(["numpy==1.24"])
# Upgrade setuptools and wheel for everything else
run_pip(["install", "-U", "setuptools"])
run_pip(["install", "-U", "wheel"])
else:
run_pip_install([req])

@@ -1347,8 +1334,9 @@ def build_and_install_torch():
raise InstallError("CUDA installation is not available on MacOS.")
if sys.version_info >= (3, 10):
run_pip_install(["typing_extensions"])
extra_index_url = ["--extra-index-url", "https://download.pytorch.org/whl/cpu"] if args.torch == "cpu" else []
run_pip_install(["torch"] + extra_index_url)
# FIXME: use nightly to see if that makes numpy 2.0 happy
Contributor Author commented:
Should really be

pip install torch==2.3.0 --index-url https://download.pytorch.org/whl/test/cpu

as detailed in https://dev-discuss.pytorch.org/t/pytorch-release-2-3-0-final-rc-is-available/1995

2.3.0 is supposed to be released on 24/04.

extra_index_url = ["--extra-index-url", "https://download.pytorch.org/whl/nightly/cpu"] if args.torch == "cpu" else []
run_pip_install(["--pre", "torch"] + extra_index_url)


def build_and_install_slepc():
@@ -1567,6 +1555,7 @@ if mode == "install" or not args.update_script:
"make",
"automake",
"cmake",
"ninja",
"libtool",
"boost"]
if args.with_blas is None and mode == "install":
@@ -1585,6 +1574,7 @@
"pkgconf", # for p4est
"libtool",
"libxml2-dev",
"ninja-build", # for meson/numpy
"python3-dev",
"python3-pip",
"python3-tk",
@@ -1724,14 +1714,19 @@ if mode == "install":
os.environ["VIRTUAL_ENV"] = firedrake_env

# Ensure pip, setuptools, hatchling and wheel are at the latest version.
# numpy requires setuptools==59.2.0 and wheel==0.37.0 until it moves to meson build
run_pip(["install", "-U", "setuptools==59.2.0"])
run_pip(["install", "-U", "setuptools"])
run_pip(["install", "-U", "hatch"])
run_pip(["install", "-U", "pip"])
run_pip(["install", "-U", "wheel==0.37.0"])
run_pip(["install", "-U", "wheel"])

# Extra numpy dependencies, see
# https://github.com/numpy/numpy/blob/main/pyproject.toml
run_pip(["install", "-U", "meson-python>=0.15.0"])
run_pip(["install", "-U", "Cython>=3.0.6"])

# Pin Cython because it's causing multiple packages to fail to build
run_pip(["install", "-U", "Cython==0.29.36"])
# 11/04/24 Install a 2.0 release candidate because 2.0 has not yet been
# officially released but we cannot build petsc4py without it.
run_pip(["install", "-U", "numpy==2.0.0rc1"])

# Loopy has additional build dependencies
run_pip(["install", "-U", "scikit-build"])
@@ -1819,24 +1814,12 @@ if mode == "install":
# recovery can be attempted if required.
build_update_script()

# Force Cython to install first to work around pip dependency issues.
run_pip_install(["Cython>=0.22"])

# Pre-install requested packages
if args.pip_packages is not None:
for package in args.pip_packages:
log.info("Pip installing %s to venv" % package)
run_pip_install_wrap(package.split(), {})

# numpy requires old setuptools (+wheel)
log.info("Installing numpy using setuptools==59.2.0 and wheel==0.37.0 and Cython==0.29.36")
log.info("Installing numpy==1.24 due to performance regression")
# https://github.com/inducer/pytential/issues/211
run_pip_install(["numpy==1.24"])
log.info("Updating setuptools and wheel to latest versions")
run_pip(["install", "-U", "setuptools"])
run_pip(["install", "-U", "wheel"])

# Install VTK
if not args.no_vtk:
log.info("Pip installing VTK to venv")
6 changes: 3 additions & 3 deletions tests/output/test_io_function.py
@@ -218,7 +218,7 @@ def test_io_function_real(cell_type, tmpdir):
VA = FunctionSpace(meshA, "Real", 0)
fA = Function(VA, name=func_name)
valueA = 3.14
fA.dat.data.itemset(valueA)
fA.dat.data[...] = valueA
with CheckpointFile(filename, 'w', comm=COMM_WORLD) as afile:
afile.save_function(fA)
# Load -> View cycle
@@ -281,7 +281,7 @@ def test_io_function_mixed_real(cell_family_degree_tuples, tmpdir):
fA = Function(VA, name=func_name)
fA0, fA1 = fA.subfunctions
_initialise_function(fA0, _get_expr(VA[0]), method)
fA1.dat.data.itemset(3.14)
fA1.dat.data[...] = 3.14
with CheckpointFile(filename, 'w', comm=COMM_WORLD) as afile:
afile.save_function(fA)
# Load -> View cycle
@@ -297,7 +297,7 @@ def test_io_function_mixed_real(cell_family_degree_tuples, tmpdir):
fBe = Function(VB)
fBe0, fBe1 = fBe.subfunctions
_initialise_function(fBe0, _get_expr(VB[0]), method)
fBe1.dat.data.itemset(3.14)
fBe1.dat.data[...] = 3.14
assert assemble(inner(fB - fBe, fB - fBe) * dx) < 1.e-16
with CheckpointFile(filename, 'w', comm=comm) as afile:
afile.save_function(fB)
2 changes: 1 addition & 1 deletion tests/output/test_io_timestepping.py
@@ -29,7 +29,7 @@ def _get_expr(V, i):

def _project(f, expr, method):
if f.function_space().ufl_element().family() == "Real":
f.dat.data.itemset(expr)
f.dat.data[...] = expr
elif method == "project":
getattr(f, method)(expr, solver_parameters={"ksp_rtol": 1.e-16})
elif method == "interpolate":