8 changes: 8 additions & 0 deletions .dockerignore
@@ -0,0 +1,8 @@
/.*
build
doc
tutorial
extra
archive
*.md
Dockerfile
54 changes: 30 additions & 24 deletions .github/workflows/test.yml
@@ -24,56 +24,62 @@ jobs:
max-parallel: 10
fail-fast: false
matrix:
python-version: # There is a bug in conda 24.1.2 for Python < 3.12 so we pin the version to 23.11.0
- python: "3.8"
conda: "23.11.0"
- python: "3.9"
conda: "23.11.0"
- python: "3.10"
conda: "23.11.0"
- python: "3.11"
conda: "23.11.0"
- python: "3.12"
conda: "latest"
name: Testing with Python ${{ matrix.python-version.python }}
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
conda-env:
- "core"
- "full"
name: Python ${{ matrix.python-version }} and ${{ matrix.conda-env }} dependencies
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version.python }}
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version.python }}
python-version: ${{ matrix.python-version }}
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
conda --version
- name: Change conda version if necessary
if: ${{ matrix.python-version.conda != 'latest' }}
conda info
- name: Make sure conda is updated
run: |
conda install conda=${{ matrix.python-version.conda }} python=${{ matrix.python-version.python }}
conda update conda
conda --version
- name: Install dependencies
- name: Install ${{ matrix.conda-env }} dependencies
run: |
# replace python version in core dependencies
sed -i 's/python/python=${{ matrix.python-version.python }}/' dependencies_core.yml
conda env update --file dependencies_core.yml --name base
# replace python version in dependencies
sed -i 's/python/python=${{ matrix.python-version }}/' dependencies_${{ matrix.conda-env }}.yml
if [ ${{ matrix.conda-env }} == 'full' ] && [ ${{ matrix.python-version }} == '3.12' ]; then
sed -i '/- pyfftw/d' dependencies_${{ matrix.conda-env }}.yml
fi
# if [ ${{ matrix.conda-env }} == 'full' ] && [ ${{ matrix.python-version }} == '3.8' ]; then
# sed -i '/- mpi4py/d' dependencies_${{ matrix.conda-env }}.yml
# fi
# if [ ${{ matrix.conda-env }} == 'full' ] && [ ${{ matrix.python-version }} == '3.9' ]; then
# sed -i '/- mpi4py/d' dependencies_${{ matrix.conda-env }}.yml
# fi
conda install --solver=classic mpich
conda env update --file dependencies_${{ matrix.conda-env }}.yml --name base
conda install --solver=classic flake8 pytest pytest-cov
conda list
- name: Prepare ptypy
run: |
# Install ptypy
pip install .
- name: Lint with flake8
run: |
conda install flake8
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
# flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
conda install pytest
conda install pytest-cov
# pytest ptypy/test -v --doctest-modules --junitxml=junit/test-results.xml --cov=ptypy --cov-report=xml --cov-report=html --cov-config=.coveragerc
pytest -v
# - name: cobertura-report
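For reference, a minimal sketch of what the sed call in the install step above does to a dependency file (assuming the only line containing "python" in that file is the interpreter entry; 3.11 is just an example version):

# before the edit, the env file lists an unpinned interpreter:  - python
sed -i 's/python/python=3.11/' dependencies_full.yml
# after the edit, the same line reads:  - python=3.11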
74 changes: 74 additions & 0 deletions Dockerfile
@@ -0,0 +1,74 @@
# Select MPI environment: openmpi or mpich
ARG MPI=openmpi

# Select Platform: core, full, pycuda or cupy
ARG PLATFORM=cupy

# Select CUDA version
ARG CUDAVERSION=12.4

# Pull from mambaforge and install XML and ssh
FROM condaforge/mambaforge as base
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y libxml2 ssh

# Pull from base image and install OpenMPI/MPICH
FROM base as mpi
ARG MPI
RUN mamba install -n base -c conda-forge ${MPI}

# Pull from base image and install core dependencies
FROM base as core
COPY ./dependencies_core.yml ./dependencies.yml
RUN mamba env update -n base -f dependencies.yml

# Pull from MPI build and install full dependencies
FROM mpi as full
COPY ./dependencies_full.yml ./dependencies.yml
RUN mamba env update -n base -f dependencies.yml

# Pull from MPI build and install accelerate/pycuda dependencies
FROM mpi as pycuda
ARG CUDAVERSION
COPY ./ptypy/accelerate/cuda_pycuda/dependencies.yml ./dependencies.yml
COPY ./cufft/dependencies.yml ./dependencies_cufft.yml
RUN mamba install cuda-version=${CUDAVERSION} && \
mamba env update -n base -f dependencies.yml && \
mamba env update -n base -f dependencies_cufft.yml

# Pull from MPI build and install accelerate/cupy dependencies
FROM mpi as cupy
ARG CUDAVERSION
COPY ./ptypy/accelerate/cuda_cupy/dependencies.yml ./dependencies.yml
COPY ./cufft/dependencies.yml ./dependencies_cufft.yml
RUN mamba install cuda-version=${CUDAVERSION} && \
mamba env update -n base -f dependencies.yml && \
mamba env update -n base -f dependencies_cufft.yml

# Pull from platform specific image and install ptypy
FROM ${PLATFORM} as build
COPY pyproject.toml setup.py ./
COPY ./scripts ./scripts
COPY ./templates ./templates
COPY ./benchmark ./benchmark
COPY ./cufft ./cufft
COPY ./ptypy ./ptypy
RUN pip install .

# For core/full build, no post processing needed
FROM build as core-post
FROM build as full-post

# For pycuda build, install filtered cufft
FROM build as pycuda-post
RUN pip install ./cufft

# For cupy build, install filtered cufft
FROM build as cupy-post
RUN pip install ./cufft

# Platform specific runtime container
FROM ${PLATFORM}-post as runtime

# Run PtyPy run script as entrypoint
ENTRYPOINT ["ptypy.run"]
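A hedged usage sketch of the build arguments above; the image tag, bind mount and recipe path are illustrative, not part of this PR, and it is assumed that the ptypy.run entrypoint accepts a reconstruction script as its argument:

# build the GPU (cupy) variant; PLATFORM can also be core, full or pycuda
docker build --build-arg PLATFORM=cupy --build-arg CUDAVERSION=12.4 -t ptypy:cupy .
# run a reconstruction through the ptypy.run entrypoint, exposing the GPUs
docker run --gpus all -v "$(pwd)":/data ptypy:cupy /data/my_recipe.py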
94 changes: 94 additions & 0 deletions benchmark/ptypy_moonflower_script_cupy.py
@@ -0,0 +1,94 @@
"""
This script benchmarks ptychographic reconstruction in the absence
of actual data. It uses the test Scan class
`ptypy.core.data.MoonFlowerScan` to provide "data".
"""
import ptypy
from ptypy.core import Ptycho
from ptypy import utils as u

import tempfile
import argparse

ptypy.load_gpu_engines("cupy")
tmpdir = tempfile.gettempdir()

def run_benchmark():
opt = parse()
p = get_params(opt)
P = Ptycho(p,level=5)
print_results(P)


def parse():
parser = argparse.ArgumentParser(description="A script to benchmark ptypy using the moonflower simulation")
parser.add_argument("-n", "--frames", type=int, help="Nr. of data frames")
parser.add_argument("-s", "--shape", type=int, help="1D shape of each data frame")
parser.add_argument("-i", "--iterations", type=int, help="Nr. of iterations")
parser.add_argument("-f", "--fftlib", type=str, default="cupy")
args = parser.parse_args()
return args

def get_params(args):

p = u.Param()

# for verbose output
p.verbose_level = "info"

# set home path
p.io = u.Param()
p.io.home = "/".join([tmpdir, "ptypy"])

# saving intermediate results
p.io.autosave = u.Param(active=False)

# opens plotting GUI if interaction is set to active
p.io.autoplot = u.Param(active=False)
p.io.interaction = u.Param(active=False)

# Save benchmark timings
p.io.benchmark = "all"

# simulated diffraction data; frame count and frame shape come from the CLI arguments
p.scans = u.Param()
p.scans.MF = u.Param()
# now you have to specify which ScanModel to use with scans.XX.name,
# just as you have to give 'name' for engines and PtyScan subclasses.
p.scans.MF.name = 'BlockVanilla' # or 'BlockFull'
p.scans.MF.data = u.Param()
p.scans.MF.data.name = 'MoonFlowerScan'
p.scans.MF.data.shape = args.shape
p.scans.MF.data.num_frames = args.frames
p.scans.MF.data.save = None

# position distance in fraction of illumination frame
p.scans.MF.data.density = 0.2
# total number of photons in the empty beam
p.scans.MF.data.photons = 1e8
# Gaussian FWHM of possible detector blurring
p.scans.MF.data.psf = 0.

# attach a reconstruction engine
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM_cupy'
p.engines.engine00.numiter = args.iterations
p.engines.engine00.fft_lib = args.fftlib

return p

def print_results(ptycho):
# Print benchmarking results
if (ptycho.p.io.benchmark == "all") and u.parallel.master:
print("\nBenchmark:")
print("==========")
total = 0
for k,v in ptycho.benchmark.items():
total += v
print(f"{k}: {v:.02f} s")
print(f"Total: {total:.02f} s")

# prepare and run
if __name__ == "__main__":
run_benchmark()
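For illustration, the GPU benchmark could be invoked like this (frame count, frame shape and iteration count are arbitrary example values; --fftlib defaults to "cupy"):

# 1024 frames of 256x256 pixels, 100 DM_cupy iterations
python benchmark/ptypy_moonflower_script_cupy.py -n 1024 -s 256 -i 100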
90 changes: 90 additions & 0 deletions benchmark/ptypy_moonflower_script_numpy.py
@@ -0,0 +1,90 @@
"""
This script benchmarks ptychographic reconstruction in the absence
of actual data. It uses the test Scan class
`ptypy.core.data.MoonFlowerScan` to provide "data".
"""
from ptypy.core import Ptycho
from ptypy import utils as u

import tempfile
import argparse

tmpdir = tempfile.gettempdir()

def run_benchmark():
opt = parse()
p = get_params(opt)
P = Ptycho(p,level=5)
print_results(P)


def parse():
parser = argparse.ArgumentParser(description="A script to benchmark ptypy using the moonflower simulation")
parser.add_argument("-n", "--frames", type=int, help="Nr. of data frames")
parser.add_argument("-s", "--shape", type=int, help="1D shape of each data frame")
parser.add_argument("-i", "--iterations", type=int, help="Nr. of iterations")
args = parser.parse_args()
return args

def get_params(args):

p = u.Param()

# for verbose output
p.verbose_level = "info"

# set home path
p.io = u.Param()
p.io.home = "/".join([tmpdir, "ptypy"])

# saving intermediate results
p.io.autosave = u.Param(active=False)

# opens plotting GUI if interaction is set to active
p.io.autoplot = u.Param(active=False)
p.io.interaction = u.Param(active=False)

# Save benchmark timings
p.io.benchmark = "all"

# simulated diffraction data; frame count and frame shape come from the CLI arguments
p.scans = u.Param()
p.scans.MF = u.Param()
# now you have to specify which ScanModel to use with scans.XX.name,
# just as you have to give 'name' for engines and PtyScan subclasses.
p.scans.MF.name = 'BlockVanilla' # or 'BlockFull'
p.scans.MF.data = u.Param()
p.scans.MF.data.name = 'MoonFlowerScan'
p.scans.MF.data.shape = args.shape
p.scans.MF.data.num_frames = args.frames
p.scans.MF.data.save = None

# position distance in fraction of illumination frame
p.scans.MF.data.density = 0.2
# total number of photons in the empty beam
p.scans.MF.data.photons = 1e8
# Gaussian FWHM of possible detector blurring
p.scans.MF.data.psf = 0.

# attach a reconstruction engine
p.engines = u.Param()
p.engines.engine00 = u.Param()
p.engines.engine00.name = 'DM'
p.engines.engine00.numiter = args.iterations

return p

def print_results(ptycho):
# Print benchmarking results
if (ptycho.p.io.benchmark == "all") and u.parallel.master:
print("\nBenchmark:")
print("==========")
total = 0
for k,v in ptycho.benchmark.items():
total += v
print(f"{k}: {v:.02f} s")
print(f"Total: {total:.02f} s")

# prepare and run
if __name__ == "__main__":
run_benchmark()
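The CPU-only variant takes the same frame, shape and iteration arguments and needs no GPU, e.g. (values again arbitrary):

python benchmark/ptypy_moonflower_script_numpy.py -n 200 -s 128 -i 50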
2 changes: 2 additions & 0 deletions dependencies_core.yml
@@ -1,4 +1,6 @@
name: ptypy_core
channels:
- conda-forge
dependencies:
- python
- numpy
5 changes: 3 additions & 2 deletions ptypy/accelerate/base/engines/stochastic.py
@@ -398,15 +398,16 @@ def engine_finalize(self):

self._reset_benchmarks()

if self.do_position_refinement:
if self.do_position_refinement and self.p.position_refinement.record:
for label, d in self.di.storages.items():
prep = self.diff_info[d.ID]
res = self.kernels[prep.label].resolution
for i,view in enumerate(d.views):
for j,(pname, pod) in enumerate(view.pods.items()):
delta = (prep.original_addr[i][j][1][1:] - prep.addr[i][j][1][1:]) * res
delta = (prep.addr[i][j][1][1:] - prep.original_addr[i][j][1][1:]) * res
pod.ob_view.coord += delta
pod.ob_view.storage.update_views(pod.ob_view)
self.ptycho.record_positions = True


@register()
5 changes: 4 additions & 1 deletion ptypy/accelerate/cuda_cupy/multi_gpu.py
@@ -98,7 +98,10 @@ def __init__(self):
# get a unique identifier for the NCCL communicator and
# broadcast it to all MPI processes (assuming one device per process)
if self.rank == 0:
self.id = nccl.get_unique_id()
try:
self.id = nccl.get_unique_id()
except:
self.id = None
else:
self.id = None
