From b2d94c8393ed34d50b1bf952daf0f29ae491dc4a Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 12:00:01 +0200 Subject: [PATCH 01/23] Add KE and TKE to probes --- .../visualize_probes.py | 169 +++++++++++++++--- 1 file changed, 143 insertions(+), 26 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 05edcd14..9d2afb91 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -8,7 +8,7 @@ from vampy.automatedPostprocessing.postprocessing_common import read_command_line -def visualize_probes(case_path, probe_saving_frequency, show_figure=True, save_figure=False): +def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, save_figure=False): """ Loads probe points from completed CFD simulation, and visualizes velocity (magnitude) and pressure at respective probes @@ -17,17 +17,24 @@ def visualize_probes(case_path, probe_saving_frequency, show_figure=True, save_f Args: probe_saving_frequency (int): Interval between saving probes. case_path (str): Path to results from simulation + dt (float): Time step of simulation + T (float): One cardiac cycle, in [ms] show_figure (bool): Shows figure if True save_figure (bool): Saves figure if True """ # Plotting variables - colors = ["red", "blue"] + colors = ['red', 'blue', 'purple'] max_U = 0 max_P = 0 + max_ke = 0 + max_tke = 0 num_probes = 0 num_rows = 0 num_cols = 0 - velocitites = [] + velocities = [] + velocities_u = [] + velocities_v = [] + velocities_w = [] pressures = [] # Load and plot probes @@ -36,58 +43,134 @@ def visualize_probes(case_path, probe_saving_frequency, show_figure=True, save_f tstep = probe_saving_frequency * (counter + 1) # Load velocity component and pressure probes - u = path.join(case_path, "u_x_{}.probes".format(tstep)) - v = path.join(case_path, "u_y_{}.probes".format(tstep)) - w = path.join(case_path, "u_z_{}.probes".format(tstep)) - p = path.join(case_path, "p_{}.probes".format(tstep)) + u_path = path.join(case_path, "u_x_{}.probes".format(tstep)) + v_path = path.join(case_path, "u_y_{}.probes".format(tstep)) + w_path = path.join(case_path, "u_z_{}.probes".format(tstep)) + p_path = path.join(case_path, "p_{}.probes".format(tstep)) try: - p_probe = np.load(p, allow_pickle=True) - u_probe = np.load(u, allow_pickle=True) - v_probe = np.load(v, allow_pickle=True) - w_probe = np.load(w, allow_pickle=True) + p = np.load(p_path, allow_pickle=True) + u = np.load(u_path, allow_pickle=True) + v = np.load(v_path, allow_pickle=True) + w = np.load(w_path, allow_pickle=True) except Exception: print("-- Finished reading in probes") break # Create velocity magnitude - U = np.sqrt(u_probe ** 2 + v_probe ** 2 + w_probe ** 2) + U = np.linalg.norm([u, v, w], axis=0) if counter == 0: # Set plot parameters - num_probes = U.shape[0] + num_probes = u.shape[0] num_rows = 5 num_cols = int(np.ceil(num_probes / num_rows)) # Find velocity and pressure for scaling windows max_U = np.max(U) if np.max(U) > max_U else max_U p_id = int(0.1 * probe_saving_frequency) # Avoid scaling from initial pressure - max_P = np.max(p_probe[:, p_id:]) if np.max(p_probe[:, p_id:]) > max_P else max_P + max_P = np.max(p[:, p_id:]) if np.max(p[:, p_id:]) > max_P else max_P # Store values in lists for plotting for k in range(num_probes): if counter == 0: - velocitites.append(list(U[k])) - pressures.append(list(p_probe[k])) + velocities.append(list(U[k])) + 
velocities_u.append(list(u[k])) + velocities_v.append(list(v[k])) + velocities_w.append(list(w[k])) + pressures.append(list(p[k])) else: - velocitites[k] += list(U[k]) - pressures[k] += list(p_probe[k]) + velocities[k] += list(U[k]) + velocities_u[k] += list(u[k]) + velocities_v[k] += list(v[k]) + velocities_w[k] += list(w[k]) + pressures[k] += list(p[k]) counter += 1 # Get number of timesteps / store values - if len(velocitites[0]) > 0: - n_timesteps = len(velocitites[0]) + velocities = np.array(velocities) + velocities_u = np.array(velocities_u) + velocities_v = np.array(velocities_v) + velocities_w = np.array(velocities_w) + pressures = np.array(pressures) + + # FIXME: Remove + n_stop = 2853 + velocities = velocities[:, :n_stop] + velocities_u = velocities_u[:, :n_stop] + velocities_v = velocities_v[:, :n_stop] + velocities_w = velocities_w[:, :n_stop] + pressures = pressures[:, :n_stop] + # + if len(velocities[0]) > 0: + n_timesteps = velocities.shape[1] else: print("-- No data to visualize") exit() + # Generate probe based flow quantities + # FIXME: Revert + saved_points_per_cycle = 951 # int(T / dt) + # + n_cycles = int(n_timesteps / saved_points_per_cycle) + + mean_velocities = np.zeros((num_probes, n_timesteps)) + mean_velocities_u = np.zeros((num_probes, n_timesteps)) + mean_velocities_v = np.zeros((num_probes, n_timesteps)) + mean_velocities_w = np.zeros((num_probes, n_timesteps)) + kinetic_energy = np.zeros((num_probes, n_timesteps)) + turbulent_kinetic_energy = np.zeros((num_probes, n_timesteps)) + for k in range(num_probes): + U = velocities[k] + u = velocities_u[k] + v = velocities_v[k] + w = velocities_w[k] + u_mean = 0 + u_mean_u = 0 + u_mean_v = 0 + u_mean_w = 0 + for n in range(n_cycles): + u_mean += U[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + u_mean_u += u[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + u_mean_v += v[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + u_mean_w += w[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + + u_mean_u = np.array(u_mean_u) + u_mean_v = np.array(u_mean_v) + u_mean_w = np.array(u_mean_w) + + u_mean /= n_cycles + u_mean_u /= n_cycles + u_mean_v /= n_cycles + u_mean_w /= n_cycles + mean_velocities[k] = list(u_mean) * n_cycles + mean_velocities_u[k] = list(u_mean_u) * n_cycles + mean_velocities_v[k] = list(u_mean_v) * n_cycles + mean_velocities_w[k] = list(u_mean_w) * n_cycles + + # Calculate total kinetic energy + kinetic_energy[k] = 0.5 * (u ** 2 + v ** 2 + w ** 3) + + # Calculate fluctuating velocities + fluctuating_velocities_u = u - mean_velocities_u[k] + fluctuating_velocities_v = v - mean_velocities_v[k] + fluctuating_velocities_w = w - mean_velocities_w[k] + + turbulent_kinetic_energy[k] = 0.5 * ((fluctuating_velocities_u ** 2) + + (fluctuating_velocities_v ** 2) + + (fluctuating_velocities_w ** 2)) + + max_ke = np.max(kinetic_energy) if np.max(kinetic_energy) > max_ke else max_ke + max_tke = np.max(turbulent_kinetic_energy) if np.max(turbulent_kinetic_energy) > max_tke else max_tke + # Generate plots for k in range(num_probes): # Create subplots ax0 = plt.subplot(num_rows, num_cols, k + 1) ax1 = ax0.twinx() time_interval = np.linspace(0, n_timesteps, n_timesteps) - ax0.plot(time_interval, velocitites[k], color=colors[0], label="") + ax0.plot(time_interval, velocities[k], color=colors[0], label="") + ax0.plot(time_interval, mean_velocities[k], color=colors[2], linestyle='--', label="") ax1.plot(time_interval, pressures[k], color=colors[1], label="") # Set axis limits 
(y-dir) @@ -107,7 +190,7 @@ def visualize_probes(case_path, probe_saving_frequency, show_figure=True, save_f ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) if save_figure: - save_path = path.join(case_path, "Probes.png") + save_path = path.join(case_path, "velocity_and_pressure.png") print("-- Saving figure of velocity magnitude and pressure at probes at {}".format(save_path)) plt.savefig(save_path) @@ -115,15 +198,49 @@ def visualize_probes(case_path, probe_saving_frequency, show_figure=True, save_f print("-- Plotting velocity magnitude and pressure at probes") plt.show() + # Generate plots for kinetic energy + for k in range(num_probes): + # Create subplots + ax0 = plt.subplot(num_rows, num_cols, k + 1) + ax1 = ax0.twinx() + time_interval = np.linspace(0, n_timesteps, n_timesteps) + ax0.plot(time_interval, kinetic_energy[k], color=colors[0], label="") + ax0.plot(time_interval, turbulent_kinetic_energy[k], color=colors[1], label="") + + # Set axis limits (y-dir) + ax0.set_ylim(-1E-2, max_ke * 1.1) + ax1.set_ylim(-1E-2, max_tke * 1.1) + + # Set axis labels + ax0.set_ylabel("Kinetic energy [m$^2$/s$^2$]", fontsize=12, color=colors[0]) + ax1.set_ylabel("Turbulent kinetic\n energy [m$^2$/s$^2$]", fontsize=12, color=colors[1]) + ax0.set_xlabel("Time [s]", fontsize=12) + + # Color axis ticks + ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + ax1.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) + + # Set title to probe number + ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + + if save_figure: + save_path = path.join(case_path, "kinetic_energy.png") + print("-- Saving figure of kinetic energy and turbulent kinetic energy at probes at {}".format(save_path)) + plt.savefig(save_path) + + if show_figure: + print("-- Plotting kinetic energy and turbulent kinetic energy at probes") + plt.show() + # Clear plotting figure plt.clf() def main_probe(): - folder, _, _, dt, _, _, probe_freq, _, _, _, _, _, _ = read_command_line() - visualize_probes(folder, probe_freq, save_figure=True) + folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _ = read_command_line() + visualize_probes(folder, probe_freq, T, dt, save_figure=True) if __name__ == '__main__': - folder, _, _, dt, _, _, probe_freq, _, _, _, _, _, _ = read_command_line() - visualize_probes(folder, probe_freq, save_figure=True) + folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _ = read_command_line() + visualize_probes(folder, probe_freq, T, dt, save_figure=True) From dd84e92ae686469061c5ce1011628ed565e9466f Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 13:13:08 +0200 Subject: [PATCH 02/23] Add Spectrogram --- .../visualize_probes.py | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 9d2afb91..2cbd9a94 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -8,6 +8,48 @@ from vampy.automatedPostprocessing.postprocessing_common import read_command_line + +def compute_energy_spectrum(num_probes, velocities_u, velocities_v, velocities_w): + import numpy as np + max_kes = 0 + + # Define FFT function + def get_fft(x): + n = len(x) + fft = np.fft.fft(x) + freq = np.fft.fftfreq(n) + return fft, freq + + # Create empty list to store kinetic energy spectrum + kinetic_energy_spectrum = [] + + # Iterate over each probe + for k in range(num_probes): + # FFT of 
the velocity signals + fft_u, freq_u = get_fft(velocities_u[k]) + fft_v, freq_v = get_fft(velocities_v[k]) + fft_w, freq_w = get_fft(velocities_w[k]) + + # Compute the energy spectrum (proportional to square of absolute fft values) + E_u = np.abs(fft_u) ** 2 + E_v = np.abs(fft_v) ** 2 + E_w = np.abs(fft_w) ** 2 + + # Summing up energy spectrum from all 3 velocity components + E_total = E_u + E_v + E_w + + # Store positive values of total kinetic energy spectrum + positive_indices = freq_u > 0 + freq = freq_u[positive_indices] + kinetic_energy_spectrum.append(E_total[positive_indices]) + max_kes = np.max(kinetic_energy_spectrum) if np.max(kinetic_energy_spectrum) > max_kes else max_kes + + # Convert list to numpy array + kinetic_energy_spectrum = np.array(kinetic_energy_spectrum) + + return kinetic_energy_spectrum, freq, max_kes + + def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, save_figure=False): """ Loads probe points from completed CFD simulation, @@ -163,6 +205,9 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, max_ke = np.max(kinetic_energy) if np.max(kinetic_energy) > max_ke else max_ke max_tke = np.max(turbulent_kinetic_energy) if np.max(turbulent_kinetic_energy) > max_tke else max_tke + kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(num_probes, velocities_u, velocities_v, + velocities_w) + # Generate plots for k in range(num_probes): # Create subplots @@ -232,6 +277,61 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, print("-- Plotting kinetic energy and turbulent kinetic energy at probes") plt.show() + # Generate plots + for k in range(num_probes): + # Create subplots + ax0 = plt.subplot(num_rows, num_cols, k + 1) + + ax0.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") + ax0.set_xscale("log") + ax0.set_yscale("log") + + # Set axis limits (y-dir) + ax0.set_ylim(None, max_kes * 1.1) + + # Set axis labels + ax0.set_ylabel("E(k)", fontsize=12, color=colors[0]) + ax0.set_xlabel("k", fontsize=12) + + # Color axis ticks + ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + + # Set title to probe number + ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + + if save_figure: + save_path = path.join(case_path, "energy_spectrum.png") + print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) + plt.savefig(save_path) + + if show_figure: + print("-- Plotting energy spectrm at probes") + plt.show() + + # Generate plots + for k in range(num_probes): + # Create subplots + ax0 = plt.subplot(num_rows, num_cols, k + 1) + + # Filter out negative values + ax0.specgram(velocities_u[k], NFFT=256, Fs=2, noverlap=128, cmap="jet") + + # Set axis labels + ax0.set_ylabel("Frequency [Hz]", fontsize=12) + ax0.set_xlabel("Time [ms]", fontsize=12) + + # Set title to probe number + ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + + if save_figure: + save_path = path.join(case_path, "spectrogram.png") + print("-- Saving figure of spectrogram at probes at {}".format(save_path)) + plt.savefig(save_path) + + if show_figure: + print("-- Plotting spectrogram at probes") + plt.show() + # Clear plotting figure plt.clf() From 7d491c3503eaa6011c8c1b0ddbcf3bc50fd5c071 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 13:36:59 +0200 Subject: [PATCH 03/23] Refactor viz components into methods --- .../visualize_probes.py | 480 +++++++++--------- 1 file changed, 253 insertions(+), 227 deletions(-) diff 
--git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 2cbd9a94..753525b5 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -7,47 +7,8 @@ from vampy.automatedPostprocessing.postprocessing_common import read_command_line - - -def compute_energy_spectrum(num_probes, velocities_u, velocities_v, velocities_w): - import numpy as np - max_kes = 0 - - # Define FFT function - def get_fft(x): - n = len(x) - fft = np.fft.fft(x) - freq = np.fft.fftfreq(n) - return fft, freq - - # Create empty list to store kinetic energy spectrum - kinetic_energy_spectrum = [] - - # Iterate over each probe - for k in range(num_probes): - # FFT of the velocity signals - fft_u, freq_u = get_fft(velocities_u[k]) - fft_v, freq_v = get_fft(velocities_v[k]) - fft_w, freq_w = get_fft(velocities_w[k]) - - # Compute the energy spectrum (proportional to square of absolute fft values) - E_u = np.abs(fft_u) ** 2 - E_v = np.abs(fft_v) ** 2 - E_w = np.abs(fft_w) ** 2 - - # Summing up energy spectrum from all 3 velocity components - E_total = E_u + E_v + E_w - - # Store positive values of total kinetic energy spectrum - positive_indices = freq_u > 0 - freq = freq_u[positive_indices] - kinetic_energy_spectrum.append(E_total[positive_indices]) - max_kes = np.max(kinetic_energy_spectrum) if np.max(kinetic_energy_spectrum) > max_kes else max_kes - - # Convert list to numpy array - kinetic_energy_spectrum = np.array(kinetic_energy_spectrum) - - return kinetic_energy_spectrum, freq, max_kes +# Plotting variables +colors = ['red', 'blue', 'purple'] def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, save_figure=False): @@ -64,189 +25,94 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, show_figure (bool): Shows figure if True save_figure (bool): Saves figure if True """ - # Plotting variables - colors = ['red', 'blue', 'purple'] - max_U = 0 - max_P = 0 - max_ke = 0 - max_tke = 0 - num_probes = 0 - num_rows = 0 - num_cols = 0 - velocities = [] - velocities_u = [] - velocities_v = [] - velocities_w = [] - pressures = [] - - # Load and plot probes - counter = 0 - while True: - tstep = probe_saving_frequency * (counter + 1) + max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ + = load_probes(case_path, probe_saving_frequency) - # Load velocity component and pressure probes - u_path = path.join(case_path, "u_x_{}.probes".format(tstep)) - v_path = path.join(case_path, "u_y_{}.probes".format(tstep)) - w_path = path.join(case_path, "u_z_{}.probes".format(tstep)) - p_path = path.join(case_path, "p_{}.probes".format(tstep)) - - try: - p = np.load(p_path, allow_pickle=True) - u = np.load(u_path, allow_pickle=True) - v = np.load(v_path, allow_pickle=True) - w = np.load(w_path, allow_pickle=True) - except Exception: - print("-- Finished reading in probes") - break + # Generate probe based flow quantities + kinetic_energy, max_ke, max_tke, mean_velocity, turbulent_kinetic_energy \ + = compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, + velocity_v, velocity_w) - # Create velocity magnitude - U = np.linalg.norm([u, v, w], axis=0) - if counter == 0: - # Set plot parameters - num_probes = u.shape[0] - num_rows = 5 - num_cols = int(np.ceil(num_probes / num_rows)) + kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, 
velocity_u, velocity_v, + velocity_w) - # Find velocity and pressure for scaling windows - max_U = np.max(U) if np.max(U) > max_U else max_U - p_id = int(0.1 * probe_saving_frequency) # Avoid scaling from initial pressure - max_P = np.max(p[:, p_id:]) if np.max(p[:, p_id:]) > max_P else max_P + plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, + n_rows, pressures, save_figure, show_figure, velocity) - # Store values in lists for plotting - for k in range(num_probes): - if counter == 0: - velocities.append(list(U[k])) - velocities_u.append(list(u[k])) - velocities_v.append(list(v[k])) - velocities_w.append(list(w[k])) - pressures.append(list(p[k])) - else: - velocities[k] += list(U[k]) - velocities_u[k] += list(u[k]) - velocities_v[k] += list(v[k]) - velocities_w[k] += list(w[k]) - pressures[k] += list(p[k]) + plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, n_cols, n_probes, n_rows, + save_figure, show_figure, turbulent_kinetic_energy) - counter += 1 + plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_cols, n_probes, n_rows, + save_figure, show_figure) - # Get number of timesteps / store values - velocities = np.array(velocities) - velocities_u = np.array(velocities_u) - velocities_v = np.array(velocities_v) - velocities_w = np.array(velocities_w) - pressures = np.array(pressures) + plot_spectrogram(case_path, n_cols, n_probes, n_rows, save_figure, show_figure, velocity_u) - # FIXME: Remove - n_stop = 2853 - velocities = velocities[:, :n_stop] - velocities_u = velocities_u[:, :n_stop] - velocities_v = velocities_v[:, :n_stop] - velocities_w = velocities_w[:, :n_stop] - pressures = pressures[:, :n_stop] - # - if len(velocities[0]) > 0: - n_timesteps = velocities.shape[1] - else: - print("-- No data to visualize") - exit() - - # Generate probe based flow quantities - # FIXME: Revert - saved_points_per_cycle = 951 # int(T / dt) - # - n_cycles = int(n_timesteps / saved_points_per_cycle) - - mean_velocities = np.zeros((num_probes, n_timesteps)) - mean_velocities_u = np.zeros((num_probes, n_timesteps)) - mean_velocities_v = np.zeros((num_probes, n_timesteps)) - mean_velocities_w = np.zeros((num_probes, n_timesteps)) - kinetic_energy = np.zeros((num_probes, n_timesteps)) - turbulent_kinetic_energy = np.zeros((num_probes, n_timesteps)) - for k in range(num_probes): - U = velocities[k] - u = velocities_u[k] - v = velocities_v[k] - w = velocities_w[k] - u_mean = 0 - u_mean_u = 0 - u_mean_v = 0 - u_mean_w = 0 - for n in range(n_cycles): - u_mean += U[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - u_mean_u += u[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - u_mean_v += v[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - u_mean_w += w[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - - u_mean_u = np.array(u_mean_u) - u_mean_v = np.array(u_mean_v) - u_mean_w = np.array(u_mean_w) + # Clear plotting figure + plt.clf() - u_mean /= n_cycles - u_mean_u /= n_cycles - u_mean_v /= n_cycles - u_mean_w /= n_cycles - mean_velocities[k] = list(u_mean) * n_cycles - mean_velocities_u[k] = list(u_mean_u) * n_cycles - mean_velocities_v[k] = list(u_mean_v) * n_cycles - mean_velocities_w[k] = list(u_mean_w) * n_cycles - # Calculate total kinetic energy - kinetic_energy[k] = 0.5 * (u ** 2 + v ** 2 + w ** 3) +def plot_spectrogram(case_path, n_cols, n_probes, n_rows, save_figure, show_figure, velocity_u): + # Generate plots + for k in range(n_probes): + # Create 
subplots + ax0 = plt.subplot(n_rows, n_cols, k + 1) - # Calculate fluctuating velocities - fluctuating_velocities_u = u - mean_velocities_u[k] - fluctuating_velocities_v = v - mean_velocities_v[k] - fluctuating_velocities_w = w - mean_velocities_w[k] + # Filter out negative values + ax0.specgram(velocity_u[k], NFFT=256, Fs=2, noverlap=128, cmap="jet") - turbulent_kinetic_energy[k] = 0.5 * ((fluctuating_velocities_u ** 2) + - (fluctuating_velocities_v ** 2) + - (fluctuating_velocities_w ** 2)) + # Set axis labels + ax0.set_ylabel("Frequency [Hz]", fontsize=12) + ax0.set_xlabel("Time [ms]", fontsize=12) - max_ke = np.max(kinetic_energy) if np.max(kinetic_energy) > max_ke else max_ke - max_tke = np.max(turbulent_kinetic_energy) if np.max(turbulent_kinetic_energy) > max_tke else max_tke + # Set title to probe number + ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + if save_figure: + save_path = path.join(case_path, "spectrogram.png") + print("-- Saving figure of spectrogram at probes at {}".format(save_path)) + plt.savefig(save_path) + if show_figure: + print("-- Plotting spectrogram at probes") + plt.show() - kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(num_probes, velocities_u, velocities_v, - velocities_w) +def plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_cols, n_probes, n_rows, + save_figure, show_figure): # Generate plots - for k in range(num_probes): + for k in range(n_probes): # Create subplots - ax0 = plt.subplot(num_rows, num_cols, k + 1) - ax1 = ax0.twinx() - time_interval = np.linspace(0, n_timesteps, n_timesteps) - ax0.plot(time_interval, velocities[k], color=colors[0], label="") - ax0.plot(time_interval, mean_velocities[k], color=colors[2], linestyle='--', label="") - ax1.plot(time_interval, pressures[k], color=colors[1], label="") + ax0 = plt.subplot(n_rows, n_cols, k + 1) + + ax0.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") + ax0.set_xscale("log") + ax0.set_yscale("log") # Set axis limits (y-dir) - ax0.set_ylim(-1E-2, max_U * 1.5) - ax1.set_ylim(-1E-2, max_P * 1.5) + ax0.set_ylim(None, max_kes * 1.1) # Set axis labels - ax0.set_ylabel("Velocity [m/s]", fontsize=12, color=colors[0]) - ax1.set_ylabel("Pressure [Pa]", fontsize=12, color=colors[1]) - ax0.set_xlabel("Time [s]", fontsize=12) + ax0.set_ylabel("E(k)", fontsize=12, color=colors[0]) + ax0.set_xlabel("k", fontsize=12) # Color axis ticks ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) - ax1.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) # Set title to probe number ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: - save_path = path.join(case_path, "velocity_and_pressure.png") - print("-- Saving figure of velocity magnitude and pressure at probes at {}".format(save_path)) + save_path = path.join(case_path, "energy_spectrum.png") + print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) plt.savefig(save_path) - if show_figure: - print("-- Plotting velocity magnitude and pressure at probes") + print("-- Plotting energy spectrm at probes") plt.show() + +def plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, n_cols, n_probes, n_rows, + save_figure, show_figure, turbulent_kinetic_energy): # Generate plots for kinetic energy - for k in range(num_probes): + for k in range(n_probes): # Create subplots - ax0 = plt.subplot(num_rows, num_cols, k + 1) + ax0 = plt.subplot(n_rows, n_cols, k + 1) ax1 = ax0.twinx() time_interval = 
np.linspace(0, n_timesteps, n_timesteps) ax0.plot(time_interval, kinetic_energy[k], color=colors[0], label="") @@ -267,73 +133,233 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, # Set title to probe number ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: save_path = path.join(case_path, "kinetic_energy.png") print("-- Saving figure of kinetic energy and turbulent kinetic energy at probes at {}".format(save_path)) plt.savefig(save_path) - if show_figure: print("-- Plotting kinetic energy and turbulent kinetic energy at probes") plt.show() + +def plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, + n_rows, pressures, save_figure, show_figure, velocity): # Generate plots - for k in range(num_probes): + for k in range(n_probes): # Create subplots - ax0 = plt.subplot(num_rows, num_cols, k + 1) - - ax0.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") - ax0.set_xscale("log") - ax0.set_yscale("log") + ax0 = plt.subplot(n_rows, n_cols, k + 1) + ax1 = ax0.twinx() + time_interval = np.linspace(0, n_timesteps, n_timesteps) + ax0.plot(time_interval, velocity[k], color=colors[0], label="") + ax0.plot(time_interval, mean_velocity[k], color=colors[2], linestyle='--', label="") + ax1.plot(time_interval, pressures[k], color=colors[1], label="") # Set axis limits (y-dir) - ax0.set_ylim(None, max_kes * 1.1) + ax0.set_ylim(-1E-2, max_U * 1.5) + ax1.set_ylim(-1E-2, max_P * 1.5) # Set axis labels - ax0.set_ylabel("E(k)", fontsize=12, color=colors[0]) - ax0.set_xlabel("k", fontsize=12) + ax0.set_ylabel("Velocity [m/s]", fontsize=12, color=colors[0]) + ax1.set_ylabel("Pressure [Pa]", fontsize=12, color=colors[1]) + ax0.set_xlabel("Time [s]", fontsize=12) # Color axis ticks ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + ax1.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) # Set title to probe number ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: - save_path = path.join(case_path, "energy_spectrum.png") - print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) + save_path = path.join(case_path, "velocity_and_pressure.png") + print("-- Saving figure of velocity magnitude and pressure at probes at {}".format(save_path)) plt.savefig(save_path) - if show_figure: - print("-- Plotting energy spectrm at probes") + print("-- Plotting velocity magnitude and pressure at probes") plt.show() - # Generate plots - for k in range(num_probes): - # Create subplots - ax0 = plt.subplot(num_rows, num_cols, k + 1) - # Filter out negative values - ax0.specgram(velocities_u[k], NFFT=256, Fs=2, noverlap=128, cmap="jet") +def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, + velocity_v, velocity_w): + max_ke = 0 + max_tke = 0 + # FIXME: Revert + saved_points_per_cycle = int(T / dt) + saved_points_per_cycle = 951 + n_cycles = int(n_timesteps / saved_points_per_cycle) + mean_velocity = np.zeros((n_probes, n_timesteps)) + mean_velocity_u = np.zeros((n_probes, n_timesteps)) + mean_velocity_v = np.zeros((n_probes, n_timesteps)) + mean_velocity_w = np.zeros((n_probes, n_timesteps)) + kinetic_energy = np.zeros((n_probes, n_timesteps)) + turbulent_kinetic_energy = np.zeros((n_probes, n_timesteps)) + for k in range(n_probes): + U = velocity[k] + u = velocity_u[k] + v = velocity_v[k] + w = velocity_w[k] + u_mean = 0 + u_mean_u = 0 + u_mean_v = 0 + u_mean_w = 0 + for n in 
range(n_cycles): + u_mean += U[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + u_mean_u += u[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + u_mean_v += v[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] + u_mean_w += w[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - # Set axis labels - ax0.set_ylabel("Frequency [Hz]", fontsize=12) - ax0.set_xlabel("Time [ms]", fontsize=12) + u_mean_u = np.array(u_mean_u) + u_mean_v = np.array(u_mean_v) + u_mean_w = np.array(u_mean_w) - # Set title to probe number - ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + u_mean /= n_cycles + u_mean_u /= n_cycles + u_mean_v /= n_cycles + u_mean_w /= n_cycles + mean_velocity[k] = list(u_mean) * n_cycles + mean_velocity_u[k] = list(u_mean_u) * n_cycles + mean_velocity_v[k] = list(u_mean_v) * n_cycles + mean_velocity_w[k] = list(u_mean_w) * n_cycles - if save_figure: - save_path = path.join(case_path, "spectrogram.png") - print("-- Saving figure of spectrogram at probes at {}".format(save_path)) - plt.savefig(save_path) + # Calculate total kinetic energy + kinetic_energy[k] = 0.5 * (u ** 2 + v ** 2 + w ** 3) - if show_figure: - print("-- Plotting spectrogram at probes") - plt.show() + # Calculate fluctuating velocity + fluctuating_velocity_u = u - mean_velocity_u[k] + fluctuating_velocity_v = v - mean_velocity_v[k] + fluctuating_velocity_w = w - mean_velocity_w[k] - # Clear plotting figure - plt.clf() + turbulent_kinetic_energy[k] = 0.5 * ((fluctuating_velocity_u ** 2) + + (fluctuating_velocity_v ** 2) + + (fluctuating_velocity_w ** 2)) + + max_ke = np.max(kinetic_energy) if np.max(kinetic_energy) > max_ke else max_ke + max_tke = np.max(turbulent_kinetic_energy) if np.max(turbulent_kinetic_energy) > max_tke else max_tke + return kinetic_energy, max_ke, max_tke, mean_velocity, turbulent_kinetic_energy + + +def load_probes(case_path, probe_saving_frequency): + max_U = 0 + max_P = 0 + n_probes = 0 + n_rows = 0 + n_cols = 0 + velocity = [] + velocity_u = [] + velocity_v = [] + velocity_w = [] + pressures = [] + # Load and plot probes + counter = 0 + while True: + tstep = probe_saving_frequency * (counter + 1) + + # Load velocity component and pressure probes + u_path = path.join(case_path, "u_x_{}.probes".format(tstep)) + v_path = path.join(case_path, "u_y_{}.probes".format(tstep)) + w_path = path.join(case_path, "u_z_{}.probes".format(tstep)) + p_path = path.join(case_path, "p_{}.probes".format(tstep)) + + try: + p = np.load(p_path, allow_pickle=True) + u = np.load(u_path, allow_pickle=True) + v = np.load(v_path, allow_pickle=True) + w = np.load(w_path, allow_pickle=True) + except Exception: + print("-- Finished reading in probes") + break + + # Create velocity magnitude + U = np.linalg.norm([u, v, w], axis=0) + if counter == 0: + # Set plot parameters + n_probes = u.shape[0] + n_rows = 5 + n_cols = int(np.ceil(n_probes / n_rows)) + + # Find velocity and pressure for scaling windows + max_U = np.max(U) if np.max(U) > max_U else max_U + p_id = int(0.1 * probe_saving_frequency) # Avoid scaling from initial pressure + max_P = np.max(p[:, p_id:]) if np.max(p[:, p_id:]) > max_P else max_P + + # Store values in lists for plotting + for k in range(n_probes): + if counter == 0: + velocity.append(list(U[k])) + velocity_u.append(list(u[k])) + velocity_v.append(list(v[k])) + velocity_w.append(list(w[k])) + pressures.append(list(p[k])) + else: + velocity[k] += list(U[k]) + velocity_u[k] += list(u[k]) + velocity_v[k] += list(v[k]) + velocity_w[k] += list(w[k]) + 
pressures[k] += list(p[k]) + + counter += 1 + # Get number of timesteps / store values + velocity = np.array(velocity) + velocity_u = np.array(velocity_u) + velocity_v = np.array(velocity_v) + velocity_w = np.array(velocity_w) + pressures = np.array(pressures) + # FIXME: Remove + n_stop = 2853 + velocity = velocity[:, :n_stop] + velocity_u = velocity_u[:, :n_stop] + velocity_v = velocity_v[:, :n_stop] + velocity_w = velocity_w[:, :n_stop] + pressures = pressures[:, :n_stop] + + # Check if data is available + if len(velocity[0]) > 0: + n_timesteps = velocity.shape[1] + else: + print("-- No data to visualize. Exiting..") + exit() + + return max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps + + +def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w): + import numpy as np + max_kes = 0 + + # Define FFT function + def get_fft(x): + n = len(x) + fft = np.fft.fft(x) + freq = np.fft.fftfreq(n) + return fft, freq + + # Create empty list to store kinetic energy spectrum + kinetic_energy_spectrum = [] + + # Iterate over each probe + for k in range(n_probes): + # FFT of the velocity signals + fft_u, freq_u = get_fft(velocity_u[k]) + fft_v, freq_v = get_fft(velocity_v[k]) + fft_w, freq_w = get_fft(velocity_w[k]) + + # Compute the energy spectrum (proportional to square of absolute fft values) + E_u = np.abs(fft_u) ** 2 + E_v = np.abs(fft_v) ** 2 + E_w = np.abs(fft_w) ** 2 + + # Summing up energy spectrum from all 3 velocity components + E_total = E_u + E_v + E_w + + # Store positive values of total kinetic energy spectrum + positive_indices = freq_u > 0 + freq = freq_u[positive_indices] + kinetic_energy_spectrum.append(E_total[positive_indices]) + max_kes = np.max(kinetic_energy_spectrum) if np.max(kinetic_energy_spectrum) > max_kes else max_kes + + # Convert list to numpy array + kinetic_energy_spectrum = np.array(kinetic_energy_spectrum) + + return kinetic_energy_spectrum, freq, max_kes def main_probe(): From 7621c8f87e5b7441e1cd11b55206128a945cbed9 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 13:57:57 +0200 Subject: [PATCH 04/23] Add docstrings to all methods --- .../visualize_probes.py | 167 ++++++++++++++---- 1 file changed, 137 insertions(+), 30 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 753525b5..fec3e70a 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -1,5 +1,3 @@ -from __future__ import print_function - from os import path import matplotlib.pyplot as plt @@ -11,33 +9,30 @@ colors = ['red', 'blue', 'purple'] -def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, save_figure=False): +def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, show_figure=True, save_figure=False): """ - Loads probe points from completed CFD simulation, - and visualizes velocity (magnitude) and pressure at respective probes - Assuming results are in mm/s. + Loads probe points from completed CFD simulation and visualizes velocity (magnitude) + and pressure at respective probes. Assuming results are in mm/s. Args: + case_path (str): Path to results from simulation. probe_saving_frequency (int): Interval between saving probes. 
- case_path (str): Path to results from simulation - dt (float): Time step of simulation - T (float): One cardiac cycle, in [ms] - show_figure (bool): Shows figure if True - save_figure (bool): Saves figure if True + T (float): One cardiac cycle, in [ms]. + dt (float): Time step of simulation. + show_figure (bool): Shows figure if True. + save_figure (bool): Saves figure if True. """ - max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ - = load_probes(case_path, probe_saving_frequency) - # Generate probe based flow quantities - kinetic_energy, max_ke, max_tke, mean_velocity, turbulent_kinetic_energy \ - = compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, - velocity_v, velocity_w) + max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps = load_probes( + case_path, probe_saving_frequency) + + mean_velocity, kinetic_energy, turbulent_kinetic_energy, max_ke, max_tke = compute_mean_velocity_and_kinetic_energy( + T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w) - kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, - velocity_w) + kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w) - plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, - n_rows, pressures, save_figure, show_figure, velocity) + plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, n_rows, + pressures, save_figure, show_figure, velocity) plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, n_cols, n_probes, n_rows, save_figure, show_figure, turbulent_kinetic_energy) @@ -52,6 +47,18 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, show_figure=True, def plot_spectrogram(case_path, n_cols, n_probes, n_rows, save_figure, show_figure, velocity_u): + """ + Plots spectrogram at probe points. + + Args: + case_path (str): Path to results from simulation. + n_cols (int): Number of columns in the plot grid. + n_probes (int): Number of probe points. + n_rows (int): Number of rows in the plot grid. + save_figure (bool): Saves figure if True. + show_figure (bool): Shows figure if True. + velocity_u (numpy.ndarray): Velocity data. + """ # Generate plots for k in range(n_probes): # Create subplots @@ -77,6 +84,20 @@ def plot_spectrogram(case_path, n_cols, n_probes, n_rows, save_figure, show_figu def plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_cols, n_probes, n_rows, save_figure, show_figure): + """ + Plots energy spectrum at probe points. + + Args: + case_path (str): Path to results from simulation. + freq (numpy.ndarray): Frequency data. + kinetic_energy_spectrum (numpy.ndarray): Kinetic energy spectrum data. + max_kes (float): Maximum value in the kinetic energy spectrum. + n_cols (int): Number of columns in the plot grid. + n_probes (int): Number of probe points. + n_rows (int): Number of rows in the plot grid. + save_figure (bool): Saves figure if True. + show_figure (bool): Shows figure if True. 
+ """ # Generate plots for k in range(n_probes): # Create subplots @@ -103,12 +124,28 @@ def plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_co print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) plt.savefig(save_path) if show_figure: - print("-- Plotting energy spectrm at probes") + print("-- Plotting energy spectrum at probes") plt.show() def plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, n_cols, n_probes, n_rows, save_figure, show_figure, turbulent_kinetic_energy): + """ + Plots kinetic energy and turbulent kinetic energy at probe points. + + Args: + case_path (str): Path to results from simulation. + kinetic_energy (numpy.ndarray): Kinetic energy data. + max_ke (float): Maximum kinetic energy value. + max_tke (float): Maximum turbulent kinetic energy value. + n_timesteps (int): Number of time steps. + n_cols (int): Number of columns in the plot grid. + n_probes (int): Number of probe points. + n_rows (int): Number of rows in the plot grid. + save_figure (bool): Saves figure if True. + show_figure (bool): Shows figure if True. + turbulent_kinetic_energy (numpy.ndarray): Turbulent kinetic energy data. + """ # Generate plots for kinetic energy for k in range(n_probes): # Create subplots @@ -144,6 +181,23 @@ def plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, def plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, n_rows, pressures, save_figure, show_figure, velocity): + """ + Plots velocity magnitude and pressure at probe points. + + Args: + case_path (str): Path to results from simulation. + max_P (float): Maximum pressure value. + max_U (float): Maximum velocity value. + mean_velocity (numpy.ndarray): Mean velocity data. + n_timesteps (int): Number of time steps. + n_cols (int): Number of columns in the plot grid. + n_probes (int): Number of probe points. + n_rows (int): Number of rows in the plot grid. + pressures (numpy.ndarray): Pressure data. + save_figure (bool): Saves figure if True. + show_figure (bool): Shows figure if True. + velocity (numpy.ndarray): Velocity data. + """ # Generate plots for k in range(n_probes): # Create subplots @@ -178,8 +232,28 @@ def plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timeste plt.show() -def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, - velocity_v, velocity_w): +def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, + velocity_w): + """ + Computes mean velocity and kinetic energy at probe points. + + Args: + T (float): One cardiac cycle, in [ms]. + dt (float): Time step of simulation. + n_timesteps (int): Number of time steps. + n_probes (int): Number of probe points. + velocity (numpy.ndarray): Velocity data. + velocity_u (numpy.ndarray): Velocity component 'u' data. + velocity_v (numpy.ndarray): Velocity component 'v' data. + velocity_w (numpy.ndarray): Velocity component 'w' data. + + Returns: + numpy.ndarray: Kinetic energy data. + float: Maximum kinetic energy value. + float: Maximum turbulent kinetic energy value. + numpy.ndarray: Mean velocity data. + numpy.ndarray: Turbulent kinetic energy data. 
+ """ max_ke = 0 max_tke = 0 # FIXME: Revert @@ -234,10 +308,30 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc max_ke = np.max(kinetic_energy) if np.max(kinetic_energy) > max_ke else max_ke max_tke = np.max(turbulent_kinetic_energy) if np.max(turbulent_kinetic_energy) > max_tke else max_tke - return kinetic_energy, max_ke, max_tke, mean_velocity, turbulent_kinetic_energy + return mean_velocity, kinetic_energy, turbulent_kinetic_energy, max_ke, max_tke def load_probes(case_path, probe_saving_frequency): + """ + Loads probe data from a completed CFD simulation. + + Args: + case_path (str): Path to results from simulation. + probe_saving_frequency (int): Interval between saving probes. + + Returns: + float: Maximum pressure value. + float: Maximum velocity value. + int: Number of columns in the plot grid. + int: Number of probe points. + int: Number of rows in the plot grid. + numpy.ndarray: Pressure data. + numpy.ndarray: Velocity data. + numpy.ndarray: Velocity component 'u' data. + numpy.ndarray: Velocity component 'v' data. + numpy.ndarray: Velocity component 'w' data. + int: Number of time steps. + """ max_U = 0 max_P = 0 n_probes = 0 @@ -322,7 +416,20 @@ def load_probes(case_path, probe_saving_frequency): def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w): - import numpy as np + """ + Computes the energy spectrum for each probe based on velocity components. + + Args: + n_probes (int): Number of probe points. + velocity_u (numpy.ndarray): Velocity component 'u' data for each probe. + velocity_v (numpy.ndarray): Velocity component 'v' data for each probe. + velocity_w (numpy.ndarray): Velocity component 'w' data for each probe. + + Returns: + numpy.ndarray: Kinetic energy spectrum for each probe. + numpy.ndarray: Frequency values. + float: Maximum value in the kinetic energy spectrum. 
+ """ max_kes = 0 # Define FFT function @@ -363,10 +470,10 @@ def get_fft(x): def main_probe(): - folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _ = read_command_line() - visualize_probes(folder, probe_freq, T, dt, save_figure=True) + # Read command line arguments + folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _, probes_to_plot = read_command_line() + visualize_probes(folder, probe_freq, T, dt, probes_to_plot, save_figure=True) if __name__ == '__main__': - folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _ = read_command_line() - visualize_probes(folder, probe_freq, T, dt, save_figure=True) + main_probe() From c372ad7fdd0d2421f96cb0290727cb959d8abeda Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 13:58:22 +0200 Subject: [PATCH 05/23] Add probes-to-plot argument --- .../compute_flow_and_simulation_metrics.py | 9 +++------ .../compute_hemodynamic_indices.py | 8 ++------ .../compute_velocity_and_pressure.py | 5 ++--- .../automatedPostprocessing/postprocessing_common.py | 11 +++++++++-- 4 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py b/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py index 6e66728f..fd369734 100755 --- a/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py +++ b/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py @@ -3,6 +3,7 @@ import numpy as np from dolfin import FunctionSpace, Function, MPI, VectorFunctionSpace, Timer, project, sqrt, inner, HDF5File, XDMFFile, \ assign, CellDiameter, Mesh, TestFunction, list_timings, TimingClear, TimingType + from vampy.automatedPostprocessing.postprocessing_common import read_command_line, get_dataset_names, \ rate_of_dissipation, rate_of_strain @@ -518,15 +519,11 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, def main_metrics(): folder, nu, _, dt, velocity_degree, _, _, T, save_frequency, times_to_average, start_cycle, step, \ - average_over_cycles = read_command_line() + average_over_cycles, _ = read_command_line() compute_flow_and_simulation_metrics(folder, nu, dt, velocity_degree, T, times_to_average, save_frequency, start_cycle, step, average_over_cycles) if __name__ == '__main__': - folder, nu, _, dt, velocity_degree, _, _, T, save_frequency, times_to_average, start_cycle, step, \ - average_over_cycles = read_command_line() - - compute_flow_and_simulation_metrics(folder, nu, dt, velocity_degree, T, times_to_average, save_frequency, - start_cycle, step, average_over_cycles) + main_metrics() diff --git a/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py b/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py index 8ab57d2b..4205fb23 100644 --- a/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py +++ b/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py @@ -254,7 +254,7 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr def main_hemo(): - folder, nu, rho, dt, velocity_degree, _, _, T, save_frequency, _, start_cycle, step, average_over_cycles \ + folder, nu, rho, dt, velocity_degree, _, _, T, save_frequency, _, start_cycle, step, average_over_cycles, _ \ = read_command_line() compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, @@ -262,8 +262,4 @@ def main_hemo(): if __name__ == '__main__': - folder, nu, rho, dt, velocity_degree, _, _, T, save_frequency, _, start_cycle, step, 
average_over_cycles \ - = read_command_line() - - compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, - step, average_over_cycles) + main_hemo() diff --git a/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py b/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py index 42d6a31d..a24b8eae 100644 --- a/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py +++ b/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py @@ -92,10 +92,9 @@ def compute_velocity_and_pressure(case_path, dt, velocity_degree, pressure_degre def main_convert(): - folder, _, _, dt, velocity_degree, pressure_degree, _, _, _, _, _, step, _ = read_command_line() + folder, _, _, dt, velocity_degree, pressure_degree, _, _, _, _, _, step, _, _ = read_command_line() compute_velocity_and_pressure(folder, dt, velocity_degree, pressure_degree, step) if __name__ == '__main__': - folder, _, _, dt, velocity_degree, pressure_degree, _, _, _, _, _, step, _ = read_command_line() - compute_velocity_and_pressure(folder, dt, velocity_degree, pressure_degree, step) + main_convert() diff --git a/src/vampy/automatedPostprocessing/postprocessing_common.py b/src/vampy/automatedPostprocessing/postprocessing_common.py index 939889c0..e72e244d 100644 --- a/src/vampy/automatedPostprocessing/postprocessing_common.py +++ b/src/vampy/automatedPostprocessing/postprocessing_common.py @@ -67,6 +67,13 @@ def read_command_line(): nargs="+", help="Time(s) during cardiac cycle to average, in the interval [0,T). Measured in [ms].") + parser.add_argument('-pp', '--probes-to-plot', + type=int, + default=[], + nargs="+", + help="List of integers corresponding to probe numbers. " + + "The probes are to visualized in separate probe plots. 
") + parser.add_argument('-sc', '--start-cycle', type=int, default=2, @@ -85,8 +92,8 @@ def read_command_line(): args = parser.parse_args() return args.case, args.nu, args.rho, args.dt, args.velocity_degree, args.pressure_degree, args.probe_frequency, \ - args.T, args.save_frequency, args.times_to_average, args.start_cycle, args.sample_step, \ - args.average_over_cycles + args.T, args.save_frequency, args.times_to_average, args.start_cycle, args.sample_step, \ + args.average_over_cycles, args.probes_to_plot def epsilon(u): From 7defd3dc420c3769ed2eb9eb775cbe94e26edc18 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 14:26:21 +0200 Subject: [PATCH 06/23] Plot single probes in same plot --- .../visualize_probes.py | 280 +++++++++--------- 1 file changed, 142 insertions(+), 138 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index fec3e70a..bc7b6fd9 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -31,59 +31,98 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w) - plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, n_rows, - pressures, save_figure, show_figure, velocity) + if not probes_to_plot: + # Plot all probes in same plot - plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, n_cols, n_probes, n_rows, - save_figure, show_figure, turbulent_kinetic_energy) - - plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_cols, n_probes, n_rows, - save_figure, show_figure) - - plot_spectrogram(case_path, n_cols, n_probes, n_rows, save_figure, show_figure, velocity_u) - - # Clear plotting figure - plt.clf() + # Plot velocity + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) + + if save_figure: + save_path = path.join(case_path, "velocity_and_pressure.png") + print("-- Saving figure of velocity magnitude and pressure at probes at {}".format(save_path)) + plt.savefig(save_path) + if show_figure: + print("-- Plotting velocity magnitude and pressure at probes") + plt.show() + + # Plot kinetic energy + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy) + + if save_figure: + save_path = path.join(case_path, "kinetic_energy.png") + print("-- Saving figure of kinetic energy and turbulent kinetic energy at probes at {}".format(save_path)) + plt.savefig(save_path) + if show_figure: + print("-- Plotting kinetic energy and turbulent kinetic energy at probes") + plt.show() + + # Plot energy spectrum + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes) + + if save_figure: + save_path = path.join(case_path, "energy_spectrum.png") + print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) + plt.savefig(save_path) + if show_figure: + print("-- Plotting energy spectrum at probes") + plt.show() + + # Plot spectrogram + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_spectrogram(k, ax, velocity_u) + if save_figure: + save_path = 
path.join(case_path, "spectrogram.png") + print("-- Saving figure of spectrogram at probes at {}".format(save_path)) + plt.savefig(save_path) + if show_figure: + print("-- Plotting spectrogram at probes") + plt.show() -def plot_spectrogram(case_path, n_cols, n_probes, n_rows, save_figure, show_figure, velocity_u): + else: + # Plot probes in separate plots + for k in probes_to_plot: + fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) + plot_velocity_and_pressure(k, ax1, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) + plot_kinetic_energy(k, ax2, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy) + plot_energy_spectrum(k, ax3, freq, kinetic_energy_spectrum, max_kes) + plot_spectrogram(k, ax4, velocity_u) + + if save_figure: + save_path = path.join(case_path, f"probes_{k}.png") + print(f"-- Saving figure for probes at {save_path}") + plt.savefig(save_path) + if show_figure: + print(f"-- Plotting quantities for probe {k}") + plt.show() + + +def plot_spectrogram(k, ax, velocity): """ Plots spectrogram at probe points. Args: - case_path (str): Path to results from simulation. - n_cols (int): Number of columns in the plot grid. - n_probes (int): Number of probe points. - n_rows (int): Number of rows in the plot grid. - save_figure (bool): Saves figure if True. - show_figure (bool): Shows figure if True. - velocity_u (numpy.ndarray): Velocity data. + velocity(numpy.ndarray): Velocity data. """ - # Generate plots - for k in range(n_probes): - # Create subplots - ax0 = plt.subplot(n_rows, n_cols, k + 1) - - # Filter out negative values - ax0.specgram(velocity_u[k], NFFT=256, Fs=2, noverlap=128, cmap="jet") + # Filter out negative values + ax.specgram(velocity[k], NFFT=256, Fs=2, noverlap=128, cmap="jet") - # Set axis labels - ax0.set_ylabel("Frequency [Hz]", fontsize=12) - ax0.set_xlabel("Time [ms]", fontsize=12) + # Set axis labels + ax.set_ylabel("Frequency [Hz]", fontsize=12) + ax.set_xlabel("Time [ms]", fontsize=12) - # Set title to probe number - ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: - save_path = path.join(case_path, "spectrogram.png") - print("-- Saving figure of spectrogram at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting spectrogram at probes") - plt.show() + # Set title to probe number + ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) -def plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_cols, n_probes, n_rows, - save_figure, show_figure): +def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): """ Plots energy spectrum at probe points. @@ -98,89 +137,63 @@ def plot_energy_spectrum(case_path, freq, kinetic_energy_spectrum, max_kes, n_co save_figure (bool): Saves figure if True. show_figure (bool): Shows figure if True. 
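The spectrogram itself is produced by matplotlib's built-in short-time FFT. The snippet below is a standalone illustration of the same call pattern (256-point windows, 128-point overlap, Fs=2 as hard-coded above) on a synthetic signal; it is not the module's plot_spectrogram function, and the signal is made up purely for demonstration.

import numpy as np
import matplotlib.pyplot as plt

# Synthetic probe-like signal: a slow oscillation plus high-frequency noise
# switched on halfway through the record.
fs = 2.0
t = np.arange(2000) / fs
signal = np.sin(2 * np.pi * 0.05 * t) + 0.3 * np.random.randn(t.size) * (t > t[t.size // 2])

fig, ax = plt.subplots()
# Same call pattern as in the diff: 256-point windows with 50 % overlap.
ax.specgram(signal, NFFT=256, Fs=fs, noverlap=128, cmap="jet")
ax.set_xlabel("Time")
ax.set_ylabel("Frequency")
plt.show()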
""" - # Generate plots - for k in range(n_probes): - # Create subplots - ax0 = plt.subplot(n_rows, n_cols, k + 1) - - ax0.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") - ax0.set_xscale("log") - ax0.set_yscale("log") + # Create subplots + ax.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") + ax.set_xscale("log") + ax.set_yscale("log") - # Set axis limits (y-dir) - ax0.set_ylim(None, max_kes * 1.1) + # Set axis limits + ax.set_ylim(None, max_kes * 1.1) + ax.set_xlim(min(freq), max(freq)) - # Set axis labels - ax0.set_ylabel("E(k)", fontsize=12, color=colors[0]) - ax0.set_xlabel("k", fontsize=12) + # Set axis labels + ax.set_ylabel("E(k)", fontsize=12, color=colors[0]) + ax.set_xlabel("k", fontsize=12) - # Color axis ticks - ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + # Color axis ticks + ax.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) - # Set title to probe number - ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: - save_path = path.join(case_path, "energy_spectrum.png") - print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting energy spectrum at probes") - plt.show() + # Set title to probe number + ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) -def plot_kinetic_energy(case_path, kinetic_energy, max_ke, max_tke, n_timesteps, n_cols, n_probes, n_rows, - save_figure, show_figure, turbulent_kinetic_energy): +def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy): """ Plots kinetic energy and turbulent kinetic energy at probe points. Args: - case_path (str): Path to results from simulation. kinetic_energy (numpy.ndarray): Kinetic energy data. max_ke (float): Maximum kinetic energy value. max_tke (float): Maximum turbulent kinetic energy value. n_timesteps (int): Number of time steps. - n_cols (int): Number of columns in the plot grid. - n_probes (int): Number of probe points. - n_rows (int): Number of rows in the plot grid. - save_figure (bool): Saves figure if True. - show_figure (bool): Shows figure if True. turbulent_kinetic_energy (numpy.ndarray): Turbulent kinetic energy data. 
""" # Generate plots for kinetic energy - for k in range(n_probes): - # Create subplots - ax0 = plt.subplot(n_rows, n_cols, k + 1) - ax1 = ax0.twinx() - time_interval = np.linspace(0, n_timesteps, n_timesteps) - ax0.plot(time_interval, kinetic_energy[k], color=colors[0], label="") - ax0.plot(time_interval, turbulent_kinetic_energy[k], color=colors[1], label="") - - # Set axis limits (y-dir) - ax0.set_ylim(-1E-2, max_ke * 1.1) - ax1.set_ylim(-1E-2, max_tke * 1.1) - - # Set axis labels - ax0.set_ylabel("Kinetic energy [m$^2$/s$^2$]", fontsize=12, color=colors[0]) - ax1.set_ylabel("Turbulent kinetic\n energy [m$^2$/s$^2$]", fontsize=12, color=colors[1]) - ax0.set_xlabel("Time [s]", fontsize=12) - - # Color axis ticks - ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) - ax1.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) - - # Set title to probe number - ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: - save_path = path.join(case_path, "kinetic_energy.png") - print("-- Saving figure of kinetic energy and turbulent kinetic energy at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting kinetic energy and turbulent kinetic energy at probes") - plt.show() - - -def plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timesteps, n_cols, n_probes, - n_rows, pressures, save_figure, show_figure, velocity): + # Create subplots + ax_twinx = ax.twinx() + time_interval = np.linspace(0, n_timesteps, n_timesteps) + ax.plot(time_interval, kinetic_energy[k], color=colors[0], label="") + ax.plot(time_interval, turbulent_kinetic_energy[k], color=colors[1], label="") + + # Set axis limits + ax.set_ylim(-1E-2, max_ke * 1.1) + ax_twinx.set_ylim(-1E-2, max_tke * 1.1) + ax.set_xlim(min(time_interval), max(time_interval)) + + # Set axis labels + ax.set_ylabel("Kinetic energy [m$^2$/s$^2$]", fontsize=12, color=colors[0]) + ax_twinx.set_ylabel("Turbulent kinetic\n energy [m$^2$/s$^2$]", fontsize=12, color=colors[1]) + ax.set_xlabel("Time [s]", fontsize=12) + + # Color axis ticks + ax.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) + + # Set title to probe number + ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + + +def plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity): """ Plots velocity magnitude and pressure at probe points. @@ -199,37 +212,28 @@ def plot_velocity_and_pressure(case_path, max_P, max_U, mean_velocity, n_timeste velocity (numpy.ndarray): Velocity data. 
""" # Generate plots - for k in range(n_probes): - # Create subplots - ax0 = plt.subplot(n_rows, n_cols, k + 1) - ax1 = ax0.twinx() - time_interval = np.linspace(0, n_timesteps, n_timesteps) - ax0.plot(time_interval, velocity[k], color=colors[0], label="") - ax0.plot(time_interval, mean_velocity[k], color=colors[2], linestyle='--', label="") - ax1.plot(time_interval, pressures[k], color=colors[1], label="") - - # Set axis limits (y-dir) - ax0.set_ylim(-1E-2, max_U * 1.5) - ax1.set_ylim(-1E-2, max_P * 1.5) - - # Set axis labels - ax0.set_ylabel("Velocity [m/s]", fontsize=12, color=colors[0]) - ax1.set_ylabel("Pressure [Pa]", fontsize=12, color=colors[1]) - ax0.set_xlabel("Time [s]", fontsize=12) - - # Color axis ticks - ax0.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) - ax1.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) - - # Set title to probe number - ax0.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - if save_figure: - save_path = path.join(case_path, "velocity_and_pressure.png") - print("-- Saving figure of velocity magnitude and pressure at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting velocity magnitude and pressure at probes") - plt.show() + ax_twinx = ax.twinx() + time_interval = np.linspace(0, n_timesteps, n_timesteps) + ax.plot(time_interval, velocity[k], color=colors[0], label="") + ax.plot(time_interval, mean_velocity[k], color=colors[2], linestyle='--', label="") + ax_twinx.plot(time_interval, pressures[k], color=colors[1], label="") + + # Set axis limits + ax.set_ylim(-1E-2, max_U * 1.5) + ax_twinx.set_ylim(-1E-2, max_P * 1.5) + ax.set_xlim(min(time_interval), max(time_interval)) + + # Set axis labels + ax.set_ylabel("Velocity [m/s]", fontsize=12, color=colors[0]) + ax_twinx.set_ylabel("Pressure [Pa]", fontsize=12, color=colors[1]) + ax.set_xlabel("Time [s]", fontsize=12) + + # Color axis ticks + ax.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) + + # Set title to probe number + ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, From f5a8c9584b97df457df8ac4cd307f85cb38e3ba4 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 14:32:03 +0200 Subject: [PATCH 07/23] Update test --- tests/test_visualize_probes.py | 44 +++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/tests/test_visualize_probes.py b/tests/test_visualize_probes.py index 0f21fbbf..3cd14c94 100644 --- a/tests/test_visualize_probes.py +++ b/tests/test_visualize_probes.py @@ -3,22 +3,50 @@ from vampy.automatedPostprocessing.visualize_probes import visualize_probes -def test_visualize_probes(): +def test_visualize_all_probes(): # Path to test results and params + T = 100 + dt = 0.1 probe_path = "tests/test_data/results/Probes" probe_frequency = 100 - file_path = path.join(probe_path, "Probes.png") + u_p_path = path.join(probe_path, "velocity_and_pressure.png") + ke_path = path.join(probe_path, "kinetic_energy.png") + spectrum_path = path.join(probe_path, "energy_spectrum.png") + spectrogram_path = path.join(probe_path, "spectrogram.png") # Run post-processing - visualize_probes(probe_path, probe_frequency, save_figure=True, show_figure=False) + visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=[], 
save_figure=True, show_figure=False) # Check that output image exists - assert path.exists(file_path) - assert path.getsize(file_path) > 0 + for file_path in [u_p_path, ke_path, spectrum_path, spectrogram_path]: + assert path.exists(file_path) + assert path.getsize(file_path) > 0 - # Remove generatedoutput - remove(file_path) + # Remove generatedoutput + remove(file_path) + + +def test_visualize_two_probes(): + # Path to test results and params + probes = [1, 2] + T = 100 + dt = 0.1 + probe_path = "tests/test_data/results/Probes" + probe_frequency = 100 + + # Run post-processing + visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=probes, save_figure=True, show_figure=False) + + # Check that output image exists + for probe in probes: + results_path = path.join(probe_path, f"probes_{probe}.png") + assert path.exists(results_path) + assert path.getsize(results_path) > 0 + + # Remove generatedoutput + remove(results_path) if __name__ == "__main__": - test_visualize_probes() + test_visualize_all_probes() + test_visualize_two_probes() From c87e0083580e50a4a8dcf1076e92ead6e1c62645 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 14:32:22 +0200 Subject: [PATCH 08/23] Comment out hacks for testing --- .../automatedPostprocessing/visualize_probes.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index bc7b6fd9..20c1abe9 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -262,7 +262,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc max_tke = 0 # FIXME: Revert saved_points_per_cycle = int(T / dt) - saved_points_per_cycle = 951 + #saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) mean_velocity_u = np.zeros((n_probes, n_timesteps)) @@ -402,12 +402,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # FIXME: Remove - n_stop = 2853 - velocity = velocity[:, :n_stop] - velocity_u = velocity_u[:, :n_stop] - velocity_v = velocity_v[:, :n_stop] - velocity_w = velocity_w[:, :n_stop] - pressures = pressures[:, :n_stop] + # n_stop = 2853 + # velocity = velocity[:, :n_stop] + # velocity_u = velocity_u[:, :n_stop] + # velocity_v = velocity_v[:, :n_stop] + # velocity_w = velocity_w[:, :n_stop] + # pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From 64e6b059656ce4447f31f61b9c3033fc7b3e82ef Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 14:40:24 +0200 Subject: [PATCH 09/23] Fix flake8 errors --- .../postprocessing_common.py | 4 ++-- .../visualize_probes.py | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/vampy/automatedPostprocessing/postprocessing_common.py b/src/vampy/automatedPostprocessing/postprocessing_common.py index e72e244d..1503eb5c 100644 --- a/src/vampy/automatedPostprocessing/postprocessing_common.py +++ b/src/vampy/automatedPostprocessing/postprocessing_common.py @@ -92,8 +92,8 @@ def read_command_line(): args = parser.parse_args() return args.case, args.nu, args.rho, args.dt, args.velocity_degree, args.pressure_degree, args.probe_frequency, \ - args.T, args.save_frequency, args.times_to_average, args.start_cycle, args.sample_step, \ - 
args.average_over_cycles, args.probes_to_plot + args.T, args.save_frequency, args.times_to_average, args.start_cycle, args.sample_step, \ + args.average_over_cycles, args.probes_to_plot def epsilon(u): diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 20c1abe9..2488a2c3 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -23,8 +23,8 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s save_figure (bool): Saves figure if True. """ - max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps = load_probes( - case_path, probe_saving_frequency) + max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ + = load_probes(case_path, probe_saving_frequency) mean_velocity, kinetic_energy, turbulent_kinetic_energy, max_ke, max_tke = compute_mean_velocity_and_kinetic_energy( T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w) @@ -262,7 +262,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc max_tke = 0 # FIXME: Revert saved_points_per_cycle = int(T / dt) - #saved_points_per_cycle = 951 + saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) mean_velocity_u = np.zeros((n_probes, n_timesteps)) @@ -402,12 +402,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # FIXME: Remove - # n_stop = 2853 - # velocity = velocity[:, :n_stop] - # velocity_u = velocity_u[:, :n_stop] - # velocity_v = velocity_v[:, :n_stop] - # velocity_w = velocity_w[:, :n_stop] - # pressures = pressures[:, :n_stop] + n_stop = 2853 + velocity = velocity[:, :n_stop] + velocity_u = velocity_u[:, :n_stop] + velocity_v = velocity_v[:, :n_stop] + velocity_w = velocity_w[:, :n_stop] + pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From 6e7b0bcdf56ea2331879be7d06bdcf17f4f39b14 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 14:42:35 +0200 Subject: [PATCH 10/23] Uncomment testing hacks --- .../automatedPostprocessing/visualize_probes.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 2488a2c3..01d1d053 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -262,7 +262,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc max_tke = 0 # FIXME: Revert saved_points_per_cycle = int(T / dt) - saved_points_per_cycle = 951 + #saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) mean_velocity_u = np.zeros((n_probes, n_timesteps)) @@ -402,12 +402,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # FIXME: Remove - n_stop = 2853 - velocity = velocity[:, :n_stop] - velocity_u = velocity_u[:, :n_stop] - velocity_v = velocity_v[:, :n_stop] - velocity_w = velocity_w[:, :n_stop] - pressures = pressures[:, :n_stop] + # n_stop = 2853 + # velocity = velocity[:, :n_stop] + # velocity_u = velocity_u[:, 
:n_stop] + # velocity_v = velocity_v[:, :n_stop] + # velocity_w = velocity_w[:, :n_stop] + # pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From 3ae9038089d76ef0025e65b4e591fc47f8341370 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 14:43:45 +0200 Subject: [PATCH 11/23] Fix flake8 --- src/vampy/automatedPostprocessing/visualize_probes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 01d1d053..c0752f60 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -262,7 +262,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc max_tke = 0 # FIXME: Revert saved_points_per_cycle = int(T / dt) - #saved_points_per_cycle = 951 + # saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) mean_velocity_u = np.zeros((n_probes, n_timesteps)) From 7ac928c19c2535a1b52ee8b5c8caf81ebec94215 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 15:15:35 +0200 Subject: [PATCH 12/23] Refactor code --- .../visualize_probes.py | 168 +++++++++--------- 1 file changed, 87 insertions(+), 81 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index c0752f60..eedceea6 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -104,19 +104,23 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s plt.show() -def plot_spectrogram(k, ax, velocity): +def plot_spectrogram(k, ax, velocity, color_map='jet', font_size=12): """ Plots spectrogram at probe points. Args: + k (int): Current probe index. + ax (matplotlib.axis): The matplotlib axis to plot on. velocity(numpy.ndarray): Velocity data. + color_map (str): Color map for the spectrogram plot. + font_size (int): Font size for the labels. """ # Filter out negative values - ax.specgram(velocity[k], NFFT=256, Fs=2, noverlap=128, cmap="jet") + ax.specgram(velocity[k], NFFT=256, Fs=2, noverlap=128, cmap=color_map) # Set axis labels - ax.set_ylabel("Frequency [Hz]", fontsize=12) - ax.set_xlabel("Time [ms]", fontsize=12) + ax.set_ylabel("Frequency [Hz]", fontsize=font_size) + ax.set_xlabel("Time [ms]", fontsize=font_size) # Set title to probe number ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) @@ -127,16 +131,13 @@ def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): Plots energy spectrum at probe points. Args: - case_path (str): Path to results from simulation. + k (int): Current probe index. + ax (matplotlib.axis): The matplotlib axis to plot on. freq (numpy.ndarray): Frequency data. kinetic_energy_spectrum (numpy.ndarray): Kinetic energy spectrum data. max_kes (float): Maximum value in the kinetic energy spectrum. - n_cols (int): Number of columns in the plot grid. - n_probes (int): Number of probe points. - n_rows (int): Number of rows in the plot grid. - save_figure (bool): Saves figure if True. - show_figure (bool): Shows figure if True. 
""" + # Create subplots ax.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") ax.set_xscale("log") @@ -162,6 +163,8 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur Plots kinetic energy and turbulent kinetic energy at probe points. Args: + k (int): Current probe index. + ax (matplotlib.axis): The matplotlib axis to plot on. kinetic_energy (numpy.ndarray): Kinetic energy data. max_ke (float): Maximum kinetic energy value. max_tke (float): Maximum turbulent kinetic energy value. @@ -169,7 +172,6 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur turbulent_kinetic_energy (numpy.ndarray): Turbulent kinetic energy data. """ # Generate plots for kinetic energy - # Create subplots ax_twinx = ax.twinx() time_interval = np.linspace(0, n_timesteps, n_timesteps) ax.plot(time_interval, kinetic_energy[k], color=colors[0], label="") @@ -198,17 +200,13 @@ def plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, Plots velocity magnitude and pressure at probe points. Args: - case_path (str): Path to results from simulation. + k (int): Current probe index. + ax (matplotlib.axis): The matplotlib axis to plot on. max_P (float): Maximum pressure value. max_U (float): Maximum velocity value. mean_velocity (numpy.ndarray): Mean velocity data. n_timesteps (int): Number of time steps. - n_cols (int): Number of columns in the plot grid. - n_probes (int): Number of probe points. - n_rows (int): Number of rows in the plot grid. pressures (numpy.ndarray): Pressure data. - save_figure (bool): Saves figure if True. - show_figure (bool): Shows figure if True. velocity (numpy.ndarray): Velocity data. """ # Generate plots @@ -258,16 +256,12 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc numpy.ndarray: Mean velocity data. numpy.ndarray: Turbulent kinetic energy data. 
""" - max_ke = 0 - max_tke = 0 - # FIXME: Revert saved_points_per_cycle = int(T / dt) - # saved_points_per_cycle = 951 + # FIXME: Revert + saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) + mean_velocity = np.zeros((n_probes, n_timesteps)) - mean_velocity_u = np.zeros((n_probes, n_timesteps)) - mean_velocity_v = np.zeros((n_probes, n_timesteps)) - mean_velocity_w = np.zeros((n_probes, n_timesteps)) kinetic_energy = np.zeros((n_probes, n_timesteps)) turbulent_kinetic_energy = np.zeros((n_probes, n_timesteps)) for k in range(n_probes): @@ -275,43 +269,34 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc u = velocity_u[k] v = velocity_v[k] w = velocity_w[k] - u_mean = 0 - u_mean_u = 0 - u_mean_v = 0 - u_mean_w = 0 - for n in range(n_cycles): - u_mean += U[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - u_mean_u += u[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - u_mean_v += v[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - u_mean_w += w[n * saved_points_per_cycle:(n + 1) * saved_points_per_cycle] - - u_mean_u = np.array(u_mean_u) - u_mean_v = np.array(u_mean_v) - u_mean_w = np.array(u_mean_w) - - u_mean /= n_cycles - u_mean_u /= n_cycles - u_mean_v /= n_cycles - u_mean_w /= n_cycles - mean_velocity[k] = list(u_mean) * n_cycles - mean_velocity_u[k] = list(u_mean_u) * n_cycles - mean_velocity_v[k] = list(u_mean_v) * n_cycles - mean_velocity_w[k] = list(u_mean_w) * n_cycles + + # Calculate mean velocities + u_mean = np.mean(U.reshape(-1, saved_points_per_cycle), axis=0) + u_mean_u = np.mean(u.reshape(-1, saved_points_per_cycle), axis=0) + u_mean_v = np.mean(v.reshape(-1, saved_points_per_cycle), axis=0) + u_mean_w = np.mean(w.reshape(-1, saved_points_per_cycle), axis=0) + + # Replicate mean velocities to match original array shape + mean_velocity[k] = np.tile(u_mean, n_cycles) + u_mean_u = np.tile(u_mean_u, n_cycles) + u_mean_v = np.tile(u_mean_v, n_cycles) + u_mean_w = np.tile(u_mean_w, n_cycles) # Calculate total kinetic energy - kinetic_energy[k] = 0.5 * (u ** 2 + v ** 2 + w ** 3) + kinetic_energy[k] = 0.5 * (u ** 2 + v ** 2 + w ** 2) # Calculate fluctuating velocity - fluctuating_velocity_u = u - mean_velocity_u[k] - fluctuating_velocity_v = v - mean_velocity_v[k] - fluctuating_velocity_w = w - mean_velocity_w[k] + fluctuating_velocity_u = u - u_mean_u + fluctuating_velocity_v = v - u_mean_v + fluctuating_velocity_w = w - u_mean_w turbulent_kinetic_energy[k] = 0.5 * ((fluctuating_velocity_u ** 2) + (fluctuating_velocity_v ** 2) + (fluctuating_velocity_w ** 2)) - max_ke = np.max(kinetic_energy) if np.max(kinetic_energy) > max_ke else max_ke - max_tke = np.max(turbulent_kinetic_energy) if np.max(turbulent_kinetic_energy) > max_tke else max_tke + max_ke = np.max(kinetic_energy) + max_tke = np.max(turbulent_kinetic_energy) + return mean_velocity, kinetic_energy, turbulent_kinetic_energy, max_ke, max_tke @@ -351,23 +336,12 @@ def load_probes(case_path, probe_saving_frequency): while True: tstep = probe_saving_frequency * (counter + 1) - # Load velocity component and pressure probes - u_path = path.join(case_path, "u_x_{}.probes".format(tstep)) - v_path = path.join(case_path, "u_y_{}.probes".format(tstep)) - w_path = path.join(case_path, "u_z_{}.probes".format(tstep)) - p_path = path.join(case_path, "p_{}.probes".format(tstep)) - - try: - p = np.load(p_path, allow_pickle=True) - u = np.load(u_path, allow_pickle=True) - v = np.load(v_path, allow_pickle=True) - w = np.load(w_path, 
allow_pickle=True) - except Exception: - print("-- Finished reading in probes") + probe_data = load_probe_data(case_path, tstep) + if probe_data is None: break - # Create velocity magnitude - U = np.linalg.norm([u, v, w], axis=0) + p, U, u, v, w = probe_data + if counter == 0: # Set plot parameters n_probes = u.shape[0] @@ -402,12 +376,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # FIXME: Remove - # n_stop = 2853 - # velocity = velocity[:, :n_stop] - # velocity_u = velocity_u[:, :n_stop] - # velocity_v = velocity_v[:, :n_stop] - # velocity_w = velocity_w[:, :n_stop] - # pressures = pressures[:, :n_stop] + n_stop = 2853 + velocity = velocity[:, :n_stop] + velocity_u = velocity_u[:, :n_stop] + velocity_v = velocity_v[:, :n_stop] + velocity_w = velocity_w[:, :n_stop] + pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: @@ -419,6 +393,39 @@ def load_probes(case_path, probe_saving_frequency): return max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps +def load_probe_data(case_path, tstep): + """ + Loads probe data for a given timestep. + + Args: + case_path (str): Path to results from simulation. + tstep (int): Current timestep. + + Returns: + numpy.ndarray: Pressure data. + numpy.ndarray: Velocity data. + """ + # Load velocity component and pressure probes + u_path = path.join(case_path, f"u_x_{tstep}.probes") + v_path = path.join(case_path, f"u_y_{tstep}.probes") + w_path = path.join(case_path, f"u_z_{tstep}.probes") + p_path = path.join(case_path, f"p_{tstep}.probes") + + try: + p = np.load(p_path, allow_pickle=True) + u = np.load(u_path, allow_pickle=True) + v = np.load(v_path, allow_pickle=True) + w = np.load(w_path, allow_pickle=True) + except Exception: + print("-- Finished reading in probes") + return None + + # Create velocity magnitude + U = np.linalg.norm([u, v, w], axis=0) + + return p, U, u, v, w + + def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w): """ Computes the energy spectrum for each probe based on velocity components. @@ -434,7 +441,6 @@ def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w): numpy.ndarray: Frequency values. float: Maximum value in the kinetic energy spectrum. 
""" - max_kes = 0 # Define FFT function def get_fft(x): @@ -450,8 +456,8 @@ def get_fft(x): for k in range(n_probes): # FFT of the velocity signals fft_u, freq_u = get_fft(velocity_u[k]) - fft_v, freq_v = get_fft(velocity_v[k]) - fft_w, freq_w = get_fft(velocity_w[k]) + fft_v, _ = get_fft(velocity_v[k]) + fft_w, _ = get_fft(velocity_w[k]) # Compute the energy spectrum (proportional to square of absolute fft values) E_u = np.abs(fft_u) ** 2 @@ -462,15 +468,15 @@ def get_fft(x): E_total = E_u + E_v + E_w # Store positive values of total kinetic energy spectrum - positive_indices = freq_u > 0 - freq = freq_u[positive_indices] - kinetic_energy_spectrum.append(E_total[positive_indices]) - max_kes = np.max(kinetic_energy_spectrum) if np.max(kinetic_energy_spectrum) > max_kes else max_kes + kinetic_energy_spectrum.append(E_total[freq_u > 0]) # Convert list to numpy array kinetic_energy_spectrum = np.array(kinetic_energy_spectrum) - return kinetic_energy_spectrum, freq, max_kes + # Compute the maximum value + max_kes = np.max(kinetic_energy_spectrum) + + return kinetic_energy_spectrum, freq_u[freq_u > 0], max_kes def main_probe(): From eae3b9c6f0cf280d82d999138b9e1add4d1dfe80 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 15:33:40 +0200 Subject: [PATCH 13/23] Refactor script --- .../visualize_probes.py | 165 +++++++++--------- tests/test_visualize_probes.py | 4 +- 2 files changed, 82 insertions(+), 87 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index eedceea6..97706b3f 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -9,20 +9,20 @@ colors = ['red', 'blue', 'purple'] -def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, show_figure=True, save_figure=False): +def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, show_figure=False, save_figure=False): """ Loads probe points from completed CFD simulation and visualizes velocity (magnitude) and pressure at respective probes. Assuming results are in mm/s. Args: - case_path (str): Path to results from simulation. - probe_saving_frequency (int): Interval between saving probes. - T (float): One cardiac cycle, in [ms]. - dt (float): Time step of simulation. - show_figure (bool): Shows figure if True. - save_figure (bool): Saves figure if True. + case_path: Path to results from simulation. + probe_saving_frequency: Interval between saving probes. + T: One cardiac cycle, in [ms]. + dt: Time step of simulation. + probes_to_plot: List of integers corresponding to single probe points + show_figure: Shows figure if True. + save_figure: Saves figure if True. 
""" - max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ = load_probes(case_path, probe_saving_frequency) @@ -33,59 +33,9 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s if not probes_to_plot: # Plot all probes in same plot - - # Plot velocity - for k in range(n_probes): - ax = plt.subplot(n_rows, n_cols, k + 1) - plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) - - if save_figure: - save_path = path.join(case_path, "velocity_and_pressure.png") - print("-- Saving figure of velocity magnitude and pressure at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting velocity magnitude and pressure at probes") - plt.show() - - # Plot kinetic energy - for k in range(n_probes): - ax = plt.subplot(n_rows, n_cols, k + 1) - plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy) - - if save_figure: - save_path = path.join(case_path, "kinetic_energy.png") - print("-- Saving figure of kinetic energy and turbulent kinetic energy at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting kinetic energy and turbulent kinetic energy at probes") - plt.show() - - # Plot energy spectrum - for k in range(n_probes): - ax = plt.subplot(n_rows, n_cols, k + 1) - plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes) - - if save_figure: - save_path = path.join(case_path, "energy_spectrum.png") - print("-- Saving figure of energy spectrum at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting energy spectrum at probes") - plt.show() - - # Plot spectrogram - for k in range(n_probes): - ax = plt.subplot(n_rows, n_cols, k + 1) - plot_spectrogram(k, ax, velocity_u) - - if save_figure: - save_path = path.join(case_path, "spectrogram.png") - print("-- Saving figure of spectrogram at probes at {}".format(save_path)) - plt.savefig(save_path) - if show_figure: - print("-- Plotting spectrogram at probes") - plt.show() - + plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velocity, n_timesteps, pressures, + velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, freq, + kinetic_energy_spectrum, max_kes, velocity_u, save_figure, show_figure) else: # Plot probes in separate plots for k in probes_to_plot: @@ -95,13 +45,61 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s plot_energy_spectrum(k, ax3, freq, kinetic_energy_spectrum, max_kes) plot_spectrogram(k, ax4, velocity_u) - if save_figure: - save_path = path.join(case_path, f"probes_{k}.png") - print(f"-- Saving figure for probes at {save_path}") - plt.savefig(save_path) - if show_figure: - print(f"-- Plotting quantities for probe {k}") - plt.show() + print(f"-- Plotting quantities for probe {k}") + save_and_show_plot(case_path, f"probes_{k}.png", save_figure, show_figure) + + +def save_and_show_plot(case_path, filename, save, show): + """ + Saves and/or shows a figure based on the given flags. + + Args: + case_path: Path to the results from the simulation. + filename: Name of the file to save the plot. + save: If True, the figure will be saved. + show: If True, the figure will be displayed. 
+ """ + if save: + save_path = path.join(case_path, filename) + print(f"-- Saving figure at {save_path}") + plt.savefig(save_path) + if show: + print("-- Showing figure") + plt.show() + + +def plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velocity, n_timesteps, pressures, + velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, freq, + kinetic_energy_spectrum, max_kes, velocity_u, save_figure, show_figure): + """ + Plots data for all probes. + + Args: + All the input parameters for the respective plotting functions. + """ + # Plot velocity and pressure + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) + save_and_show_plot(case_path, "velocity_and_pressure.png", save_figure, show_figure) + + # Plot kinetic energy + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy) + save_and_show_plot(case_path, "kinetic_energy.png", save_figure, show_figure) + + # Plot energy spectrum + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes) + save_and_show_plot(case_path, "energy_spectrum.png", save_figure, show_figure) + + # Plot spectrogram + for k in range(n_probes): + ax = plt.subplot(n_rows, n_cols, k + 1) + plot_spectrogram(k, ax, velocity_u) + save_and_show_plot(case_path, "spectrogram.png", save_figure, show_figure) def plot_spectrogram(k, ax, velocity, color_map='jet', font_size=12): @@ -110,7 +108,7 @@ def plot_spectrogram(k, ax, velocity, color_map='jet', font_size=12): Args: k (int): Current probe index. - ax (matplotlib.axis): The matplotlib axis to plot on. + ax (matplotlib.axis.Axes): The matplotlib axis to plot on. velocity(numpy.ndarray): Velocity data. color_map (str): Color map for the spectrogram plot. font_size (int): Font size for the labels. @@ -132,7 +130,7 @@ def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): Args: k (int): Current probe index. - ax (matplotlib.axis): The matplotlib axis to plot on. + ax (matplotlib.axis.Axes): The matplotlib axis to plot on. freq (numpy.ndarray): Frequency data. kinetic_energy_spectrum (numpy.ndarray): Kinetic energy spectrum data. max_kes (float): Maximum value in the kinetic energy spectrum. @@ -148,12 +146,9 @@ def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): ax.set_xlim(min(freq), max(freq)) # Set axis labels - ax.set_ylabel("E(k)", fontsize=12, color=colors[0]) + ax.set_ylabel("E(k)", fontsize=12) ax.set_xlabel("k", fontsize=12) - # Color axis ticks - ax.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) - # Set title to probe number ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) @@ -164,7 +159,7 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur Args: k (int): Current probe index. - ax (matplotlib.axis): The matplotlib axis to plot on. + ax (matplotlib.axis.Axes): The matplotlib axis to plot on. kinetic_energy (numpy.ndarray): Kinetic energy data. max_ke (float): Maximum kinetic energy value. max_tke (float): Maximum turbulent kinetic energy value. @@ -201,7 +196,7 @@ def plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, Args: k (int): Current probe index. - ax (matplotlib.axis): The matplotlib axis to plot on. 
+ ax (matplotlib.axis.Axes): The matplotlib axis to plot on. max_P (float): Maximum pressure value. max_U (float): Maximum velocity value. mean_velocity (numpy.ndarray): Mean velocity data. @@ -258,7 +253,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc """ saved_points_per_cycle = int(T / dt) # FIXME: Revert - saved_points_per_cycle = 951 + # saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) @@ -375,13 +370,13 @@ def load_probes(case_path, probe_saving_frequency): velocity_v = np.array(velocity_v) velocity_w = np.array(velocity_w) pressures = np.array(pressures) - # FIXME: Remove - n_stop = 2853 - velocity = velocity[:, :n_stop] - velocity_u = velocity_u[:, :n_stop] - velocity_v = velocity_v[:, :n_stop] - velocity_w = velocity_w[:, :n_stop] - pressures = pressures[:, :n_stop] + # # FIXME: Remove + # n_stop = 2853 + # velocity = velocity[:, :n_stop] + # velocity_u = velocity_u[:, :n_stop] + # velocity_v = velocity_v[:, :n_stop] + # velocity_w = velocity_w[:, :n_stop] + # pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: @@ -482,7 +477,7 @@ def get_fft(x): def main_probe(): # Read command line arguments folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _, probes_to_plot = read_command_line() - visualize_probes(folder, probe_freq, T, dt, probes_to_plot, save_figure=True) + visualize_probes(folder, probe_freq, T, dt, probes_to_plot, show_figure=True, save_figure=True) if __name__ == '__main__': diff --git a/tests/test_visualize_probes.py b/tests/test_visualize_probes.py index 3cd14c94..d31f4f09 100644 --- a/tests/test_visualize_probes.py +++ b/tests/test_visualize_probes.py @@ -15,7 +15,7 @@ def test_visualize_all_probes(): spectrogram_path = path.join(probe_path, "spectrogram.png") # Run post-processing - visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=[], save_figure=True, show_figure=False) + visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=[], show_figure=False, save_figure=True) # Check that output image exists for file_path in [u_p_path, ke_path, spectrum_path, spectrogram_path]: @@ -35,7 +35,7 @@ def test_visualize_two_probes(): probe_frequency = 100 # Run post-processing - visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=probes, save_figure=True, show_figure=False) + visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=probes, show_figure=False, save_figure=True) # Check that output image exists for probe in probes: From e6f4b2afbdc34267582f54c3771c25da1122d8a5 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 15:54:43 +0200 Subject: [PATCH 14/23] Add turbulence intenstiy --- .../visualize_probes.py | 46 +++++++++++-------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 97706b3f..a2282a76 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -26,7 +26,7 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ = load_probes(case_path, probe_saving_frequency) - mean_velocity, kinetic_energy, turbulent_kinetic_energy, max_ke, max_tke = compute_mean_velocity_and_kinetic_energy( + mean_velocity, 
kinetic_energy, turbulent_kinetic_energy, turbulence_intensity, max_ke, max_tke = compute_mean_velocity_and_kinetic_energy( T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w) kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w) @@ -34,14 +34,15 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s if not probes_to_plot: # Plot all probes in same plot plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velocity, n_timesteps, pressures, - velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, freq, + velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, turbulence_intensity, freq, kinetic_energy_spectrum, max_kes, velocity_u, save_figure, show_figure) else: # Plot probes in separate plots for k in probes_to_plot: fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) plot_velocity_and_pressure(k, ax1, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) - plot_kinetic_energy(k, ax2, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy) + plot_kinetic_energy(k, ax2, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy, + turbulence_intensity) plot_energy_spectrum(k, ax3, freq, kinetic_energy_spectrum, max_kes) plot_spectrogram(k, ax4, velocity_u) @@ -69,7 +70,7 @@ def save_and_show_plot(case_path, filename, save, show): def plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velocity, n_timesteps, pressures, - velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, freq, + velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, turbulence_intensity, freq, kinetic_energy_spectrum, max_kes, velocity_u, save_figure, show_figure): """ Plots data for all probes. @@ -86,7 +87,8 @@ def plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velo # Plot kinetic energy for k in range(n_probes): ax = plt.subplot(n_rows, n_cols, k + 1) - plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy) + plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy, + turbulence_intensity) save_and_show_plot(case_path, "kinetic_energy.png", save_figure, show_figure) # Plot energy spectrum @@ -153,7 +155,8 @@ def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) -def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy): +def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy, + turbulence_intensity): """ Plots kinetic energy and turbulent kinetic energy at probe points. 
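As an illustrative aside between these hunks: the turbulence intensity added in this patch follows the usual definition TI = u'/Ū, with u' = sqrt(2k/3) derived from the turbulent kinetic energy k of the fluctuating velocity, and Ū the magnitude of the phase-averaged (cycle-averaged) velocity, matching the u_prime / U_bar expression introduced later in this patch. A minimal, self-contained sketch of that calculation for a single probe is given below; the function name, the trimming of incomplete cycles, and the synthetic inputs are illustrative assumptions, not code from this patch.

import numpy as np


def turbulence_intensity(u, v, w, points_per_cycle):
    """Illustrative sketch: phase-averaged turbulence intensity for one probe."""
    n_cycles = u.size // points_per_cycle
    n_keep = n_cycles * points_per_cycle  # drop any incomplete trailing cycle
    u, v, w = u[:n_keep], v[:n_keep], w[:n_keep]

    def phase_mean(x):
        # Average each point in the cycle over all cycles, then tile back to full length
        return np.tile(x.reshape(n_cycles, points_per_cycle).mean(axis=0), n_cycles)

    mean_u, mean_v, mean_w = phase_mean(u), phase_mean(v), phase_mean(w)

    # Turbulent kinetic energy of the fluctuating components
    tke = 0.5 * ((u - mean_u) ** 2 + (v - mean_v) ** 2 + (w - mean_w) ** 2)

    # TI = u' / |U_mean|, with u' = sqrt(2/3 * k)
    u_prime = np.sqrt(2.0 / 3.0 * tke)
    mean_speed = np.sqrt(mean_u ** 2 + mean_v ** 2 + mean_w ** 2)
    # A guard against zero mean speed may be needed for probes in nearly stagnant flow
    return u_prime / mean_speed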
@@ -171,19 +174,22 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur time_interval = np.linspace(0, n_timesteps, n_timesteps) ax.plot(time_interval, kinetic_energy[k], color=colors[0], label="") ax.plot(time_interval, turbulent_kinetic_energy[k], color=colors[1], label="") + ax_twinx.plot(time_interval, turbulence_intensity[k], color=colors[2], label="") # Set axis limits ax.set_ylim(-1E-2, max_ke * 1.1) - ax_twinx.set_ylim(-1E-2, max_tke * 1.1) ax.set_xlim(min(time_interval), max(time_interval)) + # Scale axis + ax_twinx.set_yscale("log") + # Set axis labels - ax.set_ylabel("Kinetic energy [m$^2$/s$^2$]", fontsize=12, color=colors[0]) - ax_twinx.set_ylabel("Turbulent kinetic\n energy [m$^2$/s$^2$]", fontsize=12, color=colors[1]) + ax.set_ylabel("Energy [m$^2$/s$^2$]", fontsize=12) + ax_twinx.set_ylabel("Turbulence intensity [-]", fontsize=12, color=colors[2]) ax.set_xlabel("Time [s]", fontsize=12) # Color axis ticks - ax.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) + ax.tick_params(axis='y', which='major', labelsize=12) ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) # Set title to probe number @@ -253,12 +259,13 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc """ saved_points_per_cycle = int(T / dt) # FIXME: Revert - # saved_points_per_cycle = 951 + saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) kinetic_energy = np.zeros((n_probes, n_timesteps)) turbulent_kinetic_energy = np.zeros((n_probes, n_timesteps)) + turbulence_intensity = np.zeros((n_probes, n_timesteps)) for k in range(n_probes): U = velocity[k] u = velocity_u[k] @@ -288,11 +295,14 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc turbulent_kinetic_energy[k] = 0.5 * ((fluctuating_velocity_u ** 2) + (fluctuating_velocity_v ** 2) + (fluctuating_velocity_w ** 2)) + u_prime = np.sqrt(2 / 3 * turbulent_kinetic_energy[k]) + U_bar = np.sqrt(u_mean_u ** 2 + u_mean_v ** 2 + u_mean_w ** 2) + turbulence_intensity[k] = u_prime / U_bar max_ke = np.max(kinetic_energy) max_tke = np.max(turbulent_kinetic_energy) - return mean_velocity, kinetic_energy, turbulent_kinetic_energy, max_ke, max_tke + return mean_velocity, kinetic_energy, turbulent_kinetic_energy, turbulence_intensity, max_ke, max_tke def load_probes(case_path, probe_saving_frequency): @@ -371,12 +381,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # # FIXME: Remove - # n_stop = 2853 - # velocity = velocity[:, :n_stop] - # velocity_u = velocity_u[:, :n_stop] - # velocity_v = velocity_v[:, :n_stop] - # velocity_w = velocity_w[:, :n_stop] - # pressures = pressures[:, :n_stop] + n_stop = 2853 + velocity = velocity[:, :n_stop] + velocity_u = velocity_u[:, :n_stop] + velocity_v = velocity_v[:, :n_stop] + velocity_w = velocity_w[:, :n_stop] + pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From d452899e68e7143c91e8cf3b9cad7b20d34f5ed1 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Fri, 30 Jun 2023 15:57:32 +0200 Subject: [PATCH 15/23] Comment hacks --- .../automatedPostprocessing/visualize_probes.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index a2282a76..fbd4f813 100644 --- 
a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -259,7 +259,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc """ saved_points_per_cycle = int(T / dt) # FIXME: Revert - saved_points_per_cycle = 951 + # saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) @@ -380,13 +380,13 @@ def load_probes(case_path, probe_saving_frequency): velocity_v = np.array(velocity_v) velocity_w = np.array(velocity_w) pressures = np.array(pressures) - # # FIXME: Remove - n_stop = 2853 - velocity = velocity[:, :n_stop] - velocity_u = velocity_u[:, :n_stop] - velocity_v = velocity_v[:, :n_stop] - velocity_w = velocity_w[:, :n_stop] - pressures = pressures[:, :n_stop] + # # # FIXME: Remove + # n_stop = 2853 + # velocity = velocity[:, :n_stop] + # velocity_u = velocity_u[:, :n_stop] + # velocity_v = velocity_v[:, :n_stop] + # velocity_w = velocity_w[:, :n_stop] + # pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From 82ba8e9a5a4782c00b3e7a5369bd852f504478a6 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Sun, 2 Jul 2023 17:01:13 +0200 Subject: [PATCH 16/23] Update docstrings for viz probe --- .../visualize_probes.py | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index fbd4f813..33d543f6 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -11,8 +11,9 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, show_figure=False, save_figure=False): """ - Loads probe points from completed CFD simulation and visualizes velocity (magnitude) - and pressure at respective probes. Assuming results are in mm/s. + Loads probe points from completed CFD simulation and visualizes (1) velocity (magnitude), mean velocity, + and pressure, (2) kinetic energy, turbulent kinetic energy, and turbulence intensity, (3) kinetic energy spectrum, + and (4) spectrogram of velocity (u-component) at respective probes. Assuming results are in m/s. Args: case_path: Path to results from simulation. 
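As an illustrative aside to item (4) in the docstring hunk above: the spectrogram of the u-component is drawn with Matplotlib's specgram, which in this module is called with fixed NFFT=256, Fs=2, and noverlap=128. The short, self-contained sketch below shows the same call on a synthetic signal; the signal itself, the choice Fs = 1/dt, and the window parameters are illustrative assumptions only, not the project's settings.

import matplotlib.pyplot as plt
import numpy as np

dt = 1e-3  # assumed sampling interval of the probe signal [s]
t = np.arange(0.0, 3.0, dt)
rng = np.random.default_rng(0)
# Synthetic u-component: a low-frequency oscillation plus broadband noise
u = np.sin(2 * np.pi * 5.0 * t) + 0.2 * rng.standard_normal(t.size)

fig, ax = plt.subplots()
# Fs is the sampling frequency of the signal handed to specgram
ax.specgram(u, NFFT=256, Fs=1.0 / dt, noverlap=128, cmap="jet")
ax.set_xlabel("Time [s]")
ax.set_ylabel("Frequency [Hz]")
plt.show()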
@@ -26,8 +27,9 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ = load_probes(case_path, probe_saving_frequency) - mean_velocity, kinetic_energy, turbulent_kinetic_energy, turbulence_intensity, max_ke, max_tke = compute_mean_velocity_and_kinetic_energy( - T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w) + mean_velocity, kinetic_energy, turbulent_kinetic_energy, turbulence_intensity, max_ke, max_tke \ + = compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, + velocity_w) kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w) @@ -259,7 +261,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc """ saved_points_per_cycle = int(T / dt) # FIXME: Revert - # saved_points_per_cycle = 951 + saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) @@ -381,12 +383,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # # # FIXME: Remove - # n_stop = 2853 - # velocity = velocity[:, :n_stop] - # velocity_u = velocity_u[:, :n_stop] - # velocity_v = velocity_v[:, :n_stop] - # velocity_w = velocity_w[:, :n_stop] - # pressures = pressures[:, :n_stop] + n_stop = 2853 + velocity = velocity[:, :n_stop] + velocity_u = velocity_u[:, :n_stop] + velocity_v = velocity_v[:, :n_stop] + velocity_w = velocity_w[:, :n_stop] + pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From 476e8fdd0202469d59f6ea542084b19f09791e19 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Sun, 2 Jul 2023 17:02:48 +0200 Subject: [PATCH 17/23] Comment slices --- .../automatedPostprocessing/visualize_probes.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 33d543f6..47fbdf4a 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -192,7 +192,7 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur # Color axis ticks ax.tick_params(axis='y', which='major', labelsize=12) - ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) + ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[2]) # Set title to probe number ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) @@ -261,7 +261,7 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc """ saved_points_per_cycle = int(T / dt) # FIXME: Revert - saved_points_per_cycle = 951 + # saved_points_per_cycle = 951 n_cycles = int(n_timesteps / saved_points_per_cycle) mean_velocity = np.zeros((n_probes, n_timesteps)) @@ -383,12 +383,12 @@ def load_probes(case_path, probe_saving_frequency): velocity_w = np.array(velocity_w) pressures = np.array(pressures) # # # FIXME: Remove - n_stop = 2853 - velocity = velocity[:, :n_stop] - velocity_u = velocity_u[:, :n_stop] - velocity_v = velocity_v[:, :n_stop] - velocity_w = velocity_w[:, :n_stop] - pressures = pressures[:, :n_stop] + # n_stop = 2853 + # velocity = velocity[:, :n_stop] + # velocity_u = velocity_u[:, :n_stop] + # velocity_v = 
velocity_v[:, :n_stop] + # velocity_w = velocity_w[:, :n_stop] + # pressures = pressures[:, :n_stop] # Check if data is available if len(velocity[0]) > 0: From 04500098535943950413e0d1203db27beea03870 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Sun, 2 Jul 2023 17:08:25 +0200 Subject: [PATCH 18/23] Remove log scale for KE --- src/vampy/automatedPostprocessing/visualize_probes.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 47fbdf4a..f4e31b0e 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -182,9 +182,6 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur ax.set_ylim(-1E-2, max_ke * 1.1) ax.set_xlim(min(time_interval), max(time_interval)) - # Scale axis - ax_twinx.set_yscale("log") - # Set axis labels ax.set_ylabel("Energy [m$^2$/s$^2$]", fontsize=12) ax_twinx.set_ylabel("Turbulence intensity [-]", fontsize=12, color=colors[2]) From 6a5346f93c226ac298517121dee8e16cff428a99 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Sun, 2 Jul 2023 23:49:17 +0200 Subject: [PATCH 19/23] Add theoretical energy spectrum function --- .../visualize_probes.py | 52 ++++++++++++++----- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index f4e31b0e..0c90739b 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -2,6 +2,7 @@ import matplotlib.pyplot as plt import numpy as np +from numpy.fft import fftn from vampy.automatedPostprocessing.postprocessing_common import read_command_line @@ -31,7 +32,7 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s = compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w) - kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w) + kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w, dt) if not probes_to_plot: # Plot all probes in same plot @@ -141,9 +142,26 @@ def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): """ # Create subplots + def E(k): + """ + Computes the energy spectrum E(k) of turbulent flow, using Kolmogorov's -5/3 law. + + Parameters: + k (numpy.array): List of wave numbers in Hz. + + Returns: + E_k (numpy.array): Energy spectrum E(k) at the given wave numbers. + """ + return k ** (-5 / 3) + + energy_spectrum = E(freq) + print(energy_spectrum) + ax.plot(freq, energy_spectrum, color="black", linestyle="--", linewidth=3, label="") ax.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") ax.set_xscale("log") ax.set_yscale("log") + ax.annotate('$k^{-5/3}$', xy=(np.mean(freq) / 4, np.mean(kinetic_energy_spectrum[k])), xycoords='data', + textcoords='data', color='black', fontsize=15) # Set axis limits ax.set_ylim(None, max_kes * 1.1) @@ -430,7 +448,7 @@ def load_probe_data(case_path, tstep): return p, U, u, v, w -def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w): +def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w, dt): """ Computes the energy spectrum for each probe based on velocity components. 
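As an illustrative aside between these hunks: the dashed curve added above is Kolmogorov's k^(-5/3) inertial-range scaling drawn over the probe's FFT-based energy spectrum, and the hunk below computes that spectrum from the squared FFT magnitudes of the three velocity components with frequencies from np.fft.fftfreq. A self-contained sketch of the same idea on a synthetic signal follows; the signal, the anchoring of the reference slope at the first resolved frequency, and all names are illustrative assumptions, not the patch's implementation.

import matplotlib.pyplot as plt
import numpy as np

dt = 1e-3  # assumed sampling interval [s]
t = np.arange(0.0, 2.0, dt)
rng = np.random.default_rng(1)
# Synthetic velocity components: periodic content plus broadband noise
u = np.sin(2 * np.pi * 10.0 * t) + 0.1 * rng.standard_normal(t.size)
v = np.cos(2 * np.pi * 15.0 * t) + 0.1 * rng.standard_normal(t.size)
w = 0.1 * rng.standard_normal(t.size)

# Energy density from squared FFT magnitudes, keeping positive frequencies only
energy = 0.5 * (np.abs(np.fft.fft(u)) ** 2
                + np.abs(np.fft.fft(v)) ** 2
                + np.abs(np.fft.fft(w)) ** 2)
freq = np.fft.fftfreq(t.size, d=dt)
energy, freq = energy[freq > 0], freq[freq > 0]

fig, ax = plt.subplots()
ax.loglog(freq, energy, label="E(k)")
# Kolmogorov -5/3 reference slope, scaled to pass through the first resolved point
ax.loglog(freq, energy[0] * (freq / freq[0]) ** (-5.0 / 3.0), "k--", label=r"$k^{-5/3}$")
ax.set_xlabel("k")
ax.set_ylabel("E(k)")
ax.legend()
plt.show()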
@@ -439,6 +457,7 @@ def compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w): velocity_u (numpy.ndarray): Velocity component 'u' data for each probe. velocity_v (numpy.ndarray): Velocity component 'v' data for each probe. velocity_w (numpy.ndarray): Velocity component 'w' data for each probe. + dt (float): Sampling rate at probes Returns: numpy.ndarray: Kinetic energy spectrum for each probe. @@ -458,21 +477,26 @@ def get_fft(x): # Iterate over each probe for k in range(n_probes): - # FFT of the velocity signals - fft_u, freq_u = get_fft(velocity_u[k]) - fft_v, _ = get_fft(velocity_v[k]) - fft_w, _ = get_fft(velocity_w[k]) + # Compute the Fourier Transform of the velocity fields + Fu = np.fft.fft(velocity_u[k]) + Fv = np.fft.fft(velocity_v[k]) + Fw = np.fft.fft(velocity_w[k]) - # Compute the energy spectrum (proportional to square of absolute fft values) - E_u = np.abs(fft_u) ** 2 - E_v = np.abs(fft_v) ** 2 - E_w = np.abs(fft_w) ** 2 + # Compute the energy density + Eu = np.abs(Fu) ** 2 + Ev = np.abs(Fv) ** 2 + Ew = np.abs(Fw) ** 2 + E_total = 0.5 * (Eu + Ev + Ew) - # Summing up energy spectrum from all 3 velocity components - E_total = E_u + E_v + E_w + # Compute the frequencies for each time point + N = velocity_u[k].size + freqs = np.fft.fftfreq(N, dt) # Store positive values of total kinetic energy spectrum - kinetic_energy_spectrum.append(E_total[freq_u > 0]) + freq = freqs[:N // 2] + E_total = E_total[:N // 2] + + kinetic_energy_spectrum.append(E_total) # Convert list to numpy array kinetic_energy_spectrum = np.array(kinetic_energy_spectrum) @@ -480,7 +504,7 @@ def get_fft(x): # Compute the maximum value max_kes = np.max(kinetic_energy_spectrum) - return kinetic_energy_spectrum, freq_u[freq_u > 0], max_kes + return kinetic_energy_spectrum, freq, max_kes def main_probe(): From 49bb30f712475446fc9dd3c9e0ceb8cd3a19c4b9 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Mon, 3 Jul 2023 11:58:23 +0200 Subject: [PATCH 20/23] Remove unused import --- src/vampy/automatedPostprocessing/visualize_probes.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index 0c90739b..ed9126bf 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -2,7 +2,6 @@ import matplotlib.pyplot as plt import numpy as np -from numpy.fft import fftn from vampy.automatedPostprocessing.postprocessing_common import read_command_line From 02704e02e59f7e08462622774a738bec0a9ca616 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Wed, 9 Oct 2024 22:01:18 +0200 Subject: [PATCH 21/23] Add linting and lint project --- .github/workflows/check_and_test_package.yml | 29 +- linting.sh | 7 + src/vampy/__init__.py | 42 +- src/vampy/automatedPostprocessing/__init__.py | 12 +- .../compute_flow_and_simulation_metrics.py | 340 +++++++-- .../compute_hemodynamic_indices.py | 107 ++- .../compute_velocity_and_pressure.py | 21 +- .../postprocessing_common.py | 257 ++++--- .../visualize_probes.py | 246 +++++-- .../automatedPreprocessing/DisplayData.py | 36 +- .../automatedPreprocessing/ImportData.py | 219 ++++-- .../NetworkBoundaryConditions.py | 26 +- .../automatedPreprocessing/ToolRepairSTL.py | 125 +++- src/vampy/automatedPreprocessing/__init__.py | 20 +- .../automated_preprocessing.py | 668 ++++++++++++------ .../preprocessing_common.py | 240 +++++-- src/vampy/automatedPreprocessing/simulate.py | 30 +- .../vmtk_pointselector.py | 26 +- 
src/vampy/simulation/Artery.py | 215 ++++-- src/vampy/simulation/Atrium.py | 173 ++++- src/vampy/simulation/Probe.py | 4 +- src/vampy/simulation/Womersley.py | 100 ++- src/vampy/simulation/__init__.py | 6 +- src/vampy/simulation/simulation_common.py | 37 +- ...est_compute_flow_and_simulation_metrics.py | 155 +++- tests/test_compute_hemodynamc_indices.py | 73 +- tests/test_compute_velocity_and_pressure.py | 8 +- tests/test_pre_processing.py | 48 +- tests/test_visualize_probes.py | 20 +- 29 files changed, 2367 insertions(+), 923 deletions(-) create mode 100644 linting.sh diff --git a/.github/workflows/check_and_test_package.yml b/.github/workflows/check_and_test_package.yml index 1199df52..fe441350 100644 --- a/.github/workflows/check_and_test_package.yml +++ b/.github/workflows/check_and_test_package.yml @@ -21,25 +21,20 @@ jobs: check-code: name: Check code runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Upgrade pip and setuptools - run: python3 -m pip install pip setuptools --upgrade - - - name: Install types-paramiko - run: python3 -m pip install types-paramiko - - - name: Install VaMPy - run: python3 -m pip install .[test] + - uses: actions/checkout@v3 + - uses: actions/setup-python@v5 + with: + python-version: '3.10' - - name: Check code with Flake8 - run: python3 -m flake8 + - name: Install dependencies and project + run: | + python -m pip install --upgrade pip setuptools types-paramiko + python -m pip install '.[test]' - - name: Check code with mypy - run: python3 -m mypy + - name: Run linting and checks + run: | + bash linting.sh test-code: needs: check-code @@ -59,7 +54,7 @@ jobs: # https://github.com/marketplace/actions/setup-miniconda#use-a-default-shell defaults: run: - shell: bash -l {0} + shell: bash -el {0} steps: - name: Checkout diff --git a/linting.sh b/linting.sh new file mode 100644 index 00000000..c00e8f5d --- /dev/null +++ b/linting.sh @@ -0,0 +1,7 @@ +#! 
/usr/bin/env bash + +isort src tests + +black src tests + +flake8 src tests \ No newline at end of file diff --git a/src/vampy/__init__.py b/src/vampy/__init__.py index 5f25ca24..199bdf82 100644 --- a/src/vampy/__init__.py +++ b/src/vampy/__init__.py @@ -1,29 +1,31 @@ """Top-level package for VaMPy.""" + from importlib.metadata import metadata # Imports from post-processing -from .automatedPostprocessing import compute_flow_and_simulation_metrics -from .automatedPostprocessing import compute_hemodynamic_indices -from .automatedPostprocessing import compute_velocity_and_pressure -from .automatedPostprocessing import postprocessing_common -from .automatedPostprocessing import visualize_probes +from .automatedPostprocessing import ( + compute_flow_and_simulation_metrics, + compute_hemodynamic_indices, + compute_velocity_and_pressure, + postprocessing_common, + visualize_probes, +) + # Imports from pre-processing -from .automatedPreprocessing import DisplayData -from .automatedPreprocessing import ImportData -from .automatedPreprocessing import NetworkBoundaryConditions -from .automatedPreprocessing import ToolRepairSTL -from .automatedPreprocessing import automated_preprocessing -from .automatedPreprocessing import preprocessing_common -from .automatedPreprocessing import simulate -from .automatedPreprocessing import visualize -from .automatedPreprocessing import vmtk_pointselector -# Imports from simulation scripts -from .simulation import Artery -from .simulation import Atrium -from .simulation import Probe -from .simulation import Womersley -from .simulation import simulation_common +from .automatedPreprocessing import ( + DisplayData, + ImportData, + NetworkBoundaryConditions, + ToolRepairSTL, + automated_preprocessing, + preprocessing_common, + simulate, + visualize, + vmtk_pointselector, +) +# Imports from simulation scripts +from .simulation import Artery, Atrium, Probe, Womersley, simulation_common meta = metadata("vampy") __version__ = meta["Version"] diff --git a/src/vampy/automatedPostprocessing/__init__.py b/src/vampy/automatedPostprocessing/__init__.py index bc98e86e..f238e25c 100644 --- a/src/vampy/automatedPostprocessing/__init__.py +++ b/src/vampy/automatedPostprocessing/__init__.py @@ -1,5 +1,7 @@ -from . import compute_flow_and_simulation_metrics -from . import compute_hemodynamic_indices -from . import compute_velocity_and_pressure -from . import postprocessing_common -from . import visualize_probes +from . 
import ( + compute_flow_and_simulation_metrics, + compute_hemodynamic_indices, + compute_velocity_and_pressure, + postprocessing_common, + visualize_probes, +) diff --git a/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py b/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py index fd369734..0165132c 100755 --- a/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py +++ b/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py @@ -1,15 +1,46 @@ from os import path import numpy as np -from dolfin import FunctionSpace, Function, MPI, VectorFunctionSpace, Timer, project, sqrt, inner, HDF5File, XDMFFile, \ - assign, CellDiameter, Mesh, TestFunction, list_timings, TimingClear, TimingType - -from vampy.automatedPostprocessing.postprocessing_common import read_command_line, get_dataset_names, \ - rate_of_dissipation, rate_of_strain - - -def compute_flow_and_simulation_metrics(folder, nu, dt, velocity_degree, T, times_to_average, save_frequency, - start_cycle, step, average_over_cycles): +from dolfin import ( + MPI, + CellDiameter, + Function, + FunctionSpace, + HDF5File, + Mesh, + TestFunction, + Timer, + TimingClear, + TimingType, + VectorFunctionSpace, + XDMFFile, + assign, + inner, + list_timings, + project, + sqrt, +) + +from vampy.automatedPostprocessing.postprocessing_common import ( + get_dataset_names, + rate_of_dissipation, + rate_of_strain, + read_command_line, +) + + +def compute_flow_and_simulation_metrics( + folder, + nu, + dt, + velocity_degree, + T, + times_to_average, + save_frequency, + start_cycle, + step, + average_over_cycles, +): """ Computes several flow field characteristics and metrics for the velocity field. Reads in the .h5 soution stored in the 'Solution' folder within the results' folder, @@ -52,7 +83,7 @@ def compute_flow_and_simulation_metrics(folder, nu, dt, velocity_degree, T, time mesh_file.read(mesh, "mesh", False) # Function space - DG = FunctionSpace(mesh, 'DG', 0) + DG = FunctionSpace(mesh, "DG", 0) V = VectorFunctionSpace(mesh, "CG", velocity_degree) Vv = FunctionSpace(mesh, "CG", velocity_degree) @@ -65,34 +96,77 @@ def compute_flow_and_simulation_metrics(folder, nu, dt, velocity_degree, T, time # Read velocity and compute cycle averaged velocity if not path.exists(file_path_u_avg): - compute_u_avg(dataset_names, file_path_u_avg, file_u, number_of_cycles, saved_time_steps_per_cycle, start_cycle, - u, u_avg) + compute_u_avg( + dataset_names, + file_path_u_avg, + file_u, + number_of_cycles, + saved_time_steps_per_cycle, + start_cycle, + u, + u_avg, + ) # Perform phase averaging (Average over cycles at specified time point(s)) if len(times_to_average) != 0: - dataset_dict, dataset_dict_avg, number_of_files = \ - get_files_for_phase_averaging(times_to_average, dt, save_frequency, step, number_of_cycles, start_cycle, - saved_time_steps_per_cycle, dataset_names) + dataset_dict, dataset_dict_avg, number_of_files = get_files_for_phase_averaging( + times_to_average, + dt, + save_frequency, + step, + number_of_cycles, + start_cycle, + saved_time_steps_per_cycle, + dataset_names, + ) # Perform cycle averaging (Average per cycle) and time averaging (Average of all cycles) else: - dataset_dict, dataset_dict_avg, number_of_files = \ - get_files_for_cycle_averaging(dataset_names, file_path_u_avg, number_of_cycles, saved_time_steps_per_cycle, - start_cycle) + dataset_dict, dataset_dict_avg, number_of_files = get_files_for_cycle_averaging( + dataset_names, + file_path_u_avg, + number_of_cycles, + 
saved_time_steps_per_cycle, + start_cycle, + ) if average_over_cycles: cycles_to_average = [cycle + 1 for cycle in range(number_of_cycles)] # Compute flow and simulation metrics for time_to_average, dataset in dataset_dict.items(): if len(times_to_average) != 0 and MPI.rank(MPI.comm_world) == 0: - print(f"Phase averaging results over {number_of_files} cycles at t={time_to_average} ms") - - define_functions_and_iterate_dataset(time_to_average, dataset, dataset_dict_avg[time_to_average], dt, file_u, - file_path_u_avg, file_path_u, mesh, nu, number_of_files, DG, V, Vv, - cycles_to_average, saved_time_steps_per_cycle) - - -def compute_u_avg(dataset_names, file_path_u_avg, file_u, n_cycles, saved_time_steps_per_cycle, - start_cycle, u, u_avg): + print( + f"Phase averaging results over {number_of_files} cycles at t={time_to_average} ms" + ) + + define_functions_and_iterate_dataset( + time_to_average, + dataset, + dataset_dict_avg[time_to_average], + dt, + file_u, + file_path_u_avg, + file_path_u, + mesh, + nu, + number_of_files, + DG, + V, + Vv, + cycles_to_average, + saved_time_steps_per_cycle, + ) + + +def compute_u_avg( + dataset_names, + file_path_u_avg, + file_u, + n_cycles, + saved_time_steps_per_cycle, + start_cycle, + u, + u_avg, +): """ Iterate over saved time steps and compute average velocity based on save sampling parameters @@ -120,7 +194,7 @@ def compute_u_avg(dataset_names, file_path_u_avg, file_u, n_cycles, saved_time_s u_avg.vector().axpy(1, u.vector()) # Average over pre-defined amount of cycles - u_avg.vector()[:] /= (n_cycles - start_cycle + 1) + u_avg.vector()[:] /= n_cycles - start_cycle + 1 if MPI.rank(MPI.comm_world) == 0: print("=" * 10, f"Computing average velocity at time: {time} ms", "=" * 10) @@ -135,8 +209,13 @@ def compute_u_avg(dataset_names, file_path_u_avg, file_u, n_cycles, saved_time_s u_avg.vector().zero() -def get_files_for_cycle_averaging(dataset_names, file_path_u_avg, number_of_cycles, saved_time_steps_per_cycle, - start_cycle): +def get_files_for_cycle_averaging( + dataset_names, + file_path_u_avg, + number_of_cycles, + saved_time_steps_per_cycle, + start_cycle, +): """ Retrieves the dataset dictionaries for cycle averaging. @@ -158,7 +237,10 @@ def get_files_for_cycle_averaging(dataset_names, file_path_u_avg, number_of_cycl # Create similar dataset for the mean velocity file_u_avg = HDF5File(MPI.comm_world, file_path_u_avg, "r") - dataset_dict_avg = {"": get_dataset_names(file_u_avg, start=0, step=1) * (number_of_cycles - start_cycle + 1)} + dataset_dict_avg = { + "": get_dataset_names(file_u_avg, start=0, step=1) + * (number_of_cycles - start_cycle + 1) + } # Get number of files based on the sliced dataset names number_of_files = len(dataset_dict[""]) @@ -166,8 +248,16 @@ def get_files_for_cycle_averaging(dataset_names, file_path_u_avg, number_of_cycl return dataset_dict, dataset_dict_avg, number_of_files -def get_files_for_phase_averaging(times_to_average, dt, save_frequency, step, number_of_cycles, start_cycle, - saved_time_steps_per_cycle, dataset_names): +def get_files_for_phase_averaging( + times_to_average, + dt, + save_frequency, + step, + number_of_cycles, + start_cycle, + saved_time_steps_per_cycle, + dataset_names, +): """ Generates dataset dictionaries for phase averaging based on the selected times. 
@@ -190,10 +280,14 @@ def get_files_for_phase_averaging(times_to_average, dt, save_frequency, step, nu dataset_dict_avg = {} # Iterate over selected times to average over + time_index = start_cycle - 1 for t in times_to_average: # Find corresponding IDs for dataset_name list time_id = int(t / dt / save_frequency / step) - time_ids = [time_id + saved_time_steps_per_cycle * k for k in range(number_of_cycles)][start_cycle - 1:] + time_ids = [ + time_id + saved_time_steps_per_cycle * k for k in range(number_of_cycles) + ] + time_ids = time_ids[time_index:] # Get dataset_names at corresponding times dataset_dict[f"_{t}"] = [dataset_names[max(0, j - 1)] for j in time_ids] @@ -205,9 +299,23 @@ def get_files_for_phase_averaging(times_to_average, dt, save_frequency, step, nu return dataset_dict, dataset_dict_avg, number_of_files -def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, dt, file_u, file_path_u_avg, - file_path_u, mesh, nu, number_of_files, DG, V, Vv, cycles_to_average, - saved_time_steps_per_cycle): +def define_functions_and_iterate_dataset( + time_to_average, + dataset, + dataset_avg, + dt, + file_u, + file_path_u_avg, + file_path_u, + mesh, + nu, + number_of_files, + DG, + V, + Vv, + cycles_to_average, + saved_time_steps_per_cycle, +): """ Defines functions and vector functions for all metrics to be computed, iterates through dataset and computes several flow and simulation metrics. @@ -303,28 +411,67 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, number_of_files = len(cycles_to_average) save_path = save_path.replace("Solutions", "FlowMetrics") - metric_names = ["characteristic_edge_length", "u_time_avg", "l_plus", "t_plus", "CFL", "strain", "length_scale", - "time_scale", "velocity_scale", "dissipation", "kinetic_energy", "turbulent_kinetic_energy", - "turbulent_dissipation"] - - metric_variables_cycle_avg = [characteristic_edge_length, u_time_cycle_avg, l_plus_cycle_avg, t_plus_cycle_avg, - CFL_cycle_avg, strain_cycle_avg, length_scale_cycle_avg, time_scale_cycle_avg, - velocity_scale_cycle_avg, dissipation_cycle_avg, kinetic_energy_cycle_avg, - turbulent_kinetic_energy_cycle_avg, turbulent_dissipation_cycle_avg] - - metric_variables_avg = [characteristic_edge_length, u_time_avg, l_plus_avg, t_plus_avg, CFL_avg, strain_avg, - length_scale_avg, time_scale_avg, velocity_scale_avg, dissipation_avg, kinetic_energy_avg, - turbulent_kinetic_energy_avg, turbulent_dissipation_avg] + metric_names = [ + "characteristic_edge_length", + "u_time_avg", + "l_plus", + "t_plus", + "CFL", + "strain", + "length_scale", + "time_scale", + "velocity_scale", + "dissipation", + "kinetic_energy", + "turbulent_kinetic_energy", + "turbulent_dissipation", + ] + + metric_variables_cycle_avg = [ + characteristic_edge_length, + u_time_cycle_avg, + l_plus_cycle_avg, + t_plus_cycle_avg, + CFL_cycle_avg, + strain_cycle_avg, + length_scale_cycle_avg, + time_scale_cycle_avg, + velocity_scale_cycle_avg, + dissipation_cycle_avg, + kinetic_energy_cycle_avg, + turbulent_kinetic_energy_cycle_avg, + turbulent_dissipation_cycle_avg, + ] + + metric_variables_avg = [ + characteristic_edge_length, + u_time_avg, + l_plus_avg, + t_plus_avg, + CFL_avg, + strain_avg, + length_scale_avg, + time_scale_avg, + velocity_scale_avg, + dissipation_avg, + kinetic_energy_avg, + turbulent_kinetic_energy_avg, + turbulent_dissipation_avg, + ] metric_dict_cycle = dict(zip(metric_names, metric_variables_cycle_avg)) metric_dict = dict(zip(metric_names, metric_variables_avg)) - 
counters_to_save = [saved_time_steps_per_cycle * cycle for cycle in cycles_to_average] + counters_to_save = [ + saved_time_steps_per_cycle * cycle for cycle in cycles_to_average + ] cycle_names = [""] + ["_cycle_{:02d}".format(cycle) for cycle in cycles_to_average] metrics = {} for cycle_name in cycle_names: for vn in metric_dict.keys(): - metrics[vn + cycle_name] = XDMFFile(MPI.comm_world, save_path % (vn, cycle_name)) + metrics[vn + cycle_name] = XDMFFile( + MPI.comm_world, save_path % (vn, cycle_name) + ) metrics[vn + cycle_name].parameters["rewrite_function_mesh"] = False metrics[vn + cycle_name].parameters["flush_output"] = True @@ -356,7 +503,11 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, # Compute CFL t0 = Timer("CFL number") u_mag = project(sqrt(inner(u, u)), DG) - CFL.vector().set_local(u_mag.vector().get_local() / characteristic_edge_length.vector().get_local() * dt) + CFL.vector().set_local( + u_mag.vector().get_local() + / characteristic_edge_length.vector().get_local() + * dt + ) CFL.vector().apply("insert") CFL_cycle_avg.vector().axpy(1, CFL.vector()) t0.stop() @@ -370,14 +521,16 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, # Compute l+ t0 = Timer("l plus") u_star = np.sqrt(strain.vector().get_local() * nu) - l_plus.vector().set_local(u_star * characteristic_edge_length.vector().get_local() / nu) + l_plus.vector().set_local( + u_star * characteristic_edge_length.vector().get_local() / nu + ) l_plus.vector().apply("insert") l_plus_cycle_avg.vector().axpy(1, l_plus.vector()) t0.stop() # Compute t+ t0 = Timer("t plus") - t_plus.vector().set_local(u_star ** 2 * dt / nu) + t_plus.vector().set_local(u_star**2 * dt / nu) t_plus.vector().apply("insert") t_plus_cycle_avg.vector().axpy(1, t_plus.vector()) t0.stop() @@ -403,7 +556,7 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, # Compute length scale t0 = Timer("Length scale") - length_scale.vector().set_local((nu ** 3 / eps) ** (1. / 4)) + length_scale.vector().set_local((nu**3 / eps) ** (1.0 / 4)) length_scale.vector().apply("insert") length_scale_cycle_avg.vector().axpy(1, length_scale.vector()) t0.stop() @@ -417,7 +570,7 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, # Compute velocity scale t0 = Timer("Velocity scale") - velocity_scale.vector().set_local((eps * nu) ** (1. 
/ 4)) + velocity_scale.vector().set_local((eps * nu) ** (1.0 / 4)) velocity_scale.vector().apply("insert") velocity_scale_cycle_avg.vector().axpy(1, velocity_scale.vector()) t0.stop() @@ -432,7 +585,13 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, assign(u2, u.sub(2)) kinetic_energy.vector().set_local( - 0.5 * (u0.vector().get_local() ** 2 + u1.vector().get_local() ** 2 + u2.vector().get_local() ** 2)) + 0.5 + * ( + u0.vector().get_local() ** 2 + + u1.vector().get_local() ** 2 + + u2.vector().get_local() ** 2 + ) + ) kinetic_energy.vector().apply("insert") kinetic_energy_cycle_avg.vector().axpy(1, kinetic_energy.vector()) t0.stop() @@ -445,11 +604,17 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, assign(u2_prime, u_prime.sub(2)) turbulent_kinetic_energy.vector().set_local( - 0.5 * (u0_prime.vector().get_local() ** 2 - + u1_prime.vector().get_local() ** 2 - + u2_prime.vector().get_local() ** 2)) + 0.5 + * ( + u0_prime.vector().get_local() ** 2 + + u1_prime.vector().get_local() ** 2 + + u2_prime.vector().get_local() ** 2 + ) + ) turbulent_kinetic_energy.vector().apply("insert") - turbulent_kinetic_energy_cycle_avg.vector().axpy(1, turbulent_kinetic_energy.vector()) + turbulent_kinetic_energy_cycle_avg.vector().axpy( + 1, turbulent_kinetic_energy.vector() + ) t0.stop() if counter % 10 == 0: @@ -467,11 +632,14 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, # Store solution for name, metric in metric_dict_cycle.items(): - metrics[name + "_cycle_{:02d}".format(cycle)].write_checkpoint(metric, name) + metrics[name + "_cycle_{:02d}".format(cycle)].write_checkpoint( + metric, name + ) # Append solution to total solution - for metric_avg, metric_cycle_avg in zip(list(metric_dict.values())[1:], - list(metric_dict_cycle.values())[1:]): + for metric_avg, metric_cycle_avg in zip( + list(metric_dict.values())[1:], list(metric_dict_cycle.values())[1:] + ): metric_cycle_avg.vector().apply("insert") metric_avg.vector().axpy(1, metric_cycle_avg.vector()) @@ -482,7 +650,9 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, counters_to_save.pop(0) # Get average over sampled time steps - metrics_dict_to_save = metric_dict if len(cycles_to_average) != 0 else metric_dict_cycle + metrics_dict_to_save = ( + metric_dict if len(cycles_to_average) != 0 else metric_dict_cycle + ) if len(cycles_to_average) != 0: number_of_files = len(cycles_to_average) @@ -518,12 +688,36 @@ def define_functions_and_iterate_dataset(time_to_average, dataset, dataset_avg, def main_metrics(): - folder, nu, _, dt, velocity_degree, _, _, T, save_frequency, times_to_average, start_cycle, step, \ - average_over_cycles, _ = read_command_line() - - compute_flow_and_simulation_metrics(folder, nu, dt, velocity_degree, T, times_to_average, save_frequency, - start_cycle, step, average_over_cycles) - - -if __name__ == '__main__': + ( + folder, + nu, + _, + dt, + velocity_degree, + _, + _, + T, + save_frequency, + times_to_average, + start_cycle, + step, + average_over_cycles, + _, + ) = read_command_line() + + compute_flow_and_simulation_metrics( + folder, + nu, + dt, + velocity_degree, + T, + times_to_average, + save_frequency, + start_cycle, + step, + average_over_cycles, + ) + + +if __name__ == "__main__": main_metrics() diff --git a/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py b/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py index 4205fb23..631172b7 100644 --- 
a/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py +++ b/src/vampy/automatedPostprocessing/compute_hemodynamic_indices.py @@ -1,9 +1,24 @@ from os import path -from dolfin import Function, VectorFunctionSpace, FunctionSpace, parameters, MPI, HDF5File, Mesh, XDMFFile, \ - BoundaryMesh, project, inner - -from vampy.automatedPostprocessing.postprocessing_common import STRESS, read_command_line, get_dataset_names +from dolfin import ( + MPI, + BoundaryMesh, + Function, + FunctionSpace, + HDF5File, + Mesh, + VectorFunctionSpace, + XDMFFile, + inner, + parameters, + project, +) + +from vampy.automatedPostprocessing.postprocessing_common import ( + STRESS, + get_dataset_names, + read_command_line, +) try: parameters["reorder_dofs_serial"] = False @@ -11,8 +26,18 @@ pass -def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, step, - average_over_cycles): +def compute_hemodynamic_indices( + folder, + nu, + rho, + dt, + T, + velocity_degree, + save_frequency, + start_cycle, + step, + average_over_cycles, +): """ Loads velocity fields from completed CFD simulation, and computes and saves the following hemodynamic quantities: @@ -57,7 +82,7 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr with HDF5File(MPI.comm_world, mesh_path.__str__(), "r") as mesh_file: mesh_file.read(mesh, "mesh", False) - bm = BoundaryMesh(mesh, 'exterior') + bm = BoundaryMesh(mesh, "exterior") if MPI.rank(MPI.comm_world) == 0: print("Defining function spaces") @@ -105,7 +130,9 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr # Set number of cycles to average over cycles_to_average = list(range(1, n_cycles + 1)) if average_over_cycles else [] - counters_to_save = [saved_time_steps_per_cycle * cycle for cycle in cycles_to_average] + counters_to_save = [ + saved_time_steps_per_cycle * cycle for cycle in cycles_to_average + ] cycle_names = [""] + ["_cycle_{:02d}".format(cycle) for cycle in cycles_to_average] # Create XDMF files for saving indices @@ -121,7 +148,9 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr indices = {} for cycle_name in cycle_names: for index in index_names + ["WSS"]: - indices[index + cycle_name] = XDMFFile(MPI.comm_world, fullname % (index, cycle_name)) + indices[index + cycle_name] = XDMFFile( + MPI.comm_world, fullname % (index, cycle_name) + ) indices[index + cycle_name].parameters["rewrite_function_mesh"] = False indices[index + cycle_name].parameters["flush_output"] = True indices[index + cycle_name].parameters["functions_share_mesh"] = True @@ -155,7 +184,9 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr # Compute TWSSG if MPI.rank(MPI.comm_world) == 0: print("Compute TWSSG") - twssg.vector().set_local((tau.vector().get_local() - tau_prev.vector().get_local()) / dt) + twssg.vector().set_local( + (tau.vector().get_local() - tau_prev.vector().get_local()) / dt + ) twssg.vector().apply("insert") twssg_ = project(inner(twssg, twssg) ** (1 / 2), U_b1) TWSSG_avg.vector().axpy(1, twssg_.vector()) @@ -225,19 +256,21 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr index_dict = index_dict if len(cycles_to_average) != 0 else index_dict_cycle WSS_mean = WSS_mean if len(cycles_to_average) != 0 else WSS_mean_avg - index_dict['TWSSG'].vector()[:] = index_dict['TWSSG'].vector()[:] / n - index_dict['TAWSS'].vector()[:] = index_dict['TAWSS'].vector()[:] / n + 
index_dict["TWSSG"].vector()[:] = index_dict["TWSSG"].vector()[:] / n + index_dict["TAWSS"].vector()[:] = index_dict["TAWSS"].vector()[:] / n WSS_mean.vector()[:] = WSS_mean.vector()[:] / n wss_mean = project(inner(WSS_mean, WSS_mean) ** (1 / 2), U_b1) wss_mean_vec = wss_mean.vector().get_local() - tawss_vec = index_dict['TAWSS'].vector().get_local() + tawss_vec = index_dict["TAWSS"].vector().get_local() # Compute RRT, OSI, and ECAP based on mean and absolute WSS - index_dict['RRT'].vector().set_local(1 / wss_mean_vec) - index_dict['OSI'].vector().set_local(0.5 * (1 - wss_mean_vec / tawss_vec)) - index_dict['ECAP'].vector().set_local(index_dict['OSI'].vector().get_local() / tawss_vec) + index_dict["RRT"].vector().set_local(1 / wss_mean_vec) + index_dict["OSI"].vector().set_local(0.5 * (1 - wss_mean_vec / tawss_vec)) + index_dict["ECAP"].vector().set_local( + index_dict["OSI"].vector().get_local() / tawss_vec + ) - for index in ['RRT', 'OSI', 'ECAP']: + for index in ["RRT", "OSI", "ECAP"]: index_dict[index].vector().apply("insert") # Rename displayed variable names @@ -254,12 +287,36 @@ def compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_fr def main_hemo(): - folder, nu, rho, dt, velocity_degree, _, _, T, save_frequency, _, start_cycle, step, average_over_cycles, _ \ - = read_command_line() - - compute_hemodynamic_indices(folder, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, - step, average_over_cycles) - - -if __name__ == '__main__': + ( + folder, + nu, + rho, + dt, + velocity_degree, + _, + _, + T, + save_frequency, + _, + start_cycle, + step, + average_over_cycles, + _, + ) = read_command_line() + + compute_hemodynamic_indices( + folder, + nu, + rho, + dt, + T, + velocity_degree, + save_frequency, + start_cycle, + step, + average_over_cycles, + ) + + +if __name__ == "__main__": main_hemo() diff --git a/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py b/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py index a24b8eae..eb2df836 100644 --- a/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py +++ b/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py @@ -1,6 +1,15 @@ from pathlib import Path -from dolfin import parameters, FunctionSpace, XDMFFile, MPI, VectorFunctionSpace, HDF5File, Mesh, Function +from dolfin import ( + MPI, + Function, + FunctionSpace, + HDF5File, + Mesh, + VectorFunctionSpace, + XDMFFile, + parameters, +) from vampy.automatedPostprocessing.postprocessing_common import read_command_line @@ -10,7 +19,9 @@ pass -def compute_velocity_and_pressure(case_path, dt, velocity_degree, pressure_degree, step): +def compute_velocity_and_pressure( + case_path, dt, velocity_degree, pressure_degree, step +): """ Loads velocity and pressure from compressed .h5 CFD solution and converts and saves to .xdmf format for visualization (in e.g. ParaView). 
@@ -92,9 +103,11 @@ def compute_velocity_and_pressure(case_path, dt, velocity_degree, pressure_degre def main_convert(): - folder, _, _, dt, velocity_degree, pressure_degree, _, _, _, _, _, step, _, _ = read_command_line() + folder, _, _, dt, velocity_degree, pressure_degree, _, _, _, _, _, step, _, _ = ( + read_command_line() + ) compute_velocity_and_pressure(folder, dt, velocity_degree, pressure_degree, step) -if __name__ == '__main__': +if __name__ == "__main__": main_convert() diff --git a/src/vampy/automatedPostprocessing/postprocessing_common.py b/src/vampy/automatedPostprocessing/postprocessing_common.py index 1503eb5c..913f6d01 100644 --- a/src/vampy/automatedPostprocessing/postprocessing_common.py +++ b/src/vampy/automatedPostprocessing/postprocessing_common.py @@ -1,8 +1,24 @@ import argparse from time import time -from dolfin import parameters, MPI, assemble, interpolate, Measure, FacetNormal, Identity, VectorFunctionSpace, \ - BoundaryMesh, Function, FacetArea, TestFunction, FunctionSpace, grad, inner, sqrt +from dolfin import ( + MPI, + BoundaryMesh, + FacetArea, + FacetNormal, + Function, + FunctionSpace, + Identity, + Measure, + TestFunction, + VectorFunctionSpace, + assemble, + grad, + inner, + interpolate, + parameters, + sqrt, +) try: parameters["allow_extrapolation"] = True @@ -12,88 +28,145 @@ def read_command_line(): """Read arguments from commandline""" - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description="Automated post-processing for vascular modeling.") - - parser.add_argument('-c', '--case', - type=str, - default="simulation/results_folder/model/data/1/Solutions", - help="Path to simulation results.", - metavar="PATH") - - parser.add_argument('-nu', '--nu', - type=float, - default=3.3018e-3, - help="Kinematic viscosity used in simulation. Measured in [mm^2/ms].") - - parser.add_argument('-r', '--rho', - type=float, - default=1060, - help="Fluid density used in simulation. Measured in [kg/m^3].") - - parser.add_argument('-T', '--T', - type=float, - default=951, - help="Duration of one cardiac cycle. Measured in [ms].") - - parser.add_argument('-dt', '--dt', - type=float, - default=0.0951, - help="Time step of simulation. Measured in [ms].") - - parser.add_argument('-vd', '--velocity-degree', - type=int, - default=1, - help="Degree of velocity element.") - - parser.add_argument('-pd', '--pressure-degree', - type=int, - default=1, - help="Degree of pressure element.") - - parser.add_argument('-sf', '--save-frequency', - type=int, - default=5, - help="Frequency of saving velocity to file.") - - parser.add_argument('-pf', '--probe-frequency', - type=int, - default=100, - help="Frequency of saving probes to file.") - - parser.add_argument('-ta', '--times-to-average', - type=float, - default=[], - nargs="+", - help="Time(s) during cardiac cycle to average, in the interval [0,T). Measured in [ms].") - - parser.add_argument('-pp', '--probes-to-plot', - type=int, - default=[], - nargs="+", - help="List of integers corresponding to probe numbers. " + - "The probes are to visualized in separate probe plots. 
") - - parser.add_argument('-sc', '--start-cycle', - type=int, - default=2, - help="Start post-processing from this cardiac cycle.") - - parser.add_argument('-ss', '--sample-step', - type=int, - default=1, - help="Step size that determines how many times data is sampled.") - - parser.add_argument('-ac', '--average-over-cycles', - default=False, - action='store_true', - help="Computes average over all cycles if True.") + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Automated post-processing for vascular modeling.", + ) + + parser.add_argument( + "-c", + "--case", + type=str, + default="simulation/results_folder/model/data/1/Solutions", + help="Path to simulation results.", + metavar="PATH", + ) + + parser.add_argument( + "-nu", + "--nu", + type=float, + default=3.3018e-3, + help="Kinematic viscosity used in simulation. Measured in [mm^2/ms].", + ) + + parser.add_argument( + "-r", + "--rho", + type=float, + default=1060, + help="Fluid density used in simulation. Measured in [kg/m^3].", + ) + + parser.add_argument( + "-T", + "--T", + type=float, + default=951, + help="Duration of one cardiac cycle. Measured in [ms].", + ) + + parser.add_argument( + "-dt", + "--dt", + type=float, + default=0.0951, + help="Time step of simulation. Measured in [ms].", + ) + + parser.add_argument( + "-vd", + "--velocity-degree", + type=int, + default=1, + help="Degree of velocity element.", + ) + + parser.add_argument( + "-pd", + "--pressure-degree", + type=int, + default=1, + help="Degree of pressure element.", + ) + + parser.add_argument( + "-sf", + "--save-frequency", + type=int, + default=5, + help="Frequency of saving velocity to file.", + ) + + parser.add_argument( + "-pf", + "--probe-frequency", + type=int, + default=100, + help="Frequency of saving probes to file.", + ) + + parser.add_argument( + "-ta", + "--times-to-average", + type=float, + default=[], + nargs="+", + help="Time(s) during cardiac cycle to average, in the interval [0,T). Measured in [ms].", + ) + + parser.add_argument( + "-pp", + "--probes-to-plot", + type=int, + default=[], + nargs="+", + help="List of integers corresponding to probe numbers. " + + "The probes are to visualized in separate probe plots. ", + ) + + parser.add_argument( + "-sc", + "--start-cycle", + type=int, + default=2, + help="Start post-processing from this cardiac cycle.", + ) + + parser.add_argument( + "-ss", + "--sample-step", + type=int, + default=1, + help="Step size that determines how many times data is sampled.", + ) + + parser.add_argument( + "-ac", + "--average-over-cycles", + default=False, + action="store_true", + help="Computes average over all cycles if True.", + ) args = parser.parse_args() - return args.case, args.nu, args.rho, args.dt, args.velocity_degree, args.pressure_degree, args.probe_frequency, \ - args.T, args.save_frequency, args.times_to_average, args.start_cycle, args.sample_step, \ - args.average_over_cycles, args.probes_to_plot + return ( + args.case, + args.nu, + args.rho, + args.dt, + args.velocity_degree, + args.pressure_degree, + args.probe_frequency, + args.T, + args.save_frequency, + args.times_to_average, + args.start_cycle, + args.sample_step, + args.average_over_cycles, + args.probes_to_plot, + ) def epsilon(u): @@ -161,8 +234,8 @@ def __init__(self, u, p, nu, mesh): mesh (Mesh): The mesh on which to compute stress. 
""" boundary_ds = Measure("ds", domain=mesh) - boundary_mesh = BoundaryMesh(mesh, 'exterior') - self.bmV = VectorFunctionSpace(boundary_mesh, 'CG', 1) + boundary_mesh = BoundaryMesh(mesh, "exterior") + self.bmV = VectorFunctionSpace(boundary_mesh, "CG", 1) # Compute stress tensor sigma = (2 * nu * epsilon(u)) - (p * Identity(len(u))) @@ -176,9 +249,11 @@ def __init__(self, u, p, nu, mesh): Ft = F - (Fn * n) # vector-valued # Integrate against piecewise constants on the boundary - scalar = FunctionSpace(mesh, 'DG', 0) - vector = VectorFunctionSpace(mesh, 'CG', 1) - scaling = FacetArea(mesh) # Normalise the computed stress relative to the size of the element + scalar = FunctionSpace(mesh, "DG", 0) + vector = VectorFunctionSpace(mesh, "CG", 1) + scaling = FacetArea( + mesh + ) # Normalise the computed stress relative to the size of the element v = TestFunction(scalar) w = TestFunction(vector) @@ -218,8 +293,14 @@ def norm_l2(self, u): return pow(inner(u, u), 0.5) -def get_dataset_names(data_file, num_files=100000, step=1, start=1, print_info=True, - vector_filename="/velocity/vector_%d"): +def get_dataset_names( + data_file, + num_files=100000, + step=1, + start=1, + print_info=True, + vector_filename="/velocity/vector_%d", +): """ Read velocity fields datasets and extract names of files diff --git a/src/vampy/automatedPostprocessing/visualize_probes.py b/src/vampy/automatedPostprocessing/visualize_probes.py index ed9126bf..13088196 100644 --- a/src/vampy/automatedPostprocessing/visualize_probes.py +++ b/src/vampy/automatedPostprocessing/visualize_probes.py @@ -6,10 +6,18 @@ from vampy.automatedPostprocessing.postprocessing_common import read_command_line # Plotting variables -colors = ['red', 'blue', 'purple'] - - -def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, show_figure=False, save_figure=False): +colors = ["red", "blue", "purple"] + + +def visualize_probes( + case_path, + probe_saving_frequency, + T, + dt, + probes_to_plot, + show_figure=False, + save_figure=False, +): """ Loads probe points from completed CFD simulation and visualizes (1) velocity (magnitude), mean velocity, and pressure, (2) kinetic energy, turbulent kinetic energy, and turbulence intensity, (3) kinetic energy spectrum, @@ -24,27 +32,77 @@ def visualize_probes(case_path, probe_saving_frequency, T, dt, probes_to_plot, s show_figure: Shows figure if True. save_figure: Saves figure if True. 
""" - max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps \ - = load_probes(case_path, probe_saving_frequency) - - mean_velocity, kinetic_energy, turbulent_kinetic_energy, turbulence_intensity, max_ke, max_tke \ - = compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, - velocity_w) - - kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum(n_probes, velocity_u, velocity_v, velocity_w, dt) + ( + max_P, + max_U, + n_cols, + n_probes, + n_rows, + pressures, + velocity, + velocity_u, + velocity_v, + velocity_w, + n_timesteps, + ) = load_probes(case_path, probe_saving_frequency) + + ( + mean_velocity, + kinetic_energy, + turbulent_kinetic_energy, + turbulence_intensity, + max_ke, + max_tke, + ) = compute_mean_velocity_and_kinetic_energy( + T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w + ) + + kinetic_energy_spectrum, freq, max_kes = compute_energy_spectrum( + n_probes, velocity_u, velocity_v, velocity_w, dt + ) if not probes_to_plot: # Plot all probes in same plot - plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velocity, n_timesteps, pressures, - velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, turbulence_intensity, freq, - kinetic_energy_spectrum, max_kes, velocity_u, save_figure, show_figure) + plot_all_probes( + n_probes, + n_rows, + n_cols, + case_path, + max_P, + max_U, + mean_velocity, + n_timesteps, + pressures, + velocity, + kinetic_energy, + max_ke, + max_tke, + turbulent_kinetic_energy, + turbulence_intensity, + freq, + kinetic_energy_spectrum, + max_kes, + velocity_u, + save_figure, + show_figure, + ) else: # Plot probes in separate plots for k in probes_to_plot: fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) - plot_velocity_and_pressure(k, ax1, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) - plot_kinetic_energy(k, ax2, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy, - turbulence_intensity) + plot_velocity_and_pressure( + k, ax1, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity + ) + plot_kinetic_energy( + k, + ax2, + kinetic_energy, + max_ke, + max_tke, + n_timesteps, + turbulent_kinetic_energy, + turbulence_intensity, + ) plot_energy_spectrum(k, ax3, freq, kinetic_energy_spectrum, max_kes) plot_spectrogram(k, ax4, velocity_u) @@ -71,9 +129,29 @@ def save_and_show_plot(case_path, filename, save, show): plt.show() -def plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velocity, n_timesteps, pressures, - velocity, kinetic_energy, max_ke, max_tke, turbulent_kinetic_energy, turbulence_intensity, freq, - kinetic_energy_spectrum, max_kes, velocity_u, save_figure, show_figure): +def plot_all_probes( + n_probes, + n_rows, + n_cols, + case_path, + max_P, + max_U, + mean_velocity, + n_timesteps, + pressures, + velocity, + kinetic_energy, + max_ke, + max_tke, + turbulent_kinetic_energy, + turbulence_intensity, + freq, + kinetic_energy_spectrum, + max_kes, + velocity_u, + save_figure, + show_figure, +): """ Plots data for all probes. 
@@ -83,14 +161,24 @@ def plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velo # Plot velocity and pressure for k in range(n_probes): ax = plt.subplot(n_rows, n_cols, k + 1) - plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity) + plot_velocity_and_pressure( + k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity + ) save_and_show_plot(case_path, "velocity_and_pressure.png", save_figure, show_figure) # Plot kinetic energy for k in range(n_probes): ax = plt.subplot(n_rows, n_cols, k + 1) - plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy, - turbulence_intensity) + plot_kinetic_energy( + k, + ax, + kinetic_energy, + max_ke, + max_tke, + n_timesteps, + turbulent_kinetic_energy, + turbulence_intensity, + ) save_and_show_plot(case_path, "kinetic_energy.png", save_figure, show_figure) # Plot energy spectrum @@ -106,7 +194,7 @@ def plot_all_probes(n_probes, n_rows, n_cols, case_path, max_P, max_U, mean_velo save_and_show_plot(case_path, "spectrogram.png", save_figure, show_figure) -def plot_spectrogram(k, ax, velocity, color_map='jet', font_size=12): +def plot_spectrogram(k, ax, velocity, color_map="jet", font_size=12): """ Plots spectrogram at probe points. @@ -125,7 +213,7 @@ def plot_spectrogram(k, ax, velocity, color_map='jet', font_size=12): ax.set_xlabel("Time [ms]", fontsize=font_size) # Set title to probe number - ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + ax.set_title("Probe {}".format(k + 1), y=1.0, pad=-14) def plot_energy_spectrum(k, ax, freq, kinetic_energy_spectrum, max_kes): @@ -159,8 +247,14 @@ def E(k): ax.plot(freq, kinetic_energy_spectrum[k], color=colors[1], label="") ax.set_xscale("log") ax.set_yscale("log") - ax.annotate('$k^{-5/3}$', xy=(np.mean(freq) / 4, np.mean(kinetic_energy_spectrum[k])), xycoords='data', - textcoords='data', color='black', fontsize=15) + ax.annotate( + "$k^{-5/3}$", + xy=(np.mean(freq) / 4, np.mean(kinetic_energy_spectrum[k])), + xycoords="data", + textcoords="data", + color="black", + fontsize=15, + ) # Set axis limits ax.set_ylim(None, max_kes * 1.1) @@ -171,11 +265,19 @@ def E(k): ax.set_xlabel("k", fontsize=12) # Set title to probe number - ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) - - -def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, turbulent_kinetic_energy, - turbulence_intensity): + ax.set_title("Probe {}".format(k + 1), y=1.0, pad=-14) + + +def plot_kinetic_energy( + k, + ax, + kinetic_energy, + max_ke, + max_tke, + n_timesteps, + turbulent_kinetic_energy, + turbulence_intensity, +): """ Plots kinetic energy and turbulent kinetic energy at probe points. 
@@ -196,7 +298,7 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur ax_twinx.plot(time_interval, turbulence_intensity[k], color=colors[2], label="") # Set axis limits - ax.set_ylim(-1E-2, max_ke * 1.1) + ax.set_ylim(-1e-2, max_ke * 1.1) ax.set_xlim(min(time_interval), max(time_interval)) # Set axis labels @@ -205,14 +307,16 @@ def plot_kinetic_energy(k, ax, kinetic_energy, max_ke, max_tke, n_timesteps, tur ax.set_xlabel("Time [s]", fontsize=12) # Color axis ticks - ax.tick_params(axis='y', which='major', labelsize=12) - ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[2]) + ax.tick_params(axis="y", which="major", labelsize=12) + ax_twinx.tick_params(axis="y", which="major", labelsize=12, labelcolor=colors[2]) # Set title to probe number - ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + ax.set_title("Probe {}".format(k + 1), y=1.0, pad=-14) -def plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity): +def plot_velocity_and_pressure( + k, ax, max_P, max_U, mean_velocity, n_timesteps, pressures, velocity +): """ Plots velocity magnitude and pressure at probe points. @@ -230,12 +334,12 @@ def plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, ax_twinx = ax.twinx() time_interval = np.linspace(0, n_timesteps, n_timesteps) ax.plot(time_interval, velocity[k], color=colors[0], label="") - ax.plot(time_interval, mean_velocity[k], color=colors[2], linestyle='--', label="") + ax.plot(time_interval, mean_velocity[k], color=colors[2], linestyle="--", label="") ax_twinx.plot(time_interval, pressures[k], color=colors[1], label="") # Set axis limits - ax.set_ylim(-1E-2, max_U * 1.5) - ax_twinx.set_ylim(-1E-2, max_P * 1.5) + ax.set_ylim(-1e-2, max_U * 1.5) + ax_twinx.set_ylim(-1e-2, max_P * 1.5) ax.set_xlim(min(time_interval), max(time_interval)) # Set axis labels @@ -244,15 +348,16 @@ def plot_velocity_and_pressure(k, ax, max_P, max_U, mean_velocity, n_timesteps, ax.set_xlabel("Time [s]", fontsize=12) # Color axis ticks - ax.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[0]) - ax_twinx.tick_params(axis='y', which='major', labelsize=12, labelcolor=colors[1]) + ax.tick_params(axis="y", which="major", labelsize=12, labelcolor=colors[0]) + ax_twinx.tick_params(axis="y", which="major", labelsize=12, labelcolor=colors[1]) # Set title to probe number - ax.set_title('Probe {}'.format(k + 1), y=1.0, pad=-14) + ax.set_title("Probe {}".format(k + 1), y=1.0, pad=-14) -def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, - velocity_w): +def compute_mean_velocity_and_kinetic_energy( + T, dt, n_timesteps, n_probes, velocity, velocity_u, velocity_v, velocity_w +): """ Computes mean velocity and kinetic energy at probe points. 
@@ -301,24 +406,33 @@ def compute_mean_velocity_and_kinetic_energy(T, dt, n_timesteps, n_probes, veloc u_mean_w = np.tile(u_mean_w, n_cycles) # Calculate total kinetic energy - kinetic_energy[k] = 0.5 * (u ** 2 + v ** 2 + w ** 2) + kinetic_energy[k] = 0.5 * (u**2 + v**2 + w**2) # Calculate fluctuating velocity fluctuating_velocity_u = u - u_mean_u fluctuating_velocity_v = v - u_mean_v fluctuating_velocity_w = w - u_mean_w - turbulent_kinetic_energy[k] = 0.5 * ((fluctuating_velocity_u ** 2) + - (fluctuating_velocity_v ** 2) + - (fluctuating_velocity_w ** 2)) + turbulent_kinetic_energy[k] = 0.5 * ( + (fluctuating_velocity_u**2) + + (fluctuating_velocity_v**2) + + (fluctuating_velocity_w**2) + ) u_prime = np.sqrt(2 / 3 * turbulent_kinetic_energy[k]) - U_bar = np.sqrt(u_mean_u ** 2 + u_mean_v ** 2 + u_mean_w ** 2) + U_bar = np.sqrt(u_mean_u**2 + u_mean_v**2 + u_mean_w**2) turbulence_intensity[k] = u_prime / U_bar max_ke = np.max(kinetic_energy) max_tke = np.max(turbulent_kinetic_energy) - return mean_velocity, kinetic_energy, turbulent_kinetic_energy, turbulence_intensity, max_ke, max_tke + return ( + mean_velocity, + kinetic_energy, + turbulent_kinetic_energy, + turbulence_intensity, + max_ke, + max_tke, + ) def load_probes(case_path, probe_saving_frequency): @@ -411,7 +525,19 @@ def load_probes(case_path, probe_saving_frequency): print("-- No data to visualize. Exiting..") exit() - return max_P, max_U, n_cols, n_probes, n_rows, pressures, velocity, velocity_u, velocity_v, velocity_w, n_timesteps + return ( + max_P, + max_U, + n_cols, + n_probes, + n_rows, + pressures, + velocity, + velocity_u, + velocity_v, + velocity_w, + n_timesteps, + ) def load_probe_data(case_path, tstep): @@ -492,8 +618,8 @@ def get_fft(x): freqs = np.fft.fftfreq(N, dt) # Store positive values of total kinetic energy spectrum - freq = freqs[:N // 2] - E_total = E_total[:N // 2] + freq = freqs[: N // 2] + E_total = E_total[: N // 2] kinetic_energy_spectrum.append(E_total) @@ -508,9 +634,13 @@ def get_fft(x): def main_probe(): # Read command line arguments - folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _, probes_to_plot = read_command_line() - visualize_probes(folder, probe_freq, T, dt, probes_to_plot, show_figure=True, save_figure=True) + folder, _, _, dt, _, _, probe_freq, T, _, _, _, _, _, probes_to_plot = ( + read_command_line() + ) + visualize_probes( + folder, probe_freq, T, dt, probes_to_plot, show_figure=True, save_figure=True + ) -if __name__ == '__main__': +if __name__ == "__main__": main_probe() diff --git a/src/vampy/automatedPreprocessing/DisplayData.py b/src/vampy/automatedPreprocessing/DisplayData.py index 5aebd312..1e0e6737 100644 --- a/src/vampy/automatedPreprocessing/DisplayData.py +++ b/src/vampy/automatedPreprocessing/DisplayData.py @@ -22,8 +22,8 @@ def __init__(self, maxNumPoints=1e6): mapper.SetInputData(self.vtkPolyData) self.vtkActor = vtk.vtkActor() self.vtkActor.SetMapper(mapper) - self.vtkActor.GetProperty().SetPointSize(9.) - self.vtkActor.GetProperty().SetColor(1., .0, .0) + self.vtkActor.GetProperty().SetPointSize(9.0) + self.vtkActor.GetProperty().SetColor(1.0, 0.0, 0.0) def addPoint(self, point): if self.vtkPoints.GetNumberOfPoints() < self.maxNumPoints: @@ -57,7 +57,7 @@ class DisplayModel(object): def polyDataToActor(self, polyData, opacity=1.0): """Wrap the provided vtkPolyData object in a mapper and an actor, - returning the actor. 
""" + returning the actor.""" mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION > 5: mapper.SetInputData(polyData) @@ -67,7 +67,7 @@ def polyDataToActor(self, polyData, opacity=1.0): actor.SetMapper(mapper) actor.GetProperty().SetOpacity(opacity) - return (actor) + return actor def setLight(self, renderer): lightKit = vtk.vtkLightKit() @@ -84,9 +84,9 @@ def setLight(self, renderer): lightKit.SetHeadlightWarmth(0.5) # intensity ratios - lightKit.SetKeyToFillRatio(2.) - lightKit.SetKeyToHeadRatio(7.) - lightKit.SetKeyToBackRatio(1000.) + lightKit.SetKeyToFillRatio(2.0) + lightKit.SetKeyToHeadRatio(7.0) + lightKit.SetKeyToBackRatio(1000.0) lightKit.AddLightsToRenderer(renderer) def renderWindow(self, renderer, titleWindow): @@ -105,10 +105,11 @@ def renderWindow(self, renderer, titleWindow): interactor.Initialize() interactor.Start() - def DisplayProbesAndModel(self, centerline, fileNameCenterline, - listProbePoints, model=None): + def DisplayProbesAndModel( + self, centerline, fileNameCenterline, listProbePoints, model=None + ): """Displays a model and the corresponding probe points along - the centerline. """ + the centerline.""" if model is None: isDisplayingModel = False @@ -140,15 +141,20 @@ def DisplayProbesAndModel(self, centerline, fileNameCenterline, opacity = 0.25 ren.AddActor(self.polyDataToActor(model, opacity)) ren.AddActor(self.polyDataToActor(centerline)) - ren.SetBackground(.2, .3, .4) + ren.SetBackground(0.2, 0.3, 0.4) renWindows.SetSize(700, 700) # Create a text actor. txt = vtk.vtkTextActor() - guiText = ("Centerline file name: " - + repr(fileNameCenterline.rsplit('/', 1)[-1]) + "\n" - + "Number of probes: " + repr(len(listProbePoints)) + "\n" - + "Q to exit.") + guiText = ( + "Centerline file name: " + + repr(fileNameCenterline.rsplit("/", 1)[-1]) + + "\n" + + "Number of probes: " + + repr(len(listProbePoints)) + + "\n" + + "Q to exit." + ) txt.SetInputData(guiText) txtprop = txt.GetTextProperty() txtprop.SetFontFamilyToArial() diff --git a/src/vampy/automatedPreprocessing/ImportData.py b/src/vampy/automatedPreprocessing/ImportData.py index eca34c6b..a00db3c6 100644 --- a/src/vampy/automatedPreprocessing/ImportData.py +++ b/src/vampy/automatedPreprocessing/ImportData.py @@ -15,28 +15,28 @@ import vtk # Array names used by VMTK. -BLANKINGARRAYNAME = 'Blanking' -GROUPIDSARRAYNAME = 'GroupIds' -RADIUSARRAYNAME = 'MaximumInscribedSphereRadius' -SECTIONARRAYNAME = 'CenterlineSectionArea' +BLANKINGARRAYNAME = "Blanking" +GROUPIDSARRAYNAME = "GroupIds" +RADIUSARRAYNAME = "MaximumInscribedSphereRadius" +SECTIONARRAYNAME = "CenterlineSectionArea" def loadFile(fileName): - """Load the given file, and return a vtkPolyData object for it. 
""" + """Load the given file, and return a vtkPolyData object for it.""" fileType = fileName[-3:] - if fileType == '': - raise RuntimeError('The file does not have an extension') - if fileType == 'stl': + if fileType == "": + raise RuntimeError("The file does not have an extension") + if fileType == "stl": reader = vtk.vtkSTLReader() reader.MergingOn() - elif fileType == 'vtk': + elif fileType == "vtk": reader = vtk.vtkPolyDataReader() - elif fileType == 'vtp': + elif fileType == "vtp": reader = vtk.vtkXMLPolyDataReader() - elif fileType == 'vtu': + elif fileType == "vtu": reader = vtk.vtkUnstructuredGridReader() else: - raise RuntimeError('Unknown file type %s' % fileType) + raise RuntimeError("Unknown file type %s" % fileType) reader.SetFileName(fileName) reader.Update() polyData = reader.GetOutput() @@ -71,7 +71,7 @@ def GetBlankedGroupsIdList(centerline): def GetRedundantBlankedIdList(centerline, blankedGroupsIdList): - """ Get the redundant blanked segments. + """Get the redundant blanked segments. Nominally, the x0 and x1 coordinates should be the same. Thus, the computed distance should be exactly 0.0. @@ -105,18 +105,33 @@ def GetRedundantBlankedIdList(centerline, blankedGroupsIdList): if otherBranchIndex in redundantBranchesIndex: continue centerline.GetCell(otherBranchIndex).GetPoints().GetPoint(0, otherX0) - otherNpts = centerline.GetCell(otherBranchIndex).GetPoints().GetNumberOfPoints() - centerline.GetCell(otherBranchIndex).GetPoints().GetPoint(otherNpts - 1, otherX1) - if vtk.vtkMath.Distance2BetweenPoints(currentX0, otherX0) < tol and \ - vtk.vtkMath.Distance2BetweenPoints(currentX1, otherX1) < tol: + otherNpts = ( + centerline.GetCell(otherBranchIndex).GetPoints().GetNumberOfPoints() + ) + centerline.GetCell(otherBranchIndex).GetPoints().GetPoint( + otherNpts - 1, otherX1 + ) + if ( + vtk.vtkMath.Distance2BetweenPoints(currentX0, otherX0) < tol + and vtk.vtkMath.Distance2BetweenPoints(currentX1, otherX1) < tol + ): redundantBranchesIndex.append(otherBranchIndex) if vtk.vtkMath.Distance2BetweenPoints(currentX0, otherX0) > minLength: - print('WARNING: POTENTIAL ISSUE DURING THE MERGING OF REDUNDANTS BLANKED SEGMENTS.') - print(' A distance between segments is suspicious.') - print(' The blanked segments of VTK Cell Id ' + repr(currentBranchIndex)) - print(' and, VTK Cell Id ' + repr(otherBranchIndex) + ' will be merged.') - print(' The segments points should be identical. ') - print(' Please check if this action was expected.') + print( + "WARNING: POTENTIAL ISSUE DURING THE MERGING OF REDUNDANTS BLANKED SEGMENTS." + ) + print(" A distance between segments is suspicious.") + print( + " The blanked segments of VTK Cell Id " + + repr(currentBranchIndex) + ) + print( + " and, VTK Cell Id " + + repr(otherBranchIndex) + + " will be merged." + ) + print(" The segments points should be identical. 
") + print(" Please check if this action was expected.") print() return redundantBranchesIndex @@ -156,12 +171,22 @@ def ComputeGeometricTolerance(centerline): point1 = [0.0, 0.0, 0.0] centerline.GetCell(i).GetPoints().GetPoint(k, point0) centerline.GetCell(i).GetPoints().GetPoint(k + 1, point1) - if math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) < minLength: + if ( + math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) + < minLength + ): if math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) == 0.0: continue - minLength = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) - if math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) > maxLength: - maxLength = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) + minLength = math.sqrt( + vtk.vtkMath.Distance2BetweenPoints(point0, point1) + ) + if ( + math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) + > maxLength + ): + maxLength = math.sqrt( + vtk.vtkMath.Distance2BetweenPoints(point0, point1) + ) return minLength, maxLength @@ -223,7 +248,7 @@ def ComputeGroupRadius(centerline, branchGroupId): def ComputeBranchRadius(centerline, branchId): - """Return the radius for a branch with index branchId. """ + """Return the radius for a branch with index branchId.""" length = 0.0 resistance = 0.0 radiusArray = centerline.GetPointData().GetArray(RADIUSARRAYNAME) @@ -236,7 +261,7 @@ def ComputeBranchRadius(centerline, branchId): dx = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point0, point1)) length += dx r = radiusArray.GetComponent(centerline.GetCell(branchId).GetPointId(k), 0) - resistance += dx / r ** 4.0 + resistance += dx / r**4.0 branchRadius = (length / resistance) ** 0.25 return branchRadius @@ -254,8 +279,13 @@ def GetListsUniqueBlankedBranches(blankedGroupsIdList, redundantBlankedBranchesI return blankedGroupsIndex, blankedUniqueBranchesIndex -def SetNetworkStructure(centerline, network, verboseprint, - isConnectivityNeeded=True, isRadiusInletNeeded=True): +def SetNetworkStructure( + centerline, + network, + verboseprint, + isConnectivityNeeded=True, + isRadiusInletNeeded=True, +): """Fills a network structure with a vtkPolyData object. Each element has an unique index. The groups length and radius are @@ -270,6 +300,7 @@ def SetNetworkStructure(centerline, network, verboseprint, verboseprint("> Filling the network structure with the raw data.") if not (IsArrayDefined(centerline, GROUPIDSARRAYNAME)): from vmtk import vmtkscripts + centerlines = vmtkscripts.vmtkBranchExtractor() centerlines.Centerlines = centerline centerlines.RadiusArrayName = RADIUSARRAYNAME @@ -279,11 +310,12 @@ def SetNetworkStructure(centerline, network, verboseprint, # Treat the splitted centerline. 
maxGroupId = GetMaxGroupId(centerline) blankedGroupsIdList = GetBlankedGroupsIdList(centerline) - redundantBlankedBranchesIdList = GetRedundantBlankedIdList(centerline, - blankedGroupsIdList) - blankedGroupsIndex, blankedUniqueBranchesIndex = \ - GetListsUniqueBlankedBranches(blankedGroupsIdList, - redundantBlankedBranchesIdList) + redundantBlankedBranchesIdList = GetRedundantBlankedIdList( + centerline, blankedGroupsIdList + ) + blankedGroupsIndex, blankedUniqueBranchesIndex = GetListsUniqueBlankedBranches( + blankedGroupsIdList, redundantBlankedBranchesIdList + ) indexUniqueBranches = 0 for i in range(0, maxGroupId + 1): x0List = [] @@ -294,19 +326,26 @@ def SetNetworkStructure(centerline, network, verboseprint, for j in range(0, len(blankedGroupsIndex)): if blankedGroupsIndex[j] == i: el = Element(Id=indexUniqueBranches) - el.SetMeanRadius(ComputeBranchRadius(centerline, - blankedUniqueBranchesIndex[j])) - el.SetLength(ComputeBranchLength(centerline, - blankedUniqueBranchesIndex[j])) + el.SetMeanRadius( + ComputeBranchRadius(centerline, blankedUniqueBranchesIndex[j]) + ) + el.SetLength( + ComputeBranchLength(centerline, blankedUniqueBranchesIndex[j]) + ) el.SetBlanking(1) - npts = centerline.GetCell(blankedUniqueBranchesIndex[j]). \ - GetPoints().GetNumberOfPoints() + npts = ( + centerline.GetCell(blankedUniqueBranchesIndex[j]) + .GetPoints() + .GetNumberOfPoints() + ) x0 = [0.0, 0.0, 0.0] x1 = [0.0, 0.0, 0.0] - centerline.GetCell(blankedUniqueBranchesIndex[j]). \ - GetPoints().GetPoint(0, x0) - centerline.GetCell(blankedUniqueBranchesIndex[j]). \ - GetPoints().GetPoint(npts - 1, x1) + centerline.GetCell( + blankedUniqueBranchesIndex[j] + ).GetPoints().GetPoint(0, x0) + centerline.GetCell( + blankedUniqueBranchesIndex[j] + ).GetPoints().GetPoint(npts - 1, x1) x0List = [] x1List = [] VtkCellIdList = [] @@ -319,9 +358,15 @@ def SetNetworkStructure(centerline, network, verboseprint, el.SetVtkGroupIdList(VtkGroupIdList) el.SetVtkCellIdList(VtkCellIdList) network.AddElement(el) - verboseprint("> Edge Id " + repr(indexUniqueBranches) - + ", Length = " + repr(el.GetLength()) + " and Radius = " - + repr(el.GetMeanRadius()) + ".") + verboseprint( + "> Edge Id " + + repr(indexUniqueBranches) + + ", Length = " + + repr(el.GetLength()) + + " and Radius = " + + repr(el.GetMeanRadius()) + + "." + ) indexUniqueBranches += 1 else: el = Element(Id=indexUniqueBranches) @@ -348,9 +393,15 @@ def SetNetworkStructure(centerline, network, verboseprint, el.SetVtkGroupIdList(VtkGroupIdList) el.SetVtkCellIdList(VtkCellIdList) network.AddElement(el) - verboseprint("> Edge Id " + repr(indexUniqueBranches) - + ", Length = " + repr(el.GetLength()) + " and Radius = " - + repr(el.GetMeanRadius()) + ".") + verboseprint( + "> Edge Id " + + repr(indexUniqueBranches) + + ", Length = " + + repr(el.GetLength()) + + " and Radius = " + + repr(el.GetMeanRadius()) + + "." 
+ ) indexUniqueBranches += 1 verboseprint("> ") @@ -359,7 +410,8 @@ def SetNetworkStructure(centerline, network, verboseprint, ComputeConnectivity(network, minLength, verboseprint) SetRadiusX0(centerline, network, verboseprint) network.SetNetworkInletRadius( - ComputeInletAverageRadius(centerline, 0.0, verboseprint)) + ComputeInletAverageRadius(centerline, 0.0, verboseprint) + ) return centerline @@ -393,16 +445,26 @@ def ComputeConnectivity(network, tolerance, verboseprint): for x0 in otherBranch.GetInPointsx0(): if vtk.vtkMath.Distance2BetweenPoints(x0, x1) < tol: if vtk.vtkMath.Distance2BetweenPoints(x0, x1) > tolerance: - print('WARNING: POTENTIAL CONNECTIVITY ISSUE. ') - print(' A distance between connected points is suspicious.') - print(' The segment(s) of CELL ID VTK ' + repr(treatedBranch.GetVtkCellIdList())) - print(' and, the segment(s) of CELL ID VTK ' + repr( - otherBranch.GetVtkCellIdList()) + ' will be considered') - print(' as connected. Please check if this action was expected.') + print("WARNING: POTENTIAL CONNECTIVITY ISSUE. ") + print( + " A distance between connected points is suspicious." + ) + print( + " The segment(s) of CELL ID VTK " + + repr(treatedBranch.GetVtkCellIdList()) + ) + print( + " and, the segment(s) of CELL ID VTK " + + repr(otherBranch.GetVtkCellIdList()) + + " will be considered" + ) + print( + " as connected. Please check if this action was expected." + ) print() otherBranch.SetInOutPointsIds( - treatedBranch.GetOutPointsx1Id(), - otherBranch.GetId() + 2) + treatedBranch.GetOutPointsx1Id(), otherBranch.GetId() + 2 + ) otherBranch.SetBehindSegment(treatedBranch.GetId()) treatedBranch.SetFrontSegment(otherBranch.GetId()) atLeastOneFound = True @@ -410,8 +472,8 @@ def ComputeConnectivity(network, tolerance, verboseprint): if not (atLeastOneFound): treatedBranch.SetIfOutlet(True) treatedBranch.SetInOutPointsIds( - treatedBranch.GetInPointsx0Id(), - treatedBranch.GetId() + 2) + treatedBranch.GetInPointsx0Id(), treatedBranch.GetId() + 2 + ) def SetRadiusX0(centerline, network, verboseprint): @@ -435,8 +497,9 @@ def ComputeInletAverageRadius(centerline, desiredLength, verboseprint): length = 0.0 resistance = 0.0 radiusArray = centerline.GetPointData().GetArray(RADIUSARRAYNAME) - npts = GetIndexCenterlineForADefinedLength(centerline, branchId, - desiredLength, verboseprint) + npts = GetIndexCenterlineForADefinedLength( + centerline, branchId, desiredLength, verboseprint + ) for k in range(0, npts - 1): point0 = [0.0, 0.0, 0.0] point1 = [0.0, 0.0, 0.0] @@ -448,7 +511,8 @@ def ComputeInletAverageRadius(centerline, desiredLength, verboseprint): resistance += dx / r ** (4.0) if npts < 2: branchMeanRadius = radiusArray.GetComponent( - centerline.GetCell(branchId).GetPointId(0), 0) + centerline.GetCell(branchId).GetPointId(0), 0 + ) else: branchMeanRadius = (length / resistance) ** (0.25) @@ -469,8 +533,9 @@ def ComputeInletAverageCrossSectionArea(centerline, desiredLength, verboseprint) length = 0.0 resistance = 0.0 sectionArray = centerline.GetPointData().GetArray(SECTIONARRAYNAME) - npts = GetIndexCenterlineForADefinedLength(centerline, branchId, - desiredLength, verboseprint) + npts = GetIndexCenterlineForADefinedLength( + centerline, branchId, desiredLength, verboseprint + ) for k in range(1, npts - 1): point0 = [0.0, 0.0, 0.0] point1 = [0.0, 0.0, 0.0] @@ -492,10 +557,11 @@ def ComputeInletAverageCrossSectionArea(centerline, desiredLength, verboseprint) return branchMeanRadius -def GetIndexCenterlineForADefinedLength(centerline, branchId, desiredLength, - 
verboseprint): +def GetIndexCenterlineForADefinedLength( + centerline, branchId, desiredLength, verboseprint +): """Get the index of the point such as the desired distance between the index and - the beginning of the branch is reached. """ + the beginning of the branch is reached.""" length = 0.0 done = False @@ -520,7 +586,7 @@ def GetIndexCenterlineForADefinedLength(centerline, branchId, desiredLength, def GetListProbePoints(centerline, network, verboseprint): - """Get points on a centerline spaced by the inlet diameter. """ + """Get points on a centerline spaced by the inlet diameter.""" diameterInlet = 2.0 * network.GetNetworkInletRadius() pointsList = [] @@ -534,8 +600,9 @@ def GetListProbePoints(centerline, network, verboseprint): while not (done): x = [0.0, 0.0, 0.0] desiredLength = float(ctr) * diameterInlet - idx = GetIndexCenterlineForADefinedLength(centerline, branchId, desiredLength, - verboseprint) + idx = GetIndexCenterlineForADefinedLength( + centerline, branchId, desiredLength, verboseprint + ) if idx is None: break centerline.GetCell(branchId).GetPoints().GetPoint(idx, x) @@ -544,7 +611,7 @@ def GetListProbePoints(centerline, network, verboseprint): if idx == npts - 2: done = True - return (pointsList) + return pointsList class Element(object): @@ -655,7 +722,7 @@ def GetInletRadius(self): return self.inletRadius def GetMeanArea(self): - meanArea = np.pi * (self.meanRadius ** 2.0) + meanArea = np.pi * (self.meanRadius**2.0) return meanArea def GetInPointsx0(self): diff --git a/src/vampy/automatedPreprocessing/NetworkBoundaryConditions.py b/src/vampy/automatedPreprocessing/NetworkBoundaryConditions.py index 00decc64..7087db5c 100644 --- a/src/vampy/automatedPreprocessing/NetworkBoundaryConditions.py +++ b/src/vampy/automatedPreprocessing/NetworkBoundaryConditions.py @@ -9,6 +9,7 @@ # Copyright (c) Christophe Chnafa. All rights reserved. + class FlowSplitting(object): """This class computes the flow splitting for a network according to [ADD REF AJRN] and [VALIDATION PAPER]""" @@ -33,7 +34,7 @@ def ComputeAlphas(self, network, verboseprint): """ if network.GetNumberOfOutlet() < 2: - raise RuntimeError('The network has only one outlet.') + raise RuntimeError("The network has only one outlet.") # For each element of the network, for element in network.elements: if not (element.IsBlanked()): @@ -50,12 +51,15 @@ def ComputeAlphas(self, network, verboseprint): if otherElement.GetInPointsx0Id() == element.GetInPointsx0Id(): adjacentBranchFound = True sumSurfaces += network.elements[ - otherElement.GetFrontSegment()].GetMeanArea() + otherElement.GetFrontSegment() + ].GetMeanArea() # At least one adjacent blanked branch should be found. if not (adjacentBranchFound): - raise RuntimeError('Unexpected error, ' - 'adjacent branch not found. Check the connectivity.') + raise RuntimeError( + "Unexpected error, " + "adjacent branch not found. Check the connectivity." + ) alpha = S / sumSurfaces element.SetAlpha(alpha) self.hasComputedAlphas = True @@ -74,7 +78,7 @@ def ComputeBetas(self, network, verboseprint): """ if not (self.hasComputedAlphas): - raise RuntimeError('Alpha coefficients need to be computed first.') + raise RuntimeError("Alpha coefficients need to be computed first.") # For each element of the network, ... for element in network.elements: if not (element.IsAnOutlet()): @@ -86,8 +90,10 @@ def ComputeBetas(self, network, verboseprint): # the network in direction of the flow inlet. 
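
For context on GetListProbePoints above: probes are emitted along each branch every time the cumulative arc length passes another multiple of the inlet diameter, using GetIndexCenterlineForADefinedLength to walk the polyline. The sketch below reproduces only that spacing idea in plain NumPy; it is illustrative, not the repository's API, and the straight-line centerline is a hypothetical example.

import numpy as np

def probe_points(polyline, inlet_diameter):
    """Return points on `polyline` spaced (in arc length) by `inlet_diameter`."""
    polyline = np.asarray(polyline, dtype=float)
    seg = np.linalg.norm(np.diff(polyline, axis=0), axis=1)
    arc = np.concatenate(([0.0], np.cumsum(seg)))  # cumulative arc length per vertex
    targets = np.arange(0.0, arc[-1], inlet_diameter)
    # For each target length, take the first vertex whose cumulative length reaches it
    idx = np.searchsorted(arc, targets)
    return polyline[idx]

line = np.column_stack([np.linspace(0, 10, 101), np.zeros(101), np.zeros(101)])
print(probe_points(line, inlet_diameter=2.0))
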
while not (foundNetworkRoot): if currentElement.GetBehindSegment() is None: - raise RuntimeError('The network is constitued of one segment ' - 'or the input centerlines have a hanging segment.') + raise RuntimeError( + "The network is constitued of one segment " + "or the input centerlines have a hanging segment." + ) behindElement = network.elements[currentElement.GetBehindSegment()] if behindElement.IsBlanked(): beta *= behindElement.GetAlpha() @@ -121,7 +127,7 @@ def ComputeGammas(self, network, verboseprint): self.hasComputedGammas = True def CheckTotalFlowRate(self, network, verboseprint): - """Check if the sum of the outflows is 100%. """ + """Check if the sum of the outflows is 100%.""" tol = 0.000001 # Flow balance error tolerance. if self.hasComputedBetas: sumBeta = 0.0 @@ -129,11 +135,11 @@ def CheckTotalFlowRate(self, network, verboseprint): if element.IsAnOutlet(): sumBeta += element.GetBeta() if abs(sumBeta - 1.0) > tol: - raise RuntimeError('Unexpected error, sum(Beta) coefficients != 1.0') + raise RuntimeError("Unexpected error, sum(Beta) coefficients != 1.0") if self.hasComputedGammas: sumGamma = 0.0 for element in network.elements: if element.IsAnOutlet(): sumGamma += element.GetGamma() if abs(sumGamma - 1.0) > tol: - raise RuntimeError('Unexpected error, sum(Gamma) coefficients != 1.0') + raise RuntimeError("Unexpected error, sum(Gamma) coefficients != 1.0") diff --git a/src/vampy/automatedPreprocessing/ToolRepairSTL.py b/src/vampy/automatedPreprocessing/ToolRepairSTL.py index f2433989..56eabdab 100755 --- a/src/vampy/automatedPreprocessing/ToolRepairSTL.py +++ b/src/vampy/automatedPreprocessing/ToolRepairSTL.py @@ -39,10 +39,18 @@ def DumpQualityStats(iq, arrayname): range.append(an.GetComponent(0, 2)) average = an.GetComponent(0, 1) stdDev = math.sqrt(math.fabs(an.GetComponent(0, 3))) - outStr = '%s%g%s%g%s%g\n%s%g%s%g' % ( - ' cardinality: ', cardinality, - ' , range: ', range[0], ' - ', range[1], - ' average: ', average, ' , standard deviation: ', stdDev) + outStr = "%s%g%s%g%s%g\n%s%g%s%g" % ( + " cardinality: ", + cardinality, + " , range: ", + range[0], + " - ", + range[1], + " average: ", + average, + " , standard deviation: ", + stdDev, + ) print(outStr) return outStr @@ -67,7 +75,9 @@ def foundAndDeleteNaNTriangles(mesh): killThisTriangle = False nPointsForThisCell = mesh.GetCell(i).GetPoints().GetNumberOfPoints() if nPointsForThisCell > 3: - print("> WARNING: found Cell with more than 3 points: there is more than triangles.") + print( + "> WARNING: found Cell with more than 3 points: there is more than triangles." + ) for j in range(0, nPointsForThisCell): x = [0.0, 0.0, 0.0] mesh.GetCell(i).GetPoints().GetPoint(j, x) @@ -108,7 +118,7 @@ def cleanTheSurface(mesh): print("> Done.") print("> ") - return (outputPolyData) + return outputPolyData def closeAllTheHolesOnTheSurface(mesh): @@ -125,7 +135,7 @@ def closeAllTheHolesOnTheSurface(mesh): print("> Done.") print("> ") - return (outputPolyData) + return outputPolyData def cleanTheSurfaceUnstructGrid(mesh): @@ -141,7 +151,7 @@ def cleanTheSurfaceUnstructGrid(mesh): print("> Done.") print("> ") - return (outputPolyData) + return outputPolyData def writeSTLfile(outputPolyData, outputFileName, ascii): @@ -167,7 +177,9 @@ def checkIfThereIsHoles(outputBisPolyData): featureEdges.Update() numberOfOpenEdges = featureEdges.GetOutput().GetNumberOfCells() if numberOfOpenEdges > 0: - print("> The surface is open. It might be only the outlets if you did not try to close the surface.") + print( + "> The surface is open. 
It might be only the outlets if you did not try to close the surface." + ) else: print("> The surface is closed, there is no holes.") print(">") @@ -182,14 +194,16 @@ def checkIfThereIsNonTriangleCells(outputBisPolyData): print("> ERROR: There are elements that are not triangles") -def repairSTL(inputFileName, outputFileName, closeHoles, checkHoles, checkQuality, ascii): +def repairSTL( + inputFileName, outputFileName, closeHoles, checkHoles, checkQuality, ascii +): # Open file. mesh = loadFile(inputFileName) if checkQuality: quality = computeQualityMesh(mesh, vtk.VTK_QUALITY_ASPECT_RATIO) - print(DumpQualityStats(quality, 'Quality')) + print(DumpQualityStats(quality, "Quality")) # Check the number of pts and cells. surfaceOverview(mesh) @@ -223,35 +237,74 @@ def repairSTL(inputFileName, outputFileName, closeHoles, checkHoles, checkQualit checkIfThereIsNonTriangleCells(outputBisPolyData) - if not (outputFileName == ''): + if not (outputFileName == ""): writeSTLfile(outputBisPolyData, outputFileName, ascii) if __name__ == "__main__": parser = argparse.ArgumentParser( - description="ToolRepairSTL: correct an STL surface with NaN points and/or redundant nodes.") - parser.add_argument('-i', '--input', type=str, required=True, - dest='inputFileName', - help="Input file containing the surface data in a STL format.") - parser.add_argument('-o', '--output', type=str, required=False, - default='', dest='outputFileName', - help="Output file containing the cleaned surface data in a STL format.") - parser.add_argument('-p', '--patchesholes', required=False, - default=False, - dest='closeHoles', action="store_true", - help="Set this argument to true if you want to patch up the potential holes of the surface.") - parser.add_argument('-c', '--checkholes', required=False, - default=False, - dest='checkholes', action="store_true", - help="Set this argument to true if you want to check up the potential holes of the surface.") - parser.add_argument('-a', '--ascii', required=False, - default=False, - dest='ascii', action="store_true", - help="Set this argument to true if you want the output STL wrote in ASCII.") - parser.add_argument('-q', '--checkquality', required=False, - default=False, - dest='checkQuality', action="store_true", - help="Set this argument to true if you want to check up the potential holes of the surface.") + description="ToolRepairSTL: correct an STL surface with NaN points and/or redundant nodes." 
+ ) + parser.add_argument( + "-i", + "--input", + type=str, + required=True, + dest="inputFileName", + help="Input file containing the surface data in a STL format.", + ) + parser.add_argument( + "-o", + "--output", + type=str, + required=False, + default="", + dest="outputFileName", + help="Output file containing the cleaned surface data in a STL format.", + ) + parser.add_argument( + "-p", + "--patchesholes", + required=False, + default=False, + dest="closeHoles", + action="store_true", + help="Set this argument to true if you want to patch up the potential holes of the surface.", + ) + parser.add_argument( + "-c", + "--checkholes", + required=False, + default=False, + dest="checkholes", + action="store_true", + help="Set this argument to true if you want to check up the potential holes of the surface.", + ) + parser.add_argument( + "-a", + "--ascii", + required=False, + default=False, + dest="ascii", + action="store_true", + help="Set this argument to true if you want the output STL wrote in ASCII.", + ) + parser.add_argument( + "-q", + "--checkquality", + required=False, + default=False, + dest="checkQuality", + action="store_true", + help="Set this argument to true if you want to check up the potential holes of the surface.", + ) args = parser.parse_args() - repairSTL(args.inputFileName, args.outputFileName, args.closeHoles, args.checkholes, args.checkQuality, args.ascii) + repairSTL( + args.inputFileName, + args.outputFileName, + args.closeHoles, + args.checkholes, + args.checkQuality, + args.ascii, + ) diff --git a/src/vampy/automatedPreprocessing/__init__.py b/src/vampy/automatedPreprocessing/__init__.py index bd2d466c..0f7ea3c7 100644 --- a/src/vampy/automatedPreprocessing/__init__.py +++ b/src/vampy/automatedPreprocessing/__init__.py @@ -1,9 +1,11 @@ -from . import automated_preprocessing -from . import preprocessing_common -from . import DisplayData -from . import ImportData -from . import NetworkBoundaryConditions -from . import simulate -from . import ToolRepairSTL -from . import visualize -from . import vmtk_pointselector +from . 
import ( + DisplayData, + ImportData, + NetworkBoundaryConditions, + ToolRepairSTL, + automated_preprocessing, + preprocessing_common, + simulate, + visualize, + vmtk_pointselector, +) diff --git a/src/vampy/automatedPreprocessing/automated_preprocessing.py b/src/vampy/automatedPreprocessing/automated_preprocessing.py index 4dd56a38..ae3fb455 100644 --- a/src/vampy/automatedPreprocessing/automated_preprocessing.py +++ b/src/vampy/automatedPreprocessing/automated_preprocessing.py @@ -1,28 +1,79 @@ import argparse import sys -from os import remove, path +from os import path, remove import numpy as np -from morphman import get_uncapped_surface, write_polydata, get_parameters, vtk_clean_polydata, \ - vtk_triangulate_surface, write_parameters, vmtk_cap_polydata, compute_centerlines, get_centerline_tolerance, \ - get_vtk_point_locator, extract_single_line, vtk_merge_polydata, get_point_data_array, smooth_voronoi_diagram, \ - create_new_surface, compute_centers, vmtk_smooth_surface, str2bool, vmtk_compute_voronoi_diagram, \ - prepare_output_surface, vmtk_compute_geometric_features +from morphman import ( + compute_centerlines, + compute_centers, + create_new_surface, + extract_single_line, + get_centerline_tolerance, + get_parameters, + get_point_data_array, + get_uncapped_surface, + get_vtk_point_locator, + prepare_output_surface, + smooth_voronoi_diagram, + str2bool, + vmtk_cap_polydata, + vmtk_compute_geometric_features, + vmtk_compute_voronoi_diagram, + vmtk_smooth_surface, + vtk_clean_polydata, + vtk_merge_polydata, + vtk_triangulate_surface, + write_parameters, + write_polydata, +) # Local imports from vampy.automatedPreprocessing import ToolRepairSTL -from vampy.automatedPreprocessing.preprocessing_common import read_polydata, get_centers_for_meshing, \ - dist_sphere_diam, dist_sphere_curvature, dist_sphere_constant, get_regions_to_refine, add_flow_extension, \ - write_mesh, mesh_alternative, generate_mesh, find_boundaries, compute_flow_rate, setup_model_network, \ - radiusArrayName, scale_surface, get_furtest_surface_point, check_if_closed_surface +from vampy.automatedPreprocessing.preprocessing_common import ( + add_flow_extension, + check_if_closed_surface, + compute_flow_rate, + dist_sphere_constant, + dist_sphere_curvature, + dist_sphere_diam, + find_boundaries, + generate_mesh, + get_centers_for_meshing, + get_furtest_surface_point, + get_regions_to_refine, + mesh_alternative, + radiusArrayName, + read_polydata, + scale_surface, + setup_model_network, + write_mesh, +) from vampy.automatedPreprocessing.simulate import run_simulation from vampy.automatedPreprocessing.visualize import visualize_model -def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_factor, smoothing_iterations, - meshing_method, refine_region, is_atrium, add_flow_extensions, visualize, config_path, - coarsening_factor, inlet_flow_extension_length, outlet_flow_extension_length, edge_length, - region_points, compress_mesh, add_boundary_layer, scale_factor, resampling_step): +def run_pre_processing( + input_model, + verbose_print, + smoothing_method, + smoothing_factor, + smoothing_iterations, + meshing_method, + refine_region, + is_atrium, + add_flow_extensions, + visualize, + config_path, + coarsening_factor, + inlet_flow_extension_length, + outlet_flow_extension_length, + edge_length, + region_points, + compress_mesh, + add_boundary_layer, + scale_factor, + resampling_step, +): """ Automatically generate mesh of surface model in .vtu and .xml format, including prescribed flow rates at 
inlet and outlet based on flow network model. @@ -52,7 +103,7 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f resampling_step (float): Float value determining the resampling step for centerline computations, in [m] """ # Get paths - case_name = input_model.rsplit(path.sep, 1)[-1].rsplit('.')[0] + case_name = input_model.rsplit(path.sep, 1)[-1].rsplit(".")[0] dir_path = input_model.rsplit(path.sep, 1)[0] print("\n--- Working on case:", case_name, "\n") @@ -94,7 +145,9 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f # to closed boundaries. Area_limit will set an upper limit of the detected area, may vary between models. # The circleness_limit parameters determines the detected regions' similarity to a circle, often assumed # to be close to a circle. - surface = get_uncapped_surface(surface, gradients_limit=0.01, area_limit=20, circleness_limit=5) + surface = get_uncapped_surface( + surface, gradients_limit=0.01, area_limit=20, circleness_limit=5 + ) write_polydata(surface, file_name_clipped_model) else: surface = read_polydata(file_name_clipped_model) @@ -112,8 +165,12 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f surface = ToolRepairSTL.cleanTheSurface(surface) foundNaN = ToolRepairSTL.foundAndDeleteNaNTriangles(surface) if foundNaN: - raise RuntimeError(("There is an issue with the surface. " - "Nan coordinates or some other shenanigans.")) + raise RuntimeError( + ( + "There is an issue with the surface. " + "Nan coordinates or some other shenanigans." + ) + ) else: parameters["check_surface"] = True write_parameters(parameters, base_path) @@ -133,8 +190,13 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f source = outlets if is_atrium else inlet target = inlet if is_atrium else outlets - centerlines, voronoi, _ = compute_centerlines(source, target, file_name_centerlines, capped_surface, - resampling=resampling_step) + centerlines, voronoi, _ = compute_centerlines( + source, + target, + file_name_centerlines, + capped_surface, + resampling=resampling_step, + ) tol = get_centerline_tolerance(centerlines) # Get 'center' and 'radius' of the regions(s) @@ -144,11 +206,19 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f if refine_region: regions = get_regions_to_refine(capped_surface, region_points, base_path) for i in range(len(regions) // 3): - print("--- Region to refine ({}): {:.3f} {:.3f} {:.3f}" - .format(i + 1, regions[3 * i], regions[3 * i + 1], regions[3 * i + 2])) - - centerline_region, _, _ = compute_centerlines(source, regions, file_name_refine_region_centerlines, - capped_surface, resampling=resampling_step) + print( + "--- Region to refine ({}): {:.3f} {:.3f} {:.3f}".format( + i + 1, regions[3 * i], regions[3 * i + 1], regions[3 * i + 2] + ) + ) + + centerline_region, _, _ = compute_centerlines( + source, + regions, + file_name_refine_region_centerlines, + capped_surface, + resampling=resampling_step, + ) # Extract the region centerline refine_region_centerline = [] @@ -164,7 +234,9 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f point = line.GetPoints().GetPoint(j) ID = locator.FindClosestPoint(point) tmp_point = centerlines.GetPoints().GetPoint(ID) - dist = np.sqrt(np.sum((np.asarray(point) - np.asarray(tmp_point)) ** 2)) + dist = np.sqrt( + np.sum((np.asarray(point) - np.asarray(tmp_point)) ** 2) + ) if dist <= tol: break @@ -175,14 +247,20 @@ def 
run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f refine_region_centerline.append(tmp) else: - refine_region_centerline.append(read_polydata(file_name_region_centerlines.format(i))) + refine_region_centerline.append( + read_polydata(file_name_region_centerlines.format(i)) + ) # Merge the sac centerline region_centerlines = vtk_merge_polydata(refine_region_centerline) for region in refine_region_centerline: region_factor = 0.9 if is_atrium else 0.5 - region_center.append(region.GetPoints().GetPoint(int(region.GetNumberOfPoints() * region_factor))) + region_center.append( + region.GetPoints().GetPoint( + int(region.GetNumberOfPoints() * region_factor) + ) + ) tmp_misr = get_point_data_array(radiusArrayName, region) misr_max.append(tmp_misr.max()) @@ -192,15 +270,21 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f if not path.isfile(file_name_surface_smooth): # Get Voronoi diagram if not path.isfile(file_name_voronoi): - voronoi = vmtk_compute_voronoi_diagram(capped_surface, file_name_voronoi) + voronoi = vmtk_compute_voronoi_diagram( + capped_surface, file_name_voronoi + ) write_polydata(voronoi, file_name_voronoi) else: voronoi = read_polydata(file_name_voronoi) # Get smooth Voronoi diagram if not path.isfile(file_name_voronoi_smooth): - voronoi_smoothed = smooth_voronoi_diagram(voronoi, centerlines, smoothing_factor, - no_smooth_cl=region_centerlines) + voronoi_smoothed = smooth_voronoi_diagram( + voronoi, + centerlines, + smoothing_factor, + no_smooth_cl=region_centerlines, + ) write_polydata(voronoi_smoothed, file_name_voronoi_smooth) else: voronoi_smoothed = read_polydata(file_name_voronoi_smooth) @@ -209,8 +293,13 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f surface_smoothed = create_new_surface(voronoi_smoothed) # Uncapp the surface - surface_uncapped = prepare_output_surface(surface_smoothed, surface, centerlines, file_name_voronoi_surface, - test_merge=True) + surface_uncapped = prepare_output_surface( + surface_smoothed, + surface, + centerlines, + file_name_voronoi_surface, + test_merge=True, + ) # Check if there has been added new outlets num_outlets = centerlines.GetNumberOfLines() @@ -219,10 +308,13 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f if num_outlets != num_outlets_after: write_polydata(surface, file_name_surface_smooth) - print(("ERROR: Automatic clipping failed. You have to open {} and " + - "manually clipp the branch which still is capped. " + - "Overwrite the current {} and restart the script.").format( - file_name_surface_smooth, file_name_surface_smooth)) + print( + ( + "ERROR: Automatic clipping failed. You have to open {} and " + + "manually clipp the branch which still is capped. " + + "Overwrite the current {} and restart the script." 
+ ).format(file_name_surface_smooth, file_name_surface_smooth) + ) sys.exit(0) surface = surface_uncapped @@ -237,10 +329,17 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f surface = read_polydata(file_name_surface_smooth) elif smoothing_method in ["laplace", "taubin"]: - print("--- Smooth surface: {} smoothing\n".format(smoothing_method.capitalize())) + print( + "--- Smooth surface: {} smoothing\n".format(smoothing_method.capitalize()) + ) if not path.isfile(file_name_surface_smooth): - surface = vmtk_smooth_surface(surface, smoothing_method, iterations=smoothing_iterations, passband=0.1, - relaxation=0.01) + surface = vmtk_smooth_surface( + surface, + smoothing_method, + iterations=smoothing_iterations, + passband=0.1, + relaxation=0.01, + ) # Save the smoothed surface write_polydata(surface, file_name_surface_smooth) @@ -256,13 +355,23 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f print("--- Adding flow extensions\n") # Add extension normal on boundary for atrium models extension = "centerlinedirection" if is_atrium else "boundarynormal" - surface_extended = add_flow_extension(surface, centerlines, include_outlet=False, - extension_length=inlet_flow_extension_length, - extension_mode=extension) - surface_extended = add_flow_extension(surface_extended, centerlines, include_outlet=True, - extension_length=outlet_flow_extension_length) - - surface_extended = vmtk_smooth_surface(surface_extended, "laplace", iterations=200) + surface_extended = add_flow_extension( + surface, + centerlines, + include_outlet=False, + extension_length=inlet_flow_extension_length, + extension_mode=extension, + ) + surface_extended = add_flow_extension( + surface_extended, + centerlines, + include_outlet=True, + extension_length=outlet_flow_extension_length, + ) + + surface_extended = vmtk_smooth_surface( + surface_extended, "laplace", iterations=200 + ) write_polydata(surface_extended, file_name_model_flow_ext) else: surface_extended = read_polydata(file_name_model_flow_ext) @@ -278,15 +387,23 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f print("--- Compute the model centerlines with flow extension.\n") # Compute the centerlines. 
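
The flow-extension step reformatted above extends the inlets first (centerline-direction mode for atrium models, boundary-normal otherwise) and then the outlets. A small helper summarising that decision is sketched below; it only builds the keyword arguments that the patch passes to add_flow_extension and is not itself part of the code base.

def flow_extension_plan(is_atrium, inlet_length, outlet_length):
    """Return the two keyword-argument sets used for inlet and outlet extensions."""
    inlet_mode = "centerlinedirection" if is_atrium else "boundarynormal"
    return [
        dict(include_outlet=False, extension_length=inlet_length, extension_mode=inlet_mode),
        dict(include_outlet=True, extension_length=outlet_length),
    ]

print(flow_extension_plan(is_atrium=True, inlet_length=5, outlet_length=5))
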
if has_outlet: - inlet, outlets = get_centers_for_meshing(surface_extended, is_atrium, base_path, - use_flow_extensions=True) + inlet, outlets = get_centers_for_meshing( + surface_extended, is_atrium, base_path, use_flow_extensions=True + ) else: - inlet, _ = get_centers_for_meshing(surface_extended, is_atrium, base_path, use_flow_extensions=True) + inlet, _ = get_centers_for_meshing( + surface_extended, is_atrium, base_path, use_flow_extensions=True + ) # Flip outlets and inlets for atrium models source = outlets if is_atrium else inlet target = inlet if is_atrium else outlets - centerlines, _, _ = compute_centerlines(source, target, file_name_flow_centerlines, capped_surface, - resampling=resampling_step) + centerlines, _, _ = compute_centerlines( + source, + target, + file_name_flow_centerlines, + capped_surface, + resampling=resampling_step, + ) else: centerlines = read_polydata(file_name_flow_centerlines) @@ -300,27 +417,47 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f n = get_point_data_array("FrenetTangent", line, k=3) n_diff = np.linalg.norm(np.cross(n[1:], n[:-1]), axis=1) n_id = n_diff[::-1].argmax() - centerlines = extract_single_line(centerlines, 0, end_id=centerlines.GetNumberOfPoints() - n_id - 1) + centerlines = extract_single_line( + centerlines, 0, end_id=centerlines.GetNumberOfPoints() - n_id - 1 + ) # Choose input for the mesh print("--- Computing distance to sphere\n") if meshing_method == "constant": if not path.isfile(file_name_distance_to_sphere_const): - distance_to_sphere = dist_sphere_constant(surface_extended, centerlines, region_center, misr_max, - file_name_distance_to_sphere_const, edge_length) + distance_to_sphere = dist_sphere_constant( + surface_extended, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere_const, + edge_length, + ) else: distance_to_sphere = read_polydata(file_name_distance_to_sphere_const) elif meshing_method == "curvature": if not path.isfile(file_name_distance_to_sphere_curv): - distance_to_sphere = dist_sphere_curvature(surface_extended, centerlines, region_center, misr_max, - file_name_distance_to_sphere_curv, coarsening_factor) + distance_to_sphere = dist_sphere_curvature( + surface_extended, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere_curv, + coarsening_factor, + ) else: distance_to_sphere = read_polydata(file_name_distance_to_sphere_curv) elif meshing_method == "diameter": if not path.isfile(file_name_distance_to_sphere_diam): - distance_to_sphere = dist_sphere_diam(surface_extended, centerlines, region_center, misr_max, - file_name_distance_to_sphere_diam, coarsening_factor) + distance_to_sphere = dist_sphere_diam( + surface_extended, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere_diam, + coarsening_factor, + ) else: distance_to_sphere = read_polydata(file_name_distance_to_sphere_diam) @@ -328,25 +465,41 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f if not path.isfile(file_name_vtu_mesh): print("--- Computing mesh\n") try: - mesh, remeshed_surface = generate_mesh(distance_to_sphere, add_boundary_layer) + mesh, remeshed_surface = generate_mesh( + distance_to_sphere, add_boundary_layer + ) except Exception: distance_to_sphere = mesh_alternative(distance_to_sphere) - mesh, remeshed_surface = generate_mesh(distance_to_sphere, add_boundary_layer) + mesh, remeshed_surface = generate_mesh( + distance_to_sphere, add_boundary_layer + ) assert mesh.GetNumberOfPoints() > 0, "No points in 
mesh, try to remesh." - assert remeshed_surface.GetNumberOfPoints() > 0, "No points in surface mesh, try to remesh." + assert ( + remeshed_surface.GetNumberOfPoints() > 0 + ), "No points in surface mesh, try to remesh." if mesh.GetNumberOfPoints() < remeshed_surface.GetNumberOfPoints(): print("--- An error occurred during meshing. Will attempt to re-mesh \n") - mesh, remeshed_surface = generate_mesh(distance_to_sphere, add_boundary_layer) - - write_mesh(compress_mesh, file_name_surface_name, file_name_vtu_mesh, file_name_xml_mesh, - mesh, remeshed_surface) + mesh, remeshed_surface = generate_mesh( + distance_to_sphere, add_boundary_layer + ) + + write_mesh( + compress_mesh, + file_name_surface_name, + file_name_vtu_mesh, + file_name_xml_mesh, + mesh, + remeshed_surface, + ) else: mesh = read_polydata(file_name_vtu_mesh) - network, probe_points = setup_model_network(centerlines, file_name_probe_points, region_center, verbose_print) + network, probe_points = setup_model_network( + centerlines, file_name_probe_points, region_center, verbose_print + ) # Load updated parameters following meshing parameters = get_parameters(base_path) @@ -354,25 +507,43 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f print("--- Computing flow rates and flow split, and setting boundary IDs\n") mean_inflow_rate = compute_flow_rate(is_atrium, inlet, parameters) - find_boundaries(base_path, mean_inflow_rate, network, mesh, verbose_print, is_atrium) + find_boundaries( + base_path, mean_inflow_rate, network, mesh, verbose_print, is_atrium + ) # Display the flow split at the outlets, inlet flow rate, and probes. if visualize: - print("--- Visualizing flow split at outlets, inlet flow rate, and probes in VTK render window. ") + print( + "--- Visualizing flow split at outlets, inlet flow rate, and probes in VTK render window. " + ) print("--- Press 'q' inside the render window to exit.") - visualize_model(network.elements, probe_points, surface_extended, mean_inflow_rate) + visualize_model( + network.elements, probe_points, surface_extended, mean_inflow_rate + ) # Start simulation though ssh, without password if config_path is not None: - print("--- Uploading mesh and simulation files to cluster. Queueing simulation and post-processing.") + print( + "--- Uploading mesh and simulation files to cluster. Queueing simulation and post-processing." + ) run_simulation(config_path, dir_path, case_name) print("--- Removing unused pre-processing files") files_to_remove = [ - file_name_centerlines, file_name_refine_region_centerlines, file_name_region_centerlines, - file_name_distance_to_sphere_diam, file_name_distance_to_sphere_const, file_name_distance_to_sphere_curv, - file_name_voronoi, file_name_voronoi_smooth, file_name_voronoi_surface, file_name_surface_smooth, - file_name_model_flow_ext, file_name_clipped_model, file_name_flow_centerlines, file_name_surface_name + file_name_centerlines, + file_name_refine_region_centerlines, + file_name_region_centerlines, + file_name_distance_to_sphere_diam, + file_name_distance_to_sphere_const, + file_name_distance_to_sphere_curv, + file_name_voronoi, + file_name_voronoi_smooth, + file_name_voronoi_surface, + file_name_surface_smooth, + file_name_model_flow_ext, + file_name_clipped_model, + file_name_flow_centerlines, + file_name_surface_name, ] for file in files_to_remove: if path.exists(file): @@ -388,136 +559,197 @@ def read_command_line(input_path=None): Args: input_path (str): Input file path, positional argument with default None. 
""" - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description="Automated pre-processing for vascular modeling.") + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Automated pre-processing for vascular modeling.", + ) # Add common arguments required = input_path is None v = parser.add_mutually_exclusive_group(required=False) - v.add_argument('-v', '--verbosity', - dest='verbosity', - action='store_true', - default=False, - help="Activates the verbose mode.") - - parser.add_argument('-i', '--input-model', - type=str, - required=True, - help="Path to input model containing the 3D model. Expected format is VTK/VTP or STL.") - - parser.add_argument('-cm', '--compress-mesh', - type=str2bool, - required=False, - default=True, - help="Compress output mesh after generation.") - - parser.add_argument('-sm', '--smoothing-method', - type=str, - required=False, - default="no_smooth", - choices=["voronoi", "no_smooth", "laplace", "taubin"], - help="Determines smoothing method for surface smoothing. For Voronoi smoothing you can " + - "control the smoothing factor with --smoothing-factor (default = 0.25). For Laplace " + - "and Taubin smoothing, you can controll the amount of smoothing iterations with " + - "--smothing-iterations (default = 800).") - - parser.add_argument('-c', '--coarsening-factor', - type=float, - required=False, - default=1.0, - help="Refine or coarsen the standard mesh size. The higher the value the coarser the mesh.") - - parser.add_argument('-sf', '--smoothing-factor', - type=float, - required=False, - default=0.25, - help="Smoothing factor for Voronoi smoothing, removes all spheres which" + - " has a radius < MISR*(1-0.25), where MISR varying along the centerline.") - - parser.add_argument('-si', '--smoothing-iterations', - type=int, - required=False, - default=800, - help="Number of smoothing iterations for Laplace and Taubin type smoothing.") - - parser.add_argument('-m', '--meshing-method', - type=str, - choices=["diameter", "curvature", "constant"], - default="diameter", - help="Determines method of meshing. The method 'constant' is supplied with a constant edge " + - "length controlled by the -el argument, resulting in a constant density mesh. " + - "The 'curvature' method and 'diameter' method produces a variable density mesh," + - " based on the surface curvature and the distance from the " + - "centerline to the surface, respectively.") - - parser.add_argument('-el', '--edge-length', - default=None, - type=float, - help="Characteristic edge length used for the 'constant' meshing method.") + v.add_argument( + "-v", + "--verbosity", + dest="verbosity", + action="store_true", + default=False, + help="Activates the verbose mode.", + ) + + parser.add_argument( + "-i", + "--input-model", + type=str, + required=True, + help="Path to input model containing the 3D model. Expected format is VTK/VTP or STL.", + ) + + parser.add_argument( + "-cm", + "--compress-mesh", + type=str2bool, + required=False, + default=True, + help="Compress output mesh after generation.", + ) + + parser.add_argument( + "-sm", + "--smoothing-method", + type=str, + required=False, + default="no_smooth", + choices=["voronoi", "no_smooth", "laplace", "taubin"], + help="Determines smoothing method for surface smoothing. For Voronoi smoothing you can " + + "control the smoothing factor with --smoothing-factor (default = 0.25). 
For Laplace " + + "and Taubin smoothing, you can controll the amount of smoothing iterations with " + + "--smothing-iterations (default = 800).", + ) + + parser.add_argument( + "-c", + "--coarsening-factor", + type=float, + required=False, + default=1.0, + help="Refine or coarsen the standard mesh size. The higher the value the coarser the mesh.", + ) + + parser.add_argument( + "-sf", + "--smoothing-factor", + type=float, + required=False, + default=0.25, + help="Smoothing factor for Voronoi smoothing, removes all spheres which" + + " has a radius < MISR*(1-0.25), where MISR varying along the centerline.", + ) + + parser.add_argument( + "-si", + "--smoothing-iterations", + type=int, + required=False, + default=800, + help="Number of smoothing iterations for Laplace and Taubin type smoothing.", + ) + + parser.add_argument( + "-m", + "--meshing-method", + type=str, + choices=["diameter", "curvature", "constant"], + default="diameter", + help="Determines method of meshing. The method 'constant' is supplied with a constant edge " + + "length controlled by the -el argument, resulting in a constant density mesh. " + + "The 'curvature' method and 'diameter' method produces a variable density mesh," + + " based on the surface curvature and the distance from the " + + "centerline to the surface, respectively.", + ) + + parser.add_argument( + "-el", + "--edge-length", + default=None, + type=float, + help="Characteristic edge length used for the 'constant' meshing method.", + ) refine_region = parser.add_mutually_exclusive_group(required=False) - refine_region.add_argument('-r', '--refine-region', - action='store_true', - default=False, - help="Determine whether or not to refine a specific region of " + - "the input model") - - parser.add_argument('-rp', '--region-points', - type=float, - nargs="+", - default=None, - help="If -r or --refine-region is True, the user can provide the point(s)" - " which defines the regions to refine. " + - "Example providing the points (0.1, 5.0, -1) and (1, -5.2, 3.21):" + - " --region-points 0.1 5 -1 1 5.24 3.21") + refine_region.add_argument( + "-r", + "--refine-region", + action="store_true", + default=False, + help="Determine whether or not to refine a specific region of " + + "the input model", + ) + + parser.add_argument( + "-rp", + "--region-points", + type=float, + nargs="+", + default=None, + help="If -r or --refine-region is True, the user can provide the point(s)" + " which defines the regions to refine. " + + "Example providing the points (0.1, 5.0, -1) and (1, -5.2, 3.21):" + + " --region-points 0.1 5 -1 1 5.24 3.21", + ) atrium = parser.add_mutually_exclusive_group(required=False) - atrium.add_argument('-at', '--is-atrium', - action="store_true", - default=False, - help="Determine whether or not the model is an atrium model.") - - parser.add_argument('-f', '--add-flowextensions', - default=True, - type=str2bool, - help="Add flow extensions to to the model.") - - parser.add_argument('-fli', '--inlet-flowextension', - default=5, - type=float, - help="Length of flow extensions at inlet(s).") - - parser.add_argument('-flo', '--outlet-flowextension', - default=5, - type=float, - help="Length of flow extensions at outlet(s).") - - parser.add_argument('-viz', '--visualize', - default=True, - type=str2bool, - help="Visualize surface, inlet, outlet and probes after meshing.") - - parser.add_argument('-cp', '--config-path', - type=str, - default=None, - help='Path to configuration file for remote simulation. 
' + - 'See ssh_config.json for details') - - parser.add_argument('-bl', '--add-boundary-layer', - default=True, - type=str2bool, - help="Adds boundary layers along geometry wall if true.") - - parser.add_argument('-sc', '--scale-factor', - default=None, - type=float, - - help="Scale input model by this factor. Used to scale model to [mm].") - - parser.add_argument('-rs', '--resampling-step', - default=0.1, - type=float, - help="Resampling step used to resample centerline in [m].") + atrium.add_argument( + "-at", + "--is-atrium", + action="store_true", + default=False, + help="Determine whether or not the model is an atrium model.", + ) + + parser.add_argument( + "-f", + "--add-flowextensions", + default=True, + type=str2bool, + help="Add flow extensions to to the model.", + ) + + parser.add_argument( + "-fli", + "--inlet-flowextension", + default=5, + type=float, + help="Length of flow extensions at inlet(s).", + ) + + parser.add_argument( + "-flo", + "--outlet-flowextension", + default=5, + type=float, + help="Length of flow extensions at outlet(s).", + ) + + parser.add_argument( + "-viz", + "--visualize", + default=True, + type=str2bool, + help="Visualize surface, inlet, outlet and probes after meshing.", + ) + + parser.add_argument( + "-cp", + "--config-path", + type=str, + default=None, + help="Path to configuration file for remote simulation. " + + "See ssh_config.json for details", + ) + + parser.add_argument( + "-bl", + "--add-boundary-layer", + default=True, + type=str2bool, + help="Adds boundary layers along geometry wall if true.", + ) + + parser.add_argument( + "-sc", + "--scale-factor", + default=None, + type=float, + help="Scale input model by this factor. Used to scale model to [mm].", + ) + + parser.add_argument( + "-rs", + "--resampling-step", + default=0.1, + type=float, + help="Resampling step used to resample centerline in [m].", + ) # Parse path to get default values if required: @@ -526,11 +758,15 @@ def read_command_line(input_path=None): args = parser.parse_args(["-i" + input_path]) if args.meshing_method == "constant" and args.edge_length is None: - raise ValueError("ERROR: Please provide the edge length for uniform density meshing using --edge-length.") + raise ValueError( + "ERROR: Please provide the edge length for uniform density meshing using --edge-length." + ) if args.refine_region and args.region_points is not None: if len(args.region_points) % 3 != 0: - raise ValueError("ERROR: Please provide the region points as a multiple of 3.") + raise ValueError( + "ERROR: Please provide the region points as a multiple of 3." 
+ ) if args.verbosity: print() @@ -538,22 +774,38 @@ def read_command_line(input_path=None): def verbose_print(*args): for arg in args: - print(arg, end=' ') + print(arg, end=" ") print() + else: + def verbose_print(*args): return None verbose_print(args) - return dict(input_model=args.input_model, verbose_print=verbose_print, smoothing_method=args.smoothing_method, - smoothing_factor=args.smoothing_factor, smoothing_iterations=args.smoothing_iterations, - meshing_method=args.meshing_method, refine_region=args.refine_region, is_atrium=args.is_atrium, - add_flow_extensions=args.add_flowextensions, config_path=args.config_path, edge_length=args.edge_length, - coarsening_factor=args.coarsening_factor, inlet_flow_extension_length=args.inlet_flowextension, - visualize=args.visualize, region_points=args.region_points, compress_mesh=args.compress_mesh, - outlet_flow_extension_length=args.outlet_flowextension, add_boundary_layer=args.add_boundary_layer, - scale_factor=args.scale_factor, resampling_step=args.resampling_step) + return dict( + input_model=args.input_model, + verbose_print=verbose_print, + smoothing_method=args.smoothing_method, + smoothing_factor=args.smoothing_factor, + smoothing_iterations=args.smoothing_iterations, + meshing_method=args.meshing_method, + refine_region=args.refine_region, + is_atrium=args.is_atrium, + add_flow_extensions=args.add_flowextensions, + config_path=args.config_path, + edge_length=args.edge_length, + coarsening_factor=args.coarsening_factor, + inlet_flow_extension_length=args.inlet_flowextension, + visualize=args.visualize, + region_points=args.region_points, + compress_mesh=args.compress_mesh, + outlet_flow_extension_length=args.outlet_flowextension, + add_boundary_layer=args.add_boundary_layer, + scale_factor=args.scale_factor, + resampling_step=args.resampling_step, + ) def main_meshing(): diff --git a/src/vampy/automatedPreprocessing/preprocessing_common.py b/src/vampy/automatedPreprocessing/preprocessing_common.py index cefae56f..2440ac89 100644 --- a/src/vampy/automatedPreprocessing/preprocessing_common.py +++ b/src/vampy/automatedPreprocessing/preprocessing_common.py @@ -2,11 +2,32 @@ from os import path import numpy as np -from morphman import vtk_clean_polydata, vtk_triangulate_surface, get_parameters, write_parameters, read_polydata, \ - vmtkscripts, vtk, write_polydata, vtkvmtk, get_curvilinear_coordinate, vtk_compute_threshold, get_vtk_array, \ - get_distance, get_number_of_arrays, vmtk_smooth_surface, get_point_data_array, create_vtk_array, \ - get_vtk_point_locator, vtk_extract_feature_edges, get_uncapped_surface, vtk_compute_connectivity, \ - vtk_compute_mass_properties, extract_single_line, get_centerline_tolerance +from morphman import ( + create_vtk_array, + extract_single_line, + get_centerline_tolerance, + get_curvilinear_coordinate, + get_distance, + get_number_of_arrays, + get_parameters, + get_point_data_array, + get_uncapped_surface, + get_vtk_array, + get_vtk_point_locator, + read_polydata, + vmtk_smooth_surface, + vmtkscripts, + vtk, + vtk_clean_polydata, + vtk_compute_connectivity, + vtk_compute_mass_properties, + vtk_compute_threshold, + vtk_extract_feature_edges, + vtk_triangulate_surface, + write_parameters, + write_polydata, +) +from vmtk import vtkvmtk from vampy.automatedPreprocessing import ImportData from vampy.automatedPreprocessing.NetworkBoundaryConditions import FlowSplitting @@ -14,7 +35,7 @@ # Global array names distanceToSpheresArrayName = "DistanceToSpheres" -radiusArrayName = 'MaximumInscribedSphereRadius' 
+radiusArrayName = "MaximumInscribedSphereRadius" def get_regions_to_refine(surface, provided_points, dir_path): @@ -72,13 +93,25 @@ def provide_region_points(surface, provided_points, dir_path=None): regionSeedIds = SeedSelector.GetTargetSeedIds() get_point = surface.GetPoints().GetPoint - points = [list(get_point(regionSeedIds.GetId(i))) for i in range(regionSeedIds.GetNumberOfIds())] + points = [ + list(get_point(regionSeedIds.GetId(i))) + for i in range(regionSeedIds.GetNumberOfIds()) + ] else: surface_locator = get_vtk_point_locator(surface) - provided_points = [[provided_points[3 * i], provided_points[3 * i + 1], provided_points[3 * i + 2]] - for i in range(len(provided_points) // 3)] - - points = [list(surface.GetPoint(surface_locator.FindClosestPoint(p))) for p in provided_points] + provided_points = [ + [ + provided_points[3 * i], + provided_points[3 * i + 1], + provided_points[3 * i + 2], + ] + for i in range(len(provided_points) // 3) + ] + + points = [ + list(surface.GetPoint(surface_locator.FindClosestPoint(p))) + for p in provided_points + ] if dir_path is not None: info = {"number_of_regions": len(points)} @@ -134,7 +167,9 @@ def compute_centers_for_meshing(surface, is_atrium, case_path=None, test_capped= cells = vtk_extract_feature_edges(surface) if cells.GetNumberOfCells() == 0 and not test_capped: - print("WARNING: The model is capped, so it is uncapped, but the method is experimental.") + print( + "WARNING: The model is capped, so it is uncapped, but the method is experimental." + ) uncapped_surface = get_uncapped_surface(surface) compute_centers_for_meshing(uncapped_surface, is_atrium, case_path, test_capped) elif cells.GetNumberOfCells() == 0 and test_capped: @@ -161,8 +196,14 @@ def compute_centers_for_meshing(surface, is_atrium, case_path=None, test_capped= center = [] for i in range(int(region_array.max()) + 1): # Compute area - boundary = vtk_compute_threshold(outputs, "RegionId", lower=i - 0.1, upper=i + 0.1, threshold_type="between", - source=0) + boundary = vtk_compute_threshold( + outputs, + "RegionId", + lower=i - 0.1, + upper=i + 0.1, + threshold_type="between", + source=0, + ) delaunay_filter = vtk.vtkDelaunay2D() delaunay_filter.SetInputData(boundary) @@ -180,7 +221,10 @@ def compute_centers_for_meshing(surface, is_atrium, case_path=None, test_capped= boundaries_area_name = boundaries_name + "_area" boundary_id = area.index(max(area)) if case_path is not None: - info = {boundary_name: center[boundary_id].tolist(), boundary_area_name: area[boundary_id]} + info = { + boundary_name: center[boundary_id].tolist(), + boundary_area_name: area[boundary_id], + } p = 0 for i in range(len(area)): if i == boundary_id: @@ -252,7 +296,9 @@ def get_centers_for_meshing(surface, is_atrium, dir_path, use_flow_extensions=Fa return inlets, outlets -def dist_sphere_curvature(surface, centerlines, region_center, misr_max, save_path, factor): +def dist_sphere_curvature( + surface, centerlines, region_center, misr_max, save_path, factor +): """ Determines the target edge length for each cell on the surface, including potential refinement or coarsening of certain user specified areas. 
@@ -315,8 +361,11 @@ def dist_sphere_curvature(surface, centerlines, region_center, misr_max, save_pa # Add the center of the sac for i in range(len(region_center)): - distance_to_sphere = compute_distance_to_sphere(distance_to_sphere, region_center[i], - distance_scale=0.2 / (misr_max[i] * 2.5)) + distance_to_sphere = compute_distance_to_sphere( + distance_to_sphere, + region_center[i], + distance_scale=0.2 / (misr_max[i] * 2.5), + ) # Compute curvature curvatureFilter = vmtkscripts.vmtkSurfaceCurvature() @@ -331,7 +380,9 @@ def dist_sphere_curvature(surface, centerlines, region_center, misr_max, save_pa # Multiple the surface curvatureSurface = curvatureFilter.Surface curvatureArray = get_point_data_array("Curvature", curvatureSurface) - distance_to_sphere_array = get_point_data_array(distanceToSpheresArrayName, distance_to_sphere) + distance_to_sphere_array = get_point_data_array( + distanceToSpheresArrayName, distance_to_sphere + ) size_array = curvatureArray * distance_to_sphere_array * factor size_vtk_array = create_vtk_array(size_array, "Size") @@ -342,7 +393,9 @@ def dist_sphere_curvature(surface, centerlines, region_center, misr_max, save_pa return distance_to_sphere -def dist_sphere_constant(surface, centerlines, region_center, misr_max, save_path, edge_length): +def dist_sphere_constant( + surface, centerlines, region_center, misr_max, save_path, edge_length +): """ Determines the target edge length for each cell on the surface, including potential refinement or coarsening of certain user specified areas. @@ -370,15 +423,19 @@ def dist_sphere_constant(surface, centerlines, region_center, misr_max, save_pat # Reduce element size in region for i in range(len(region_center)): - distance_to_sphere = compute_distance_to_sphere(distance_to_sphere, - region_center[i], - min_distance=edge_length / 3, - max_distance=edge_length, - distance_scale=edge_length * 3 / 4 / (misr_max[i] * 2.)) + distance_to_sphere = compute_distance_to_sphere( + distance_to_sphere, + region_center[i], + min_distance=edge_length / 3, + max_distance=edge_length, + distance_scale=edge_length * 3 / 4 / (misr_max[i] * 2.0), + ) element_size = edge_length + np.zeros((surface.GetNumberOfPoints(), 1)) if len(region_center) != 0: - distance_to_spheres_array = get_point_data_array(distanceToSpheresArrayName, distance_to_sphere) + distance_to_spheres_array = get_point_data_array( + distanceToSpheresArrayName, distance_to_sphere + ) element_size = np.minimum(element_size, distance_to_spheres_array) vtk_array = create_vtk_array(element_size, "Size") @@ -417,8 +474,10 @@ def dist_sphere_diam(surface, centerlines, region_center, misr_max, save_path, f # Compute element size based on diameter upper = 20 lower = 6 - diameter_array = 2 * get_point_data_array("DistanceToCenterlines", distance_to_sphere) - element_size = 13. 
/ 35 * diameter_array ** 2 + lower + diameter_array = 2 * get_point_data_array( + "DistanceToCenterlines", distance_to_sphere + ) + element_size = 13.0 / 35 * diameter_array**2 + lower element_size[element_size > upper] = upper element_size[element_size < lower] = lower elements_vtk = create_vtk_array(element_size, "Num elements") @@ -427,14 +486,18 @@ def dist_sphere_diam(surface, centerlines, region_center, misr_max, save_path, f # Reduce element size in aneurysm for i in range(len(region_center)): - distance_to_sphere = compute_distance_to_sphere(distance_to_sphere, - region_center[i], - max_distance=100, - distance_scale=0.2 / (misr_max[i] * 2.)) + distance_to_sphere = compute_distance_to_sphere( + distance_to_sphere, + region_center[i], + max_distance=100, + distance_scale=0.2 / (misr_max[i] * 2.0), + ) if len(region_center) == 0: element_size *= factor else: - distance_to_spheres_array = get_point_data_array(distanceToSpheresArrayName, distance_to_sphere) + distance_to_spheres_array = get_point_data_array( + distanceToSpheresArrayName, distance_to_sphere + ) element_size = np.minimum(element_size, distance_to_spheres_array) * factor vtk_array = create_vtk_array(element_size, "Size") @@ -467,8 +530,15 @@ def mesh_alternative(surface): return vmtk_smooth_surface(surface, "laplace", iterations=500) -def compute_distance_to_sphere(surface, center_sphere, radius_sphere=0.0, distance_offset=0.0, distance_scale=0.01, - min_distance=0.2, max_distance=0.3): +def compute_distance_to_sphere( + surface, + center_sphere, + radius_sphere=0.0, + distance_offset=0.0, + distance_scale=0.01, + min_distance=0.2, + max_distance=0.3, +): """ Computes cell specific target edge length (distances) based on input criterion. @@ -504,7 +574,9 @@ def compute_distance_to_sphere(surface, center_sphere, radius_sphere=0.0, distan distanceToSphere = dist_array.GetComponent(i, 0) # Get distance, but factor in size of sphere - newDist = get_distance(center_sphere, surface.GetPoints().GetPoint(i)) - radius_sphere + newDist = ( + get_distance(center_sphere, surface.GetPoints().GetPoint(i)) - radius_sphere + ) # Set offset and scale distance newDist = distance_offset + newDist * distance_scale @@ -567,7 +639,9 @@ def generate_mesh(surface, add_boundary_layer): return mesh, remeshSurface -def find_boundaries(model_path, mean_inflow_rate, network, mesh, verbose_print, is_atrium): +def find_boundaries( + model_path, mean_inflow_rate, network, mesh, verbose_print, is_atrium +): """ Finds inlet and outlet boundary IDs after complete meshing, including mean flow ratio and area ratios between outlets or inlets (determined by type of model) @@ -586,14 +660,20 @@ def find_boundaries(model_path, mean_inflow_rate, network, mesh, verbose_print, boundaryReferenceSystems.Surface = wallMesh boundaryReferenceSystems.Execute() refSystem = boundaryReferenceSystems.ReferenceSystems - cellEntityIdsArray = get_vtk_array('CellEntityIds', 0, refSystem.GetNumberOfPoints()) + cellEntityIdsArray = get_vtk_array( + "CellEntityIds", 0, refSystem.GetNumberOfPoints() + ) refSystem.GetPointData().AddArray(cellEntityIdsArray) # Extract the surface mesh of the end caps - boundarySurface = vtk_compute_threshold(mesh, "CellEntityIds", lower=1.5, threshold_type="lower") + boundarySurface = vtk_compute_threshold( + mesh, "CellEntityIds", lower=1.5, threshold_type="lower" + ) pointCells = vtk.vtkIdList() surfaceCellEntityIdsArray = vtk.vtkIntArray() - surfaceCellEntityIdsArray.DeepCopy(boundarySurface.GetCellData().GetArray('CellEntityIds')) + 
surfaceCellEntityIdsArray.DeepCopy( + boundarySurface.GetCellData().GetArray("CellEntityIds") + ) # Find the corresponding couple (mesh outlet ID, network ID). ids = [] @@ -611,11 +691,16 @@ def find_boundaries(model_path, mean_inflow_rate, network, mesh, verbose_print, networkPoint = element.GetOutPointsx1()[0] if element.IsAnInlet(): networkPoint = element.GetInPointsx0()[0] - if vtk.vtkMath.Distance2BetweenPoints(meshPoint, networkPoint) < distancePoints: - distancePoints = vtk.vtkMath.Distance2BetweenPoints(meshPoint, networkPoint) + if ( + vtk.vtkMath.Distance2BetweenPoints(meshPoint, networkPoint) + < distancePoints + ): + distancePoints = vtk.vtkMath.Distance2BetweenPoints( + meshPoint, networkPoint + ) closest = element.GetId() if network.elements[closest].IsAnInlet(): - verbose_print('I am the inlet, Sup?') + verbose_print("I am the inlet, Sup?") verbose_print(network.elements[closest].GetInPointsx0()[0]) ids.insert(0, [cellEntityId, mean_inflow_rate]) else: @@ -623,8 +708,10 @@ def find_boundaries(model_path, mean_inflow_rate, network, mesh, verbose_print, ids.append([cellEntityId, gamma]) verbose_print(gamma) verbose_print(network.elements[closest].GetOutPointsx1()[0]) - verbose_print('CellEntityId: %d\n' % cellEntityId) - verbose_print('meshPoint: %f, %f, %f\n' % (meshPoint[0], meshPoint[1], meshPoint[2])) + verbose_print("CellEntityId: %d\n" % cellEntityId) + verbose_print( + "meshPoint: %f, %f, %f\n" % (meshPoint[0], meshPoint[1], meshPoint[2]) + ) verbose_print(ids) # Store information for the solver. @@ -635,23 +722,22 @@ def find_boundaries(model_path, mean_inflow_rate, network, mesh, verbose_print, outlet_ids.append(ids[k][0] - 1) area_ratios.append(ids[k][1]) - info = { - "mean_flow_rate": mean_inflow_rate, - "area_ratio": area_ratios - } + info = {"mean_flow_rate": mean_inflow_rate, "area_ratio": area_ratios} # Swap outlet with inlet if meshing atrium model if is_atrium: - info['inlet_ids'] = outlet_ids - info['outlet_id'] = inlet_id + info["inlet_ids"] = outlet_ids + info["outlet_id"] = inlet_id else: - info['inlet_id'] = inlet_id - info['outlet_ids'] = outlet_ids + info["inlet_id"] = inlet_id + info["outlet_ids"] = outlet_ids write_parameters(info, model_path) -def setup_model_network(centerlines, file_name_probe_points, region_center, verbose_print): +def setup_model_network( + centerlines, file_name_probe_points, region_center, verbose_print +): """ Sets up network used for network boundary condition model. @@ -668,11 +754,15 @@ def setup_model_network(centerlines, file_name_probe_points, region_center, verb # Set the network object used in the scripts for # boundary conditions and probes. network = ImportData.Network() - centerlinesBranches = ImportData.SetNetworkStructure(centerlines, network, verbose_print) + centerlinesBranches = ImportData.SetNetworkStructure( + centerlines, network, verbose_print + ) if not path.isfile(file_name_probe_points): # Get the list of coordinates for the probe points along the network centerline. 
- listProbePoints = ImportData.GetListProbePoints(centerlinesBranches, network, verbose_print) + listProbePoints = ImportData.GetListProbePoints( + centerlinesBranches, network, verbose_print + ) listProbePoints += region_center print("--- Saving probes points in: %s\n" % file_name_probe_points) @@ -716,7 +806,14 @@ def compute_flow_rate(is_atrium, inlet, parameters): return mean_inflow_rate -def write_mesh(compress_mesh, file_name_surface_name, file_name_vtu_mesh, file_name_xml_mesh, mesh, remeshed_surface): +def write_mesh( + compress_mesh, + file_name_surface_name, + file_name_vtu_mesh, + file_name_xml_mesh, + mesh, + remeshed_surface, +): """ Writes the mesh to DOLFIN format, and compresses to .gz format @@ -734,7 +831,7 @@ def write_mesh(compress_mesh, file_name_surface_name, file_name_vtu_mesh, file_n # Write mesh to FEniCS to format if compress_mesh: - file_name_xml_mesh += '.gz' + file_name_xml_mesh += ".gz" writer = vtkvmtk.vtkvmtkDolfinWriter() writer.SetInputData(mesh) writer.SetFileName(file_name_xml_mesh) @@ -742,22 +839,27 @@ def write_mesh(compress_mesh, file_name_surface_name, file_name_vtu_mesh, file_n writer.SetBoundaryDataIdOffset(-1) writer.SetStoreCellMarkers(0) - print('--- Writing Dolfin file') + print("--- Writing Dolfin file") writer.Write() # Compress mesh locally to bypass VMTK issue if compress_mesh: - file = open(file_name_xml_mesh, 'rb') + file = open(file_name_xml_mesh, "rb") xml = file.read() file.close() - gzfile = gzip.open(file_name_xml_mesh, 'wb') + gzfile = gzip.open(file_name_xml_mesh, "wb") gzfile.write(xml) gzfile.close() -def add_flow_extension(surface, centerlines, include_outlet, extension_length=2.0, - extension_mode="boundarynormal"): +def add_flow_extension( + surface, + centerlines, + include_outlet, + extension_length=2.0, + extension_mode="boundarynormal", +): """ Adds flow extensions to either all inlets or all outlets with specified extension length. @@ -863,14 +965,14 @@ def get_furtest_surface_point(inlet, surface): def check_if_closed_surface(surface): """ - Checks if the given surface is capped (i.e., has no feature edges). + Checks if the given surface is capped (i.e., has no feature edges). - Args: - surface (vtkPolyData): The surface to check for capping. + Args: + surface (vtkPolyData): The surface to check for capping. - Returns: - bool: True if the surface is capped, False otherwise. - """ + Returns: + bool: True if the surface is capped, False otherwise. 
+ """ cells = vtk_extract_feature_edges(surface) return cells.GetNumberOfCells() == 0 diff --git a/src/vampy/automatedPreprocessing/simulate.py b/src/vampy/automatedPreprocessing/simulate.py index e4040eae..377d0fe7 100644 --- a/src/vampy/automatedPreprocessing/simulate.py +++ b/src/vampy/automatedPreprocessing/simulate.py @@ -34,21 +34,21 @@ def run_simulation(config_path, local_dir, case_name): config = json.load(open(config_path)) try: - hostname = config['hostname'] - username = config['username'] - password = config['password'] - remote_vampy_folder = config['remote_vampy_folder'] - local_mesh_folder = config['local_mesh_folder'] - job_script = config['job_script'] + hostname = config["hostname"] + username = config["username"] + password = config["password"] + remote_vampy_folder = config["remote_vampy_folder"] + local_mesh_folder = config["local_mesh_folder"] + job_script = config["job_script"] except KeyError: - raise ValueError('Invalid configuration file') + raise ValueError("Invalid configuration file") # Use case folder if local folder is blank - local_dir = local_dir if local_mesh_folder == '' else local_mesh_folder + local_dir = local_dir if local_mesh_folder == "" else local_mesh_folder # Get path to home folder on remote client.connect(hostname, username=username, password=password) - stdin, stdout, stderr = client.exec_command('echo $HOME') + stdin, stdout, stderr = client.exec_command("echo $HOME") home = str(stdout.read().strip().decode("utf-8")) sftp = client.open_sftp() @@ -66,14 +66,20 @@ def run_simulation(config_path, local_dir, case_name): if not exists(sftp, case_name + ".xml.gz"): sftp.put(path.join(local_dir, case_name + ".xml.gz"), case_name + ".xml.gz") if not exists(sftp, case_name + "_probe_point"): - sftp.put(path.join(local_dir, case_name + "_probe_point"), case_name + "_probe_point") + sftp.put( + path.join(local_dir, case_name + "_probe_point"), case_name + "_probe_point" + ) if not exists(sftp, case_name + "_info.json"): - sftp.put(path.join(local_dir, case_name + "_info.json"), case_name + "_info.json") + sftp.put( + path.join(local_dir, case_name + "_info.json"), case_name + "_info.json" + ) sftp.close() # Run script - stdin, stdout, stderr = client.exec_command('sbatch {}'.format(path.join(remote_vampy_folder, job_script))) + stdin, stdout, stderr = client.exec_command( + "sbatch {}".format(path.join(remote_vampy_folder, job_script)) + ) for msg in stdout: print(msg) diff --git a/src/vampy/automatedPreprocessing/vmtk_pointselector.py b/src/vampy/automatedPreprocessing/vmtk_pointselector.py index 512f5148..d216b935 100644 --- a/src/vampy/automatedPreprocessing/vmtk_pointselector.py +++ b/src/vampy/automatedPreprocessing/vmtk_pointselector.py @@ -16,7 +16,7 @@ def __init__(self, guiText=""): self.text.SetDisplayPosition(10, 2) -class vmtkSeedSelector(): +class vmtkSeedSelector: def __init__(self): self._Surface = None self._SeedIds = None @@ -51,18 +51,24 @@ def UndoCallback(self, obj): def PickCallback(self, obj): picker = vtk.vtkCellPicker() - picker.SetTolerance(1E-4 * self._Surface.GetLength()) + picker.SetTolerance(1e-4 * self._Surface.GetLength()) eventPosition = self.vmtkRenderer.RenderWindowInteractor.GetEventPosition() - result = picker.Pick(float(eventPosition[0]), float(eventPosition[1]), 0.0, self.vmtkRenderer.Renderer) + result = picker.Pick( + float(eventPosition[0]), + float(eventPosition[1]), + 0.0, + self.vmtkRenderer.Renderer, + ) if result == 0: return pickPosition = picker.GetPickPosition() pickedCellPointIds = 
self._Surface.GetCell(picker.GetCellId()).GetPointIds() - minDistance = 1E10 + minDistance = 1e10 pickedSeedId = -1 for i in range(pickedCellPointIds.GetNumberOfIds()): - distance = vtk.vtkMath.Distance2BetweenPoints(pickPosition, - self._Surface.GetPoint(pickedCellPointIds.GetId(i))) + distance = vtk.vtkMath.Distance2BetweenPoints( + pickPosition, self._Surface.GetPoint(pickedCellPointIds.GetId(i)) + ) if distance < minDistance: minDistance = distance pickedSeedId = pickedCellPointIds.GetId(i) @@ -82,7 +88,7 @@ def InitializeSeeds(self): def Execute(self): if self._Surface is None: - self.PrintError('vmtkPickPointSeedSelector Error: Surface not set.') + self.PrintError("vmtkPickPointSeedSelector Error: Surface not set.") return self._TargetSeedIds.Initialize() @@ -116,8 +122,8 @@ def Execute(self): self.SeedActor.PickableOff() self.vmtkRenderer.Renderer.AddActor(self.SeedActor) - self.vmtkRenderer.AddKeyBinding('u', 'Undo.', self.UndoCallback) - self.vmtkRenderer.AddKeyBinding('space', 'Add points.', self.PickCallback) + self.vmtkRenderer.AddKeyBinding("u", "Undo.", self.UndoCallback) + self.vmtkRenderer.AddKeyBinding("space", "Add points.", self.PickCallback) surfaceMapper = vtk.vtkPolyDataMapper() if version < 6: surfaceMapper.SetInput(self._Surface) @@ -130,7 +136,7 @@ def Execute(self): self.vmtkRenderer.Renderer.AddActor(surfaceActor) - text = 'Please position the mouse and press space to add the top of the region of interest, \'u\' to undo\n' + text = "Please position the mouse and press space to add the top of the region of interest, 'u' to undo\n" guiText = VtkText(text) self.vmtkRenderer.Renderer.AddActor(guiText.text) diff --git a/src/vampy/simulation/Artery.py b/src/vampy/simulation/Artery.py index bb028a53..256c37f6 100755 --- a/src/vampy/simulation/Artery.py +++ b/src/vampy/simulation/Artery.py @@ -2,20 +2,29 @@ import pickle from os import makedirs from pprint import pprint -from dolfin import set_log_level import numpy as np +from dolfin import set_log_level from oasis.problems.NSfracStep import * from vampy.simulation.Probe import Probes # type: ignore -from vampy.simulation.Womersley import make_womersley_bcs, compute_boundary_geometry_acrn -from vampy.simulation.simulation_common import get_file_paths, store_u_mean, print_mesh_information +from vampy.simulation.simulation_common import ( + get_file_paths, + print_mesh_information, + store_u_mean, +) +from vampy.simulation.Womersley import ( + compute_boundary_geometry_acrn, + make_womersley_bcs, +) # FEniCS specific command to control the desired level of logging, here set to critical errors set_log_level(50) -def problem_parameters(commandline_kwargs, NS_parameters, NS_expressions, **NS_namespace): +def problem_parameters( + commandline_kwargs, NS_parameters, NS_expressions, **NS_namespace +): """ Problem file for running CFD simulation in arterial models consisting of one inlet, and two or more outlets. 
A Womersley velocity profile is applied at the inlet, and a flow split pressure condition is applied at the outlets, @@ -32,9 +41,9 @@ def problem_parameters(commandline_kwargs, NS_parameters, NS_expressions, **NS_n """ if "restart_folder" in commandline_kwargs.keys(): restart_folder = commandline_kwargs["restart_folder"] - f = open(path.join(restart_folder, 'params.dat'), 'rb') + f = open(path.join(restart_folder, "params.dat"), "rb") NS_parameters.update(pickle.load(f)) - NS_parameters['restart_folder'] = restart_folder + NS_parameters["restart_folder"] = restart_folder else: # Parameters are in mm and ms cardiac_cycle = float(commandline_kwargs.get("cardiac_cycle", 951)) @@ -64,7 +73,7 @@ def problem_parameters(commandline_kwargs, NS_parameters, NS_expressions, **NS_n velocity_degree=1, # Polynomial order of finite element for velocity. Normally linear (1) or quadratic (2) pressure_degree=1, # Polynomial order of finite element for pressure. Normally linear (1) use_krylov_solvers=True, - krylov_solvers=dict(monitor_convergence=False) + krylov_solvers=dict(monitor_convergence=False), ) mesh_file = NS_parameters["mesh_path"].split("/")[-1] @@ -85,8 +94,21 @@ def mesh(mesh_path, **NS_namespace): return mesh -def create_bcs(t, NS_expressions, V, Q, area_ratio, area_inlet, mesh, mesh_path, nu, id_in, id_out, pressure_degree, - **NS_namespace): +def create_bcs( + t, + NS_expressions, + V, + Q, + area_ratio, + area_inlet, + mesh, + mesh_path, + nu, + id_in, + id_out, + pressure_degree, + **NS_namespace, +): # Mesh function boundary = MeshFunction("size_t", mesh, mesh.geometry().dim() - 1, mesh.domains()) @@ -95,24 +117,39 @@ def create_bcs(t, NS_expressions, V, Q, area_ratio, area_inlet, mesh, mesh_path, with open(info) as f: info = json.load(f) - id_in[:] = info['inlet_id'] - id_out[:] = info['outlet_ids'] + id_in[:] = info["inlet_id"] + id_out[:] = info["outlet_ids"] id_wall = min(id_in + id_out) - 1 - Q_mean = info['mean_flow_rate'] + Q_mean = info["mean_flow_rate"] - area_ratio[:] = info['area_ratio'] - area_inlet.append(info['inlet_area']) + area_ratio[:] = info["area_ratio"] + area_inlet.append(info["inlet_area"]) # Load normalized time and flow rate values - t_values, Q_ = np.loadtxt(path.join(path.dirname(path.abspath(__file__)), "ICA_values")).T - Q_values = Q_mean * Q_ # Specific flow rate = Normalized flow wave form * Prescribed flow rate + t_values, Q_ = np.loadtxt( + path.join(path.dirname(path.abspath(__file__)), "ICA_values") + ).T + Q_values = ( + Q_mean * Q_ + ) # Specific flow rate = Normalized flow wave form * Prescribed flow rate t_values *= 1000 # Scale time in normalised flow wave form to [ms] - tmp_area, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn(mesh, id_in[0], boundary) + tmp_area, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn( + mesh, id_in[0], boundary + ) # Create Womersley boundary condition at inlet - inlet = make_womersley_bcs(t_values, Q_values, mesh, nu, tmp_area, tmp_center, tmp_radius, tmp_normal, - V.ufl_element()) + inlet = make_womersley_bcs( + t_values, + Q_values, + mesh, + nu, + tmp_area, + tmp_center, + tmp_radius, + tmp_normal, + V.ufl_element(), + ) NS_expressions["inlet"] = inlet # Initialize inlet expressions with initial time @@ -136,7 +173,13 @@ def create_bcs(t, NS_expressions, V, Q, area_ratio, area_inlet, mesh, mesh_path, bc_p.append(bc) NS_expressions[ID] = outflow if MPI.rank(MPI.comm_world) == 0: - print(("Boundary ID={:d}, pressure: {:0.6f}, area fraction: {:0.4f}".format(ID, p_initial, 
area_ratio[i]))) + print( + ( + "Boundary ID={:d}, pressure: {:0.6f}, area fraction: {:0.4f}".format( + ID, p_initial, area_ratio[i] + ) + ) + ) # No slip condition at wall wall = Constant(0.0) @@ -146,15 +189,28 @@ def create_bcs(t, NS_expressions, V, Q, area_ratio, area_inlet, mesh, mesh_path, bc_inlet = [DirichletBC(V, inlet[i], boundary, id_in[0]) for i in range(3)] # Return boundary conditions in dictionary - return dict(u0=[bc_inlet[0], bc_wall], - u1=[bc_inlet[1], bc_wall], - u2=[bc_inlet[2], bc_wall], - p=bc_p) + return dict( + u0=[bc_inlet[0], bc_wall], + u1=[bc_inlet[1], bc_wall], + u2=[bc_inlet[2], bc_wall], + p=bc_p, + ) # Oasis hook called before simulation start -def pre_solve_hook(mesh, V, Q, newfolder, mesh_path, restart_folder, velocity_degree, cardiac_cycle, - save_solution_after_cycle, dt, **NS_namespace): +def pre_solve_hook( + mesh, + V, + Q, + newfolder, + mesh_path, + restart_folder, + velocity_degree, + cardiac_cycle, + save_solution_after_cycle, + dt, + **NS_namespace, +): # Mesh function boundary = MeshFunction("size_t", mesh, mesh.geometry().dim() - 1, mesh.domains()) @@ -162,7 +218,9 @@ def pre_solve_hook(mesh, V, Q, newfolder, mesh_path, restart_folder, velocity_de n = FacetNormal(mesh) eval_dict = {} rel_path = mesh_path.split(".xml")[0] + "_probe_point" - probe_points = np.load(rel_path, encoding='latin1', fix_imports=True, allow_pickle=True) + probe_points = np.load( + rel_path, encoding="latin1", fix_imports=True, allow_pickle=True + ) # Store points file in checkpoint if MPI.rank(MPI.comm_world) == 0: @@ -195,22 +253,55 @@ def pre_solve_hook(mesh, V, Q, newfolder, mesh_path, restart_folder, velocity_de # Time step when solutions for post-processing should start being saved save_solution_at_tstep = int(cardiac_cycle * save_solution_after_cycle / dt) - return dict(eval_dict=eval_dict, boundary=boundary, n=n, U=U, u_mean=u_mean, u_mean0=u_mean0, u_mean1=u_mean1, - u_mean2=u_mean2, save_solution_at_tstep=save_solution_at_tstep) + return dict( + eval_dict=eval_dict, + boundary=boundary, + n=n, + U=U, + u_mean=u_mean, + u_mean0=u_mean0, + u_mean1=u_mean1, + u_mean2=u_mean2, + save_solution_at_tstep=save_solution_at_tstep, + ) # Oasis hook called after each time step -def temporal_hook(u_, p_, mesh, tstep, dump_probe_frequency, eval_dict, newfolder, id_in, id_out, boundary, n, - save_solution_frequency, NS_parameters, NS_expressions, area_ratio, t, save_solution_at_tstep, - U, area_inlet, nu, u_mean0, u_mean1, u_mean2, **NS_namespace): +def temporal_hook( + u_, + p_, + mesh, + tstep, + dump_probe_frequency, + eval_dict, + newfolder, + id_in, + id_out, + boundary, + n, + save_solution_frequency, + NS_parameters, + NS_expressions, + area_ratio, + t, + save_solution_at_tstep, + U, + area_inlet, + nu, + u_mean0, + u_mean1, + u_mean2, + **NS_namespace, +): # Update boundary condition to current time for uc in NS_expressions["inlet"]: uc.set_t(t) # Compute flux and update pressure condition if tstep > 2: - Q_ideals, Q_in, Q_outs = update_pressure_condition(NS_expressions, area_ratio, boundary, id_in, id_out, mesh, n, - tstep, u_) + Q_ideals, Q_in, Q_outs = update_pressure_condition( + NS_expressions, area_ratio, boundary, id_in, id_out, mesh, n, tstep, u_ + ) # Compute flow rates and updated pressure at outlets, and mean velocity and Reynolds number at inlet if MPI.rank(MPI.comm_world) == 0 and tstep % 10 == 0: @@ -218,12 +309,16 @@ def temporal_hook(u_, p_, mesh, tstep, dump_probe_frequency, eval_dict, newfolde diam_inlet = np.sqrt(4 * area_inlet[0] / np.pi) Re = 
U_mean * diam_inlet / nu print("=" * 10, "Time step " + str(tstep), "=" * 10) - print("Sum of Q_out = {:0.4f}, Q_in = {:0.4f}, mean velocity (inlet): {:0.4f}, Reynolds number (inlet): {:0.4f}" - .format(sum(Q_outs), Q_in, U_mean, Re)) + print( + f"Sum of Q_out = {sum(Q_outs):0.4f}, Q_in = {Q_in:0.4f}," + + f" mean velocity (inlet): {U_mean:0.4f}," + + f" Reynolds number (inlet): {Re:0.4f}" + ) for i, out_id in enumerate(id_out): - print(("For outlet with boundary ID={:d}: target flow rate: {:0.4f} mL/s, " + - "computed flow rate: {:0.4f} mL/s, pressure updated to: {:0.4f}") - .format(out_id, Q_ideals[i], Q_outs[i], NS_expressions[out_id].p)) + print( + f"For outlet with boundary ID={out_id:d}: target flow rate: {Q_ideals[i]:0.4f} mL/s, " + + f"computed flow rate: {Q_outs[i]:0.4f} mL/s, pressure updated to: {NS_expressions[out_id].p:0.4f}" + ) print() # Sample velocity and pressure in points/probes @@ -268,9 +363,9 @@ def temporal_hook(u_, p_, mesh, tstep, dump_probe_frequency, eval_dict, newfolde assign(U.sub(2), u_[2]) # Get save paths - files = NS_parameters['files'] - p_path = files['p'] - u_path = files['u'] + files = NS_parameters["files"] + p_path = files["p"] + u_path = files["u"] file_mode = "w" if not path.exists(p_path) else "a" # Save pressure @@ -290,10 +385,28 @@ def temporal_hook(u_, p_, mesh, tstep, dump_probe_frequency, eval_dict, newfolde # Oasis hook called after the simulation has finished -def theend_hook(u_mean, u_mean0, u_mean1, u_mean2, T, dt, save_solution_at_tstep, save_solution_frequency, - **NS_namespace): - store_u_mean(T, dt, save_solution_at_tstep, save_solution_frequency, u_mean, u_mean0, u_mean1, u_mean2, - NS_parameters) +def theend_hook( + u_mean, + u_mean0, + u_mean1, + u_mean2, + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + **NS_namespace, +): + store_u_mean( + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + u_mean, + u_mean0, + u_mean1, + u_mean2, + NS_parameters, + ) def beta(err, p): @@ -312,15 +425,17 @@ def beta(err, p): if err >= 0.1: return 0.5 else: - return 1 - 5 * err ** 2 + return 1 - 5 * err**2 else: if err >= 0.1: return 1.5 else: - return 1 + 5 * err ** 2 + return 1 + 5 * err**2 -def update_pressure_condition(NS_expressions, area_ratio, boundary, id_in, id_out, mesh, n, tstep, u_): +def update_pressure_condition( + NS_expressions, area_ratio, boundary, id_in, id_out, mesh, n, tstep, u_ +): """ Use a dual-pressure boundary condition as pressure condition at outlet. 
""" @@ -362,6 +477,6 @@ def update_pressure_condition(NS_expressions, area_ratio, boundary, id_in, id_ou if p_old > 2 and delta < 0: NS_expressions[out_id].p = p_old else: - NS_expressions[out_id].p = p_old * beta(R_err, p_old) * M_err ** E + NS_expressions[out_id].p = p_old * beta(R_err, p_old) * M_err**E return Q_ideals, Q_in, Q_outs diff --git a/src/vampy/simulation/Atrium.py b/src/vampy/simulation/Atrium.py index c350389e..e8d96008 100644 --- a/src/vampy/simulation/Atrium.py +++ b/src/vampy/simulation/Atrium.py @@ -2,21 +2,35 @@ import pickle from os import makedirs from pprint import pprint -from dolfin import set_log_level, MPI import numpy as np +from dolfin import MPI, set_log_level from oasis.problems.NSfracStep import * from vampy.simulation.Probe import Probes # type: ignore -from vampy.simulation.Womersley import make_womersley_bcs, compute_boundary_geometry_acrn -from vampy.simulation.simulation_common import store_u_mean, get_file_paths, print_mesh_information +from vampy.simulation.simulation_common import ( + get_file_paths, + print_mesh_information, + store_u_mean, +) +from vampy.simulation.Womersley import ( + compute_boundary_geometry_acrn, + make_womersley_bcs, +) # FEniCS specific command to control the desired level of logging, here set to critical errors set_log_level(50) comm = MPI.comm_world -def problem_parameters(commandline_kwargs, NS_parameters, scalar_components, Schmidt, NS_expressions, **NS_namespace): +def problem_parameters( + commandline_kwargs, + NS_parameters, + scalar_components, + Schmidt, + NS_expressions, + **NS_namespace +): """ Problem file for running CFD simulation in left atrial models consisting of arbitrary number of pulmonary veins (PV) (normally 3 to 7), and one outlet being the mitral valve. A Womersley velocity profile is applied at the inlets, @@ -32,9 +46,9 @@ def problem_parameters(commandline_kwargs, NS_parameters, scalar_components, Sch """ if "restart_folder" in commandline_kwargs.keys(): restart_folder = commandline_kwargs["restart_folder"] - f = open(path.join(restart_folder, 'params.dat'), 'rb') + f = open(path.join(restart_folder, "params.dat"), "rb") NS_parameters.update(pickle.load(f)) - NS_parameters['restart_folder'] = restart_folder + NS_parameters["restart_folder"] = restart_folder else: # Override some problem specific parameters # Parameters are in mm and ms @@ -64,7 +78,7 @@ def problem_parameters(commandline_kwargs, NS_parameters, scalar_components, Sch velocity_degree=1, pressure_degree=1, use_krylov_solvers=True, - krylov_solvers=dict(monitor_convergence=False) + krylov_solvers=dict(monitor_convergence=False), ) mesh_file = NS_parameters["mesh_path"].split("/")[-1] @@ -85,7 +99,9 @@ def mesh(mesh_path, **NS_namespace): return atrium_mesh -def create_bcs(NS_expressions, mesh, mesh_path, nu, t, V, Q, id_in, id_out, **NS_namespace): +def create_bcs( + NS_expressions, mesh, mesh_path, nu, t, V, Q, id_in, id_out, **NS_namespace +): # Variables needed during the simulation boundary = MeshFunction("size_t", mesh, mesh.geometry().dim() - 1, mesh.domains()) @@ -94,11 +110,11 @@ def create_bcs(NS_expressions, mesh, mesh_path, nu, t, V, Q, id_in, id_out, **NS with open(info_path) as f: info = json.load(f) - id_in[:] = info['inlet_ids'] - id_out[:] = info['outlet_id'] + id_in[:] = info["inlet_ids"] + id_out[:] = info["outlet_id"] id_wall = min(id_in + id_out) - 1 - Q_mean = info['mean_flow_rate'] + Q_mean = info["mean_flow_rate"] # Find corresponding areas ds = Measure("ds", domain=mesh, subdomain_data=boundary) @@ -107,17 
+123,32 @@ def create_bcs(NS_expressions, mesh, mesh_path, nu, t, V, Q, id_in, id_out, **NS area_total += assemble(Constant(1.0) * ds(ID)) # Load normalized time and flow rate values - t_values, Q_ = np.loadtxt(path.join(path.dirname(path.abspath(__file__)), "PV_values")).T - Q_values = Q_mean * Q_ # Specific flow rate = Normalized flow wave form * Prescribed flow rate + t_values, Q_ = np.loadtxt( + path.join(path.dirname(path.abspath(__file__)), "PV_values") + ).T + Q_values = ( + Q_mean * Q_ + ) # Specific flow rate = Normalized flow wave form * Prescribed flow rate t_values *= 1000 # Scale time in normalised flow wave form to [ms] for ID in id_in: - tmp_area, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn(mesh, ID, boundary) + tmp_area, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn( + mesh, ID, boundary + ) Q_scaled = Q_values * tmp_area / area_total # Create Womersley boundary condition at inlet - inlet = make_womersley_bcs(t_values, Q_scaled, mesh, nu, tmp_area, tmp_center, tmp_radius, tmp_normal, - V.ufl_element()) + inlet = make_womersley_bcs( + t_values, + Q_scaled, + mesh, + nu, + tmp_area, + tmp_center, + tmp_radius, + tmp_normal, + V.ufl_element(), + ) NS_expressions["inlet_{}".format(ID)] = inlet # Initial condition @@ -128,7 +159,10 @@ def create_bcs(NS_expressions, mesh, mesh_path, nu, t, V, Q, id_in, id_out, **NS # Create inlet boundary conditions bc_inlets = {} for ID in id_in: - bc_inlet = [DirichletBC(V, NS_expressions["inlet_{}".format(ID)][i], boundary, ID) for i in range(3)] + bc_inlet = [ + DirichletBC(V, NS_expressions["inlet_{}".format(ID)][i], boundary, ID) + for i in range(3) + ] bc_inlets[ID] = bc_inlet # Set outlet boundary conditions, assuming one outlet (Mitral Valve) @@ -145,21 +179,34 @@ def create_bcs(NS_expressions, mesh, mesh_path, nu, t, V, Q, id_in, id_out, **NS return dict(u0=bc_u0, u1=bc_u1, u2=bc_u2, p=bc_p) -def pre_solve_hook(V, Q, cardiac_cycle, dt, save_solution_after_cycle, mesh_path, mesh, newfolder, velocity_degree, - restart_folder, **NS_namespace): +def pre_solve_hook( + V, + Q, + cardiac_cycle, + dt, + save_solution_after_cycle, + mesh_path, + mesh, + newfolder, + velocity_degree, + restart_folder, + **NS_namespace +): # Extract diameter at mitral valve info_path = mesh_path.split(".xml")[0] + "_info.json" with open(info_path) as f: info = json.load(f) - outlet_area = info['outlet_area'] + outlet_area = info["outlet_area"] D_mitral = np.sqrt(4 * outlet_area / np.pi) # Create point for evaluation n = FacetNormal(mesh) eval_dict = {} rel_path = mesh_path.split(".xml")[0] + "_probe_point" - probe_points = np.load(rel_path, encoding='latin1', fix_imports=True, allow_pickle=True) + probe_points = np.load( + rel_path, encoding="latin1", fix_imports=True, allow_pickle=True + ) # Store points file in checkpoint if MPI.rank(MPI.comm_world) == 0: @@ -192,13 +239,41 @@ def pre_solve_hook(V, Q, cardiac_cycle, dt, save_solution_after_cycle, mesh_path # Time step when solutions for post-processing should start being saved save_solution_at_tstep = int(cardiac_cycle * save_solution_after_cycle / dt) - return dict(D_mitral=D_mitral, n=n, eval_dict=eval_dict, U=U, u_mean=u_mean, u_mean0=u_mean0, u_mean1=u_mean1, - u_mean2=u_mean2, save_solution_at_tstep=save_solution_at_tstep) - - -def temporal_hook(mesh, dt, t, save_solution_frequency, u_, NS_expressions, id_in, tstep, newfolder, - eval_dict, dump_probe_frequency, p_, save_solution_at_tstep, nu, D_mitral, U, u_mean0, u_mean1, - u_mean2, **NS_namespace): + return 
dict( + D_mitral=D_mitral, + n=n, + eval_dict=eval_dict, + U=U, + u_mean=u_mean, + u_mean0=u_mean0, + u_mean1=u_mean1, + u_mean2=u_mean2, + save_solution_at_tstep=save_solution_at_tstep, + ) + + +def temporal_hook( + mesh, + dt, + t, + save_solution_frequency, + u_, + NS_expressions, + id_in, + tstep, + newfolder, + eval_dict, + dump_probe_frequency, + p_, + save_solution_at_tstep, + nu, + D_mitral, + U, + u_mean0, + u_mean1, + u_mean2, + **NS_namespace +): # Update inlet condition for ID in id_in: for i in [0, 1, 2]: @@ -227,9 +302,13 @@ def temporal_hook(mesh, dt, t, save_solution_frequency, u_, NS_expressions, id_i CFL_mean = u_mean * dt / h CFL_max = u_max * dt / h - info = 'Time = {0:2.4e}, timestep = {1:6d}, max Reynolds number={2:2.3f}, mean Reynolds number={3:2.3f}' + \ - ', max velocity={4:2.3f}, mean velocity={5:2.3f}, max CFL={6:2.3f}, mean CFL={7:2.3f}' - info_green(info.format(t, tstep, Re_max, Re_mean, u_max, u_mean, CFL_max, CFL_mean)) + info = ( + "Time = {0:2.4e}, timestep = {1:6d}, max Reynolds number={2:2.3f}, mean Reynolds number={3:2.3f}" + + ", max velocity={4:2.3f}, mean velocity={5:2.3f}, max CFL={6:2.3f}, mean CFL={7:2.3f}" + ) + info_green( + info.format(t, tstep, Re_max, Re_mean, u_max, u_mean, CFL_max, CFL_mean) + ) # Sample velocity and pressure in points/probes eval_dict["centerline_u_x_probes"](u_[0]) @@ -273,9 +352,9 @@ def temporal_hook(mesh, dt, t, save_solution_frequency, u_, NS_expressions, id_i assign(U.sub(2), u_[2]) # Get save paths - files = NS_parameters['files'] - p_path = files['p'] - u_path = files['u'] + files = NS_parameters["files"] + p_path = files["p"] + u_path = files["u"] file_mode = "w" if not path.exists(p_path) else "a" # Save pressure @@ -295,7 +374,25 @@ def temporal_hook(mesh, dt, t, save_solution_frequency, u_, NS_expressions, id_i # Oasis hook called after the simulation has finished -def theend_hook(u_mean, u_mean0, u_mean1, u_mean2, T, dt, save_solution_at_tstep, save_solution_frequency, - **NS_namespace): - store_u_mean(T, dt, save_solution_at_tstep, save_solution_frequency, u_mean, u_mean0, u_mean1, u_mean2, - NS_parameters) +def theend_hook( + u_mean, + u_mean0, + u_mean1, + u_mean2, + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + **NS_namespace +): + store_u_mean( + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + u_mean, + u_mean0, + u_mean1, + u_mean2, + NS_parameters, + ) diff --git a/src/vampy/simulation/Probe.py b/src/vampy/simulation/Probe.py index f34b7cb3..b36d38d7 100644 --- a/src/vampy/simulation/Probe.py +++ b/src/vampy/simulation/Probe.py @@ -10,12 +10,12 @@ import cppimport from mpi4py.MPI import COMM_WORLD as comm -from numpy import zeros, squeeze, save +from numpy import save, squeeze, zeros ProbeObject = object try: - probe11 = cppimport.imp('probe.probe11') + probe11 = cppimport.imp("probe.probe11") ProbeObject = probe11.Probes except ImportError: print("Failed to import probe.probe11") diff --git a/src/vampy/simulation/Womersley.py b/src/vampy/simulation/Womersley.py index 8df79183..e02593d5 100755 --- a/src/vampy/simulation/Womersley.py +++ b/src/vampy/simulation/Womersley.py @@ -20,9 +20,18 @@ import math import numpy as np -from dolfin import UserExpression, SubsetIterator, MPI, assemble, Constant, sqrt, FacetNormal, SpatialCoordinate, \ - Measure -from scipy.integrate import simps +from dolfin import ( + MPI, + Constant, + FacetNormal, + Measure, + SpatialCoordinate, + SubsetIterator, + UserExpression, + assemble, + sqrt, +) +from scipy.integrate import simpson as simps 
from scipy.interpolate import UnivariateSpline from scipy.special import jn @@ -30,9 +39,9 @@ def x_to_r2(x, c, n): """Compute r**2 from a coordinate x, center point c, and normal vector n. - r is defined as the distance from c to x', where x' is - the projection of x onto the plane defined by c and n. - """ + r is defined as the distance from c to x', where x' is + the projection of x onto the plane defined by c and n. + """ # Steps: # rv = x - c # rvn = rv . n @@ -104,12 +113,14 @@ def compute_area(mesh, ind, facet_domains): # Compute area of boundary tesselation by integrating 1.0 over all facets A = assemble(Constant(1.0, name="one") * dsi) - assert A > 0.0, "Expecting positive area, probably mismatch between mesh and markers!" + assert ( + A > 0.0 + ), "Expecting positive area, probably mismatch between mesh and markers!" return A def fourier_coefficients(x, y, T, N): - '''From x-array and y-spline and period T, calculate N complex Fourier coefficients.''' + """From x-array and y-spline and period T, calculate N complex Fourier coefficients.""" omega = 2 * np.pi / T ck = [] ck.append(1 / T * simps(y(x), x)) @@ -133,8 +144,18 @@ def fourier_coefficients(x, y, T, N): class WomersleyComponent(UserExpression): # Subclassing the expression class restricts the number of arguments, args # is therefore a dict of arguments. - def __init__(self, radius, center, normal, normal_component, period, nu, element, Q=None, - V=None): + def __init__( + self, + radius, + center, + normal, + normal_component, + period, + nu, + element, + Q=None, + V=None, + ): # Spatial args self.radius = radius self.center = center @@ -151,7 +172,9 @@ def __init__(self, radius, center, normal, normal_component, period, nu, element self.Vn = V self.N = len(self.Vn) else: - raise ValueError("Invalid transient data type, missing argument 'Q' or 'V'.") + raise ValueError( + "Invalid transient data type, missing argument 'Q' or 'V'." 
+ ) # Physical args self.nu = nu @@ -167,7 +190,7 @@ def __init__(self, radius, center, normal, normal_component, period, nu, element super().__init__(element=element) def _precompute_bessel_functions(self): - '''Calculate the Bessel functions of the Womersley profile''' + """Calculate the Bessel functions of the Womersley profile""" self.omega = 2 * np.pi / self.period self.ns = np.arange(1, self.N) @@ -179,28 +202,33 @@ def _precompute_bessel_functions(self): # Compute vectorized for 1...N-1 (keeping element 0 in arrays to make indexing work out later) alpha[1:] = self.radius * np.sqrt(self.ns * (self.omega / self.nu)) - self.beta[1:] = alpha[1:] * np.sqrt(1j ** 3) + self.beta[1:] = alpha[1:] * np.sqrt(1j**3) self.jn0_betas[1:] = jn(0, self.beta[1:]) self.jn1_betas[1:] = jn(1, self.beta[1:]) def _precompute_r_dependent_coeffs(self, y): - pir2 = np.pi * self.radius ** 2 + pir2 = np.pi * self.radius**2 # Compute intermediate terms for womersley function r_dependent_coeffs = np.zeros(self.N, dtype=np.complex_) - if hasattr(self, 'Vn'): + if hasattr(self, "Vn"): # r_dependent_coeffs[0] = (self.Vn[0]/2.0) * (1 - y**2) - r_dependent_coeffs[0] = self.Vn[0] * (1 - y ** 2) + r_dependent_coeffs[0] = self.Vn[0] * (1 - y**2) for n in self.ns: jn0b = self.jn0_betas[n] - r_dependent_coeffs[n] = self.Vn[n] * (jn0b - jn(0, self.beta[n] * y)) / (jn0b - 1.0) - elif hasattr(self, 'Qn'): - r_dependent_coeffs[0] = (2 * self.Qn[0] / pir2) * (1 - y ** 2) + r_dependent_coeffs[n] = ( + self.Vn[n] * (jn0b - jn(0, self.beta[n] * y)) / (jn0b - 1.0) + ) + elif hasattr(self, "Qn"): + r_dependent_coeffs[0] = (2 * self.Qn[0] / pir2) * (1 - y**2) for n in self.ns: bn = self.beta[n] j0bn = self.jn0_betas[n] j1bn = self.jn1_betas[n] - r_dependent_coeffs[n] = (self.Qn[n] / pir2) * (j0bn - jn(0, - bn * y)) / (j0bn - (2.0 / bn) * j1bn) + r_dependent_coeffs[n] = ( + (self.Qn[n] / pir2) + * (j0bn - jn(0, bn * y)) + / (j0bn - (2.0 / bn) * j1bn) + ) else: raise ValueError("Missing Vn or Qn!") return r_dependent_coeffs @@ -231,9 +259,22 @@ def eval(self, value, x): value[0] = -self.normal_component * self.scale_value * wom -def make_womersley_bcs(t, Q, mesh, nu, area, center, radius, normal, - element, scale_to=None, coeffstype="Q", - N=1001, num_fourier_coefficients=20, **NS_namespace): +def make_womersley_bcs( + t, + Q, + mesh, + nu, + area, + center, + radius, + normal, + element, + scale_to=None, + coeffstype="Q", + N=1001, + num_fourier_coefficients=20, + **NS_namespace +): """Generate a list of expressions for the components of a Womersley profile.""" # Compute transient profile as interpolation of given coefficients period = max(t) @@ -242,7 +283,9 @@ def make_womersley_bcs(t, Q, mesh, nu, area, center, radius, normal, # Compute fourier coefficients of transient profile timedisc = np.linspace(0, period, N) - Cn = fourier_coefficients(timedisc, transient_profile, period, num_fourier_coefficients) + Cn = fourier_coefficients( + timedisc, transient_profile, period, num_fourier_coefficients + ) # Create Expressions for each direction expressions = [] @@ -253,7 +296,10 @@ def make_womersley_bcs(t, Q, mesh, nu, area, center, radius, normal, elif coeffstype == "V": V = Cn Q = None - expressions.append(WomersleyComponent(radius, center, normal, ncomp, period, nu, - element, Q=Q, V=V)) + expressions.append( + WomersleyComponent( + radius, center, normal, ncomp, period, nu, element, Q=Q, V=V + ) + ) return expressions diff --git a/src/vampy/simulation/__init__.py b/src/vampy/simulation/__init__.py index 8a09c942..4aee045b 100644 --- 
a/src/vampy/simulation/__init__.py +++ b/src/vampy/simulation/__init__.py @@ -1,5 +1 @@ -from . import Artery -from . import Atrium -from . import simulation_common -from . import Womersley -from . import Probe +from . import Artery, Atrium, Probe, Womersley, simulation_common diff --git a/src/vampy/simulation/simulation_common.py b/src/vampy/simulation/simulation_common.py index 8242fdff..b6e3b6c2 100644 --- a/src/vampy/simulation/simulation_common.py +++ b/src/vampy/simulation/simulation_common.py @@ -1,7 +1,7 @@ -from os import path, makedirs +from os import makedirs, path import numpy as np -from dolfin import MPI, assemble, Constant, assign, HDF5File, Measure +from dolfin import MPI, Constant, HDF5File, Measure, assemble, assign def get_file_paths(folder): @@ -64,9 +64,21 @@ def print_mesh_information(mesh): if MPI.rank(MPI.comm_world) == 0: print("=== Mesh information ===") - print("X range: {} to {} (delta: {:.4f})".format(min(xmin), max(xmax), max(xmax) - min(xmin))) - print("Y range: {} to {} (delta: {:.4f})".format(min(ymin), max(ymax), max(ymax) - min(ymin))) - print("Z range: {} to {} (delta: {:.4f})".format(min(zmin), max(zmax), max(zmax) - min(zmin))) + print( + "X range: {} to {} (delta: {:.4f})".format( + min(xmin), max(xmax), max(xmax) - min(xmin) + ) + ) + print( + "Y range: {} to {} (delta: {:.4f})".format( + min(ymin), max(ymax), max(ymax) - min(ymin) + ) + ) + print( + "Z range: {} to {} (delta: {:.4f})".format( + min(zmin), max(zmax), max(zmax) - min(zmin) + ) + ) print("Number of cells: {}".format(sum(num_cells))) print("Number of cells per processor: {}".format(int(np.mean(num_cells)))) print("Number of edges: {}".format(sum(num_edges))) @@ -77,8 +89,17 @@ def print_mesh_information(mesh): print("Number of cells per volume: {:.4f}".format(sum(num_cells) / volume)) -def store_u_mean(T, dt, save_solution_at_tstep, save_solution_frequency, u_mean, u_mean0, u_mean1, u_mean2, - NS_parameters): +def store_u_mean( + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + u_mean, + u_mean0, + u_mean1, + u_mean2, + NS_parameters, +): """ Store the time averaged velocity into file u_mean @@ -93,7 +114,7 @@ def store_u_mean(T, dt, save_solution_at_tstep, save_solution_frequency, u_mean, u_mean2 (Function): Function storing z-component """ # Get the file path - files = NS_parameters['files'] + files = NS_parameters["files"] u_mean_path = files["u_mean"] # Divide the accumulated velocity by the number of steps diff --git a/tests/test_compute_flow_and_simulation_metrics.py b/tests/test_compute_flow_and_simulation_metrics.py index bdb8feef..68486ef8 100644 --- a/tests/test_compute_flow_and_simulation_metrics.py +++ b/tests/test_compute_flow_and_simulation_metrics.py @@ -1,27 +1,60 @@ import shutil from os import path, remove -from vampy.automatedPostprocessing.compute_flow_and_simulation_metrics import compute_flow_and_simulation_metrics +from vampy.automatedPostprocessing.compute_flow_and_simulation_metrics import ( + compute_flow_and_simulation_metrics, +) def test_compute_flow_and_simulation_metrics_for_full_cycle(): # Path to test results and params - results_path, nu, dt, velocity_degree, T, save_frequency, start_cycle, step, flow_metrics_path = \ - get_default_parameters() + ( + results_path, + nu, + dt, + velocity_degree, + T, + save_frequency, + start_cycle, + step, + flow_metrics_path, + ) = get_default_parameters() average_over_cycles = False times_to_average = [] # Run post-processing - compute_flow_and_simulation_metrics(results_path, nu, dt, velocity_degree, 
T, times_to_average, save_frequency, - start_cycle, step, average_over_cycles) + compute_flow_and_simulation_metrics( + results_path, + nu, + dt, + velocity_degree, + T, + times_to_average, + save_frequency, + start_cycle, + step, + average_over_cycles, + ) # Check that output folder exists assert path.exists(flow_metrics_path) and path.isdir(flow_metrics_path) # Check that output files exist - metric_names = ["u_time_avg", "l_plus", "t_plus", "CFL", "strain", "length_scale", "time_scale", - "velocity_scale", "characteristic_edge_length", "dissipation", "kinetic_energy", - "turbulent_kinetic_energy", "turbulent_dissipation"] + metric_names = [ + "u_time_avg", + "l_plus", + "t_plus", + "CFL", + "strain", + "length_scale", + "time_scale", + "velocity_scale", + "characteristic_edge_length", + "dissipation", + "kinetic_energy", + "turbulent_kinetic_energy", + "turbulent_dissipation", + ] for name in metric_names: xdmf_path = path.join(flow_metrics_path, "{}.xdmf".format(name)) @@ -42,8 +75,17 @@ def test_compute_flow_and_simulation_metrics_for_full_cycle(): def test_compute_flow_and_simulation_metrics_at_one_instance(): # Path to test results and params - results_path, nu, dt, velocity_degree, T, save_frequency, start_cycle, step, flow_metrics_path = \ - get_default_parameters() + ( + results_path, + nu, + dt, + velocity_degree, + T, + save_frequency, + start_cycle, + step, + flow_metrics_path, + ) = get_default_parameters() average_over_cycles = False @@ -52,16 +94,38 @@ def test_compute_flow_and_simulation_metrics_at_one_instance(): times_to_average = [time_in_ms, 2 * time_in_ms, 4 * time_in_ms] # Run post-processing - compute_flow_and_simulation_metrics(results_path, nu, dt, velocity_degree, T, times_to_average, save_frequency, - start_cycle, step, average_over_cycles) + compute_flow_and_simulation_metrics( + results_path, + nu, + dt, + velocity_degree, + T, + times_to_average, + save_frequency, + start_cycle, + step, + average_over_cycles, + ) # Check that output folder exists assert path.exists(flow_metrics_path) and path.isdir(flow_metrics_path) # Check that output files exist - metric_names = ["u_time_avg", "l_plus", "t_plus", "CFL", "strain", "length_scale", "time_scale", - "velocity_scale", "characteristic_edge_length", "dissipation", "kinetic_energy", - "turbulent_kinetic_energy", "turbulent_dissipation"] + metric_names = [ + "u_time_avg", + "l_plus", + "t_plus", + "CFL", + "strain", + "length_scale", + "time_scale", + "velocity_scale", + "characteristic_edge_length", + "dissipation", + "kinetic_energy", + "turbulent_kinetic_energy", + "turbulent_dissipation", + ] for time in times_to_average: for name in metric_names: xdmf_path = path.join(flow_metrics_path, "{}_{}.xdmf".format(name, time)) @@ -82,8 +146,17 @@ def test_compute_flow_and_simulation_metrics_at_one_instance(): def test_compute_flow_and_simulation_metrics_averaged_over_one_cycle(): # Path to test results and params - results_path, nu, dt, velocity_degree, T, save_frequency, start_cycle, step, flow_metrics_path = \ - get_default_parameters() + ( + results_path, + nu, + dt, + velocity_degree, + T, + save_frequency, + start_cycle, + step, + flow_metrics_path, + ) = get_default_parameters() times_to_average = [] # Average over cycle 1 @@ -91,19 +164,43 @@ def test_compute_flow_and_simulation_metrics_averaged_over_one_cycle(): cycle = 1 # Run post-processing - compute_flow_and_simulation_metrics(results_path, nu, dt, velocity_degree, T, times_to_average, save_frequency, - start_cycle, step, average_over_cycles) + 
compute_flow_and_simulation_metrics( + results_path, + nu, + dt, + velocity_degree, + T, + times_to_average, + save_frequency, + start_cycle, + step, + average_over_cycles, + ) # Check that output folder exists assert path.exists(flow_metrics_path) and path.isdir(flow_metrics_path) # Check that cycle averaged output exist - metric_names = ["u_time_avg", "l_plus", "t_plus", "CFL", "strain", "length_scale", "time_scale", - "velocity_scale", "characteristic_edge_length", "dissipation", "kinetic_energy", - "turbulent_kinetic_energy", "turbulent_dissipation"] + metric_names = [ + "u_time_avg", + "l_plus", + "t_plus", + "CFL", + "strain", + "length_scale", + "time_scale", + "velocity_scale", + "characteristic_edge_length", + "dissipation", + "kinetic_energy", + "turbulent_kinetic_energy", + "turbulent_dissipation", + ] for name in metric_names: - xdmf_path = path.join(flow_metrics_path, "{}_cycle_{:02d}.xdmf".format(name, cycle)) + xdmf_path = path.join( + flow_metrics_path, "{}_cycle_{:02d}.xdmf".format(name, cycle) + ) h5_path = path.join(flow_metrics_path, "{}_cycle_{:02d}.h5".format(name, cycle)) assert path.exists(xdmf_path) and path.exists(h5_path) assert path.getsize(xdmf_path) > 0 @@ -139,7 +236,17 @@ def get_default_parameters(): T = 951 step = 1 - return results_path, nu, dt, velocity_degree, T, save_frequency, start_cycle, step, flow_metrics_path + return ( + results_path, + nu, + dt, + velocity_degree, + T, + save_frequency, + start_cycle, + step, + flow_metrics_path, + ) if __name__ == "__main__": diff --git a/tests/test_compute_hemodynamc_indices.py b/tests/test_compute_hemodynamc_indices.py index 0535ad04..2cb4ac86 100644 --- a/tests/test_compute_hemodynamc_indices.py +++ b/tests/test_compute_hemodynamc_indices.py @@ -1,18 +1,40 @@ import shutil from os import path -from vampy.automatedPostprocessing.compute_hemodynamic_indices import compute_hemodynamic_indices +from vampy.automatedPostprocessing.compute_hemodynamic_indices import ( + compute_hemodynamic_indices, +) def test_compute_hemodynamic_indices(): # Path to test results and params - results_path, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, step, indices_path = \ - get_default_parameters() + ( + results_path, + nu, + rho, + dt, + T, + velocity_degree, + save_frequency, + start_cycle, + step, + indices_path, + ) = get_default_parameters() average_over_cycles = False # Run post-processing - compute_hemodynamic_indices(results_path, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, step, - average_over_cycles) + compute_hemodynamic_indices( + results_path, + nu, + rho, + dt, + T, + velocity_degree, + save_frequency, + start_cycle, + step, + average_over_cycles, + ) # Check that output folder exists assert path.exists(indices_path) and path.isdir(indices_path) @@ -33,16 +55,36 @@ def test_compute_hemodynamic_indices(): def test_compute_hemodynamic_indices_averaged_over_one_cycle(): # Path to test results and params - results_path, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, step, indices_path \ - = get_default_parameters() + ( + results_path, + nu, + rho, + dt, + T, + velocity_degree, + save_frequency, + start_cycle, + step, + indices_path, + ) = get_default_parameters() # Average over cycle 1 average_over_cycles = True cycle = 1 # Run post-processing - compute_hemodynamic_indices(results_path, nu, rho, dt, T, velocity_degree, save_frequency, - start_cycle, step, average_over_cycles) + compute_hemodynamic_indices( + results_path, + nu, + rho, + dt, + T, + velocity_degree, + 
save_frequency, + start_cycle, + step, + average_over_cycles, + ) # Check that output folder exists assert path.exists(indices_path) and path.isdir(indices_path) @@ -82,7 +124,18 @@ def get_default_parameters(): start_cycle = 1 step = 1 - return results_path, nu, rho, dt, T, velocity_degree, save_frequency, start_cycle, step, indices_path + return ( + results_path, + nu, + rho, + dt, + T, + velocity_degree, + save_frequency, + start_cycle, + step, + indices_path, + ) if __name__ == "__main__": diff --git a/tests/test_compute_velocity_and_pressure.py b/tests/test_compute_velocity_and_pressure.py index f2a7daa1..539800cb 100644 --- a/tests/test_compute_velocity_and_pressure.py +++ b/tests/test_compute_velocity_and_pressure.py @@ -1,6 +1,8 @@ from os import path, remove -from vampy.automatedPostprocessing.compute_velocity_and_pressure import compute_velocity_and_pressure +from vampy.automatedPostprocessing.compute_velocity_and_pressure import ( + compute_velocity_and_pressure, +) def test_compute_velocity_and_pressure(): @@ -12,7 +14,9 @@ def test_compute_velocity_and_pressure(): step = 1 # Run post-processing - compute_velocity_and_pressure(results_path, dt, velocity_degree, pressure_degree, step) + compute_velocity_and_pressure( + results_path, dt, velocity_degree, pressure_degree, step + ) # Check that output files exist metric_names = ["velocity", "pressure"] diff --git a/tests/test_pre_processing.py b/tests/test_pre_processing.py index ceb996c0..b242288c 100644 --- a/tests/test_pre_processing.py +++ b/tests/test_pre_processing.py @@ -2,8 +2,10 @@ from dolfin import Mesh -from vampy.automatedPreprocessing.automated_preprocessing import read_command_line, \ - run_pre_processing +from vampy.automatedPreprocessing.automated_preprocessing import ( + read_command_line, + run_pre_processing, +) from vampy.automatedPreprocessing.preprocessing_common import read_polydata @@ -11,15 +13,18 @@ def test_mesh_model_with_one_inlet(): model_path = "tests/test_data/tube/tube.stl" # Get default input parameters common_input = read_command_line(model_path) - common_input.update(dict(meshing_method="diameter", - smoothing_method="taubin", - refine_region=False, - coarsening_factor=1.3, - visualize=False, - compress_mesh=False, - outlet_flow_extension_length=1, - inlet_flow_extension_length=1 - )) + common_input.update( + dict( + meshing_method="diameter", + smoothing_method="taubin", + refine_region=False, + coarsening_factor=1.3, + visualize=False, + compress_mesh=False, + outlet_flow_extension_length=1, + inlet_flow_extension_length=1, + ) + ) # Run pre processing run_pre_processing(**common_input) @@ -46,15 +51,18 @@ def test_mesh_model_with_one_inlet_and_two_outlets(): model_path = "tests/test_data/artery/artery.stl" # Get default input parameters common_input = read_command_line(model_path) - common_input.update(dict(meshing_method="diameter", - smoothing_method="taubin", - refine_region=False, - coarsening_factor=1.3, - visualize=False, - compress_mesh=False, - outlet_flow_extension_length=1, - inlet_flow_extension_length=1 - )) + common_input.update( + dict( + meshing_method="diameter", + smoothing_method="taubin", + refine_region=False, + coarsening_factor=1.3, + visualize=False, + compress_mesh=False, + outlet_flow_extension_length=1, + inlet_flow_extension_length=1, + ) + ) # Run pre processing run_pre_processing(**common_input) diff --git a/tests/test_visualize_probes.py b/tests/test_visualize_probes.py index d31f4f09..defacb3a 100644 --- a/tests/test_visualize_probes.py +++ 
b/tests/test_visualize_probes.py @@ -15,7 +15,15 @@ def test_visualize_all_probes(): spectrogram_path = path.join(probe_path, "spectrogram.png") # Run post-processing - visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=[], show_figure=False, save_figure=True) + visualize_probes( + probe_path, + probe_frequency, + T, + dt, + probes_to_plot=[], + show_figure=False, + save_figure=True, + ) # Check that output image exists for file_path in [u_p_path, ke_path, spectrum_path, spectrogram_path]: @@ -35,7 +43,15 @@ def test_visualize_two_probes(): probe_frequency = 100 # Run post-processing - visualize_probes(probe_path, probe_frequency, T, dt, probes_to_plot=probes, show_figure=False, save_figure=True) + visualize_probes( + probe_path, + probe_frequency, + T, + dt, + probes_to_plot=probes, + show_figure=False, + save_figure=True, + ) # Check that output image exists for probe in probes: From 51c6f20f888ea965d615662a09fec10813fdf636 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Wed, 9 Oct 2024 22:10:49 +0200 Subject: [PATCH 22/23] Run linting --- src/vampy/__init__.py | 54 +-- .../compute_flow_and_simulation_metrics.py | 4 +- .../compute_velocity_and_pressure.py | 67 +++- .../postprocessing_common.py | 10 +- src/vampy/automatedPreprocessing/__init__.py | 20 +- .../automated_preprocessing.py | 321 +++++++++++++----- .../automatedPreprocessing/moving_common.py | 108 ++++-- .../preprocessing_common.py | 89 +++-- .../automatedPreprocessing/repair_tools.py | 6 +- src/vampy/automatedPreprocessing/visualize.py | 4 +- .../vmtk_pointselector.py | 2 +- src/vampy/simulation/Artery.py | 51 ++- src/vampy/simulation/Atrium.py | 43 ++- src/vampy/simulation/MovingAtrium.py | 254 +++++++++++--- src/vampy/simulation/Womersley.py | 33 +- src/vampy/simulation/__init__.py | 15 +- src/vampy/simulation/simulation_common.py | 31 +- tests/test_compute_velocity_and_pressure.py | 4 +- tests/test_pre_processing.py | 63 ++-- tests/test_simulation.py | 46 ++- 20 files changed, 890 insertions(+), 335 deletions(-) diff --git a/src/vampy/__init__.py b/src/vampy/__init__.py index da6f2ad7..f768a4ab 100644 --- a/src/vampy/__init__.py +++ b/src/vampy/__init__.py @@ -3,41 +3,43 @@ from importlib.metadata import metadata # Imports from post-processing -from .automatedPostprocessing import compute_flow_and_simulation_metrics -from .automatedPostprocessing import compute_hemodynamic_indices -from .automatedPostprocessing import compute_velocity_and_pressure -from .automatedPostprocessing import postprocessing_common -from .automatedPostprocessing import visualize_probes +from .automatedPostprocessing import ( + compute_flow_and_simulation_metrics, + compute_hemodynamic_indices, + compute_velocity_and_pressure, + postprocessing_common, + visualize_probes, +) + # Imports from pre-processing try: - from .automatedPreprocessing import DisplayData - from .automatedPreprocessing import ImportData - from .automatedPreprocessing import NetworkBoundaryConditions - from .automatedPreprocessing import repair_tools - from .automatedPreprocessing import automated_preprocessing - from .automatedPreprocessing import preprocessing_common - from .automatedPreprocessing import simulate - from .automatedPreprocessing import visualize - from .automatedPreprocessing import vmtk_pointselector + from .automatedPreprocessing import ( + DisplayData, + ImportData, + NetworkBoundaryConditions, + automated_preprocessing, + preprocessing_common, + repair_tools, + simulate, + visualize, + vmtk_pointselector, + ) except ModuleNotFoundError: 
- print("WARNING: morphMan is not installed, automated preprocessing is not available") + print( + "WARNING: morphMan is not installed, automated preprocessing is not available" + ) # Imports from simulation scripts try: - from .simulation import Artery - from .simulation import Atrium - from .simulation import Probe - from .simulation import Womersley - from .simulation import simulation_common + from .simulation import Artery, Atrium, Probe, Womersley, simulation_common except ModuleNotFoundError: print("WARNING: Oasis is not installed, running CFD is not available") try: - from .simulation import Probe - from .simulation import Womersley - from .simulation import simulation_common - from .simulation import MovingAtrium + from .simulation import MovingAtrium, Probe, Womersley, simulation_common except ModuleNotFoundError: - print("WARNING: OasisMove is not installed, running moving domain simulations (MovingAtrium) is not available") + print( + "WARNING: OasisMove is not installed, running moving domain simulations (MovingAtrium) is not available" + ) meta = metadata("vampy") @@ -67,5 +69,5 @@ "simulation_common", "Probe", "Womersley", - 'MovingAtrium' + "MovingAtrium", ] diff --git a/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py b/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py index 90f68796..ce14a87b 100755 --- a/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py +++ b/src/vampy/automatedPostprocessing/compute_flow_and_simulation_metrics.py @@ -482,7 +482,7 @@ def define_functions_and_iterate_dataset( print("=" * 10, "Starting post processing", "=" * 10) counter = 0 - tolerance = 1E-12 + tolerance = 1e-12 for data, data_avg in zip(dataset, dataset_avg): counter += 1 @@ -557,7 +557,7 @@ def define_functions_and_iterate_dataset( # Compute length scale t0 = Timer("Length scale") - length_scale.vector().set_local((nu ** 3 / (eps + tolerance)) ** (1. / 4)) + length_scale.vector().set_local((nu**3 / (eps + tolerance)) ** (1.0 / 4)) length_scale.vector().apply("insert") length_scale_cycle_avg.vector().axpy(1, length_scale.vector()) t0.stop() diff --git a/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py b/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py index 1d0d961e..3e50eafe 100644 --- a/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py +++ b/src/vampy/automatedPostprocessing/compute_velocity_and_pressure.py @@ -11,7 +11,10 @@ parameters, ) -from vampy.automatedPostprocessing.postprocessing_common import read_command_line, get_dataset_names +from vampy.automatedPostprocessing.postprocessing_common import ( + get_dataset_names, + read_command_line, +) try: parameters["reorder_dofs_serial"] = False @@ -19,7 +22,9 @@ pass -def compute_velocity_and_pressure(folder, dt, save_frequency, velocity_degree, pressure_degree, step): +def compute_velocity_and_pressure( + folder, dt, save_frequency, velocity_degree, pressure_degree, step +): """ Loads velocity and pressure from compressed .h5 CFD solution and converts and saves to .xdmf format for visualization (in e.g. ParaView). 
@@ -46,10 +51,16 @@ def compute_velocity_and_pressure(folder, dt, save_frequency, velocity_degree, p file_d = HDF5File(MPI.comm_world, file_path_d, "r") # Read in datasets - dataset_u = get_dataset_names(file_u, step=step, vector_filename="/velocity/vector_%d") - dataset_p = get_dataset_names(file_p, step=step, vector_filename="/pressure/vector_%d") + dataset_u = get_dataset_names( + file_u, step=step, vector_filename="/velocity/vector_%d" + ) + dataset_p = get_dataset_names( + file_p, step=step, vector_filename="/pressure/vector_%d" + ) if file_d is not None: - dataset_d = get_dataset_names(file_d, step=step, vector_filename="/deformation/vector_%d") + dataset_d = get_dataset_names( + file_d, step=step, vector_filename="/deformation/vector_%d" + ) # Read mesh saved as HDF5 format mesh = Mesh() @@ -119,10 +130,42 @@ def compute_velocity_and_pressure(folder, dt, save_frequency, velocity_degree, p def main_convert(): - folder, _, _, dt, velocity_degree, pressure_degree, _, _, save_frequency, _, _, step, _ = read_command_line() - compute_velocity_and_pressure(folder, dt, save_frequency, velocity_degree, pressure_degree, step) - - -if __name__ == '__main__': - folder, _, _, dt, velocity_degree, pressure_degree, _, _, save_frequency, _, _, step, _ = read_command_line() - compute_velocity_and_pressure(folder, dt, save_frequency, velocity_degree, pressure_degree, step) + ( + folder, + _, + _, + dt, + velocity_degree, + pressure_degree, + _, + _, + save_frequency, + _, + _, + step, + _, + ) = read_command_line() + compute_velocity_and_pressure( + folder, dt, save_frequency, velocity_degree, pressure_degree, step + ) + + +if __name__ == "__main__": + ( + folder, + _, + _, + dt, + velocity_degree, + pressure_degree, + _, + _, + save_frequency, + _, + _, + step, + _, + ) = read_command_line() + compute_velocity_and_pressure( + folder, dt, save_frequency, velocity_degree, pressure_degree, step + ) diff --git a/src/vampy/automatedPostprocessing/postprocessing_common.py b/src/vampy/automatedPostprocessing/postprocessing_common.py index 2f1af3c4..34ae9e9e 100644 --- a/src/vampy/automatedPostprocessing/postprocessing_common.py +++ b/src/vampy/automatedPostprocessing/postprocessing_common.py @@ -298,8 +298,14 @@ def norm_l2(self, u): return pow(inner(u, u), 0.5) -def get_dataset_names(data_file, num_files=100000, step=1, start=0, print_info=True, - vector_filename="/velocity/vector_%d"): +def get_dataset_names( + data_file, + num_files=100000, + step=1, + start=0, + print_info=True, + vector_filename="/velocity/vector_%d", +): """ Read velocity fields datasets and extract names of files diff --git a/src/vampy/automatedPreprocessing/__init__.py b/src/vampy/automatedPreprocessing/__init__.py index 59052680..3ef9b766 100644 --- a/src/vampy/automatedPreprocessing/__init__.py +++ b/src/vampy/automatedPreprocessing/__init__.py @@ -1,9 +1,11 @@ -from . import automated_preprocessing -from . import preprocessing_common -from . import DisplayData -from . import ImportData -from . import NetworkBoundaryConditions -from . import simulate -from . import repair_tools -from . import visualize -from . import vmtk_pointselector +from . 
import ( + DisplayData, + ImportData, + NetworkBoundaryConditions, + automated_preprocessing, + preprocessing_common, + repair_tools, + simulate, + visualize, + vmtk_pointselector, +) diff --git a/src/vampy/automatedPreprocessing/automated_preprocessing.py b/src/vampy/automatedPreprocessing/automated_preprocessing.py index 74ce19c7..9d381a87 100644 --- a/src/vampy/automatedPreprocessing/automated_preprocessing.py +++ b/src/vampy/automatedPreprocessing/automated_preprocessing.py @@ -3,29 +3,92 @@ from os import path, remove import numpy as np -from morphman import get_uncapped_surface, write_polydata, read_polydata, get_parameters, vtk_clean_polydata, \ - vtk_triangulate_surface, write_parameters, vmtk_cap_polydata, compute_centerlines, get_centerline_tolerance, \ - get_vtk_point_locator, extract_single_line, vtk_merge_polydata, get_point_data_array, smooth_voronoi_diagram, \ - create_new_surface, compute_centers, vmtk_smooth_surface, str2bool, vmtk_compute_voronoi_diagram, \ - prepare_output_surface, vmtk_compute_geometric_features +from morphman import ( + compute_centerlines, + compute_centers, + create_new_surface, + extract_single_line, + get_centerline_tolerance, + get_parameters, + get_point_data_array, + get_uncapped_surface, + get_vtk_point_locator, + prepare_output_surface, + read_polydata, + smooth_voronoi_diagram, + str2bool, + vmtk_cap_polydata, + vmtk_compute_geometric_features, + vmtk_compute_voronoi_diagram, + vmtk_smooth_surface, + vtk_clean_polydata, + vtk_merge_polydata, + vtk_triangulate_surface, + write_parameters, + write_polydata, +) # Local imports -from vampy.automatedPreprocessing.moving_common import get_point_map, project_displacement, save_displacement -from vampy.automatedPreprocessing.preprocessing_common import get_centers_for_meshing, \ - dist_sphere_diam, dist_sphere_curvature, dist_sphere_constant, get_regions_to_refine, add_flow_extension, \ - write_mesh, mesh_alternative, generate_mesh, find_boundaries, compute_flow_rate, setup_model_network, \ - radiusArrayName, scale_surface, get_furtest_surface_point, check_if_closed_surface, remesh_surface, \ - dist_sphere_geodesic -from vampy.automatedPreprocessing.repair_tools import find_and_delete_nan_triangles, clean_surface, print_surface_info +from vampy.automatedPreprocessing.moving_common import ( + get_point_map, + project_displacement, + save_displacement, +) +from vampy.automatedPreprocessing.preprocessing_common import ( + add_flow_extension, + check_if_closed_surface, + compute_flow_rate, + dist_sphere_constant, + dist_sphere_curvature, + dist_sphere_diam, + dist_sphere_geodesic, + find_boundaries, + generate_mesh, + get_centers_for_meshing, + get_furtest_surface_point, + get_regions_to_refine, + mesh_alternative, + radiusArrayName, + remesh_surface, + scale_surface, + setup_model_network, + write_mesh, +) +from vampy.automatedPreprocessing.repair_tools import ( + clean_surface, + find_and_delete_nan_triangles, + print_surface_info, +) from vampy.automatedPreprocessing.simulate import run_simulation from vampy.automatedPreprocessing.visualize import visualize_model -def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_factor, smoothing_iterations, - meshing_method, refine_region, is_atrium, add_flow_extensions, visualize, config_path, - coarsening_factor, inlet_flow_extension_length, outlet_flow_extension_length, edge_length, - region_points, compress_mesh, add_boundary_layer, scale_factor, resampling_step, - flow_rate_factor, moving_mesh, clamp_boundaries, 
max_geodesic_distance): +def run_pre_processing( + input_model, + verbose_print, + smoothing_method, + smoothing_factor, + smoothing_iterations, + meshing_method, + refine_region, + is_atrium, + add_flow_extensions, + visualize, + config_path, + coarsening_factor, + inlet_flow_extension_length, + outlet_flow_extension_length, + edge_length, + region_points, + compress_mesh, + add_boundary_layer, + scale_factor, + resampling_step, + flow_rate_factor, + moving_mesh, + clamp_boundaries, + max_geodesic_distance, +): """ Automatically generate mesh of surface model in .vtu and .xml format, including prescribed flow rates at inlet and outlet based on flow network model. @@ -68,7 +131,9 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f file_name_centerlines = base_path + "_centerlines.vtp" file_name_refine_region_centerlines = base_path + "_refine_region_centerline.vtp" file_name_region_centerlines = base_path + "_sac_centerline_{}.vtp" - file_name_distance_to_sphere = base_path + f"_distance_to_sphere_{meshing_method}.vtp" + file_name_distance_to_sphere = ( + base_path + f"_distance_to_sphere_{meshing_method}.vtp" + ) file_name_distance_to_sphere_initial = base_path + "_distance_to_sphere_initial.vtp" file_name_probe_points = base_path + "_probe_point.json" file_name_voronoi = base_path + "_voronoi.vtp" @@ -170,8 +235,8 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f regions = get_regions_to_refine(capped_surface, region_points, base_path) for i in range(len(regions) // 3): print( - f"--- Region to refine ({i + 1}): " + - f"{regions[3 * i]:.3f} {regions[3 * i + 1]:.3f} {regions[3 * i + 2]:.3f}\n" + f"--- Region to refine ({i + 1}): " + + f"{regions[3 * i]:.3f} {regions[3 * i + 1]:.3f} {regions[3 * i + 2]:.3f}\n" ) centerline_region, _, _ = compute_centerlines( @@ -270,9 +335,11 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f if num_outlets != num_outlets_after: write_polydata(surface, file_name_surface_smooth) - print(f"ERROR: Automatic clipping failed. You have to open {file_name_surface_smooth} and " + - "manually clipp the branch which still is capped. " + - f"Overwrite the current {file_name_surface_smooth} and restart the script.") + print( + f"ERROR: Automatic clipping failed. You have to open {file_name_surface_smooth} and " + + "manually clipp the branch which still is capped. " + + f"Overwrite the current {file_name_surface_smooth} and restart the script." 
+ ) sys.exit(0) surface = surface_uncapped @@ -310,8 +377,14 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f remeshed = read_polydata(file_name_remeshed) else: print("\n--- Remeshing surface for moving mesh\n") - surface = dist_sphere_constant(surface, centerlines, region_center, misr_max, - file_name_distance_to_sphere_initial, edge_length) + surface = dist_sphere_constant( + surface, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere_initial, + edge_length, + ) remeshed = remesh_surface(surface, edge_length, "edgelengtharray") remeshed = vtk_clean_polydata(remeshed) @@ -327,17 +400,27 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f extension = "centerlinedirection" if is_atrium else "boundarynormal" if is_atrium: # Flip lengths if model is atrium - inlet_flow_extension_length, outlet_flow_extension_length = \ - outlet_flow_extension_length, inlet_flow_extension_length + inlet_flow_extension_length, outlet_flow_extension_length = ( + outlet_flow_extension_length, + inlet_flow_extension_length, + ) # Add extensions to inlet (artery) / outlet (atrium) - surface_extended = add_flow_extension(remeshed, centerlines, is_inlet=True, - extension_length=inlet_flow_extension_length) + surface_extended = add_flow_extension( + remeshed, + centerlines, + is_inlet=True, + extension_length=inlet_flow_extension_length, + ) # Add extensions to outlets (artery) / inlets (Atrium) - surface_extended = add_flow_extension(surface_extended, centerlines, is_inlet=False, - extension_length=outlet_flow_extension_length, - extension_mode=extension) + surface_extended = add_flow_extension( + surface_extended, + centerlines, + is_inlet=False, + extension_length=outlet_flow_extension_length, + extension_mode=extension, + ) surface_extended = vmtk_smooth_surface( surface_extended, "laplace", iterations=200 @@ -355,8 +438,17 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f distance, point_map = get_point_map(remeshed, surface_extended) # Project displacement between surfaces - points = project_displacement(clamp_boundaries, distance, folder_extended_surfaces, folder_moved_surfaces, - point_map, surface, surface_extended, remeshed, scale_factor) + points = project_displacement( + clamp_boundaries, + distance, + folder_extended_surfaces, + folder_moved_surfaces, + point_map, + surface, + surface_extended, + remeshed, + scale_factor, + ) # Save displacement to numpy array save_displacement(file_name_displacement_points, points) @@ -410,20 +502,43 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f distance_to_sphere = read_polydata(file_name_distance_to_sphere) else: if meshing_method == "constant": - distance_to_sphere = dist_sphere_constant(surface_extended, centerlines, region_center, misr_max, - file_name_distance_to_sphere, edge_length) + distance_to_sphere = dist_sphere_constant( + surface_extended, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere, + edge_length, + ) elif meshing_method == "curvature": - distance_to_sphere = dist_sphere_curvature(surface_extended, centerlines, region_center, misr_max, - file_name_distance_to_sphere, coarsening_factor) + distance_to_sphere = dist_sphere_curvature( + surface_extended, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere, + coarsening_factor, + ) elif meshing_method == "diameter": - distance_to_sphere = dist_sphere_diam(surface_extended, centerlines, 
region_center, misr_max, - file_name_distance_to_sphere, coarsening_factor) + distance_to_sphere = dist_sphere_diam( + surface_extended, + centerlines, + region_center, + misr_max, + file_name_distance_to_sphere, + coarsening_factor, + ) elif meshing_method == "geodesic": if edge_length is None: print("Edge length needs to supplied when using Geodesic meshing") sys.exit(0) - distance_to_sphere = dist_sphere_geodesic(surface_extended, region_center, max_geodesic_distance, - file_name_distance_to_sphere, edge_length) + distance_to_sphere = dist_sphere_geodesic( + surface_extended, + region_center, + max_geodesic_distance, + file_name_distance_to_sphere, + edge_length, + ) else: print(f"Method '{meshing_method}' is not a valid meshing method") sys.exit(0) @@ -464,8 +579,9 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f else: mesh = read_polydata(file_name_vtu_mesh) - network, probe_points = setup_model_network(centerlines, file_name_probe_points, region_center, verbose_print, - is_atrium) + network, probe_points = setup_model_network( + centerlines, file_name_probe_points, region_center, verbose_print, is_atrium + ) # Load updated parameters following meshing parameters = get_parameters(base_path) @@ -496,10 +612,20 @@ def run_pre_processing(input_model, verbose_print, smoothing_method, smoothing_f print("--- Removing unused pre-processing files") files_to_remove = [ - file_name_centerlines, file_name_refine_region_centerlines, file_name_region_centerlines, - file_name_distance_to_sphere, file_name_remeshed, file_name_distance_to_sphere_initial, - file_name_voronoi, file_name_voronoi_smooth, file_name_voronoi_surface, file_name_surface_smooth, - file_name_model_flow_ext, file_name_clipped_model, file_name_flow_centerlines, file_name_surface_name, + file_name_centerlines, + file_name_refine_region_centerlines, + file_name_region_centerlines, + file_name_distance_to_sphere, + file_name_remeshed, + file_name_distance_to_sphere_initial, + file_name_voronoi, + file_name_voronoi_smooth, + file_name_voronoi_surface, + file_name_surface_smooth, + file_name_model_flow_ext, + file_name_clipped_model, + file_name_flow_centerlines, + file_name_surface_name, ] for file in files_to_remove: if path.exists(file): @@ -590,15 +716,18 @@ def read_command_line(input_path=None): help="Number of smoothing iterations for Laplace and Taubin type smoothing.", ) - parser.add_argument('-m', '--meshing-method', - type=str, - choices=["diameter", "curvature", "constant", "geodesic"], - default="diameter", - help="Determines method of meshing. The method 'constant' is supplied with a constant edge " + - "length controlled by the -el argument, resulting in a constant density mesh. " + - "The 'curvature' method and 'diameter' method produces a variable density mesh," + - " based on the surface curvature and the distance from the " + - "centerline to the surface, respectively.") + parser.add_argument( + "-m", + "--meshing-method", + type=str, + choices=["diameter", "curvature", "constant", "geodesic"], + default="diameter", + help="Determines method of meshing. The method 'constant' is supplied with a constant edge " + + "length controlled by the -el argument, resulting in a constant density mesh. 
" + + "The 'curvature' method and 'diameter' method produces a variable density mesh," + + " based on the surface curvature and the distance from the " + + "centerline to the surface, respectively.", + ) parser.add_argument( "-el", @@ -704,26 +833,34 @@ def read_command_line(input_path=None): help="Resampling step used to resample centerline in [m].", ) - parser.add_argument('-fr', '--flow-rate-factor', - default=0.27, - type=float, - help="Flow rate factor.") + parser.add_argument( + "-fr", "--flow-rate-factor", default=0.27, type=float, help="Flow rate factor." + ) - parser.add_argument('-mm', '--moving-mesh', - action="store_true", - default=False, - help="If true, assumes a dynamic/moving mesh and will perform computation of projection " + - "between moved surfaces located in the '[filename_model]_moved' folder.") + parser.add_argument( + "-mm", + "--moving-mesh", + action="store_true", + default=False, + help="If true, assumes a dynamic/moving mesh and will perform computation of projection " + + "between moved surfaces located in the '[filename_model]_moved' folder.", + ) - parser.add_argument('-cl', '--clamp-boundaries', - action="store_true", - default=False, - help="Clamps boundaries at inlet(s) and outlet(s) if true. Only used for moving mesh.") + parser.add_argument( + "-cl", + "--clamp-boundaries", + action="store_true", + default=False, + help="Clamps boundaries at inlet(s) and outlet(s) if true. Only used for moving mesh.", + ) - parser.add_argument('-gd', '--max-geodesic-distance', - default=10, - type=float, - help="Maximum distance when performing geodesic distance. In [mm].") + parser.add_argument( + "-gd", + "--max-geodesic-distance", + default=10, + type=float, + help="Maximum distance when performing geodesic distance. In [mm].", + ) # Parse path to get default values if required: @@ -758,16 +895,32 @@ def verbose_print(*args): verbose_print(args) - return dict(input_model=args.input_model, verbose_print=verbose_print, smoothing_method=args.smoothing_method, - smoothing_factor=args.smoothing_factor, smoothing_iterations=args.smoothing_iterations, - meshing_method=args.meshing_method, refine_region=args.refine_region, is_atrium=args.is_atrium, - add_flow_extensions=args.add_flowextensions, config_path=args.config_path, edge_length=args.edge_length, - coarsening_factor=args.coarsening_factor, inlet_flow_extension_length=args.inlet_flowextension, - visualize=args.visualize, region_points=args.region_points, compress_mesh=args.compress_mesh, - outlet_flow_extension_length=args.outlet_flowextension, add_boundary_layer=args.add_boundary_layer, - scale_factor=args.scale_factor, resampling_step=args.resampling_step, - flow_rate_factor=args.flow_rate_factor, moving_mesh=args.moving_mesh, - clamp_boundaries=args.clamp_boundaries, max_geodesic_distance=args.max_geodesic_distance) + return dict( + input_model=args.input_model, + verbose_print=verbose_print, + smoothing_method=args.smoothing_method, + smoothing_factor=args.smoothing_factor, + smoothing_iterations=args.smoothing_iterations, + meshing_method=args.meshing_method, + refine_region=args.refine_region, + is_atrium=args.is_atrium, + add_flow_extensions=args.add_flowextensions, + config_path=args.config_path, + edge_length=args.edge_length, + coarsening_factor=args.coarsening_factor, + inlet_flow_extension_length=args.inlet_flowextension, + visualize=args.visualize, + region_points=args.region_points, + compress_mesh=args.compress_mesh, + outlet_flow_extension_length=args.outlet_flowextension, + 
add_boundary_layer=args.add_boundary_layer, + scale_factor=args.scale_factor, + resampling_step=args.resampling_step, + flow_rate_factor=args.flow_rate_factor, + moving_mesh=args.moving_mesh, + clamp_boundaries=args.clamp_boundaries, + max_geodesic_distance=args.max_geodesic_distance, + ) def main_meshing(): diff --git a/src/vampy/automatedPreprocessing/moving_common.py b/src/vampy/automatedPreprocessing/moving_common.py index 3a9d1814..c8474aff 100644 --- a/src/vampy/automatedPreprocessing/moving_common.py +++ b/src/vampy/automatedPreprocessing/moving_common.py @@ -6,11 +6,11 @@ from vampy.automatedPreprocessing.preprocessing_common import scale_surface - ############################################################## # A Collection of utility scripts for moving mesh generation # ############################################################## + def get_point_map(remeshed, remeshed_extended, profile="linear"): """ Create a map between original surface model and model with @@ -33,15 +33,31 @@ def get_point_map(remeshed, remeshed_extended, profile="linear"): num_ext = remeshed_extended.Points.shape[0] - remeshed.Points.shape[0] # Get locators - inner_feature = vtk_compute_connectivity(vtk_extract_feature_edges(remeshed.VTKObject)) - outer_feature = vtk_compute_connectivity(vtk_extract_feature_edges(remeshed_extended.VTKObject)) + inner_feature = vtk_compute_connectivity( + vtk_extract_feature_edges(remeshed.VTKObject) + ) + outer_feature = vtk_compute_connectivity( + vtk_extract_feature_edges(remeshed_extended.VTKObject) + ) locator_remeshed = get_vtk_point_locator(remeshed.VTKObject) - n_features = outer_feature.GetPointData().GetArray("RegionId").GetValue(outer_feature.GetNumberOfPoints() - 1) + n_features = ( + outer_feature.GetPointData() + .GetArray("RegionId") + .GetValue(outer_feature.GetNumberOfPoints() - 1) + ) inner_features = np.array( - [vtk_compute_threshold(inner_feature, "RegionId", i, i + 0.1, source=0) for i in range(n_features + 1)]) + [ + vtk_compute_threshold(inner_feature, "RegionId", i, i + 0.1, source=0) + for i in range(n_features + 1) + ] + ) outer_features = np.array( - [vtk_compute_threshold(outer_feature, "RegionId", i, i + 0.1, source=0) for i in range(n_features + 1)]) + [ + vtk_compute_threshold(outer_feature, "RegionId", i, i + 0.1, source=0) + for i in range(n_features + 1) + ] + ) inner_regions = [dsa.WrapDataObject(feature) for feature in inner_features] inner_locators = [get_vtk_point_locator(feature) for feature in inner_features] @@ -50,7 +66,9 @@ def get_point_map(remeshed, remeshed_extended, profile="linear"): outer_regions = [dsa.WrapDataObject(feature) for feature in outer_features] outer_locators = [get_vtk_point_locator(feature) for feature in outer_features] outer_points = [feature.GetNumberOfPoints() for feature in outer_features] - boundary_map = {i: j for i, j in zip(np.argsort(inner_points), np.argsort(outer_points))} + boundary_map = { + i: j for i, j in zip(np.argsort(inner_points), np.argsort(outer_points)) + } # Get distance and point map distances = np.zeros(num_ext) @@ -82,14 +100,20 @@ def get_point_map(remeshed, remeshed_extended, profile="linear"): inner_id = inner_locators[regionId].FindClosestPoint(point) outer_id = outer_locators[regionId_out].FindClosestPoint(point) - dist_to_boundary = get_distance(point, outer_regions[regionId_out].Points[outer_id]) - dist_between_boundaries = get_distance(inner_regions[regionId].Points[inner_id], - outer_regions[regionId_out].Points[outer_id]) + dist_to_boundary = get_distance( + point, 
outer_regions[regionId_out].Points[outer_id] + ) + dist_between_boundaries = get_distance( + inner_regions[regionId].Points[inner_id], + outer_regions[regionId_out].Points[outer_id], + ) if profile == "linear": - distances[i] = (dist_to_boundary / dist_between_boundaries) + distances[i] = dist_to_boundary / dist_between_boundaries elif profile == "sine": distances[i] = easing_cos(dist_to_boundary, dist_between_boundaries) - point_map[i] = locator_remeshed.FindClosestPoint(inner_regions[regionId].Points[inner_id]) + point_map[i] = locator_remeshed.FindClosestPoint( + inner_regions[regionId].Points[inner_id] + ) # Let the points corresponding to the caps have distance 0 point_map = point_map.astype(int) @@ -107,11 +131,21 @@ def easing_cos(dist_b, dist_bb): Returns: distances (ndarray): Sinusoidal profile based on distance between point and boundary """ - return - 0.5 * (np.cos(np.pi * dist_b / dist_bb) - 1) - - -def move_surface_model(surface, original, remeshed, remeshed_extended, distance, point_map, file_path, i, points, - clamp_boundaries): + return -0.5 * (np.cos(np.pi * dist_b / dist_bb) - 1) + + +def move_surface_model( + surface, + original, + remeshed, + remeshed_extended, + distance, + point_map, + file_path, + i, + points, + clamp_boundaries, +): """ Computes and applies the displacement between the original and the given surface, then projects this displacement onto a remeshed surface. The displaced points of the remeshed surface are then saved @@ -158,10 +192,11 @@ def move_surface_model(surface, original, remeshed, remeshed_extended, distance, # Manipulate displacement in the extensions displacement = new_surface.PointData["displacement"] + n_points = remeshed.Points.shape[0] if clamp_boundaries: - displacement[remeshed.Points.shape[0]:] = distance * displacement[point_map] + displacement[n_points:] = distance * displacement[point_map] else: - displacement[remeshed.Points.shape[0]:] = displacement[point_map] + displacement[n_points:] = displacement[point_map] # Move the mesh points new_surface.Points += displacement @@ -197,8 +232,17 @@ def save_displacement(file_name_displacement_points, points, number_of_points=10 points.dump(file_name_displacement_points) -def project_displacement(clamp_boundaries, distance, folder_extended_surfaces, folder_moved_surfaces, point_map, - surface, surface_extended, remeshed, scale_factor): +def project_displacement( + clamp_boundaries, + distance, + folder_extended_surfaces, + folder_moved_surfaces, + point_map, + surface, + surface_extended, + remeshed, + scale_factor, +): """ Projects the displacement of moved surfaces located in a specified folder onto an extended surface. The projected surfaces are then saved to a designated output directory. @@ -220,7 +264,13 @@ def project_displacement(clamp_boundaries, distance, folder_extended_surfaces, f (num_points, 3, num_surfaces). 
""" # Add extents to all surfaces - extended_surfaces = sorted([f for f in listdir(folder_moved_surfaces) if f.endswith(".vtp") or f.endswith(".stl")]) + extended_surfaces = sorted( + [ + f + for f in listdir(folder_moved_surfaces) + if f.endswith(".vtp") or f.endswith(".stl") + ] + ) if not path.exists(folder_extended_surfaces): mkdir(folder_extended_surfaces) @@ -240,6 +290,16 @@ def project_displacement(clamp_boundaries, distance, folder_extended_surfaces, f new_path = path.join(folder_extended_surfaces, model_path.split("/")[-1]) if not path.exists(new_path): - move_surface_model(tmp_surface, surface, remeshed, surface_extended, distance, point_map, new_path, i, - points, clamp_boundaries) + move_surface_model( + tmp_surface, + surface, + remeshed, + surface_extended, + distance, + point_map, + new_path, + i, + points, + clamp_boundaries, + ) return points diff --git a/src/vampy/automatedPreprocessing/preprocessing_common.py b/src/vampy/automatedPreprocessing/preprocessing_common.py index d79f79ba..41a2d908 100644 --- a/src/vampy/automatedPreprocessing/preprocessing_common.py +++ b/src/vampy/automatedPreprocessing/preprocessing_common.py @@ -4,19 +4,40 @@ import numpy as np import vtkmodules.numpy_interface.dataset_adapter as dsa -from morphman import vtk_clean_polydata, vtk_triangulate_surface, get_parameters, write_parameters, read_polydata, \ - vmtkscripts, vtk, write_polydata, get_curvilinear_coordinate, vtk_compute_threshold, get_vtk_array, \ - get_distance, get_number_of_arrays, vmtk_smooth_surface, get_point_data_array, create_vtk_array, \ - get_vtk_point_locator, vtk_extract_feature_edges, get_uncapped_surface, vtk_compute_connectivity, \ - vtk_compute_mass_properties, extract_single_line, get_centerline_tolerance +from morphman import ( + create_vtk_array, + extract_single_line, + get_centerline_tolerance, + get_curvilinear_coordinate, + get_distance, + get_number_of_arrays, + get_parameters, + get_point_data_array, + get_uncapped_surface, + get_vtk_array, + get_vtk_point_locator, + read_polydata, + vmtk_smooth_surface, + vmtkscripts, + vtk, + vtk_clean_polydata, + vtk_compute_connectivity, + vtk_compute_mass_properties, + vtk_compute_threshold, + vtk_extract_feature_edges, + vtk_triangulate_surface, + write_parameters, + write_polydata, +) +from vmtk import vtkvmtk from vampy.automatedPreprocessing import ImportData from vampy.automatedPreprocessing.NetworkBoundaryConditions import FlowSplitting from vampy.automatedPreprocessing.vmtk_pointselector import vmtkPickPointSeedSelector -from vmtk import vtkvmtk + # Global array names distanceToSpheresArrayName = "DistanceToSpheres" -radiusArrayName = 'MaximumInscribedSphereRadius' +radiusArrayName = "MaximumInscribedSphereRadius" cellEntityArrayName = "CellEntityIds" dijkstraArrayName = "DijkstraDistanceToPoints" @@ -218,10 +239,14 @@ def compute_centers_for_meshing(surface, is_atrium, case_path=None, test_capped= write_parameters(info, case_path) - boundary_center = center[boundary_id].tolist() # center of the inlet (artery) / outlet (atrium) + boundary_center = center[ + boundary_id + ].tolist() # center of the inlet (artery) / outlet (atrium) center.pop(boundary_id) - center_ = [item for sublist in center for item in sublist] # centers of the outlets (artery) / inlets (atrium) + center_ = [ + item for sublist in center for item in sublist + ] # centers of the outlets (artery) / inlets (atrium) return center_, boundary_center @@ -718,7 +743,9 @@ def find_boundaries( write_parameters(info, model_path) -def 
setup_model_network(centerlines, file_name_probe_points, region_center, verbose_print, is_atrium): +def setup_model_network( + centerlines, file_name_probe_points, region_center, verbose_print, is_atrium +): """ Sets up network used for network boundary condition model. @@ -749,10 +776,10 @@ def setup_model_network(centerlines, file_name_probe_points, region_center, verb print("--- Saving probes points in: %s\n" % file_name_probe_points) probe_points = np.array(listProbePoints) - with open(file_name_probe_points, 'w') as outfile: + with open(file_name_probe_points, "w") as outfile: json.dump(probe_points.tolist(), outfile) else: - with open(file_name_probe_points, 'r') as infile: + with open(file_name_probe_points, "r") as infile: probe_points = np.array(json.load(infile)) # Set the flow split and inlet boundary condition @@ -840,7 +867,13 @@ def write_mesh( gzfile.close() -def add_flow_extension(surface, centerlines, is_inlet, extension_length=2.0, extension_mode="boundarynormal"): +def add_flow_extension( + surface, + centerlines, + is_inlet, + extension_length=2.0, + extension_mode="boundarynormal", +): """ Adds flow extensions to either all inlets or all outlets with specified extension length. @@ -975,7 +1008,9 @@ def remesh_surface(surface, edge_length, element_size_mode="edgelength", exclude """ surface = dsa.WrapDataObject(surface) if cellEntityArrayName not in surface.CellData.keys(): - surface.CellData.append(np.zeros(surface.VTKObject.GetNumberOfCells()) + 1, cellEntityArrayName) + surface.CellData.append( + np.zeros(surface.VTKObject.GetNumberOfCells()) + 1, cellEntityArrayName + ) remeshing = vmtkscripts.vmtkSurfaceRemeshing() remeshing.Surface = surface.VTKObject @@ -1003,18 +1038,18 @@ def remesh_surface(surface, edge_length, element_size_mode="edgelength", exclude def dist_sphere_geodesic(surface, region_center, max_distance, save_path, edge_length): """ - Determines the target edge length for each cell on the surface, including - potential refinement or coarsening of certain user specified areas. - Level of refinement/coarseness is determined based on user selected region and the geodesic distance from it. - Args: - surface (vtkPolyData): Input surface model - region_center (list): Point representing region to refine - save_path (str): Location to store processed surface - edge_length (float): Target edge length - max_distance (float): Max distance of geodesic measure - Returns: - surface (vtkPolyData): Processed surface model with info on cell specific target edge length - """ + Determines the target edge length for each cell on the surface, including + potential refinement or coarsening of certain user specified areas. + Level of refinement/coarseness is determined based on user selected region and the geodesic distance from it. 
+ Args: + surface (vtkPolyData): Input surface model + region_center (list): Point representing region to refine + save_path (str): Location to store processed surface + edge_length (float): Target edge length + max_distance (float): Max distance of geodesic measure + Returns: + surface (vtkPolyData): Processed surface model with info on cell specific target edge length + """ # Define refine region point ID locator = get_vtk_point_locator(surface) @@ -1040,7 +1075,7 @@ def dist_sphere_geodesic(surface, region_center, max_distance, save_path, edge_l dist_array = geodesic_distance.GetPointData().GetArray(dijkstraArrayName) for i in range(N): dist = dist_array.GetComponent(i, 0) / max_distance - newDist = 1 / 3 * edge_length * (1 + 2 * dist ** 3) + newDist = 1 / 3 * edge_length * (1 + 2 * dist**3) dist_array.SetComponent(i, 0, newDist) # Set element size based on geodesic distance diff --git a/src/vampy/automatedPreprocessing/repair_tools.py b/src/vampy/automatedPreprocessing/repair_tools.py index f1444419..2adf6822 100755 --- a/src/vampy/automatedPreprocessing/repair_tools.py +++ b/src/vampy/automatedPreprocessing/repair_tools.py @@ -36,7 +36,9 @@ def find_and_delete_nan_triangles(surface): killThisTriangle = False nPointsForThisCell = surface.GetCell(i).GetPoints().GetNumberOfPoints() if nPointsForThisCell > 3: - print("> WARNING: found Cell with more than 3 points: there is more than triangles.") + print( + "> WARNING: found Cell with more than 3 points: there is more than triangles." + ) for j in range(0, nPointsForThisCell): x = [0.0, 0.0, 0.0] surface.GetCell(i).GetPoints().GetPoint(j, x) @@ -77,4 +79,4 @@ def clean_surface(surface): print("> Done.") print("> ") - return (outputPolyData) + return outputPolyData diff --git a/src/vampy/automatedPreprocessing/visualize.py b/src/vampy/automatedPreprocessing/visualize.py index 6d539a78..0b3f820f 100644 --- a/src/vampy/automatedPreprocessing/visualize.py +++ b/src/vampy/automatedPreprocessing/visualize.py @@ -5,7 +5,9 @@ version = vtk.vtkVersion().GetVTKMajorVersion() -def visualize_model(network_elements, probe_points, output_surface, mean_inflow_rate): # pragma: no cover +def visualize_model( + network_elements, probe_points, output_surface, mean_inflow_rate +): # pragma: no cover """ Visualize surface model / mesh with distributed flow rates at each inlet/outlet in an interactive VTK window. 
diff --git a/src/vampy/automatedPreprocessing/vmtk_pointselector.py b/src/vampy/automatedPreprocessing/vmtk_pointselector.py index a2872a71..fa728b16 100644 --- a/src/vampy/automatedPreprocessing/vmtk_pointselector.py +++ b/src/vampy/automatedPreprocessing/vmtk_pointselector.py @@ -16,7 +16,7 @@ def __init__(self, guiText=""): self.text.SetDisplayPosition(10, 2) -class vmtkSeedSelector(): # pragma: no cover +class vmtkSeedSelector: # pragma: no cover def __init__(self): self._Surface = None self._SeedIds = None diff --git a/src/vampy/simulation/Artery.py b/src/vampy/simulation/Artery.py index 7899c5f0..ee477828 100755 --- a/src/vampy/simulation/Artery.py +++ b/src/vampy/simulation/Artery.py @@ -4,16 +4,24 @@ from pprint import pprint import numpy as np -from dolfin import set_log_level, MPI +from dolfin import MPI, set_log_level from vampy.simulation.Probe import Probes # type: ignore -from vampy.simulation.Womersley import make_womersley_bcs, compute_boundary_geometry_acrn -from vampy.simulation.simulation_common import get_file_paths, store_u_mean, print_mesh_information, \ - store_velocity_and_pressure_h5, dump_probes +from vampy.simulation.simulation_common import ( + dump_probes, + get_file_paths, + print_mesh_information, + store_u_mean, + store_velocity_and_pressure_h5, +) +from vampy.simulation.Womersley import ( + compute_boundary_geometry_acrn, + make_womersley_bcs, +) # Check for oasis and oasismove -package_name_oasis = 'oasis' -package_name_oasismove = 'oasismove' +package_name_oasis = "oasis" +package_name_oasismove = "oasismove" oasis_exists = importlib.util.find_spec(package_name_oasis) oasismove_exists = importlib.util.find_spec(package_name_oasismove) if oasismove_exists: @@ -140,11 +148,14 @@ def create_bcs( Q_mean * Q_ ) # Specific flow rate = Normalized flow wave form * Prescribed flow rate t_values *= 1000 # Scale time in normalised flow wave form to [ms] - _, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn(mesh, id_in[0], boundary) + _, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn( + mesh, id_in[0], boundary + ) # Create Womersley boundary condition at inlet - inlet = make_womersley_bcs(t_values, Q_values, nu, tmp_center, tmp_radius, tmp_normal, - V.ufl_element()) + inlet = make_womersley_bcs( + t_values, Q_values, nu, tmp_center, tmp_radius, tmp_normal, V.ufl_element() + ) NS_expressions["inlet"] = inlet # Initialize inlet expressions with initial time @@ -169,7 +180,9 @@ def create_bcs( bc_p.append(bc) NS_expressions[ID] = outflow if MPI.rank(comm) == 0: - print(f"Boundary ID={ID}, pressure: {p_initial:.5f}, area fraction: {area_ratio[i]:0.5f}") + print( + f"Boundary ID={ID}, pressure: {p_initial:.5f}, area fraction: {area_ratio[i]:0.5f}" + ) # No slip condition at wall wall = Constant(0.0) @@ -208,7 +221,7 @@ def pre_solve_hook( n = FacetNormal(mesh) eval_dict = {} rel_path = mesh_path.split(".xml")[0] + "_probe_point.json" - with open(rel_path, 'r') as infile: + with open(rel_path, "r") as infile: probe_points = np.array(json.load(infile)) # Store points file in checkpoint @@ -298,11 +311,15 @@ def temporal_hook( diam_inlet = np.sqrt(4 * area_inlet[0] / np.pi) Re = U_mean * diam_inlet / nu print("=" * 10, "Time step " + str(tstep), "=" * 10) - print(f"Sum of Q_out = {sum(Q_outs):.4f}, Q_in = {Q_in:.4f}, mean velocity (inlet): {U_mean:.4f}, " + - f"Reynolds number (inlet): {Re:.4f}") + print( + f"Sum of Q_out = {sum(Q_outs):.4f}, Q_in = {Q_in:.4f}, mean velocity (inlet): {U_mean:.4f}, " + + f"Reynolds number (inlet): 
{Re:.4f}" + ) for i, out_id in enumerate(id_out): - print(f"For outlet with boundary ID={out_id}: target flow rate: {Q_ideals[i]:.4f} mL/s, " + - f"computed flow rate: {Q_outs[i]:.4f} mL/s, pressure updated to: {NS_expressions[out_id].p:.4f}") + print( + f"For outlet with boundary ID={out_id}: target flow rate: {Q_ideals[i]:.4f} mL/s, " + + f"computed flow rate: {Q_outs[i]:.4f} mL/s, pressure updated to: {NS_expressions[out_id].p:.4f}" + ) print() # Sample velocity and pressure in points/probes @@ -317,7 +334,9 @@ def temporal_hook( # Save velocity and pressure for post-processing if tstep % save_solution_frequency == 0 and tstep >= save_solution_at_tstep: - store_velocity_and_pressure_h5(NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2) + store_velocity_and_pressure_h5( + NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2 + ) # Oasis hook called after the simulation has finished diff --git a/src/vampy/simulation/Atrium.py b/src/vampy/simulation/Atrium.py index 29c48574..5d5c29c8 100644 --- a/src/vampy/simulation/Atrium.py +++ b/src/vampy/simulation/Atrium.py @@ -4,16 +4,24 @@ from pprint import pprint import numpy as np -from dolfin import set_log_level, MPI +from dolfin import MPI, set_log_level from vampy.simulation.Probe import Probes # type: ignore -from vampy.simulation.Womersley import make_womersley_bcs, compute_boundary_geometry_acrn -from vampy.simulation.simulation_common import store_u_mean, get_file_paths, print_mesh_information, \ - store_velocity_and_pressure_h5, dump_probes +from vampy.simulation.simulation_common import ( + dump_probes, + get_file_paths, + print_mesh_information, + store_u_mean, + store_velocity_and_pressure_h5, +) +from vampy.simulation.Womersley import ( + compute_boundary_geometry_acrn, + make_womersley_bcs, +) # Check for oasis and oasismove -package_name_oasis = 'oasis' -package_name_oasismove = 'oasismove' +package_name_oasis = "oasis" +package_name_oasismove = "oasismove" oasis_exists = importlib.util.find_spec(package_name_oasis) oasismove_exists = importlib.util.find_spec(package_name_oasismove) if oasismove_exists: @@ -34,7 +42,7 @@ def problem_parameters( scalar_components, Schmidt, NS_expressions, - **NS_namespace + **NS_namespace, ): """ Problem file for running CFD simulation in left atrial models consisting of arbitrary number of pulmonary veins (PV) @@ -143,7 +151,9 @@ def create_bcs( Q_scaled = Q_values * tmp_area / area_total # Create Womersley boundary condition at inlet - inlet = make_womersley_bcs(t_values, Q_scaled, nu, tmp_center, tmp_radius, tmp_normal, V.ufl_element()) + inlet = make_womersley_bcs( + t_values, Q_scaled, nu, tmp_center, tmp_radius, tmp_normal, V.ufl_element() + ) NS_expressions[f"inlet_{ID}"] = inlet # Initial condition @@ -154,7 +164,10 @@ def create_bcs( # Create inlet boundary conditions bc_inlets = {} for ID in id_in: - bc_inlet = [DirichletBC(V, NS_expressions[f"inlet_{ID}"][i], boundary, ID) for i in range(3)] + bc_inlet = [ + DirichletBC(V, NS_expressions[f"inlet_{ID}"][i], boundary, ID) + for i in range(3) + ] bc_inlets[ID] = bc_inlet # Set outlet boundary conditions, assuming one outlet (Mitral Valve) @@ -182,7 +195,7 @@ def pre_solve_hook( newfolder, velocity_degree, restart_folder, - **NS_namespace + **NS_namespace, ): # Extract diameter at mitral valve info_path = mesh_path.split(".xml")[0] + "_info.json" @@ -196,7 +209,7 @@ def pre_solve_hook( n = FacetNormal(mesh) eval_dict = {} rel_path = mesh_path.split(".xml")[0] + "_probe_point.json" - with open(rel_path, 'r') as infile: + with 
open(rel_path, "r") as infile: probe_points = np.array(json.load(infile)) # Store points file in checkpoint @@ -263,7 +276,7 @@ def temporal_hook( u_mean0, u_mean1, u_mean2, - **NS_namespace + **NS_namespace, ): # Update inlet condition for ID in id_in: @@ -313,7 +326,9 @@ def temporal_hook( # Save velocity and pressure for post-processing if tstep % save_solution_frequency == 0 and tstep >= save_solution_at_tstep: - store_velocity_and_pressure_h5(NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2) + store_velocity_and_pressure_h5( + NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2 + ) # Oasis hook called after the simulation has finished @@ -326,7 +341,7 @@ def theend_hook( dt, save_solution_at_tstep, save_solution_frequency, - **NS_namespace + **NS_namespace, ): store_u_mean( T, diff --git a/src/vampy/simulation/MovingAtrium.py b/src/vampy/simulation/MovingAtrium.py index f1fc69e0..687642e7 100644 --- a/src/vampy/simulation/MovingAtrium.py +++ b/src/vampy/simulation/MovingAtrium.py @@ -1,23 +1,33 @@ import json import pickle from os import makedirs -from dolfin import set_log_level, UserExpression +from dolfin import UserExpression, set_log_level from oasismove.problems.NSfracStep import * from oasismove.problems.NSfracStep.MovingCommon import get_visualization_writers -from scipy.interpolate import splrep, splev +from scipy.interpolate import splev, splrep from scipy.spatial import KDTree from vampy.simulation.Probe import Probes # type: ignore -from vampy.simulation.Womersley import make_womersley_bcs, compute_boundary_geometry_acrn -from vampy.simulation.simulation_common import store_u_mean, get_file_paths, print_mesh_information, \ - store_velocity_and_pressure_h5, dump_probes +from vampy.simulation.simulation_common import ( + dump_probes, + get_file_paths, + print_mesh_information, + store_u_mean, + store_velocity_and_pressure_h5, +) +from vampy.simulation.Womersley import ( + compute_boundary_geometry_acrn, + make_womersley_bcs, +) # FEniCS specific command to control the desired level of logging, here set to critical errors set_log_level(50) -def problem_parameters(commandline_kwargs, scalar_components, NS_parameters, **NS_namespace): +def problem_parameters( + commandline_kwargs, scalar_components, NS_parameters, **NS_namespace +): """ Problem file for running CFD simulation in left atrial models consisting of arbitrary number of pulmonary veins (PV) (normally 3 to 7), and one outlet being the mitral valve. 
A Womersley velocity profile is applied at the inlets, @@ -36,10 +46,10 @@ def problem_parameters(commandline_kwargs, scalar_components, NS_parameters, **N if "restart_folder" in commandline_kwargs.keys(): restart_folder = commandline_kwargs["restart_folder"] mesh_path = commandline_kwargs["mesh_path"] - f = open(path.join(restart_folder, 'params.dat'), 'rb') + f = open(path.join(restart_folder, "params.dat"), "rb") NS_parameters.update(pickle.load(f)) - NS_parameters['restart_folder'] = restart_folder - NS_parameters['mesh_path'] = mesh_path + NS_parameters["restart_folder"] = restart_folder + NS_parameters["mesh_path"] = mesh_path globals().update(NS_parameters) else: # Override some problem specific parameters @@ -84,10 +94,12 @@ def problem_parameters(commandline_kwargs, scalar_components, NS_parameters, **N velocity_degree=1, pressure_degree=1, use_krylov_solvers=True, - krylov_solvers={'monitor_convergence': False, - 'report': False, - 'relative_tolerance': 1e-8, - 'absolute_tolerance': 1e-8} + krylov_solvers={ + "monitor_convergence": False, + "report": False, + "relative_tolerance": 1e-8, + "absolute_tolerance": 1e-8, + }, ) if track_blood: @@ -137,7 +149,7 @@ def eval(self, _, x): self.counter += 1 _, index = self.tree.query(x) # FIXME: Set spline parameter objectively - s = 1E-2 + s = 1e-2 x_ = splrep(self.time, self.points[index, 0, :], s=s, per=True) y_ = splrep(self.time, self.points[index, 1, :], s=s, per=True) z_ = splrep(self.time, self.points[index, 2, :], s=s, per=True) @@ -158,20 +170,37 @@ def __init__(self, t, motion, max_counter, direction, cycle, **kwargs): def eval(self, values, _): self.counter += 1 # No motion for inlet/outlet - values[:] = splev(self.t % self.cycle, self.motion[self.counter][self.direction], der=1) + values[:] = splev( + self.t % self.cycle, self.motion[self.counter][self.direction], der=1 + ) if self.counter == self.max_counter: self.counter = -1 -def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, mesh, mesh_path, nu, t, - V, Q, id_in, id_out, track_blood, **NS_namespace): +def create_bcs( + NS_expressions, + dynamic_mesh, + x_, + cardiac_cycle, + backflow_facets, + mesh, + mesh_path, + nu, + t, + V, + Q, + id_in, + id_out, + track_blood, + **NS_namespace, +): if mesh_path.endswith(".xml") or mesh_path.endswith(".xml.gz"): mesh_filename = ".xml" elif mesh_path.endswith(".h5"): mesh_filename = ".h5" rank = MPI.rank(MPI.comm_world) - coords = ['x', 'y', 'z'] + coords = ["x", "y", "z"] # Variables needed during the simulation boundary = MeshFunction("size_t", mesh, mesh.geometry().dim() - 1, mesh.domains()) if mesh_path.endswith(".h5"): @@ -183,12 +212,12 @@ def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, with open(info_path) as f: info = json.load(f) - id_in[:] = info['inlet_ids'] - id_out[:] = info['outlet_id'] + id_in[:] = info["inlet_ids"] + id_out[:] = info["outlet_id"] id_wall = min(id_in + id_out) - 1 # Apply backflow at the outlet (Mitral valve) - backflow_facets[:] = info['outlet_id'] + backflow_facets[:] = info["outlet_id"] # Find corresponding areas ds_new = Measure("ds", domain=mesh, subdomain_data=boundary) @@ -205,18 +234,22 @@ def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, # Use default flow rate flow_rate_path = path.join(path.dirname(path.abspath(__file__)), "PV_values") # Load normalized time and flow rate values - Q_mean = info['mean_flow_rate'] + Q_mean = info["mean_flow_rate"] t_values, Q_ = np.loadtxt(flow_rate_path).T Q_ *= Q_mean 
t_values *= 1000 # Scale time in normalised flow wave form to [ms] for ID in id_in: - tmp_area, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn(mesh, ID, boundary) + tmp_area, tmp_center, tmp_radius, tmp_normal = compute_boundary_geometry_acrn( + mesh, ID, boundary + ) Q_scaled = Q_ * tmp_area / area_total # Create Womersley boundary condition at inlets - inlet = make_womersley_bcs(t_values, Q_scaled, nu, tmp_center, tmp_radius, tmp_normal, V.ufl_element()) + inlet = make_womersley_bcs( + t_values, Q_scaled, nu, tmp_center, tmp_radius, tmp_normal, V.ufl_element() + ) NS_expressions[f"inlet_{ID}"] = inlet # Initial condition @@ -227,7 +260,10 @@ def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, # Create inlet boundary conditions bc_inlets = {} for ID in id_in: - bc_inlet = [DirichletBC(V, NS_expressions[f"inlet_{ID}"][i], boundary, ID) for i in range(3)] + bc_inlet = [ + DirichletBC(V, NS_expressions[f"inlet_{ID}"][i], boundary, ID) + for i in range(3) + ] bc_inlets[ID] = bc_inlet # Set outlet boundary conditions, assuming one outlet (Mitral Valve) @@ -238,12 +274,16 @@ def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, # Moving walls if rank == 0: print("Loading displacement points") - points = np.load(mesh_path.split(mesh_filename)[0] + "_points.np", allow_pickle=True) + points = np.load( + mesh_path.split(mesh_filename)[0] + "_points.np", allow_pickle=True + ) if rank == 0: print("Creating splines for displacement") # Define wall movement - motion_mapping = MeshMotionMapping(points, cardiac_cycle, element=V.ufl_element()) + motion_mapping = MeshMotionMapping( + points, cardiac_cycle, element=V.ufl_element() + ) bc_tmp = DirichletBC(V, motion_mapping, boundary, id_wall) bc_tmp.apply(x_["u0"]) x_["u0"].zero() @@ -259,10 +299,20 @@ def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, print("Creating wall boundary conditions") for i, coord in enumerate(coords): - wall_ = WallMotion(t, wall_motion, wall_counter_max, i, cardiac_cycle, element=V.ufl_element()) + wall_ = WallMotion( + t, + wall_motion, + wall_counter_max, + i, + cardiac_cycle, + element=V.ufl_element(), + ) NS_expressions["wall_%s" % coord] = wall_ - bc_wall = [DirichletBC(V, NS_expressions["wall_%s" % coord], boundary, id_wall) for coord in coords] + bc_wall = [ + DirichletBC(V, NS_expressions["wall_%s" % coord], boundary, id_wall) + for coord in coords + ] else: # No slip on walls bc_wall = [DirichletBC(V, Constant(0.0), boundary, id_wall)] * len(coords) @@ -280,9 +330,23 @@ def create_bcs(NS_expressions, dynamic_mesh, x_, cardiac_cycle, backflow_facets, return dict(u0=bc_u0, u1=bc_u1, u2=bc_u2, p=bc_p) -def pre_solve_hook(u_components, id_in, id_out, dynamic_mesh, V, Q, cardiac_cycle, dt, - save_solution_after_cycle, mesh_path, mesh, newfolder, velocity_degree, - restart_folder, **NS_namespace): +def pre_solve_hook( + u_components, + id_in, + id_out, + dynamic_mesh, + V, + Q, + cardiac_cycle, + dt, + save_solution_after_cycle, + mesh_path, + mesh, + newfolder, + velocity_degree, + restart_folder, + **NS_namespace, +): id_wall = min(id_in + id_out) - 1 # Extract diameter at mitral valve if mesh_path.endswith(".xml") or mesh_path.endswith(".xml.gz"): @@ -294,14 +358,14 @@ def pre_solve_hook(u_components, id_in, id_out, dynamic_mesh, V, Q, cardiac_cycl with open(info_path) as f: info = json.load(f) - outlet_area = info['outlet_area'] + outlet_area = info["outlet_area"] D_mitral = np.sqrt(4 * outlet_area / np.pi) # Create 
point for evaluation n = FacetNormal(mesh) eval_dict = {} rel_path = mesh_path.split(mesh_filename)[0] + "_probe_point.json" - with open(rel_path, 'r') as infile: + with open(rel_path, "r") as infile: probe_points = np.array(json.load(infile)) # Store points file in checkpoint @@ -315,7 +379,7 @@ def pre_solve_hook(u_components, id_in, id_out, dynamic_mesh, V, Q, cardiac_cycl if restart_folder is None: # Get files to store results - files = get_file_paths(newfolder, ['brt', 'd']) + files = get_file_paths(newfolder, ["brt", "d"]) NS_parameters.update(dict(files=files)) else: files = NS_namespace["files"] @@ -331,7 +395,7 @@ def pre_solve_hook(u_components, id_in, id_out, dynamic_mesh, V, Q, cardiac_cycl makedirs(probes_folder) # Define xdmf writers - viz_U, viz_blood = get_visualization_writers(newfolder, ['velocity', 'blood']) + viz_U, viz_blood = get_visualization_writers(newfolder, ["velocity", "blood"]) # Extract dof map and coordinates coordinates = mesh.coordinates() @@ -374,10 +438,27 @@ def pre_solve_hook(u_components, id_in, id_out, dynamic_mesh, V, Q, cardiac_cycl else: bc_mesh = {} - return dict(outlet_area=outlet_area, id_wall=id_wall, D_mitral=D_mitral, n=n, eval_dict=eval_dict, U=U, D=D, - u_mean=u_mean, u_mean0=u_mean0, u_mean1=u_mean1, u_mean2=u_mean2, boundary=boundary, bc_mesh=bc_mesh, - coordinates=coordinates, viz_U=viz_U, save_solution_at_tstep=save_solution_at_tstep, dof_map=dof_map, - probes_folder=probes_folder, viz_blood=viz_blood) + return dict( + outlet_area=outlet_area, + id_wall=id_wall, + D_mitral=D_mitral, + n=n, + eval_dict=eval_dict, + U=U, + D=D, + u_mean=u_mean, + u_mean0=u_mean0, + u_mean1=u_mean1, + u_mean2=u_mean2, + boundary=boundary, + bc_mesh=bc_mesh, + coordinates=coordinates, + viz_U=viz_U, + save_solution_at_tstep=save_solution_at_tstep, + dof_map=dof_map, + probes_folder=probes_folder, + viz_blood=viz_blood, + ) def update_boundary_conditions(t, dynamic_mesh, NS_expressions, id_in, **NS_namespace): @@ -392,10 +473,42 @@ def update_boundary_conditions(t, dynamic_mesh, NS_expressions, id_in, **NS_name NS_expressions[f"wall_{coord}"].t = t -def temporal_hook(mesh, id_wall, id_out, cardiac_cycle, dt, t, save_solution_frequency, u_, id_in, tstep, newfolder, - eval_dict, dump_probe_frequency, p_, save_solution_at_tstep, nu, D_mitral, U, u_mean0, u_mean1, - u_mean2, save_flow_metrics_frequency, save_volume_frequency, save_solution_frequency_xdmf, viz_U, - boundary, outlet_area, q_, w_, viz_blood, track_blood, D, du_, **NS_namespace): +def temporal_hook( + mesh, + id_wall, + id_out, + cardiac_cycle, + dt, + t, + save_solution_frequency, + u_, + id_in, + tstep, + newfolder, + eval_dict, + dump_probe_frequency, + p_, + save_solution_at_tstep, + nu, + D_mitral, + U, + u_mean0, + u_mean1, + u_mean2, + save_flow_metrics_frequency, + save_volume_frequency, + save_solution_frequency_xdmf, + viz_U, + boundary, + outlet_area, + q_, + w_, + viz_blood, + track_blood, + D, + du_, + **NS_namespace, +): if tstep % save_volume_frequency == 0: compute_volume(mesh, t, newfolder) @@ -405,12 +518,29 @@ def temporal_hook(mesh, id_wall, id_out, cardiac_cycle, dt, t, save_solution_fre viz_U.write(U, t) if track_blood: - viz_blood.write(q_['blood'], t) + viz_blood.write(q_["blood"], t) if tstep % save_flow_metrics_frequency == 0: h = mesh.hmin() - compute_flow_quantities(u_, D_mitral, nu, mesh, t, tstep, dt, h, outlet_area, boundary, id_out, id_in, id_wall, - period=cardiac_cycle, newfolder=newfolder, dynamic_mesh=False, write_to_file=True) + compute_flow_quantities( + u_, + 
D_mitral, + nu, + mesh, + t, + tstep, + dt, + h, + outlet_area, + boundary, + id_out, + id_in, + id_wall, + period=cardiac_cycle, + newfolder=newfolder, + dynamic_mesh=False, + write_to_file=True, + ) # Sample velocity and pressure in points/probes eval_dict["centerline_u_x_probes"](u_[0]) @@ -424,11 +554,31 @@ def temporal_hook(mesh, id_wall, id_out, cardiac_cycle, dt, t, save_solution_fre # Save velocity and pressure for post-processing if tstep % save_solution_frequency == 0 and tstep >= save_solution_at_tstep: - store_velocity_and_pressure_h5(NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2, D, du_) + store_velocity_and_pressure_h5( + NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2, D, du_ + ) # Oasis hook called after the simulation has finished -def theend_hook(u_mean, u_mean0, u_mean1, u_mean2, T, dt, save_solution_at_tstep, save_solution_frequency, - **NS_namespace): - store_u_mean(T, dt, save_solution_at_tstep, save_solution_frequency, u_mean, u_mean0, u_mean1, u_mean2, - NS_parameters) +def theend_hook( + u_mean, + u_mean0, + u_mean1, + u_mean2, + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + **NS_namespace, +): + store_u_mean( + T, + dt, + save_solution_at_tstep, + save_solution_frequency, + u_mean, + u_mean0, + u_mean1, + u_mean2, + NS_parameters, + ) diff --git a/src/vampy/simulation/Womersley.py b/src/vampy/simulation/Womersley.py index c4da6705..5f7f03a5 100755 --- a/src/vampy/simulation/Womersley.py +++ b/src/vampy/simulation/Womersley.py @@ -20,7 +20,14 @@ import math import numpy as np -from dolfin import UserExpression, assemble, Constant, FacetNormal, SpatialCoordinate, Measure +from dolfin import ( + Constant, + FacetNormal, + Measure, + SpatialCoordinate, + UserExpression, + assemble, +) from scipy.integrate import simpson from scipy.interpolate import UnivariateSpline from scipy.special import jn @@ -162,8 +169,8 @@ def _precompute_r_dependent_coeffs(self, y): pir2 = np.pi * self.radius**2 # Compute intermediate terms for womersley function r_dependent_coeffs = np.zeros(self.N, dtype=np.complex_) - if hasattr(self, 'Vn'): - r_dependent_coeffs[0] = self.Vn[0] * (1 - y ** 2) + if hasattr(self, "Vn"): + r_dependent_coeffs[0] = self.Vn[0] * (1 - y**2) for n in self.ns: jn0b = self.jn0_betas[n] r_dependent_coeffs[n] = ( @@ -210,8 +217,20 @@ def eval(self, value, x): value[0] = -self.normal_component * self.scale_value * wom -def make_womersley_bcs(t, Q, nu, center, radius, normal, element, coeffstype="Q", - N=1001, num_fourier_coefficients=20, Cn=None, **NS_namespace): +def make_womersley_bcs( + t, + Q, + nu, + center, + radius, + normal, + element, + coeffstype="Q", + N=1001, + num_fourier_coefficients=20, + Cn=None, + **NS_namespace +): """ Generate a list of expressions for the components of a Womersley profile. 
Users can specify either the flow rate or fourier coefficients of the flow rate @@ -233,7 +252,9 @@ def make_womersley_bcs(t, Q, nu, center, radius, normal, element, coeffstype="Q" # Compute fourier coefficients of transient profile timedisc = np.linspace(0, period, N) - Cn = fourier_coefficients(timedisc, transient_profile, period, num_fourier_coefficients) + Cn = fourier_coefficients( + timedisc, transient_profile, period, num_fourier_coefficients + ) # Create Expressions for each direction expressions = [] diff --git a/src/vampy/simulation/__init__.py b/src/vampy/simulation/__init__.py index 3e86bcde..9fc6aaca 100644 --- a/src/vampy/simulation/__init__.py +++ b/src/vampy/simulation/__init__.py @@ -1,16 +1,11 @@ try: - from . import Artery - from . import Atrium - from . import simulation_common - from . import Womersley - from . import Probe + from . import Artery, Atrium, Probe, Womersley, simulation_common except ModuleNotFoundError: print("WARNING: Oasis is not installed, running CFD is not available") try: - from . import simulation_common - from . import Womersley - from . import Probe - from . import MovingAtrium + from . import MovingAtrium, Probe, Womersley, simulation_common except ModuleNotFoundError: - print("WARNING: OasisMove is not installed, running moving domain CFD is not available") + print( + "WARNING: OasisMove is not installed, running moving domain CFD is not available" + ) diff --git a/src/vampy/simulation/simulation_common.py b/src/vampy/simulation/simulation_common.py index 9b9bcbb9..4f13e2b3 100644 --- a/src/vampy/simulation/simulation_common.py +++ b/src/vampy/simulation/simulation_common.py @@ -64,9 +64,15 @@ def print_mesh_information(mesh): if MPI.rank(MPI.comm_world) == 0: print("=== Mesh information ===") - print(f"X range: {min(xmin)} to {max(xmax)} (delta: {max(xmax) - min(xmin):.4f})") - print(f"Y range: {min(ymin)} to {max(ymax)} (delta: {max(ymax) - min(ymin):.4f})") - print(f"Z range: {min(zmin)} to {max(zmax)} (delta: {max(zmax) - min(zmin):.4f})") + print( + f"X range: {min(xmin)} to {max(xmax)} (delta: {max(xmax) - min(xmin):.4f})" + ) + print( + f"Y range: {min(ymin)} to {max(ymax)} (delta: {max(ymax) - min(ymin):.4f})" + ) + print( + f"Z range: {min(zmin)} to {max(zmax)} (delta: {max(zmax) - min(zmin):.4f})" + ) print(f"Number of cells: {sum(num_cells)}") print(f"Number of cells per processor: {int(np.mean(num_cells))}") print(f"Number of edges: {sum(num_edges)}") @@ -119,7 +125,9 @@ def store_u_mean( u_mean_file.write(u_mean, "u_mean") -def store_velocity_and_pressure_h5(NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2, D=None, du_=None): +def store_velocity_and_pressure_h5( + NS_parameters, U, p_, tstep, u_, u_mean0, u_mean1, u_mean2, D=None, du_=None +): """ Store the velocity and pressure values to an HDF5 file. 
@@ -143,8 +151,8 @@ def store_velocity_and_pressure_h5(NS_parameters, U, p_, tstep, u_, u_mean0, u_m assign(U.sub(i), u_[i]) # Get save paths - p_path = NS_parameters['files']['p'] - u_path = NS_parameters['files']['u'] + p_path = NS_parameters["files"]["p"] + u_path = NS_parameters["files"]["u"] file_mode = "w" if not path.exists(p_path) else "a" # Save pressure @@ -166,7 +174,7 @@ def store_velocity_and_pressure_h5(NS_parameters, U, p_, tstep, u_, u_mean0, u_m assign(D.sub(i), du_[i]) # Save path to deformation - d_path = NS_parameters['files']['d'] + d_path = NS_parameters["files"]["d"] # Save deformation with HDF5File(MPI.comm_world, d_path, file_mode=file_mode) as viz_d: @@ -193,7 +201,12 @@ def dump_probes(eval_dict, newfolder, tstep): makedirs(filepath) # Extract probe arrays for each variable - variables = ["centerline_u_x_probes", "centerline_u_y_probes", "centerline_u_z_probes", "centerline_p_probes"] + variables = [ + "centerline_u_x_probes", + "centerline_u_y_probes", + "centerline_u_z_probes", + "centerline_p_probes", + ] arrs = {var: eval_dict[var].array() for var in variables} # Dump stats on the master process @@ -202,7 +215,7 @@ def dump_probes(eval_dict, newfolder, tstep): "centerline_u_x_probes": f"u_x_{tstep}.probes", "centerline_u_y_probes": f"u_y_{tstep}.probes", "centerline_u_z_probes": f"u_z_{tstep}.probes", - "centerline_p_probes": f"p_{tstep}.probes" + "centerline_p_probes": f"p_{tstep}.probes", } for var, arr in arrs.items(): arr.dump(path.join(filepath, filenames[var])) diff --git a/tests/test_compute_velocity_and_pressure.py b/tests/test_compute_velocity_and_pressure.py index 9e7b4b5e..10981349 100644 --- a/tests/test_compute_velocity_and_pressure.py +++ b/tests/test_compute_velocity_and_pressure.py @@ -15,7 +15,9 @@ def test_compute_velocity_and_pressure(): step = 1 # Run post-processing - compute_velocity_and_pressure(results_path, dt, save_frequency, velocity_degree, pressure_degree, step) + compute_velocity_and_pressure( + results_path, dt, save_frequency, velocity_degree, pressure_degree, step + ) # Check that output files exist metric_names = ["velocity", "pressure"] diff --git a/tests/test_pre_processing.py b/tests/test_pre_processing.py index 4e28e717..817c6c07 100644 --- a/tests/test_pre_processing.py +++ b/tests/test_pre_processing.py @@ -54,15 +54,18 @@ def test_mesh_model_with_one_inlet_and_two_outlets(): model_path = "tests/test_data/artery/artery.stl" # Get default input parameters common_input = read_command_line(model_path) - common_input.update(dict(meshing_method="curvature", - smoothing_method="taubin", - refine_region=False, - coarsening_factor=1.9, - visualize=False, - compress_mesh=False, - outlet_flow_extension_length=1, - inlet_flow_extension_length=1 - )) + common_input.update( + dict( + meshing_method="curvature", + smoothing_method="taubin", + refine_region=False, + coarsening_factor=1.9, + visualize=False, + compress_mesh=False, + outlet_flow_extension_length=1, + inlet_flow_extension_length=1, + ) + ) # Run pre processing run_pre_processing(**common_input) @@ -90,14 +93,17 @@ def test_mesh_model_with_one_inlet_and_one_outlet(): model_path = "tests/test_data/vein/vein.stl" # Get default input parameters common_input = read_command_line(model_path) - common_input.update(dict(meshing_method="diameter", - refine_region=False, - coarsening_factor=1.3, - visualize=False, - compress_mesh=False, - outlet_flow_extension_length=.5, - inlet_flow_extension_length=.5 - )) + common_input.update( + dict( + meshing_method="diameter", + 
refine_region=False, + coarsening_factor=1.3, + visualize=False, + compress_mesh=False, + outlet_flow_extension_length=0.5, + inlet_flow_extension_length=0.5, + ) + ) # Run pre processing run_pre_processing(**common_input) @@ -127,16 +133,19 @@ def test_mesh_model_with_geodesic_meshing(): # Get default input parameters common_input = read_command_line(model_path) region_point = [33.6612, 32.8443, 40.9184] - common_input.update(dict(meshing_method="geodesic", - max_geodesic_distance=2.5, - edge_length=0.5, - refine_region=True, - region_points=region_point, - visualize=False, - compress_mesh=False, - outlet_flow_extension_length=1, - inlet_flow_extension_length=1 - )) + common_input.update( + dict( + meshing_method="geodesic", + max_geodesic_distance=2.5, + edge_length=0.5, + refine_region=True, + region_points=region_point, + visualize=False, + compress_mesh=False, + outlet_flow_extension_length=1, + inlet_flow_extension_length=1, + ) + ) # Run pre processing run_pre_processing(**common_input) diff --git a/tests/test_simulation.py b/tests/test_simulation.py index a3bc6e42..58ca9856 100644 --- a/tests/test_simulation.py +++ b/tests/test_simulation.py @@ -26,8 +26,19 @@ def test_artery_problem(num_processors, save_cwd): # Navigate to the simulation directory chdir("src/vampy/simulation") - cmd = ["mpirun", "-np", f"{num_processors}", "oasismove", "NSfracStep", "solver=IPCS_ABCN", f"T={T}", f"dt={dt}", - "problem=Artery", f"mesh_path={mesh_path}", "dump_probe_frequency=10"] + cmd = [ + "mpirun", + "-np", + f"{num_processors}", + "oasismove", + "NSfracStep", + "solver=IPCS_ABCN", + f"T={T}", + f"dt={dt}", + "problem=Artery", + f"mesh_path={mesh_path}", + "dump_probe_frequency=10", + ] # Run Oasis result = subprocess.run(cmd, capture_output=True, text=True) @@ -45,7 +56,7 @@ def test_artery_problem(num_processors, save_cwd): expected_pressure_1 = 0.38403 expected_pressure_2 = 0.61597 - tol = 1E-16 + tol = 1e-16 assert abs(pressures[0] - expected_pressure_1) < tol assert abs(pressures[1] - expected_pressure_2) < tol @@ -60,7 +71,7 @@ def test_artery_problem(num_processors, save_cwd): expected_flow_rate_2 = 0.7219 # Lower tolerance in parallel due to round off errors - tol = 1E-3 + tol = 1e-3 assert abs(flow_rates[0] - expected_flow_rate_1) < tol assert abs(flow_rates[1] - expected_flow_rate_2) < tol @@ -75,8 +86,15 @@ def test_atrium_problem(save_cwd): # Navigate to the simulation directory chdir("src/vampy/simulation") - cmd = ["oasismove", "NSfracStep", 'solver=IPCS_ABCN', f"T={T}", f"dt={dt}", "problem=Atrium", - f"mesh_path={mesh_path}"] + cmd = [ + "oasismove", + "NSfracStep", + "solver=IPCS_ABCN", + f"T={T}", + f"dt={dt}", + "problem=Atrium", + f"mesh_path={mesh_path}", + ] # Run Oasis result = subprocess.run(cmd, capture_output=True, text=True) @@ -93,7 +111,7 @@ def test_atrium_problem(save_cwd): expected_max_velocity = 0.378 expected_mean_velocity = 0.171 - tol = 1E-16 + tol = 1e-16 assert abs(velocities[0] - expected_max_velocity) < tol assert abs(velocities[1] - expected_mean_velocity) < tol @@ -108,8 +126,16 @@ def test_moving_atrium_problem(save_cwd): # Navigate to the simulation directory chdir("src/vampy/simulation") - cmd = ["oasismove", "NSfracStepMove", f"T={T}", f"dt={dt}", "problem=MovingAtrium", f"mesh_path={mesh_path}", - "save_flow_metrics_frequency=10", 'track_blood=False'] + cmd = [ + "oasismove", + "NSfracStepMove", + f"T={T}", + f"dt={dt}", + "problem=MovingAtrium", + f"mesh_path={mesh_path}", + "save_flow_metrics_frequency=10", + "track_blood=False", + ] # Run Oasis 
result = subprocess.run(cmd, capture_output=True, text=True) @@ -127,7 +153,7 @@ def test_moving_atrium_problem(save_cwd): expected_max_velocity = 0.388 expected_mean_velocity = 0.163 - tol = 1E-16 + tol = 1e-16 assert abs(velocities[0] - expected_max_velocity) < tol assert abs(velocities[1] - expected_mean_velocity) < tol From 4958e9387ee8b2c0a919e5add932edb007f84ce0 Mon Sep 17 00:00:00 2001 From: Henrik Kjedsberg Date: Wed, 9 Oct 2024 22:11:21 +0200 Subject: [PATCH 23/23] Update linting script --- linting.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/linting.sh b/linting.sh index c00e8f5d..5453445c 100644 --- a/linting.sh +++ b/linting.sh @@ -1,7 +1,9 @@ #! /usr/bin/env bash +flake8 src tests + isort src tests black src tests -flake8 src tests \ No newline at end of file +mypy src tests
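
Note on the Womersley boundary conditions touched above: make_womersley_bcs builds its velocity profile from the complex Fourier coefficients Cn of the prescribed flow-rate waveform, computing them via fourier_coefficients when only Q is given. The sketch below illustrates one common convention for such coefficients; the helper name fourier_coefficients_sketch, the trapezoidal-rule quadrature, and the normalisation are illustrative assumptions and are not taken from vampy's own fourier_coefficients implementation.

import numpy as np


def fourier_coefficients_sketch(t, Q, period, num_coefficients):
    # Assumed convention: Q(t) ~= Re(sum_n Cn[n] * exp(1j * n * omega * t)),
    # with omega = 2 * pi / period, so that
    #   Cn[0] = (1 / T) * integral_0^T Q dt
    #   Cn[n] = (2 / T) * integral_0^T Q * exp(-1j * n * omega * t) dt,  n >= 1
    omega = 2.0 * np.pi / period
    Cn = np.zeros(num_coefficients, dtype=complex)
    Cn[0] = np.trapz(Q, t) / period
    for n in range(1, num_coefficients):
        Cn[n] = 2.0 / period * np.trapz(Q * np.exp(-1j * n * omega * t), t)
    return Cn


# Quick check with a synthetic single-harmonic waveform over one period:
#   t = np.linspace(0, 1.0, 1001)
#   Q = 5.0 + 2.0 * np.cos(2 * np.pi * t)
#   Cn = fourier_coefficients_sketch(t, Q, period=1.0, num_coefficients=20)
# gives Cn[0] close to 5.0, Cn[1] close to 2.0, and the remaining terms near zero.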