Commit 1834e91b authored by chuckie82

Merge branch 'development' of http://gitlab.osti.gov/mtip/spinifel into development

parents feb29ff5 0cf13878
@@ -8,8 +8,8 @@ use_callmonitor = false
[data]
in_dir = "${CFS}/m2859/data/3iyf/clean"
out_dir = "${SCRATCH}/spinifel_output"
name = "3iyf_dp005_128x128pixels_500k.h5"
out_dir = "${SCRATCH}/spinifel_output/"
name = "3iyf_ariana_20k_hires.h5"
[detector]
shape = [1, 128, 128]
@@ -21,7 +21,7 @@ ps_eb_nodes = 1
ps_srv_nodes = 0
[runtime]
N_images_per_rank = 1000
N_images_per_rank = 2500
use_cuda = false
use_cufinufft = false
use_cupy = false
@@ -32,11 +32,11 @@ chk_convergence = false
devices_per_node = 1
[algorithm]
N_generations = 20
N_generations = 10
nER = 50
nHIO = 50
beta = 0.9
cutoff = 0.1
nHIO = 25
beta = 0.3
cutoff = 0.05
N_phase_loops = 10
N_clipping = 0
N_binning = 0
......
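The [algorithm] changes above tune the iterative phasing loop (nER error-reduction and nHIO hybrid input-output iterations per phase loop, with beta as the HIO feedback strength). As a point of reference only, here is a generic NumPy sketch of one ER/HIO update of the kind these parameters typically control; the names (rho, amplitudes, support) are illustrative and this is not spinifel's phaser code.

# Generic single-iteration ER/HIO update sketch (NumPy), illustrating how a
# beta feedback parameter like the one configured above is typically used.
import numpy as np

def phase_step(rho, amplitudes, support, beta=0.3, hio=True):
    """Enforce measured Fourier magnitudes, then the real-space support."""
    F = np.fft.fftn(rho)
    F_proj = amplitudes * np.exp(1j * np.angle(F))   # keep phases, impose magnitudes
    rho_prime = np.fft.ifftn(F_proj).real
    if hio:
        # hybrid input-output: feedback of strength beta outside the support
        return np.where(support, rho_prime, rho - beta * rho_prime)
    return np.where(support, rho_prime, 0.0)          # error reduction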
@@ -23,28 +23,40 @@ pushd "${root_dir}/../spinifel/sequential/"
if [[ ${target} = "cgpu"* ]]
then
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching.cu -o pyCudaKNearestNeighbors${pybind11_suffix}
orientation_matching_sp.cu -o pyCudaKNearestNeighbors_SP${pybind11_suffix}
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching_dp.cu -o pyCudaKNearestNeighbors_DP${pybind11_suffix}
elif [[ ${target} = "perlmutter"* ]]
then
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching.cu -o pyCudaKNearestNeighbors${pybind11_suffix}
orientation_matching_sp.cu -o pyCudaKNearestNeighbors_SP${pybind11_suffix}
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching_dp.cu -o pyCudaKNearestNeighbors_DP${pybind11_suffix}
elif [[ ${target} = *"tulip"* || ${target} = *"jlse"* ]]
then
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching.cu -o pyCudaKNearestNeighbors${pybind11_suffix}
orientation_matching_sp.cu -o pyCudaKNearestNeighbors_SP${pybind11_suffix}
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching_dp.cu -o pyCudaKNearestNeighbors_DP${pybind11_suffix}
elif [[ ${target} = *"summit"* || ${target} = *"ascent"* ]]
then
nvcc -O3 -shared -std=c++11 ${pybind11_inclues} \
orientation_matching.cu -o pyCudaKNearestNeighbors${pybind11_suffix}
orientation_matching_sp.cu -o pyCudaKNearestNeighbors_SP${pybind11_suffix}
nvcc -O3 -shared -std=c++11 ${pybind11_inclues} \
orientation_matching_dp.cu -o pyCudaKNearestNeighbors_DP${pybind11_suffix}
elif [[ $(hostname --fqdn) = *".spock."* ]]
then
hipcc -O3 -shared -std=c++11 -fPIC --amdgpu-target=gfx908 -DUSE_HIP ${pybind11_inclues} \
orientation_matching.cu -o pyCudaKNearestNeighbors${pybind11_suffix}
orientation_matching_sp.cu -o pyCudaKNearestNeighbors_SP${pybind11_suffix}
hipcc -O3 -shared -std=c++11 -fPIC --amdgpu-target=gfx908 -DUSE_HIP ${pybind11_inclues} \
orientation_matching_dp.cu -o pyCudaKNearestNeighbors_DP${pybind11_suffix}
else
echo "Don't recognize this target/hostname: ${target}."
echo "Falling back to Intel-style system."
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching.cu -o pyCudaKNearestNeighbors${pybind11_suffix}
orientation_matching_sp.cu -o pyCudaKNearestNeighbors_SP${pybind11_suffix}
nvcc -O3 -shared -std=c++11 --compiler-options -fPIC ${pybind11_inclues} \
orientation_matching_dp.cu -o pyCudaKNearestNeighbors_DP${pybind11_suffix}
fi
popd
......
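Each branch of the build script now compiles two pybind11 extensions, a single-precision module (pyCudaKNearestNeighbors_SP from orientation_matching_sp.cu) and a double-precision one (pyCudaKNearestNeighbors_DP from orientation_matching_dp.cu). The spinifel import logic is not part of this diff; the sketch below only illustrates, as an assumption, how a caller might pick between the two modules by dtype.

# Hypothetical precision-selection sketch; the module names come from the build
# commands above, but the selection logic itself is an illustration, not
# spinifel's actual wrapper code.
import numpy as np

def load_knn(dtype):
    if np.dtype(dtype) == np.float32:
        import pyCudaKNearestNeighbors_SP as knn   # built from orientation_matching_sp.cu
    else:
        import pyCudaKNearestNeighbors_DP as knn   # built from orientation_matching_dp.cu
    return knn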
@@ -29,10 +29,40 @@ if settings.use_cupy:
import cupy as xp
def forward(ugrid, H_, K_, L_, support, use_recip_sym, N):
"""
Compute the forward NUFFT: from a uniform to nonuniform set of points.
:param ugrid: 3d array with grid sampling
:param H_: H dimension of reciprocal space position to evaluate
:param K_: K dimension of reciprocal space position to evaluate
:param L_: L dimension of reciprocal space position to evaluate
:param support: 3d object support array
:param use_recip_sym: if True, assert that the input grid is real-valued # name seems misleading
:return nuvect: Fourier transform of ugrid sampled at the nonuniform points (H_, K_, L_)
"""
# make sure that points lie within permissible finufft domain
assert np.max(np.abs(np.array([H_, K_, L_]))) < 3*np.pi
# Check if recip symmetry is met
if use_recip_sym:
assert np.all(np.isreal(ugrid))
# Apply support if given, overwriting input array
if support is not None:
ugrid *= support
# Allocate space in memory and solve NUFFT
#nuvect = np.zeros(H_.shape, dtype=np.complex64)
#nfft.nufft3d2(H_, K_, L_, ugrid, out=nuvect, eps=1.0e-12, isign=-1)
nuvect = nufft_3d_t2(H_, K_, L_, ugrid, -1, 1e-12, N)
return nuvect
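The new forward() wraps a type-2 (uniform to nonuniform) NUFFT; the commented-out lines show the previous direct finufft call. The spinifel-internal nufft_3d_t2 helper's signature is not shown in this hunk, so the standalone sketch below uses the finufft Python package directly, assuming it is installed, purely to illustrate the underlying transform.

# Standalone type-2 NUFFT sketch with finufft (cf. the commented-out
# nfft.nufft3d2 call above); values and sizes are illustrative.
import numpy as np
import finufft

M = 32
rng = np.random.default_rng(0)
ugrid = rng.standard_normal((M, M, M)) + 0j          # uniform grid (complex for finufft)
H_ = rng.uniform(-np.pi, np.pi, 1000)                # nonuniform points, inside the
K_ = rng.uniform(-np.pi, np.pi, 1000)                # permissible domain (|x| < 3*pi)
L_ = rng.uniform(-np.pi, np.pi, 1000)

nuvect = finufft.nufft3d2(H_, K_, L_, ugrid, isign=-1, eps=1e-12)
print(nuvect.shape)                                   # (1000,)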
@profiler.intercept
@nvtx.annotate("autocorrelation.py", is_prefix=True)
def forward(ugrid, H_, K_, L_, support, M, N, recip_extent, use_recip_sym):
def forward_spinifel(ugrid, H_, K_, L_, support, M, N, recip_extent, use_recip_sym):
"""Apply the forward, NUFFT2- problem"""
# Ensure that H_, K_, and L_ have the same shape
@@ -50,11 +80,42 @@ def forward(ugrid, H_, K_, L_, support, M, N, recip_extent, use_recip_sym):
return nuvect / M**3
def adjoint(nuvect, H_, K_, L_, M, use_recip_sym=True, support=None):
"""
Compute the adjoint NUFFT: from a nonuniform to uniform set of points.
The sign is set for this to be the inverse FT.
:param nuvect: flattened data vector sampled in nonuniform space
:param H_: H dimension of reciprocal space position
:param K_: K dimension of reciprocal space position
:param L_: L dimension of reciprocal space position
:param M: cubic length of desired output array
:param support: 3d object support array
:param use_recip_sym: if True, discard imaginary component # name seems misleading
:return ugrid: Fourier transform of nuvect, sampled on a uniform grid
"""
# make sure that points lie within permissible finufft domain
assert np.max(np.abs(np.array([H_, K_, L_]))) < 3*np.pi
# Allocate space in memory and solve the NUFFT
#ugrid = np.zeros((M,)*3, dtype=np.complex64)
#nfft.nufft3d1(H_, K_, L_, nuvect, out=ugrid, eps=1.0e-15, isign=1)
ugrid = nufft_3d_t1(H_, K_, L_, nuvect, 1, 1e-12, M, M, M)
# Apply support if given
if support is not None:
ugrid *= support
# Discard imaginary component
if use_recip_sym:
ugrid = ugrid.real
return ugrid / (M**3)
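The adjoint is the companion type-1 NUFFT, mapping the nonuniform samples back onto an (M, M, M) grid, which is what the nufft_3d_t1 call above does. As with the forward sketch, the snippet below uses the finufft package directly and is only meant to show the shape of the underlying transform, not spinifel's wrapper.

# Standalone type-1 NUFFT sketch with finufft; sizes are illustrative.
import numpy as np
import finufft

M = 32
rng = np.random.default_rng(1)
H_ = rng.uniform(-np.pi, np.pi, 1000)
K_ = rng.uniform(-np.pi, np.pi, 1000)
L_ = rng.uniform(-np.pi, np.pi, 1000)
nuvect = rng.standard_normal(1000) + 1j * rng.standard_normal(1000)

ugrid = finufft.nufft3d1(H_, K_, L_, nuvect, n_modes=(M, M, M), isign=1, eps=1e-12)
print(ugrid.shape)                                    # (32, 32, 32)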
@profiler.intercept
@nvtx.annotate("autocorrelation.py", is_prefix=True)
def adjoint(nuvect, H_, K_, L_, support, M, recip_extent, use_recip_sym):
def adjoint_spinifel(nuvect, H_, K_, L_, support, M, recip_extent, use_recip_sym):
"""Apply the adjoint, NUFFT1+ problem"""
# Ensure that H_, K_, and L_ have the same shape
@@ -89,10 +150,41 @@ def core_problem(uvect, H_, K_, L_, ac_support, weights, M, N,
uvect_ADA = ugrid_ADA.flatten()
return uvect_ADA
def core_problem_convolution(uvect, M, F_ugrid_conv_, M_ups, ac_support, use_recip_sym=True):
"""
Convolve the data vector with the kernel that encodes where the data sample
reciprocal space, working in the upsampled regime.
:param uvect: data vector on uniform grid, flattened
:param M: length of data vector along each axis
:param F_ugrid_conv_: Fourier transform of convolution sampling array
:param M_ups: length of data vector along each axis when upsampled
:param ac_support: 3d support array for the autocorrelation
:param use_recip_sym: if True, discard imaginary component
:return ugrid_conv_out: convolution of uvect and F_ugrid_conv_, flattened
"""
if use_recip_sym:
assert np.all(np.isreal(uvect))
# Upsample
ugrid = uvect.reshape((M,)*3) * ac_support
ugrid_ups = np.zeros((M_ups,)*3, dtype=uvect.dtype)
ugrid_ups[:M, :M, :M] = ugrid
# Convolution = Fourier multiplication
F_ugrid_ups = np.fft.fftn(np.fft.ifftshift(ugrid_ups))
F_ugrid_conv_out_ups = F_ugrid_ups * F_ugrid_conv_
ugrid_conv_out_ups = np.fft.fftshift(np.fft.ifftn(F_ugrid_conv_out_ups))
# Downsample
ugrid_conv_out = ugrid_conv_out_ups[:M, :M, :M]
ugrid_conv_out *= ac_support
if use_recip_sym:
# Both ugrid_conv and ugrid are real, so their convolution
# should be real, but numerical errors accumulate in the
# imaginary part.
ugrid_conv_out = ugrid_conv_out.real
return ugrid_conv_out.flatten()
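The function above applies the normal-equations operator by convolution: zero-pad the M-grid into an M_ups grid, multiply in Fourier space, inverse transform, and crop back. A minimal 1-D NumPy check of that pattern follows; it omits the fftshift/ifftshift centering used in the real code and is a generic illustration, not spinifel code.

# 1-D check: zero-padding to twice the length turns circular (FFT) convolution
# into linear convolution, which is then cropped back.
import numpy as np

M, M_ups = 8, 16
rng = np.random.default_rng(2)
x = rng.standard_normal(M)
k = rng.standard_normal(M)

x_ups = np.zeros(M_ups); x_ups[:M] = x
k_ups = np.zeros(M_ups); k_ups[:M] = k
conv_fft = np.fft.ifft(np.fft.fft(x_ups) * np.fft.fft(k_ups)).real[:2*M - 1]

assert np.allclose(conv_fft, np.convolve(x, k))      # linear convolution recovered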
@nvtx.annotate("autocorrelation.py", is_prefix=True)
def core_problem_convolution(uvect, M, F_ugrid_conv_, M_ups, ac_support,
def core_problem_convolution_spinifel(uvect, M, F_ugrid_conv_, M_ups, ac_support,
use_reciprocal_symmetry):
if settings.use_cupy:
uvect = xp.asarray(uvect)
......
@@ -34,12 +34,58 @@ def core_problem(comm, uvect, H_, K_, L_, ac_support, weights, M, N,
uvect_ADA = reduce_bcast(comm, uvect_ADA)
return uvect_ADA
@nvtx.annotate("mpi/autocorrelation.py", is_prefix=True)
def gen_F_antisupport(M):
"""
Generate an antisupport in Fourier space, which has zeros in the central
sphere and ones in the high-resolution corners.
:param M: length of the cubic antisupport volume
:return F_antisupport: volume that masks central region
"""
# generate "antisupport" -- this has zeros in central sphere, 1s outside
lu = np.linspace(-np.pi, np.pi, M)
Hu_, Ku_, Lu_ = np.meshgrid(lu, lu, lu, indexing='ij')
Qu_ = np.around(np.sqrt(Hu_**2 + Ku_**2 + Lu_**2), 4)
F_antisupport = Qu_ > np.pi
assert np.all(F_antisupport == F_antisupport[::-1, :, :])
assert np.all(F_antisupport == F_antisupport[:, ::-1, :])
assert np.all(F_antisupport == F_antisupport[:, :, ::-1])
assert np.all(F_antisupport == F_antisupport[::-1, ::-1, ::-1])
return F_antisupport
@nvtx.annotate("mpi/autocorrelation.py", is_prefix=True)
def fourier_reg(uvect, support, F_antisupport, M, use_recip_sym):
"""
Generate the flattened matrix component that penalizes noise in the outer
regions of reciprocal space, specifically outside the sphere of radius pi,
where H_max, K_max, and L_max have been normalized to equal pi.
:param uvect: data vector on uniform grid, flattened
:param support: 3d support object for autocorrelation
:param F_antisupport: support in Fourier space, unmasked at high frequencies
:param M: length of data vector along each axis
:param use_recip_sym: if True, discard imaginary component
:return uvect: convolution of uvect and F_antisupport, flattened
"""
ugrid = uvect.reshape((M,)*3) * support
if use_recip_sym:
assert np.all(np.isreal(ugrid))
F_ugrid = np.fft.fftn(np.fft.ifftshift(ugrid))
F_reg = F_ugrid * np.fft.ifftshift(F_antisupport)
reg = np.fft.fftshift(np.fft.ifftn(F_reg))
uvect = (reg * support).flatten()
if use_recip_sym:
uvect = uvect.real
return uvect
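Together, gen_F_antisupport and fourier_reg act as a high-pass penalty: content beyond radius pi in reciprocal space is kept by the mask and fed back as a regularization term. The toy usage below, assuming the two helpers above are importable, shows that a smoothed volume produces a much smaller penalty than white noise; the sizes and sigma are illustrative.

# Toy usage of the helpers above; illustrative only.
import numpy as np
from scipy.ndimage import gaussian_filter

M = 32
support = np.ones((M,) * 3)
F_anti = gen_F_antisupport(M)

noise = np.random.default_rng(3).standard_normal((M,) * 3)
smooth = gaussian_filter(noise, sigma=3.0)

r_noise = fourier_reg(noise.flatten(), support, F_anti, M, use_recip_sym=True)
r_smooth = fourier_reg(smooth.flatten(), support, F_anti, M, use_recip_sym=True)
print(np.linalg.norm(r_noise), np.linalg.norm(r_smooth))   # noise is penalized far more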
@nvtx.annotate("mpi/autocorrelation.py", is_prefix=True)
def setup_linops(comm, H, K, L, data,
ac_support, weights, x0,
M, N, reciprocal_extent,
rlambda,
M, Mtot, N, reciprocal_extent,
alambda, rlambda, flambda,
use_reciprocal_symmetry):
"""Define W and d parts of the W @ x = d problem.
@@ -54,48 +100,47 @@ def setup_linops(comm, H, K, L, data,
b the data
x0 the initial guess (ac_estimate)
"""
H_ = H.flatten() / reciprocal_extent * np.pi / settings.oversampling
K_ = K.flatten() / reciprocal_extent * np.pi / settings.oversampling
L_ = L.flatten() / reciprocal_extent * np.pi / settings.oversampling
H_ = H.flatten() / reciprocal_extent * np.pi
K_ = K.flatten() / reciprocal_extent * np.pi
L_ = L.flatten() / reciprocal_extent * np.pi
F_antisupport = gen_F_antisupport(M)
# Using upsampled convolution technique instead of ADA
M_ups = settings.M_ups
M_ups = M * 2
ugrid_conv = autocorrelation.adjoint(
np.ones_like(data), H_, K_, L_, 1, M_ups,
reciprocal_extent, use_reciprocal_symmetry)
ugrid_conv = reduce_bcast(comm, ugrid_conv)
np.ones_like(data), H_, K_, L_, M_ups,
use_reciprocal_symmetry, support=None)
#ugrid_conv = reduce_bcast(comm, ugrid_conv)
F_ugrid_conv_ = np.fft.fftn(np.fft.ifftshift(ugrid_conv)) #/ M**3
def W_matvec(uvect):
"""Define W part of the W @ x = d problem."""
uvect_ADA = autocorrelation.core_problem_convolution(
uvect, M, F_ugrid_conv_, M_ups, ac_support, use_reciprocal_symmetry)
if False: # Debug/test -> make sure all cg are in sync (same lambdas)
uvect_ADA_old = core_problem(
comm, uvect, H_, K_, L_, ac_support, weights, M, N,
reciprocal_extent, use_reciprocal_symmetry)
assert np.allclose(uvect_ADA, uvect_ADA_old)
uvect = uvect_ADA + rlambda*uvect
uvect, M, F_ugrid_conv_, M_ups, ac_support)
uvect_FDF = fourier_reg(uvect, ac_support, F_antisupport, M, use_recip_sym=use_reciprocal_symmetry)
uvect = alambda*uvect_ADA + rlambda*uvect + flambda*uvect_FDF
return uvect
W = LinearOperator(
dtype=np.complex128,
shape=(M**3, M**3),
dtype=np.complex64,
shape=(Mtot, Mtot),
matvec=W_matvec)
nuvect_Db = (data * weights).astype(np.float64)
nuvect_Db = data * weights
uvect_ADb = autocorrelation.adjoint(
nuvect_Db, H_, K_, L_, ac_support, M,
reciprocal_extent, use_reciprocal_symmetry
).flatten()
nuvect_Db, H_, K_, L_, M, support=ac_support,
use_recip_sym=use_reciprocal_symmetry).flatten()
uvect_ADb = reduce_bcast(comm, uvect_ADb)
d = uvect_ADb + rlambda*x0
#uvect_ADb = reduce_bcast(comm, uvect_ADb)
if np.sum(np.isnan(uvect_ADb)) > 0:
print("Warning: nans in the adjoint calculation; intensities may be too large", flush=True)
d = alambda*uvect_ADb + rlambda*x0
return W, d
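setup_linops returns a scipy LinearOperator W and right-hand side d, which solve_ac then hands to conjugate gradient. The self-contained sketch below reproduces that structure (a matvec closure applying A^T A plus a Tikhonov term, solved with scipy.sparse.linalg.cg) with a small dense stand-in matrix; in spinifel the A^T A product is the NUFFT-based convolution above, so this is only an illustration of the pattern.

# Generic LinearOperator + CG sketch of the W @ x = d structure.
import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

rng = np.random.default_rng(4)
n = 100
A = rng.standard_normal((300, n))
b = rng.standard_normal(300)
rlambda = 1e-2

def W_matvec(x):
    return A.T @ (A @ x) + rlambda * x                # A^T A x + regularization

W = LinearOperator(dtype=np.float64, shape=(n, n), matvec=W_matvec)
d = A.T @ b
x, info = cg(W, d, maxiter=100)
print(info, np.linalg.norm(W @ x - d))                # info == 0 means converged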
@nvtx.annotate("mpi/autocorrelation.py", is_prefix=True)
def solve_ac(generation,
pixel_position_reciprocal,
@@ -106,10 +151,9 @@ def solve_ac(generation,
comm = MPI.COMM_WORLD
M = settings.M
Mtot = M**3
N_images = slices_.shape[0] # N images per rank
print('N_images =', N_images)
N = int(utils.prod(slices_.shape)) # N images per rank x number of pixels per image = number of pixels per rank
print('N =', N)
N = np.prod(slices_.shape) # N images per rank x number of pixels per image = number of pixels per rank
reciprocal_extent = pixel_distance_reciprocal.max()
use_reciprocal_symmetry = True
ref_rank = -1
@@ -121,31 +165,24 @@
H, K, L = autocorrelation.gen_nonuniform_positions(
orientations, pixel_position_reciprocal)
# norm(nuvect_Db)^2 = b_squared = b_1^2 + b_2^2 +....
b_squared = np.sum( np.linalg.norm( slices_.reshape(slices_.shape[0],-1) , axis=-1) **2)
b_squared = reduce_bcast(comm, b_squared)
data = slices_.flatten()
data = slices_.flatten().astype(np.float32)
# Set up ac
if ac_estimate is None:
ac_support = np.ones((M,)*3)
ac_support = np.fft.fftshift(np.fft.ifftn(np.fft.fftn(np.fft.ifftshift(ac_support)).real)).real
ac_estimate = np.zeros((M,)*3)
ac_estimate = np.fft.fftshift(np.fft.ifftn(np.fft.fftn(np.fft.ifftshift(ac_estimate)).real)).real
else:
ac_smoothed = gaussian_filter(ac_estimate, 0.5)
ac_support = (ac_smoothed > 1e-12).astype(np.float64)
ac_support = np.fft.fftshift(np.fft.ifftn(np.fft.fftn(np.fft.ifftshift(ac_support)).real)).real
ac_estimate = np.fft.fftshift(np.fft.ifftn(np.fft.fftn(np.fft.ifftshift(ac_estimate)).real)).real
ac_support = (ac_smoothed > 1e-12).astype(np.float)
ac_estimate *= ac_support
weights = np.ones(N)
weights = np.ones(N).astype(np.float32)
# Use scalable heuristic for regularization lambda
rlambda = np.logspace(-8, 8, comm.size)[comm.rank]
maxiter = settings.solve_ac_maxiter
alambda = 1
rlambda = Mtot/N * 2 **(comm.rank - comm.size/2)
flambda = 1e5 * pow(10, comm.rank - comm.size//2)
maxiter = 100
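The new heuristic spreads the regularization weights geometrically across MPI ranks, centered on the middle rank: rlambda scales with Mtot/N and doubles per rank, while flambda grows by a factor of ten per rank. The sketch below prints that ladder for a hypothetical 8-rank job; M = 81 and the pixel count per rank (taken from the 2500 x 128 x 128 config above) are illustrative values, not settings read from this diff.

# Per-rank lambda ladder for a hypothetical 8-rank job; numbers are illustrative.
import numpy as np

size = 8
M = 81
Mtot = M ** 3
N = 2500 * 128 * 128
for rank in range(size):
    rlambda = Mtot / N * 2 ** (rank - size / 2)
    flambda = 1e5 * pow(10, rank - size // 2)
    print(rank, f"rlambda={rlambda:.3e}", f"flambda={flambda:.1e}")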
# Log central slice L~=0
if comm.rank == (2 if settings.use_psana else 0):
@@ -165,105 +202,45 @@
W, d = setup_linops(comm, H, K, L, data,
ac_support, weights, x0,
M, N, reciprocal_extent,
rlambda,
M, Mtot, N, reciprocal_extent,
alambda, rlambda, flambda,
use_reciprocal_symmetry)
# W_0 and d_0 are given by defining W and d with rlambda=0
W_0, d_0 = setup_linops(comm, H, K, L, data,
ac_support, weights, x0,
M, N, reciprocal_extent,
0,
use_reciprocal_symmetry)
ret, info = cg(W, d, x0=x0, maxiter=maxiter, callback=callback)
if info != 0:
print(f'WARNING: CG did not converge at rlambda = {rlambda}')
soln = (np.linalg.norm(ret-ac_estimate.flatten())**2).real # solution norm
resid = (np.dot(ret,W_0.matvec(ret)-2*d_0) + b_squared).real # residual norm
v1 = norm(ret)
v2 = norm(W*ret-d)
# Rank0 gathers rlambda, solution norm, residual norm from all ranks
summary = comm.gather((comm.rank, rlambda, info, soln, resid), root=0)
summary = comm.gather((comm.rank, rlambda, v1, v2), root=0)
print('summary =', summary)
if comm.rank == 0:
ranks, lambdas, infos, solns, resids = [np.array(el) for el in zip(*summary)]
converged_idx = [i for i, info in enumerate(infos) if info == 0]
ranks = np.array(ranks)[converged_idx]
lambdas = np.array(lambdas)[converged_idx]
solns = np.array(solns)[converged_idx]
resids = np.array([item for sublist in resids for item in sublist])[converged_idx]
print('ranks =', ranks)
print('lambdas =', lambdas)
print('solns =', solns)
print('resids =', resids)
ranks, lambdas, v1s, v2s = [np.array(el) for el in zip(*summary)]
if generation == 0:
# Expect non-convergence => weird results
# Heuristic: retain rank with highest rlambda and high solution norm
idx = solns >= np.mean(solns)
idx = v1s >= np.mean(v1s)
imax = np.argmax(lambdas[idx])
iref = np.arange(len(ranks), dtype=int)[idx][imax]
print('iref =', iref)
else:
# Heuristic: L-curve criterion
# Take corner of L-curve
valuePair = np.array([resids, solns]).T
valuePair = np.array(sorted(valuePair , key=lambda k: [k[0], k[1]])) # sort the corner candidates in increasing order
lambdas = np.sort(lambdas) # sort lambdas in increasing order
allCoord = np.log(valuePair) # coordinates of the loglog L-curve
nPoints = len(resids)
if nPoints == 0: print('WARNING: no converged solution')
firstPoint = allCoord[0]
lineVec = allCoord[-1] - allCoord[0]
lineVecNorm = lineVec / np.sqrt(np.sum(lineVec**2))
vecFromFirst = allCoord - firstPoint
scalarProduct = np.sum(vecFromFirst * numpy.matlib.repmat(lineVecNorm, nPoints, 1), axis=1)
vecFromFirstParallel = np.outer(scalarProduct, lineVecNorm)
vecToLine = vecFromFirst - vecFromFirstParallel
distToLine = np.sqrt(np.sum(vecToLine ** 2, axis=1))
iref = np.argmax(distToLine)
print('iref =', iref)
ref_rank = ranks[iref]
# Log L-curve plot
fig, axes = plt.subplots(figsize=(10.0, 24.0), nrows=3, ncols=1)
axes[0].loglog(lambdas, valuePair.T[1])
axes[0].loglog(lambdas[iref], valuePair[iref][1], "rD")
axes[0].set_xlabel("$\lambda$")
axes[0].set_ylabel("Solution norm $||x||_{2}$")
axes[1].loglog(lambdas, valuePair.T[0])
axes[1].loglog(lambdas[iref], valuePair[iref][0], "rD")
axes[1].set_xlabel("$\lambda$")
axes[1].set_ylabel("Residual norm $||Ax-b||_{2}$")
axes[2].loglog(valuePair.T[0], valuePair.T[1]) # L-curve
axes[2].loglog(valuePair[iref][0], valuePair[iref][1], "rD")
axes[2].set_xlabel("Residual norm $||Ax-b||_{2}$")
axes[2].set_ylabel("Solution norm $||x||_{2}$")
fig.tight_layout()
plt.savefig(settings.out_dir / f"summary_{generation}.png")
plt.close('all')
iref = np.argmin(v1s+v2s)
ref_rank = ranks[iref]
print(f"Keeping result from rank {ref_rank}: v1={v1s[iref]} and v2={v2s[iref]}", flush=True)
else:
ref_rank = -1
# Rank0 broadcasts rank with best autocorrelation
ref_rank = comm.bcast(ref_rank, root=0)
# Set up ac volume
ac = ret.reshape((M,)*3)
ac = np.ascontiguousarray(ac.real).astype(np.float64)
print(f"Rank {comm.rank} got AC in {callback.counter} iterations.", flush=True)
if use_reciprocal_symmetry:
assert np.all(np.isreal(ac))
ac = np.ascontiguousarray(ac.real)
# Log autocorrelation volume
image.show_volume(ac, settings.Mquat, f"autocorrelation_{generation}_{comm.rank}.png")
image.show_volume(ac, settings.Mquat, f"autocorrelation_{generation}_{comm.rank}.png")
# Rank[ref_rank] broadcasts its autocorrelation
print(f"Rank {comm.rank} got AC in {callback.counter} iterations.", flush=True)
comm.Bcast(ac, root=ref_rank)
if comm.rank == 0:
print(f"Keeping result from rank {ref_rank}.")
return ac
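solve_ac shares the winning rank id with the pickle-based lowercase comm.bcast and then broadcasts the contiguous float64 volume with the buffer-based uppercase comm.Bcast. The minimal mpi4py sketch below shows that two-step pattern in isolation; the ref_rank choice and sizes are illustrative, not spinifel logic.

# Minimal mpi4py broadcast sketch; run with e.g. `mpirun -n 4 python sketch.py`.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
M = 16
ac = np.zeros((M,) * 3, dtype=np.float64)
ref_rank = 1 if comm.rank == 0 else -1               # pretend rank 1 had the best solution
ref_rank = comm.bcast(ref_rank, root=0)              # small Python object: lowercase bcast
if comm.rank == ref_rank:
    ac[:] = np.random.default_rng(5).standard_normal((M,) * 3)
comm.Bcast(ac, root=ref_rank)                        # NumPy buffer: uppercase Bcast
print(comm.rank, ac.std())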
@@ -65,7 +65,7 @@ def main():
N_generations = settings.N_generations
if settings.load_gen > 0: # Load input from previous generation
curr_gen = settings.load_gen + 1
curr_gen = settings.load_gen
print(f"Loading checkpoint: {checkpoint.generate_checkpoint_name(settings.out_dir, settings.load_gen, settings.tag_gen)}", flush=True)
myRes = checkpoint.load_checkpoint(settings.out_dir,
settings.load_gen,
@@ -83,11 +83,25 @@ def main():
ac = solve_ac(
curr_gen, pixel_position_reciprocal, pixel_distance_reciprocal, slices_)
logger.log(f"AC recovered in {timer.lap():.2f}s.")
if comm.rank == 0:
myRes = {
'pixel_position_reciprocal': pixel_position_reciprocal,
'pixel_distance_reciprocal': pixel_distance_reciprocal,
'slices_': slices_,
'ac': ac
}
checkpoint.save_checkpoint(myRes, settings.out_dir, curr_gen, tag="solve_ac")
ac_phased, support_, rho_ = phase(curr_gen, ac)
logger.log(f"Problem phased in {timer.lap():.2f}s.")
if comm.rank == 0:
myRes = {
'ac': ac,
'ac_phased': ac_phased,
'support_': support_,
'rho_': rho_
}
checkpoint.save_checkpoint(myRes, settings.out_dir, curr_gen, tag="phase")
# Save electron density and intensity
rho = np.fft.ifftshift(rho_)
intensity = np.fft.ifftshift(np.abs(np.fft.fftshift(ac_phased)**2))
@@ -99,8 +113,9 @@ def main():
# terminate the loop
cov_xy = 0
cov_delta = .05
curr_gen += 1
for generation in range(curr_gen, N_generations):
for generation in range(curr_gen, N_generations+1):
logger.log(f"#"*27)
logger.log(f"##### Generation {generation}/{N_generations} #####")
logger.log(f"#"*27)
@@ -109,16 +124,47 @@ def main():
ac_phased, slices_,
pixel_position_reciprocal, pixel_distance_reciprocal)
logger.log(f"Orientations matched in {timer.lap():.2f}s.")
if comm.rank == 0:
myRes = {'ac_phased': ac_phased,
'slices_': slices_,
'pixel_position_reciprocal': pixel_position_reciprocal,
'pixel_distance_reciprocal': pixel_distance_reciprocal,
'orientations': orientations
}
checkpoint.save_checkpoint(myRes, settings.out_dir, generation, tag="match")
# Solve autocorrelation
ac = solve_ac(
generation, pixel_position_reciprocal, pixel_distance_reciprocal,
slices_, orientations, ac_phased)
logger.log(f"AC recovered in {timer.lap():.2f}s.")
if comm.rank == 0:
myRes = {
'pixel_position_reciprocal': pixel_position_reciprocal,
'pixel_distance_reciprocal': pixel_distance_reciprocal,
'slices_': slices_,
'orientations': orientations,
'ac_phased': ac_phased,
'ac': ac