diff --git a/tests/test_collective_monitor.py b/tests/test_collective_monitor.py new file mode 100644 index 00000000..81081a3e --- /dev/null +++ b/tests/test_collective_monitor.py @@ -0,0 +1,651 @@ +import xfields as xf +import xtrack as xt +from xobjects.test_helpers import for_all_test_contexts + +import json +import h5py + +import numpy as np + + +@for_all_test_contexts +def test_bunch_monitor_hdf5(test_context): + n_macroparticles = int(1e6) + zeta_range = (-1, 1) + + energy = 7e12 + + offs_x = 1e-3 + offs_px = 2e-3 + offs_y = 3e-3 + offs_py = 4e-3 + offs_zeta = 5e-3 + offs_delta = 6e-3 + + sigma_x = 1e-3 + sigma_px = 2e-3 + sigma_y = 3e-3 + sigma_py = 4e-3 + sigma_zeta = 5e-3 + sigma_delta = 6e-3 + + # base coordinates + x_coord = sigma_x * np.random.random(n_macroparticles) + offs_x + px_coord = sigma_px * np.random.random(n_macroparticles) + offs_px + y_coord = sigma_y * np.random.random(n_macroparticles) + offs_y + py_coord = sigma_py * np.random.random(n_macroparticles) + offs_py + zeta_coord = sigma_zeta * np.random.random(n_macroparticles) + offs_zeta + delta_coord = sigma_delta * np.random.random(n_macroparticles) + offs_delta + + bunch_spacing_zeta = 10 + + n_bunches = 3 + + # n_bunches bunches with n_macroparticles each with different coordinates + particles = xt.Particles( + _context=test_context, p0c=energy, + x=np.concatenate([x_coord * (bid + 1) for bid in range(n_bunches)]), + px=np.concatenate([px_coord * (bid + 1) for bid in range(n_bunches)]), + y=np.concatenate([y_coord * (bid + 1) for bid in range(n_bunches)]), + py=np.concatenate([py_coord * (bid + 1) for bid in range(n_bunches)]), + zeta=np.concatenate([zeta_coord * (bid + 1) - bunch_spacing_zeta * bid + for bid in range(n_bunches)]), + delta=np.concatenate( + [delta_coord * (bid + 1) for bid in range(n_bunches)]), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + # dummy filling scheme + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='hdf5', + monitor_bunches=True, + monitor_slices=False, + monitor_particles=False, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + filling_scheme=filling_scheme, + bunch_spacing_zeta=bunch_spacing_zeta, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2 * flush_data_every): + monitor.track(particles) + + with h5py.File(monitor._bunches_filename, 'r') as h5file: + for bid in bunch_numbers: + bunch = h5file[str(bid)] + print(bunch['mean_x'][:], np.mean(x_coord) * (bid + 1)) + assert np.allclose(bunch['mean_x'][:], + np.mean(x_coord) * (bid + 1)) + assert np.allclose(bunch['mean_px'][:], + np.mean(px_coord) * (bid + 1)) + assert np.allclose(bunch['mean_y'][:], + np.mean(y_coord) * (bid + 1)) + assert np.allclose(bunch['mean_py'][:], + np.mean(py_coord) * (bid + 1)) + assert np.allclose(bunch['mean_zeta'][:], + (np.mean(zeta_coord * (bid + 1) - + bunch_spacing_zeta * bid))) + assert np.allclose(bunch['mean_delta'][:], + np.mean(delta_coord) * (bid + 1)) + + assert np.allclose(bunch['sigma_x'][:], + np.std(x_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_px'][:], + np.std(px_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_y'][:], + np.std(y_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_py'][:], + np.std(py_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_zeta'][:], + np.std(zeta_coord) * (bid + 1)) + assert 
np.allclose(bunch['sigma_delta'][:], + np.std(delta_coord) * (bid + 1)) + + epsn_x = np.sqrt( + np.linalg.det(np.cov(x_coord, px_coord))) * beta * gamma + epsn_y = np.sqrt( + np.linalg.det(np.cov(y_coord, py_coord))) * beta * gamma + epsn_zeta = np.sqrt( + np.linalg.det(np.cov(zeta_coord, delta_coord))) * beta * gamma + + assert np.allclose(bunch['epsn_x'][:], + epsn_x * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_y'][:], + epsn_y * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_zeta'][:], + epsn_zeta * (bid + 1) ** 2) + + assert np.allclose(bunch['num_particles'][:], n_macroparticles) + + +@for_all_test_contexts +def test_bunch_monitor_json(test_context): + n_macroparticles = int(1e6) + zeta_range = (-1, 1) + + energy = 7e12 + + offs_x = 1e-3 + offs_px = 2e-3 + offs_y = 3e-3 + offs_py = 4e-3 + offs_zeta = 5e-3 + offs_delta = 6e-3 + + sigma_x = 1e-3 + sigma_px = 2e-3 + sigma_y = 3e-3 + sigma_py = 4e-3 + sigma_zeta = 5e-3 + sigma_delta = 6e-3 + + # base coordinates + x_coord = sigma_x * np.random.random(n_macroparticles) + offs_x + px_coord = sigma_px * np.random.random(n_macroparticles) + offs_px + y_coord = sigma_y * np.random.random(n_macroparticles) + offs_y + py_coord = sigma_py * np.random.random(n_macroparticles) + offs_py + zeta_coord = sigma_zeta * np.random.random(n_macroparticles) + offs_zeta + delta_coord = sigma_delta * np.random.random(n_macroparticles) + offs_delta + + bunch_spacing_zeta = 10 + + n_bunches = 3 + + # n_bunches bunches with n_macroparticles each with different coordinates + particles = xt.Particles( + _context=test_context, p0c=energy, + x=np.concatenate([x_coord * (bid + 1) for bid in range(n_bunches)]), + px=np.concatenate([px_coord * (bid + 1) for bid in range(n_bunches)]), + y=np.concatenate([y_coord * (bid + 1) for bid in range(n_bunches)]), + py=np.concatenate([py_coord * (bid + 1) for bid in range(n_bunches)]), + zeta=np.concatenate([zeta_coord * (bid + 1) - bunch_spacing_zeta * bid + for bid in range(n_bunches)]), + delta=np.concatenate( + [delta_coord * (bid + 1) for bid in range(n_bunches)]), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + # dummy filling scheme + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + + n_turns = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='json', + monitor_bunches=True, + monitor_slices=False, + monitor_particles=False, + flush_data_every=n_turns, + zeta_range=zeta_range, + filling_scheme=filling_scheme, + bunch_spacing_zeta=bunch_spacing_zeta, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2 * n_turns): + monitor.track(particles) + + with open(monitor._bunches_filename, 'r') as f: + bunches_dict = json.load(f) + + for bid in bunch_numbers: + bunch = bunches_dict[str(bid)] + assert np.allclose(bunch['mean_x'][:], (np.mean(x_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_px'][:], (np.mean(px_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_y'][:], (np.mean(y_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_py'][:], (np.mean(py_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_zeta'][:], + (np.mean(zeta_coord * (bid + 1) - + bunch_spacing_zeta * bid))) + assert np.allclose(bunch['mean_delta'][:], + np.mean(delta_coord) * (bid + 1)) + + assert np.allclose(bunch['sigma_x'][:], (np.std(x_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_px'][:], (np.std(px_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_y'][:], 
(np.std(y_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_py'][:], (np.std(py_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_zeta'][:], + (np.std(zeta_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_delta'][:], + (np.std(delta_coord) * (bid + 1))) + + epsn_x = np.sqrt( + np.linalg.det(np.cov(x_coord, px_coord))) * beta * gamma + epsn_y = np.sqrt( + np.linalg.det(np.cov(y_coord, py_coord))) * beta * gamma + epsn_zeta = np.sqrt(np.linalg.det(np.cov(zeta_coord, + delta_coord))) * beta * gamma + + assert np.allclose(bunch['epsn_x'][:], epsn_x * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_y'][:], epsn_y * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_zeta'][:], epsn_zeta * (bid + 1) ** 2) + + assert np.allclose(bunch['num_particles'][:], n_macroparticles) + + +@for_all_test_contexts +def test_slice_monitor_hdf5(test_context): + n_macroparticles = int(1e6) + num_slices = 10 + zeta_range = (0, 1) + + energy = 7e12 + + offs_x = 1 + offs_px = 2 + offs_y = 3 + offs_py = 4 + offs_delta = 5 + + sigma_x = 6 + sigma_px = 7 + sigma_y = 8 + sigma_py = 9 + sigma_delta = 10 + + # dummy filling scheme + n_bunches = 3 + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + bunch_spacing_zeta = 10 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='hdf5', + monitor_bunches=False, + monitor_slices=True, + monitor_particles=False, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + bunch_spacing_zeta=bunch_spacing_zeta, + filling_scheme=filling_scheme, + ) + + particles = xt.Particles( + _context=test_context, + p0c=energy, + x=np.zeros(n_macroparticles), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + n_slice = 5 + + dzeta = monitor.slicer.dzeta + zeta_coord_base = np.random.random(n_macroparticles) + + x_coord_bunch = [] + px_coord_bunch = [] + y_coord_bunch = [] + py_coord_bunch = [] + zeta_coord_bunch = [] + delta_coord_bunch = [] + n_macroparticles_slice_bunch = [] + + for bid in range(n_bunches): + zeta_coord = zeta_coord_base * (bid + 1) - bid * bunch_spacing_zeta + + bin_min = monitor.slicer.zeta_centers[bid, n_slice] - dzeta / 2 + bin_max = monitor.slicer.zeta_centers[bid, n_slice] + dzeta / 2 + + slice_mask = np.logical_and(zeta_coord < bin_max, zeta_coord >= bin_min) + + n_macroparticles_slice = np.sum(slice_mask) + + x_coord = (sigma_x * np.random.random( + n_macroparticles_slice) + offs_x) * (bid + 1) + px_coord = (sigma_px * np.random.random( + n_macroparticles_slice) + offs_px) * (bid + 1) + y_coord = (sigma_y * np.random.random( + n_macroparticles_slice) + offs_y) * (bid + 1) + py_coord = (sigma_py * np.random.random( + n_macroparticles_slice) + offs_py) * (bid + 1) + delta_coord = (sigma_delta * np.random.random(n_macroparticles_slice) + + offs_delta) * (bid + 1) + + x_coord_bunch.append(x_coord) + px_coord_bunch.append(px_coord) + y_coord_bunch.append(y_coord) + py_coord_bunch.append(py_coord) + delta_coord_bunch.append(delta_coord) + zeta_coord_bunch.append(zeta_coord[slice_mask]) + n_macroparticles_slice_bunch.append(n_macroparticles_slice) + + particles.x[slice_mask] = x_coord + particles.px[slice_mask] = px_coord + particles.y[slice_mask] = y_coord + particles.py[slice_mask] = py_coord + particles.zeta[slice_mask] = zeta_coord[slice_mask] + particles.delta[slice_mask] = delta_coord + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(flush_data_every * 
2): + monitor.track(particles) + + with h5py.File(monitor._slices_filename, 'r') as h5file: + for bid in bunch_numbers: + slice_data = h5file[str(bid)] + print(x_coord_bunch[bid]) + assert np.allclose(slice_data['mean_x'][:, n_slice], + np.mean(x_coord_bunch[bid])) + assert np.allclose(slice_data['mean_px'][:, n_slice], + np.mean(px_coord_bunch[bid])) + assert np.allclose(slice_data['mean_y'][:, n_slice], + np.mean(y_coord_bunch[bid])) + assert np.allclose(slice_data['mean_py'][:, n_slice], + np.mean(py_coord_bunch[bid])) + assert np.allclose(slice_data['mean_zeta'][:, n_slice], + np.mean(zeta_coord_bunch[bid])) + assert np.allclose(slice_data['mean_delta'][:, n_slice], + np.mean(delta_coord_bunch[bid])) + + assert np.allclose(slice_data['sigma_x'][:, n_slice], + np.std(x_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_px'][:, n_slice], + np.std(px_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_y'][:, n_slice], + np.std(y_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_py'][:, n_slice], + np.std(py_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_zeta'][:, n_slice], + np.std(zeta_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_delta'][:, n_slice], + np.std(delta_coord_bunch[bid])) + + epsn_x = (np.sqrt(np.var(x_coord_bunch[bid]) * + np.var(px_coord_bunch[bid]) - + np.cov(x_coord_bunch[bid], px_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_y = (np.sqrt(np.var(y_coord_bunch[bid]) * + np.var(py_coord_bunch[bid]) - + np.cov(y_coord_bunch[bid], py_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_zeta = (np.sqrt(np.var(zeta_coord_bunch[bid]) * + np.var(delta_coord_bunch[bid]) - + np.cov(zeta_coord_bunch[bid], + delta_coord_bunch[bid])[ + 0, 1]) * beta * gamma) + + assert np.allclose(slice_data['epsn_x'][:, n_slice], epsn_x) + assert np.allclose(slice_data['epsn_y'][:, n_slice], epsn_y) + assert np.allclose(slice_data['epsn_zeta'][:, n_slice], epsn_zeta) + + assert np.allclose(slice_data['num_particles'][:, n_slice], + n_macroparticles_slice_bunch[bid]) + + +@for_all_test_contexts +def test_slice_monitor_json(test_context): + n_macroparticles = int(1e6) + num_slices = 10 + zeta_range = (0, 1) + + energy = 7e12 + + offs_x = 1 + offs_px = 2 + offs_y = 3 + offs_py = 4 + offs_delta = 5 + + sigma_x = 6 + sigma_px = 7 + sigma_y = 8 + sigma_py = 9 + sigma_delta = 10 + + # dummy filling scheme + n_bunches = 3 + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + bunch_spacing_zeta = 10 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='json', + monitor_bunches=False, + monitor_slices=True, + monitor_particles=False, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + bunch_spacing_zeta=bunch_spacing_zeta, + filling_scheme=filling_scheme, + ) + + particles = xt.Particles( + _context=test_context, + p0c=energy, + x=np.zeros(n_macroparticles), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + n_slice = 5 + + dzeta = monitor.slicer.dzeta + zeta_coord_base = np.random.random(n_macroparticles) + + x_coord_bunch = [] + px_coord_bunch = [] + y_coord_bunch = [] + py_coord_bunch = [] + zeta_coord_bunch = [] + delta_coord_bunch = [] + n_macroparticles_slice_bunch = [] + + for bid in range(n_bunches): + zeta_coord = zeta_coord_base * (bid + 1) - bid * bunch_spacing_zeta + + bin_min = monitor.slicer.zeta_centers[bid, n_slice] - dzeta / 2 + bin_max = monitor.slicer.zeta_centers[bid, n_slice] + 
dzeta / 2 + + slice_mask = np.logical_and(zeta_coord < bin_max, zeta_coord >= bin_min) + + n_macroparticles_slice = np.sum(slice_mask) + + x_coord = (sigma_x * np.random.random( + n_macroparticles_slice) + offs_x) * (bid + 1) + px_coord = (sigma_px * np.random.random( + n_macroparticles_slice) + offs_px) * (bid + 1) + y_coord = (sigma_y * np.random.random( + n_macroparticles_slice) + offs_y) * (bid + 1) + py_coord = (sigma_py * np.random.random( + n_macroparticles_slice) + offs_py) * (bid + 1) + delta_coord = (sigma_delta * np.random.random(n_macroparticles_slice) + + offs_delta) * (bid + 1) + + x_coord_bunch.append(x_coord) + px_coord_bunch.append(px_coord) + y_coord_bunch.append(y_coord) + py_coord_bunch.append(py_coord) + delta_coord_bunch.append(delta_coord) + zeta_coord_bunch.append(zeta_coord[slice_mask]) + n_macroparticles_slice_bunch.append(n_macroparticles_slice) + + particles.x[slice_mask] = x_coord + particles.px[slice_mask] = px_coord + particles.y[slice_mask] = y_coord + particles.py[slice_mask] = py_coord + particles.zeta[slice_mask] = zeta_coord[slice_mask] + particles.delta[slice_mask] = delta_coord + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2 * flush_data_every): + monitor.track(particles) + + with open(monitor._slices_filename, 'r') as f: + bunches_data = json.load(f) + + for bid in bunch_numbers: + slice_data = bunches_data[str(bid)] + + for turn in range(2 * flush_data_every): + assert np.isclose(slice_data['mean_x'][turn][n_slice], + np.mean(x_coord_bunch[bid])) + assert np.isclose(slice_data['mean_px'][turn][n_slice], + np.mean(px_coord_bunch[bid])) + assert np.isclose(slice_data['mean_y'][turn][n_slice], + np.mean(y_coord_bunch[bid])) + assert np.isclose(slice_data['mean_py'][turn][n_slice], + np.mean(py_coord_bunch[bid])) + assert np.isclose(slice_data['mean_zeta'][turn][n_slice], + np.mean(zeta_coord_bunch[bid])) + assert np.isclose(slice_data['mean_delta'][turn][n_slice], + np.mean(delta_coord_bunch[bid])) + + assert np.isclose(slice_data['sigma_x'][turn][n_slice], + np.std(x_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_px'][turn][n_slice], + np.std(px_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_y'][turn][n_slice], + np.std(y_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_py'][turn][n_slice], + np.std(py_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_zeta'][turn][n_slice], + np.std(zeta_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_delta'][turn][n_slice], + np.std(delta_coord_bunch[bid])) + + epsn_x = (np.sqrt(np.var(x_coord_bunch[bid]) * + np.var(px_coord_bunch[bid]) - + np.cov(x_coord_bunch[bid], px_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_y = (np.sqrt(np.var(y_coord_bunch[bid]) * + np.var(py_coord_bunch[bid]) - + np.cov(y_coord_bunch[bid], py_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_zeta = (np.sqrt(np.var(zeta_coord_bunch[bid]) * + np.var(delta_coord_bunch[bid]) - + np.cov(zeta_coord_bunch[bid], + delta_coord_bunch[bid])[ + 0, 1]) * beta * gamma) + + assert np.isclose(slice_data['epsn_x'][turn][n_slice], epsn_x) + assert np.isclose(slice_data['epsn_y'][turn][n_slice], epsn_y) + assert np.isclose(slice_data['epsn_zeta'][turn][n_slice], epsn_zeta) + + assert np.isclose(slice_data['num_particles'][turn][n_slice], + n_macroparticles_slice_bunch[bid]) + + +@for_all_test_contexts +def test_particle_monitor_hdf5(test_context): + n_macroparticles = int(1e2) + zeta_range = (-1, 1) + + particles = xt.Particles( + _context=test_context, 
p0c=7e12, + x=np.random.random(n_macroparticles), + px=np.random.random(n_macroparticles), + y=np.random.random(n_macroparticles), + py=np.random.random(n_macroparticles), + zeta=np.random.random(n_macroparticles), + delta=np.random.random(n_macroparticles), + ) + + # dummy particle mask + particle_monitor_mask = particles.x > 0 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='hdf5', + monitor_bunches=False, + monitor_slices=False, + monitor_particles=True, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + particle_monitor_mask=particle_monitor_mask, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2*flush_data_every): + monitor.track(particles) + + with h5py.File(monitor._particles_filename, 'r') as h5file: + for stat in monitor._stats_to_store_particles: + saved_data = h5file[stat] + for turn in range(2 * flush_data_every): + assert np.allclose(getattr(particles, + stat)[particle_monitor_mask], + saved_data[turn, :]) + + +@for_all_test_contexts +def test_particle_monitor_json(test_context): + n_macroparticles = int(1e2) + zeta_range = (-1, 1) + + particles = xt.Particles( + _context=test_context, p0c=7e12, + x=np.random.random(n_macroparticles), + px=np.random.random(n_macroparticles), + y=np.random.random(n_macroparticles), + py=np.random.random(n_macroparticles), + zeta=np.random.random(n_macroparticles), + delta=np.random.random(n_macroparticles), + ) + + # dummy particle mask + particle_monitor_mask = particles.x > 0 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='json', + monitor_bunches=False, + monitor_slices=False, + monitor_particles=True, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + particle_monitor_mask=particle_monitor_mask, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2*flush_data_every): + monitor.track(particles) + + with open(monitor._particles_filename, 'r') as f: + buffer = json.load(f) + + for stat in monitor._stats_to_store_particles: + saved_data = buffer[stat] + for turn in range(2*flush_data_every): + assert np.allclose(getattr(particles, stat)[particle_monitor_mask], + saved_data[turn][:]) diff --git a/tests/test_element_with_slicer_filling_scheme.py b/tests/test_element_with_slicer_filling_scheme.py index 747bda3a..92414d1a 100644 --- a/tests/test_element_with_slicer_filling_scheme.py +++ b/tests/test_element_with_slicer_filling_scheme.py @@ -92,7 +92,7 @@ def test_element_with_slicer_filling_scheme(buffer_round_trip, num_turns): rtol=0, atol=1e-12) xo.assert_allclose(slicer2.zeta_centers, - np.array([[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]]), + np.array([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]), rtol=0, atol=1e-12) xo.assert_allclose(slicer.zeta_range[0], -1, rtol=0, atol=1e-12) @@ -117,8 +117,8 @@ def test_element_with_slicer_filling_scheme(buffer_round_trip, num_turns): xo.assert_allclose(slicer2.num_particles, np.array([ - [ 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000.], - ]), + 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000.], + ), rtol=0, atol=1e-12) z_prof, prof = ele.moments_data.get_moment_profile('num_particles', i_turn=0) diff --git a/tests/test_transverse_damper.py b/tests/test_transverse_damper.py new file mode 100644 index 00000000..1c67fdcb --- /dev/null +++ b/tests/test_transverse_damper.py @@ -0,0 +1,129 @@ +import numpy as np 
+from scipy.constants import c +from scipy.constants import physical_constants +from scipy.signal import hilbert +from scipy.stats import linregress + +import xfields as xf +import xtrack as xt +import xpart as xp +from xobjects.test_helpers import for_all_test_contexts + + +exclude_contexts = ['ContextPyopencl', 'ContextCupy'] + +@for_all_test_contexts(excluding=exclude_contexts) +def test_transverse_damper(test_context): + longitudinal_mode = 'nonlinear' + # Machine settings + n_turns = 1000 + + n_macroparticles = 10000 + intensity = 8e9 + + alpha = 53.86**-2 + + e0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 + p0c = 450e9 + gamma = p0c/e0 + beta = np.sqrt(1-1/gamma**2) + + h_rf = 35640 + bunch_spacing_buckets = 10 + num_bunches = 2 + n_slices = 500 + + epsn_x = 2e-6 + epsn_y = 2e-6 + taub = 0.9e-9 + sigma_z = taub*beta*c/4 + + circumference = 26658.883 + + momentum_compaction = alpha + v_rf = 4e6 + f_rev = beta*c/circumference + f_rf = f_rev*h_rf + + bucket_length = circumference / h_rf + zeta_range = (-0.5*bucket_length, 0.5*bucket_length) + + filling_scheme = np.zeros(int(h_rf/bunch_spacing_buckets)) + filling_scheme[0: num_bunches] = 1 + filled_slots = np.nonzero(filling_scheme)[0] + + one_turn_map = xt.LineSegmentMap( + length=circumference, + betx=70, bety=80, + qx=62.31, qy=60.32, + longitudinal_mode=longitudinal_mode, + voltage_rf=v_rf, frequency_rf=f_rf, lag_rf=180, + momentum_compaction_factor=momentum_compaction, + ) + + gain_x = 0.01 + gain_y = 0.01 + + transverse_damper = xf.TransverseDamper( + gain_x=gain_x, gain_y=gain_y, + filling_scheme=filling_scheme, + zeta_range=zeta_range, + num_slices=n_slices, + bunch_spacing_zeta=bunch_spacing_buckets*bucket_length, + circumference=circumference, + mode='bunch-by-bunch' + ) + + line = xt.Line(elements=[[one_turn_map, transverse_damper][0]], + element_names=[['one_turn_map', 'transverse_damper'][0]]) + + line.particle_ref = xt.Particles(p0c=p0c) + line.build_tracker(_context=test_context) + + particles = xp.generate_matched_gaussian_multibunch_beam( + _context=test_context, + filling_scheme=filling_scheme, + bunch_num_particles=n_macroparticles, + bunch_intensity_particles=intensity, + nemitt_x=epsn_x, nemitt_y=epsn_y, sigma_z=sigma_z, + line=line, bunch_spacing_buckets=bunch_spacing_buckets, + bunch_selection=filled_slots, + rf_harmonic=[h_rf], rf_voltage=[v_rf], + particle_ref=line.particle_ref, + + ) + + # apply a distortion to the bunches + amplitude = 1e-3 + particles.px += amplitude + particles.py += amplitude + + mean_x = np.zeros((n_turns, num_bunches)) + mean_y = np.zeros((n_turns, num_bunches)) + + for i_turn in range(n_turns): + line.track(particles, num_turns=1) + transverse_damper.track(particles, i_turn) + for ib in range(num_bunches): + mean_x[i_turn, ib] = np.mean(particles.x[n_macroparticles*ib: + n_macroparticles*(ib+1)]) + mean_y[i_turn, ib] = np.mean(particles.y[n_macroparticles*ib: + n_macroparticles*(ib+1)]) + + turns = np.linspace(0, n_turns-1, n_turns) + + i_fit_start = 200 + i_fit_end = 600 + + for i_bunch in range(num_bunches): + ampls_x = np.abs(hilbert(mean_x[:, i_bunch])) + fit_x = linregress(turns[i_fit_start: i_fit_end], + np.log(ampls_x[i_fit_start: i_fit_end])) + + assert np.isclose(-fit_x.slope*2, gain_x, atol=1e-4, rtol=0) + + ampls_y = np.abs(hilbert(mean_y[:, i_bunch])) + fit_y = linregress(turns[i_fit_start: i_fit_end], + np.log(ampls_y[i_fit_start: i_fit_end])) + + assert np.isclose(-fit_y.slope*2, gain_y, atol=1e-4, rtol=0) diff --git a/tests_mpi/run_all.sh 
b/tests_mpi/run_all.sh new file mode 100644 index 00000000..b2fac628 --- /dev/null +++ b/tests_mpi/run_all.sh @@ -0,0 +1,2 @@ +mpiexec -n 3 pytest -v test_collective_monitor_mpi.py +mpiexec -n 3 pytest -v test_transverse_damper_mpi.py diff --git a/tests_mpi/test_transverse_damper_mpi.py b/tests_mpi/test_transverse_damper_mpi.py new file mode 100644 index 00000000..777d3636 --- /dev/null +++ b/tests_mpi/test_transverse_damper_mpi.py @@ -0,0 +1,121 @@ +import numpy as np +from scipy.constants import c +from scipy.constants import physical_constants +from scipy.signal import hilbert +from scipy.stats import linregress + +import xfields as xf +import xtrack as xt +import xpart as xp +from xobjects.test_helpers import for_all_test_contexts + + +exclude_contexts = ['ContextPyopencl', 'ContextCupy'] + +@for_all_test_contexts(excluding=exclude_contexts) +def test_transverse_damper_mpi(test_context): + longitudinal_mode = 'nonlinear' + # Machine settings + n_turns = 1000 + + n_macroparticles = 10000 + intensity = 8e9 + + alpha = 53.86**-2 + + e0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 + en = 450e9 + gamma = en/e0 + beta = np.sqrt(1-1/gamma**2) + + h_rf = 35640 + bunch_spacing_buckets = 10 + num_bunches = 4 + n_slices = 500 + + epsn_x = 2e-6 + epsn_y = 2e-6 + taub = 0.9e-9 + sigma_z = taub*beta*c/4 + + circumference = 26658.883 + + momentum_compaction = alpha + v_rf = 4e6 + f_rev = beta*c/circumference + f_rf = f_rev*h_rf + + bucket_length = circumference / h_rf + zeta_range = (-0.5*bucket_length, 0.5*bucket_length) + + filling_scheme = np.zeros(int(h_rf/bunch_spacing_buckets)) + filling_scheme[0: num_bunches] = 1 + + one_turn_map = xt.LineSegmentMap( + length=circumference, + betx=70, bety=80, + qx=62.31, qy=60.32, + longitudinal_mode=longitudinal_mode, + voltage_rf=v_rf, frequency_rf=f_rf, lag_rf=180, + momentum_compaction_factor=momentum_compaction, + ) + + gain_x = 0.01 + gain_y = 0.01 + + transverse_damper = xf.TransverseDamper( + gain_x=gain_x, gain_y=gain_y, + filling_scheme=filling_scheme, + zeta_range=zeta_range, + num_slices=n_slices, + bunch_spacing_zeta=bunch_spacing_buckets*bucket_length, + circumference=circumference + ) + + line = xt.Line(elements=[one_turn_map, transverse_damper], + element_names=['one_turn_map', 'transverse_damper']) + + line.particle_ref = xt.Particles(p0c=450e9) + line.build_tracker(_context=test_context) + + particles = xp.generate_matched_gaussian_multibunch_beam( + _context=test_context, + filling_scheme=filling_scheme, + bunch_num_particles=n_macroparticles, + bunch_intensity_particles=intensity, + nemitt_x=epsn_x, nemitt_y=epsn_y, sigma_z=sigma_z, + line=line, bunch_spacing_buckets=bunch_spacing_buckets, + rf_harmonic=[h_rf], rf_voltage=[v_rf], + particle_ref=line.particle_ref, + prepare_line_and_particles_for_mpi_wake_sim=True + ) + + # apply a distortion to the bunches + amplitude = 1e-3 + particles.px += amplitude + particles.py += amplitude + + mean_x = np.zeros((n_turns)) + mean_y = np.zeros((n_turns)) + + for i_turn in range(n_turns): + line.track(particles, num_turns=1) + mean_x[i_turn] = np.mean(particles.x) + mean_y[i_turn] = np.mean(particles.y) + + turns = np.linspace(0, n_turns-1, n_turns) + + i_fit_start = 200 + i_fit_end = 600 + + ampls_x = np.abs(hilbert(mean_x)) + fit_x = linregress(turns[i_fit_start: i_fit_end], + np.log(ampls_x[i_fit_start: i_fit_end])) + + assert np.isclose(-fit_x.slope*2, gain_x, atol=1e-4, rtol=0) + + ampls_y = np.abs(hilbert(mean_y)) + fit_y = linregress(turns[i_fit_start: i_fit_end], + 
np.log(ampls_y[i_fit_start: i_fit_end])) + + assert np.isclose(-fit_y.slope*2, gain_y, atol=1e-4, rtol=0) diff --git a/xfields/__init__.py b/xfields/__init__.py index e68c3215..5642a5b3 100644 --- a/xfields/__init__.py +++ b/xfields/__init__.py @@ -25,6 +25,8 @@ from .beam_elements.temp_slicer import TempSlicer from .beam_elements.electroncloud import ElectronCloud from .beam_elements.electronlens_interpolated import ElectronLensInterpolated +from .beam_elements.transverse_damper import TransverseDamper +from .beam_elements.collective_monitor import CollectiveMonitor from .general import _pkg_root from .config_tools import replace_spacecharge_with_quasi_frozen diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py new file mode 100644 index 00000000..49537578 --- /dev/null +++ b/xfields/beam_elements/collective_monitor.py @@ -0,0 +1,476 @@ +import numpy as np + +from .element_with_slicer import ElementWithSlicer +import json +import os +import xfields as xf +import xpart as xp + +COORDS = ['x', 'px', 'y', 'py', 'zeta', 'delta'] +SECOND_MOMENTS = {} +for c1 in COORDS: + for c2 in COORDS: + if c1 + '_' + c2 in SECOND_MOMENTS or c2 + '_' + c1 in SECOND_MOMENTS: + continue + SECOND_MOMENTS[c1 + '_' + c2] = (c1, c2) + + +class CollectiveMonitor(ElementWithSlicer): + """ + A class to monitor the collective motion of the beam. The monitor can save + bunch-by-bunch, slice-by-slice and particle-by-particle data. For the + particle-by-particle data, a mask can be used to select the particles to + be monitored. The monitor collects a buffer of `flush_data_every` turns + before saving the data to disk, in order to reduce the number of I/O and + avoid storing too much data in memory. + + Parameters + ---------- + monitor_bunches : Bool + If True, the bunch-by-bunch data is monitored + monitor_slices : Bool + If True, the slice-by-slice data is monitored + monitor_particles : Bool + If True, the particle-by-particle data is monitored + base_file_name : str + Base file name for the output files. If it is not specified, the data + will not be saved to disk. + particle_monitor_mask : np.ndarray + Mask identifying the particles to be monitored. If later on we try to + monitor a number of particles different than the length of the mask, an + error will be raised. + flush_data_every: int + Number of turns after which the data is saved to disk + stats_to_store : list + List of the statistics to store for the bunch-by-bunch and + slice-by-slice data + stats_to_store_particles : list + List of the statistics to store for the particle-by-particle data + backend: str + Backend used to save the data. For the moment only 'hdf5' and 'json' + are supported, with 'hdf5' being the default + zeta_range : Tuple + Zeta range for each bunch used in the underlying slicer. + num_slices : int + Number of slices per bunch used in the underlying slicer. It should be + specified if the slice-by-slice data is monitored. + bunch_spacing_zeta : float + Bunch spacing in meters. + filling_scheme: np.ndarray + List of zeros and ones representing the filling scheme. The length + of the array is equal to the number of slots in the machine and each + element of the array holds a one if the slot is filled or a zero + otherwise. 
+ bunch_selection: np.ndarray + List of the bunches indicating which slots from the filling scheme are + used (not all the bunches are used when using multi-processing) + _flatten: bool + Use flattened wakes + """ + + _stats_to_store = [ + 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', + 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', + 'sigma_px', 'sigma_py', 'sigma_delta', + 'epsn_x', 'epsn_y', 'epsn_zeta', 'num_particles'] + + _stats_to_store_particles = [ + 'x', 'px', 'y', 'py', 'zeta', 'delta', 'weight', 'state'] + + # Mapping from the stats to store in the monitor to the moments to store in + # the slicer + stat_to_slicer_moments = { + 'mean_x': ['x'], + 'mean_px': ['px'], + 'mean_y': ['y'], + 'mean_py': ['py'], + 'mean_zeta': ['zeta'], + 'mean_delta': ['delta'], + 'sigma_x': ['x', 'x_x'], + 'sigma_y': ['y', 'y_y'], + 'sigma_zeta': ['zeta', 'zeta_zeta'], + 'sigma_px': ['px', 'px_px'], + 'sigma_py': ['py', 'py_py'], + 'sigma_delta': ['delta', 'delta_delta'], + 'epsn_x': ['x', 'px', 'x_x', 'px_px', 'x_px'], + 'epsn_y': ['y', 'py', 'y_y', 'py_py', 'y_py'], + 'epsn_zeta': ['zeta', 'delta', 'delta_delta', 'zeta_zeta', + 'zeta_delta'], + 'num_particles': ['num_particles'] + } + + def __init__(self, + monitor_bunches, + monitor_slices, + monitor_particles, + base_file_name=None, + particle_monitor_mask=None, + flush_data_every=1, + zeta_range=None, + num_slices=1, + bunch_spacing_zeta=None, + filling_scheme=None, + bunch_selection=None, + stats_to_store=None, + stats_to_store_particles=None, + backend='hdf5', + _flatten=False): + + slicer_moments = [] + if stats_to_store is not None: + for stat in stats_to_store: + assert stat in self._stats_to_store + self.stats_to_store = stats_to_store + else: + self.stats_to_store = self._stats_to_store + + if stats_to_store_particles is not None: + for stat in stats_to_store_particles: + assert stat in self._stats_to_store_particles + self.stats_to_store_particles = stats_to_store_particles + else: + self.stats_to_store_particles = self._stats_to_store_particles + + for stat in self.stats_to_store: + # For each stat we store, we add the corresponding moment + if stat == 'num_particles': + slicer_moments.append(stat) + else: + slicer_moments.extend(self.stat_to_slicer_moments[stat]) + + if backend == 'hdf5': + self.extension = 'h5' + self.flush_buffer_to_file_func = flush_buffer_to_file_hdf5 + elif backend == 'json': + self.extension = 'json' + self.flush_buffer_to_file_func = flush_buffer_to_file_json + else: + raise ValueError('Only hdf5 and json backends are supported.') + + self.base_file_name = base_file_name + self.bunch_buffer = None + self.slice_buffer = None + self.particle_buffer = None + self.monitor_bunches = monitor_bunches + self.monitor_slices = monitor_slices + self.monitor_particles = monitor_particles + self.flush_data_every = flush_data_every + self.particle_monitor_mask = particle_monitor_mask + + if base_file_name is not None: + self._bunches_filename = (self.base_file_name + '_bunches.' + + self.extension) + + if os.path.exists(self._bunches_filename) and monitor_bunches: + os.remove(self._bunches_filename) + + self._slices_filename = (self.base_file_name + '_slices.' + + self.extension) + + if os.path.exists(self._slices_filename) and monitor_slices: + os.remove(self._slices_filename) + + self._particles_filename = (self.base_file_name + '_particles.' 
+ + self.extension) + + if os.path.exists(self._particles_filename) and monitor_particles: + os.remove(self._particles_filename) + + self.pipeline_manager = None + + self.i_turn = 0 + + super().__init__( + slicer_moments=slicer_moments, + zeta_range=zeta_range, # These are [a, b] in the paper + num_slices=num_slices, # Per bunch, this is N_1 in the paper + bunch_spacing_zeta=bunch_spacing_zeta, # This is P in the paper + filling_scheme=filling_scheme, + bunch_selection=bunch_selection, + with_compressed_profile=False + ) + + def _reconfigure_for_parallel(self, n_procs, my_rank): + filled_slots = self.slicer.filled_slots + scheme = np.zeros(np.max(filled_slots) + 1, + dtype=np.int64) + scheme[filled_slots] = 1 + + split_scheme = xp.matched_gaussian.split_scheme + bunch_selection_rank = split_scheme(filling_scheme=scheme, + n_chunk=int(n_procs)) + + self.slicer = xf.UniformBinSlicer( + filling_scheme=scheme, + bunch_selection=bunch_selection_rank[my_rank], + zeta_range=self.slicer.zeta_range, + num_slices=self.slicer.num_slices, + bunch_spacing_zeta=self.slicer.bunch_spacing_zeta, + moments=self.slicer.moments + ) + + def track(self, particles, _slice_result=None, _other_bunch_slicers=None): + super().track(particles=particles, + _slice_result=_slice_result, + _other_bunch_slicers=_other_bunch_slicers + ) + + self.i_turn += 1 + + if self.monitor_bunches: + if self.bunch_buffer is None: + self.bunch_buffer = self._init_bunch_buffer() + + self._update_bunch_buffer(particles) + + if (self.i_turn % self.flush_data_every == 0 and + self.base_file_name is not None): + self.flush_buffer_to_file_func( + self.bunch_buffer, self._bunches_filename) + self.bunch_buffer = self._init_bunch_buffer() + + if self.monitor_slices: + if self.slice_buffer is None: + self.slice_buffer = self._init_slice_buffer() + + self._update_slice_buffer(particles) + + if (self.i_turn % self.flush_data_every == 0 and + self.base_file_name is not None): + self.flush_buffer_to_file_func( + self.slice_buffer, self._slices_filename) + self.slice_buffer = self._init_slice_buffer() + + if self.monitor_particles: + if self.particle_buffer is None: + self.particle_buffer = self._init_particle_buffer() + + self._update_particle_buffer(particles) + + if (self.i_turn % self.flush_data_every == 0 and + self.base_file_name is not None): + self.flush_buffer_to_file_func( + self.particle_buffer, self._particles_filename) + self.particle_buffer = self._init_particle_buffer() + + def _init_bunch_buffer(self): + buf = {} + for bid in self.slicer.bunch_selection: + # Convert to int to avoid json serialization issues + buf[int(bid)] = {} + for stats in self.stats_to_store: + buf[int(bid)][stats] = np.zeros(self.flush_data_every) + + return buf + + def _init_slice_buffer(self): + buf = {} + for bid in self.slicer.bunch_selection: + # Convert to int to avoid json serialization issues + buf[int(bid)] = {} + for stats in self.stats_to_store: + buf[int(bid)][stats] = np.zeros((self.flush_data_every, + self.slicer.num_slices)) + + return buf + + def _init_particle_buffer(self): + num_particles = np.sum(self.particle_monitor_mask) + buf = {} + + for stat in self.stats_to_store_particles: + buf[stat] = np.zeros((self.flush_data_every, num_particles)) + + return buf + + def _update_bunch_buffer(self, particles): + i_bunch_particles = self._slice_result['i_slot_particles'] + + for i_bunch, bid in enumerate(self.slicer.bunch_selection): + bunch_mask = (i_bunch_particles == bid) + beta = np.mean(particles.beta0[bunch_mask]) + gamma = 1 / np.sqrt(1 - 
beta**2) + beta_gamma = beta * gamma + for stat in self.stats_to_store: + if stat == 'num_particles': + if len(self.slicer.bunch_selection) > 1: + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + val = np.sum(self.slicer.num_particles) + else: + mom = getattr(particles, stat.split('_')[-1]) + if stat.startswith('mean'): + val = np.mean(mom[bunch_mask]) + elif stat.startswith('sigma'): + val = np.std(mom[bunch_mask]) + elif stat.startswith('epsn'): + if stat.split('_')[-1] != 'zeta': + mom_p_str = 'p' + stat.split('_')[-1] + else: + mom_p_str = 'delta' + mom_p = getattr(particles, mom_p_str) + val = (np.sqrt(np.linalg.det(np.cov(mom[bunch_mask], + mom_p[bunch_mask]))) + * beta_gamma) + elif stat == 'num_particles': + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + raise ValueError('Unknown statistics f{stat}') + + self.bunch_buffer[bid][stat][self.i_turn % + self.flush_data_every] = val + + def _update_slice_buffer(self, particles): + i_bunch_particles = self._slice_result['i_slot_particles'] + + for i_bunch, bid in enumerate(self.slicer.bunch_selection): + bunch_mask = (i_bunch_particles == bid) + # we use the bunch beta_gamma to calculate the emittance, while + # we should use the slice beta_gamma + beta = np.mean(particles.beta0[bunch_mask]) + gamma = 1 / np.sqrt(1 - beta**2) + beta_gamma = beta * gamma + for stat in self.stats_to_store: + mom_str = stat.split('_')[-1] + if stat.startswith('mean'): + if len(self.slicer.bunch_selection) > 1: + val = self.slicer.mean(mom_str)[i_bunch, :] + else: + val = self.slicer.mean(mom_str) + elif stat.startswith('sigma'): + if len(self.slicer.bunch_selection) > 1: + val = self.slicer.std(mom_str)[i_bunch, :] + else: + val = self.slicer.std(mom_str) + elif stat.startswith('epsn'): + if mom_str != 'zeta': + mom_p_str = 'p' + stat.split('_')[-1] + else: + mom_p_str = 'delta' + + if len(self.slicer.bunch_selection) > 1: + val = (np.sqrt(self.slicer.var(mom_str)[i_bunch, :] * + self.slicer.var(mom_p_str)[i_bunch, :] - + self.slicer.cov(mom_str, mom_p_str)[i_bunch, + :]) * + beta_gamma) + else: + val = (np.sqrt(self.slicer.var(mom_str) * + self.slicer.var(mom_p_str) - + self.slicer.cov(mom_str, mom_p_str)) * + beta_gamma) + elif stat == 'num_particles': + if len(self.slicer.bunch_selection) > 1: + val = self.slicer.num_particles[i_bunch, :] + else: + val = self.slicer.num_particles + else: + raise ValueError('Unknown statistics f{stat}') + + self.slice_buffer[bid][stat][self.i_turn % + self.flush_data_every, :] = val + + def _update_particle_buffer(self, particles): + for stat in self.stats_to_store_particles: + if (self.particle_monitor_mask is not None + and len(self.particle_monitor_mask) != len(particles.x)): + raise ValueError('The length of the particle monitor mask is ' + 'different from the number of particles being tracked') + val = getattr(particles, stat)[self.particle_monitor_mask] + self.particle_buffer[stat][self.i_turn % + self.flush_data_every, :] = val + + +def flush_buffer_to_file_hdf5(buffer, filename): + try: + import h5py + except ImportError: + raise ImportError('h5py is required to save the data in hdf5 format') + # Iif the file does not exist, create it and write the buffer + if not os.path.exists(filename): + with h5py.File(filename, 'w') as f: + for key in buffer: + if type(buffer[key]) is not dict: + if len(buffer[key].shape) == 1: + f.create_dataset(str(key), + data=buffer[key], + chunks=True, maxshape=(None,)) + elif len(buffer[key].shape) == 2: + f.create_dataset(str(key), + data=buffer[key], + 
chunks=True, + maxshape=(None, buffer[key].shape[1])) + else: + raise ValueError('The shape of the buffer is not ' + 'supported') + else: + # this is for the bunch or slice buffer + for subkey in buffer[key]: + if len(buffer[key][subkey].shape) == 1: + f.create_dataset(str(key) + '/' + str(subkey), + data=buffer[key][subkey], + chunks=True, maxshape=(None,)) + elif len(buffer[key][subkey].shape) == 2: + f.create_dataset(str(key) + '/' + str(subkey), + data=buffer[key][subkey], + chunks=True, + maxshape=(None, + buffer[key][subkey].shape[1])) + else: + raise ValueError('The shape of the buffer is not ' + 'supported') + + # If the file exists, append the buffer + else: + with h5py.File(filename, 'a') as f: + for key in buffer: + if type(buffer[key]) is not dict: + # this is for the particle buffer + f[str(key)].resize((f[str(key)].shape[0] + + buffer[key].shape[0]), axis=0) + f[str(key)][-buffer[key].shape[0]:] = buffer[key] + else: + # this is for the bunch or slice buffer + for subkey in buffer[key]: + f[str(key) + '/' + str(subkey)].resize( + (f[str(key) + '/' + str(subkey)].shape[0] + + buffer[key][subkey].shape[0]), axis=0) + val = buffer[key][subkey] + f[str(key) + '/' + + str(subkey)][-buffer[key][subkey].shape[0]:] = val + + +def flush_buffer_to_file_json(buffer, filename): + # To prepare the buffer for json, we need to convert the numpy arrays to + # lists + buffer_lists = {} + for key in buffer: + if type(buffer[key]) is not dict: + # this is for the particle buffer + buffer_lists[str(key)] = buffer[key].tolist() + else: + # this is for the bunch or slice buffer + buffer_lists[str(key)] = {} + for subkey in buffer[key]: + buffer_lists[str(key)][subkey] = buffer[key][subkey].tolist() + + # Now we can write the buffer to the file + if not os.path.exists(filename): + with open(filename, 'w') as f: + json.dump(buffer_lists, f, indent=2) + else: + with open(filename, 'r') as f: + old_buffer = json.load(f) + for key in buffer_lists: + if type(buffer_lists[key]) is not dict: + # this is for the particle buffer + old_buffer[key] = np.concatenate((old_buffer[key], buffer_lists[key]), axis=0).tolist() + else: + # this is for the bunch or slice buffer + for subkey in buffer_lists[key]: + old_buffer[key][subkey] = np.concatenate( + (old_buffer[key][subkey], buffer_lists[key][subkey]), + axis=0).tolist() + + with open(filename, 'w') as f: + json.dump(old_buffer, f, indent=2) diff --git a/xfields/beam_elements/element_with_slicer.py b/xfields/beam_elements/element_with_slicer.py index 9943347c..761b9821 100644 --- a/xfields/beam_elements/element_with_slicer.py +++ b/xfields/beam_elements/element_with_slicer.py @@ -77,6 +77,19 @@ def __init__(self, num_turns=num_turns, circumference=circumference) + @staticmethod + def _check_filling_scheme_info(filling_scheme, bunch_numbers, num_slots): + if filling_scheme is None and bunch_numbers is None: + if num_slots is None: + num_slots = 1 + filling_scheme = np.ones(num_slots, dtype=np.int64) + bunch_numbers = np.arange(num_slots, dtype=np.int64) + else: + assert (num_slots is None and filling_scheme is not None and + bunch_numbers is not None) + + return filling_scheme, bunch_numbers + def init_slicer(self, zeta_range, num_slices, filling_scheme, bunch_selection, bunch_spacing_zeta, slicer_moments): if zeta_range is not None: @@ -239,7 +252,7 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): return xt.PipelineStatus(on_hold=True) if self.pipeline_manager is not None: - for i_partner, partner_name in 
enumerate(self.partners_names): + for i_partner, partner_name in enumerate(self.partner_names): self.pipeline_manager.receive_message(self._recv_buffer_length_buffer, self.name, partner_name, particles.name, internal_tag=0) self._ensure_recv_buffer_size() diff --git a/xfields/beam_elements/transverse_damper.py b/xfields/beam_elements/transverse_damper.py new file mode 100644 index 00000000..83bc8623 --- /dev/null +++ b/xfields/beam_elements/transverse_damper.py @@ -0,0 +1,118 @@ +import numpy as np + +import xpart as xp +import xfields as xf +import xtrack as xt +from xfields.slicers.compressed_profile import CompressedProfile + + +class TransverseDamper(xt.BeamElement): + """ + A simple bunch-by-bunch transverse Damper implementation. + + Parameters + ---------- + gain_x : float + the horizontal damper gain in 1/turns (corresponding to a damping + rate of gain_x/2). + gain_y : float + the vertical damper gain in 1/turns (corresponding to a damping rate + of gain_x/2). + zeta_range : tuple + the range of zetas covered by the underlying slicer. + num_slices : int + the number of slices per bunch used by the underlying slicer. + filling_scheme : np.ndarray, optional + an array of zeros and ones representing the filling scheme. Only + needed for multi-bunch tracking. + bunch_selection : np.ndarray, optional + an array indicating which slot each bunch occupies in the filling + scheme. Only needed for multi-bunch tracking + bunch_spacing_zeta : float, optional + the bunch spacing in meters. Only needed for multi-bunch tracking + circumference : float, optional + the machine circumference. Only needed for multi-bunch tracking. + + """ + + iscollective = True + + def __init__(self, gain_x, gain_y, zeta_range, num_slices, + circumference=None, bunch_spacing_zeta=None, filling_scheme=None, + bunch_selection=None, **kwargs): + self.gains = { + 'px': gain_x, + 'py': gain_y, + } + + self.slicer = xf.UniformBinSlicer( + filling_scheme=filling_scheme, + bunch_selection=bunch_selection, + zeta_range=zeta_range, + num_slices=num_slices, + bunch_spacing_zeta=bunch_spacing_zeta, + moments=['px', 'py'] + ) + + if filling_scheme is not None: + i_last_bunch = np.where(filling_scheme)[0][-1] + num_periods = i_last_bunch + 1 + else: + num_periods = 1 + + self.moments_data = {} + for moment in self.gains.keys(): + self.moments_data[moment] = CompressedProfile( + moments=[moment], + zeta_range=zeta_range, + num_slices=num_slices, + bunch_spacing_zeta=bunch_spacing_zeta, + num_periods=num_periods, + num_turns=1, + circumference=circumference + ) + + def _reconfigure_for_parallel(self, n_procs, my_rank): + filled_slots = self.slicer.filled_slots + scheme = np.zeros(np.max(filled_slots) + 1, + dtype=np.int64) + scheme[filled_slots] = 1 + + split_scheme = xp.matched_gaussian.split_scheme + bunch_selection_rank = split_scheme(filling_scheme=scheme, + n_chunk=int(n_procs)) + + self.slicer = xf.UniformBinSlicer( + filling_scheme=scheme, + bunch_selection=bunch_selection_rank[my_rank], + zeta_range=self.slicer.zeta_range, + num_slices=self.slicer.num_slices, + bunch_spacing_zeta=self.slicer.bunch_spacing_zeta, + moments=['px', 'py'] + ) + + def track(self, particles, i_turn=0): + i_slice_particles = particles.particle_id * 0 + -999 + i_slot_particles = particles.particle_id * 0 + -9999 + + self.slicer.slice(particles, i_slice_particles=i_slice_particles, + i_slot_particles=i_slot_particles) + + for moment in ['px', 'py']: + particles_moment = getattr(particles, moment)[:] + slice_means = self.slicer.mean(moment) + + for 
i_bunch, bunch_number in enumerate( + self.slicer.bunch_selection): + slot_mask = i_slot_particles == bunch_number + slot_slices = np.unique(i_slice_particles[slot_mask]) + + if len(self.slicer.bunch_selection) == 1: + slot_mean = np.mean(slice_means[slot_slices]) + else: + slot_mean = np.mean(slice_means[i_bunch, slot_slices]) + + particles_moment[slot_mask] -= (self.gains[moment] * + slot_mean) diff --git a/xfields/beam_elements/waketracker/waketracker.py b/xfields/beam_elements/waketracker/waketracker.py index da413a35..841c6b71 100644 --- a/xfields/beam_elements/waketracker/waketracker.py +++ b/xfields/beam_elements/waketracker/waketracker.py @@ -1,13 +1,6 @@ -from typing import Tuple - import numpy as np -from scipy.constants import c as clight -from scipy.constants import e as qe -from scipy.interpolate import interp1d - import xobjects as xo -import xtrack as xt from ..element_with_slicer import ElementWithSlicer from .convolution import _ConvData @@ -122,7 +115,7 @@ def track(self, particles): # Use common slicer from parent class to measure all moments status = super().track(particles) - if status and status.on_hold == True: + if status and status.on_hold: return status for wf in self.components: diff --git a/xfields/slicers/uniform.py b/xfields/slicers/uniform.py index 336737f8..187ced93 100644 --- a/xfields/slicers/uniform.py +++ b/xfields/slicers/uniform.py @@ -1,4 +1,3 @@ -from pathlib import Path import numpy as np from ..general import _pkg_root
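Usage reference (not part of the diff): a minimal sketch of the two new elements introduced here, CollectiveMonitor and TransverseDamper, mirroring the constructor arguments exercised in the tests above. The three-bunch beam, the numerical values and the 'example_monitor' file prefix are illustrative assumptions only.

import numpy as np
import xtrack as xt
import xfields as xf

n_bunches = 3
n_macroparticles = 10_000
bunch_spacing_zeta = 10.

# Three identical bunches spaced by bunch_spacing_zeta (illustrative values)
particles = xt.Particles(
    p0c=7e12,
    x=np.random.normal(0, 1e-3, n_bunches * n_macroparticles),
    px=np.random.normal(0, 1e-6, n_bunches * n_macroparticles),
    y=np.random.normal(0, 1e-3, n_bunches * n_macroparticles),
    py=np.random.normal(0, 1e-6, n_bunches * n_macroparticles),
    zeta=np.concatenate([np.random.normal(0, 0.1, n_macroparticles)
                         - bid * bunch_spacing_zeta
                         for bid in range(n_bunches)]),
    delta=np.random.normal(0, 1e-4, n_bunches * n_macroparticles),
)

filling_scheme = np.ones(n_bunches, dtype=int)

# Bunch-by-bunch statistics, flushed to 'example_monitor_bunches.h5'
# every 10 turns (the file prefix is a hypothetical name)
monitor = xf.CollectiveMonitor(
    base_file_name='example_monitor',
    backend='hdf5',
    monitor_bunches=True,
    monitor_slices=False,
    monitor_particles=False,
    flush_data_every=10,
    zeta_range=(-1, 1),
    filling_scheme=filling_scheme,
    bunch_spacing_zeta=bunch_spacing_zeta,
)

# Bunch-by-bunch damper kicking px and py with a 0.01/turn gain
damper = xf.TransverseDamper(
    gain_x=0.01, gain_y=0.01,
    zeta_range=(-1, 1),
    num_slices=100,
    filling_scheme=filling_scheme,
    bunch_spacing_zeta=bunch_spacing_zeta,
    circumference=26658.883,
)

# In a real simulation these elements would sit in a line together with a
# one-turn map, as in the tests above; here they are tracked directly.
for i_turn in range(20):
    damper.track(particles, i_turn)
    monitor.track(particles)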