diff --git a/tracking/element.py b/tracking/element.py
index 9f18581f12ff118058a37f852ee119ba908168c9..d5eacc2af0556430b7c9f02d3e1b8e54eb066645 100644
--- a/tracking/element.py
+++ b/tracking/element.py
@@ -151,9 +151,7 @@ class TransverseMap(Element):
         self.alpha = self.ring.optics.local_alpha
         self.beta = self.ring.optics.local_beta
         self.gamma = self.ring.optics.local_gamma
-        self.dispersion = self.ring.optics.local_dispersion
-        self.phase_advance = self.ring.tune[0:2]*2*np.pi
-
+        self.dispersion = self.ring.optics.local_dispersion
         if self.ring.adts is not None:
             self.adts_poly = [np.poly1d(self.ring.adts[0]),
                               np.poly1d(self.ring.adts[1]),
@@ -173,16 +171,20 @@ class TransverseMap(Element):
         """
 
         # Compute phase advance which depends on energy via chromaticity and ADTS
-        if self.ring.adts is not None:
-            phase_advance_x = self.phase_advance[0]*(1+self.ring.chro[0]*bunch["delta"]+
-                                                     self.adts_poly[0](bunch['x'])+self.adts_poly[2](bunch['y']))
-
-            phase_advance_y = self.phase_advance[1]*(1+self.ring.chro[1]*bunch["delta"]+
-                                                     self.adts_poly[1](bunch['x'])+self.adts_poly[3](bunch['y']))
-
+        if self.ring.adts is None:
+            phase_advance_x = 2*np.pi * (self.ring.tune[0] +
+                                         self.ring.chro[0]*bunch["delta"])
+            phase_advance_y = 2*np.pi * (self.ring.tune[1] +
+                                         self.ring.chro[1]*bunch["delta"])
         else:
-            phase_advance_x = self.phase_advance[0]*(1+self.ring.chro[0]*bunch["delta"])
-            phase_advance_y = self.phase_advance[1]*(1+self.ring.chro[1]*bunch["delta"])
+            phase_advance_x = 2*np.pi * (self.ring.tune[0] +
+                                         self.ring.chro[0]*bunch["delta"] +
+                                         self.adts_poly[0](bunch['x']) +
+                                         self.adts_poly[2](bunch['y']))
+            phase_advance_y = 2*np.pi * (self.ring.tune[1] +
+                                         self.ring.chro[1]*bunch["delta"] +
+                                         self.adts_poly[1](bunch['x']) +
+                                         self.adts_poly[3](bunch['y']))
 
         # 6x6 matrix corresponding to (x, xp, delta, y, yp, delta)
         matrix = np.zeros((6,6,len(bunch)))
diff --git a/tracking/longwakepotential.py b/tracking/longwakepotential.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bba585627f5371be678285074cfbd1ef0f0542e
--- /dev/null
+++ b/tracking/longwakepotential.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Aug 3 15:03:20 2021
+
+@author: foosang
+"""
+
+from machine_data import v0313_v2
+from mbtrack2.tracking import Bunch, WakePotential, Beam, LongitudinalMap, TransverseMap
+from mbtrack2.collective_effects import Resonator, CircularResistiveWall, WakeField
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.interpolate import interp1d
+from scipy.signal import convolve, oaconvolve
+from mpi4py import MPI
+from time import time
+
+t0 = time()
+comm = MPI.COMM_WORLD
+rank = comm.Get_rank()
+size = comm.Get_size()
+
+ring = v0313_v2(IDs="noID", V_RF=1.6e6)
+ring.chro = np.array((0,0))
+beam = Beam(ring)
+filling = np.zeros(ring.h)
+filling[0:1] = 1e-3
+n_bunch = np.where(filling != 0)[0].size
+
+save_data = False
+
+mp_number = int(1e4)
+beam.init_beam(filling, mp_per_bunch=mp_number, track_alive=False, mpi=True)
+
+bunch_num = beam.mpi.bunch_num
+rank = beam.mpi.bunch_to_rank(bunch_num)
+bunch = beam[bunch_num]
+
+bunch['x'] += 1e-3
+
+imp_model = Resonator(Rs=20e6, fr=10e9, Q=50000, plane='x', n_wake=1e6, n_imp=1e6)
+
+# wake_length = 10e-11
+# freq_lim = int(100e9)
+# freq_num = int(1e5)
+# freq = np.linspace(0, freq_lim, freq_num)
+# mesh = np.linspace(-1*wake_length, wake_length, freq_num)
+
+# rho_Cu = 1.68e-8 # Copper's resistivity in (Ohm.m)
+# rw = CircularResistiveWall(mesh, freq, length=336, rho=rho_Cu, radius=5e-3)
+# imp_model = WakeField([rw.Wxdip], "Wxdip")
+
+m = int(3000) # ring.T1 = m*slice_size
+
+slice_size = ring.T1/m
+bunch_length = 10e-11
+n_bins = int(bunch_length/slice_size)+1
+
+bunch_head = bunch_length/2
+bunch_tail = -1*bunch_length/2
+
+# ------ For variable bin size -------
+# slice_size = None
+# bunch_head = None
+# bunch_tail = None
+# n_bins = 75
+# ------------------------------------
+
+turns = 3
+xsave = np.empty((mp_number, turns), dtype=np.float64)
+xall = np.empty((beam.mpi.size, mp_number, turns), dtype=np.float64)
+
+long = LongitudinalMap(ring)
+trans = TransverseMap(ring)
+
+wp_k = 0
+for k in range(turns):
+    beam.mpi.share_distributions(beam, n_bin=n_bins, bin_min=bunch_tail,
+                                 bin_max=bunch_head, bin_size=slice_size)
+
+
+    #%% Prepare long tau and rho
+
+    center = beam.mpi.tau_center[rank]
+    profile = beam.mpi.tau_profile[rank]
+    bin_size = center[1]-center[0]
+    charge_per_mp = beam.mpi.charge_per_mp_all[rank]
+    sorted_index = beam.mpi.tau_sorted_index
+
+    # _ , sorted_index, profile, center = bunch.binning(n_bin=n_bins,
+    #                                                   bin_min=bunch_tail,
+    #                                                   bin_max=bunch_head,
+    #                                                   bin_size=slice_size)
+
+    # charge_per_mp = bunch.charge_per_mp
+    # bin_size = center[1]-center[0]
+
+    #%%
+    rho = bunch.charge_per_mp*profile/(bin_size*bunch.charge) # [mp_number/tau]
+    rho = np.array(rho)
+
+    # Compute time array relative to t0
+    tau = np.array(center)
+    dtau = tau[1] - tau[0]
+
+    n_turns = 2 # the upper limit of computing rho
+    if n_bins % 2 == 0:
+        n_bins_total = int(n_turns*ring.T0/bin_size)
+        N_front = int(n_bins_total/2)
+        N_back = int(n_bins_total/2)
+        tau = np.arange(tau[0] - dtau*N_front, tau[-1] + dtau*N_back, dtau)
+        rho = np.append(rho, np.zeros(N_back))
+        rho = np.insert(rho, 0, np.zeros(N_front))
+    else:
+        n_bins_total = int(np.floor(n_turns*ring.T0/bin_size))
+        N_front = int(np.floor(n_bins_total/2))
+        N_back = int(np.floor(n_bins_total/2))
+        tau = np.arange(tau[0] - dtau*N_front, tau[-1] + dtau*(N_back+1), dtau)
+        rho = np.append(rho, np.zeros(N_back))
+        rho = np.insert(rho, 0, np.zeros(N_front+1))
+
+    if len(tau) != len(rho):
+        tau = np.append(tau, tau[-1]+dtau)
+        # tau = np.insert(tau, 0, tau[0]-dtau)
+
+    print("bunch {0} n_bins_total = {1}, n_bins = {2}, tau0 = {3}".format(bunch_num, n_bins_total, n_bins, tau[0]))
+
+    #%% Long wake function
+
+    wf0 = imp_model.Wxdip
+
+    wf_func = interp1d(wf0.data.index, wf0.data['real'], bounds_error=False, fill_value=0)
+    wf = wf_func(tau)
+
+    # plt.plot(tau, wf)
+
+    #%% Dipole moment
+
+    plane = "x"
+    dipole0 = np.zeros((n_bins - 1,))
+
+    for i in range(n_bins - 1):
+        dipole0[i] = bunch[plane][sorted_index == i].sum()
+    dipole0 = dipole0/profile
+    dipole0[np.isnan(dipole0)] = 0
+
+    dipole0 = np.append(dipole0, np.zeros(N_back))
+    dipole0 = np.insert(dipole0, 0, np.zeros(N_front))
+
+    if len(tau) != len(dipole0):
+        if len(tau)-len(dipole0) == 1:
+            dipole0 = np.append(dipole0, 0)
+        elif len(tau)-len(dipole0) == 2:
+            dipole0 = np.append(dipole0, 0)
+            dipole0 = np.insert(dipole0, 0, 0)
+        elif len(tau)-len(dipole0) == 3:
+            dipole0 = np.append(dipole0, np.array([0,0]))
+            dipole0 = np.insert(dipole0, 0, 0)
+
+    # Interpolate on tau0 to get the same size as W0
+    dipole_func = interp1d(tau, dipole0, fill_value=0, bounds_error=False)
+    dipole = dipole_func(tau)
+
+    # plt.plot(tau, dipole)
+
+    #%% Get wake potential
+
+    wp = convolve(rho*dipole, wf, mode='same')*dtau
+
+    cut_off = (n_bins_total-n_bins)//2
+    wp = wp[cut_off:]
+    tau = tau[cut_off:]
+
+    # beam.mpi.share_distributions(beam, n_bin=n_bins, bin_min=bunch_tail,
+    #                              bin_max=bunch_head, bin_size=slice_size)
+
+    # plt.plot(tau, wp)
+    # plt.plot(tau, rho/rho.max()*wp.max())
+
+
+    #%% Share wake potential and time array between bunches
+
+    beam.mpi.__setattr__("wp_all", np.empty((len(beam), len(tau)), dtype=np.float64))
+    beam.mpi.comm.Allgather([wp, MPI.DOUBLE], [beam.mpi.__getattribute__("wp_all"), MPI.DOUBLE])
+
+    beam.mpi.__setattr__("tau_all", np.empty((len(beam), len(tau)), dtype=np.float64))
+    beam.mpi.comm.Allgather([tau, MPI.DOUBLE], [beam.mpi.__getattribute__("tau_all"), MPI.DOUBLE])
+
+    #%% Sum up the wake potentials
+
+    # notation : n (or nothing) = current bunch, i = previous bunches
+
+    rank_n = beam.mpi.bunch_to_rank(bunch_num)
+    mpi_size = beam.mpi.size
+
+    # data of bunch 0
+    tau_0 = beam.mpi.tau_all[0]
+    wp_0 = beam.mpi.wp_all[0]
+
+    if save_data is True and bunch_num == 0:
+        np.save("turn{0}_wp_0".format(k), wp_0)
+        np.save("turn{0}_tau_0".format(k), tau_0)
+
+
+    # Iterate from bunch 0 until the bunch before the current one.
+    for i in range(1, rank_n+1):
+        bunch_num_i = beam.mpi.rank_to_bunch(i)
+
+        tau_i = beam.mpi.tau_all[i]
+        wp_i = beam.mpi.wp_all[i]
+
+        shift_bunch = bunch_num_i * m # shift in number of bins unit
+
+        wp_0[shift_bunch:] += wp_i[:-shift_bunch]
+
+        if rank_n == mpi_size-1 and i == rank_n: # save only for the last bunch
+        # if i == rank_n: # save for every bunch
+            if save_data is True:
+                # save its own wake
+                # np.save("turn{0}_bunch{1}_wp{2}".format(k, bunch_num, bunch_num_i), wp_i)
+                # save the summed wake until itself
+                np.save("turn{0}_bunch{1}_sum".format(k, bunch_num), wp_0)
+
+        if i == rank_n and k > 0: # add wp_k only once with itself
+            shift_T0 = ring.h * m
+            wp_0[:-shift_T0] += wp_k
+            if rank_n == mpi_size-1 and save_data is True:
+                # save the remaining wake from the previous turn
+                np.save("turn{0}_bunch{1}_sum_wp_k".format(k, bunch_num), wp_0)
+
+
+    func = interp1d(tau_0, wp_0, bounds_error=False, fill_value=0)
+    tn = bunch_num * ring.T1 # shift in time unit relative to bunch 0
+    wp_interp = func(bunch['tau']+tn)
+    bunch['xp'] += wp_interp * bunch.charge / ring.E0 # add Wp of an anterior bunch
+
+    # For the last bunch
+    if rank_n == beam.mpi.size - 1:
+        shift_T0 = ring.h * m
+        wp_k = wp_0[shift_T0:]
+
+    if save_data is False:
+        print('No output file is written')
+    else:
+        print('Output files generated')
+
+
+    long.track(bunch)
+    trans.track(bunch)
+    xsave[:,k] = bunch['x']
+
+    if rank_n == mpi_size - 1:
+        print("turn {0} bunch {1} COMPLETE".format(k, bunch_num))
+
+comm.Allgather([xsave, MPI.DOUBLE], [xall, MPI.DOUBLE])
+print(xall)
+t1 = time()
+print('total time = ', t1-t0)
+
+
+#%% Compare to WakePotential
+
+# bunch2 = Bunch(ring, mp_number=1e5)
+# imp_model2 = Resonator(Rs=20e6, fr=10e9, Q=10000, plane='x', n_wake=1e6, n_imp=1e6)
+
+# bunch2.init_gaussian()
+# bunch2['x'] += 1e-3
+
+# wp_class = WakePotential(ring, imp_model2, n_bin=95)
+# wp_class.track(bunch2)
+# wp_class.plot_last_wake('Wxdip')
+
+# tau99, wp99 = wp_class.get_wakepotential(bunch2, 'Wxdip')
+# plt.plot(tau99, wp99)
+
+# wp_class.plot_last_wake('Wxdip', plot_wake_function=False)
+
+# _,_,_,_,dipole99 = wp_class.get_gaussian_wakepotential(ring.sigma_0, 'Wxdip')
+# # plt.plot(tau99, dipole99)
+
+# _,dtau99,wf99 = wp_class.prepare_wakefunction('Wxdip', tau99)
+
+# a = np.max(abs(wp99))
+# b = np.max(abs(wp))
+# print('max abs(wp99) = ', a)
+# print('max abs(wp) = ', b)
+# print('wp99 / wp = ', a/b)
+
+#%% Old code for Wp sum
+
+    # -------- Old code (as of 12/08/2021) --------
+    # the starting point of bunch i relative to t0
+    # ti = bunch_num_i * ring.T1 + tau[0]
+    # ti_index = np.where(tau_0 >= ti)[0][0]
+
+    # tau_i = beam.mpi.tau_all[i]
+    # tau_i = tau_i[:-ti_index] # crop the exceeding tail to have the same size as tau_0
+    # wp_i = beam.mpi.wp_all[i]
+    # wp_i = wp_i[:-ti_index]
+
+    # wp_0[ti_index:] += wp_i
+    # --------------------------------------------
+
+    # --------- old code --------------
+    # ti = (bunch_num - bunch_num_i) * ring.T1 + tau[0]
+
+    # tau_i0 = beam.mpi.tau_all[i]
+    # wp_i0 = beam.mpi.wp_all[i]
+
+    # ind = np.where(tau_i0 >= ti)[0]
+    # tau_i = tau_i0[ind]
+    # wp_i = wp_i0[ind]
+
+    # # for testing the addition of the wakes at some location on bunch n
+    # print("bunch_n {0} + bunch_i {1} | tn = {2}, ti = {3}".format(bunch_num, bunch_num_i, tn, ti))
+
+    # # ind_n = 650000
+    # # print('INIT : bunch {0} round {1} | wp = {2}'.format(bunch_num, i, wp[ind_n]))
+    # # print('bunch {0}+{1} | wp_i = {2}'.format(bunch_num, bunch_num_i, wp_i[ind_n]))
+
+    # # print('bunch {0} + bunch {1} | starts at {2}'.format(bunch_num, bunch_num_i, tau_i[0]))
+
+    # # add zeros at the end of the cropped array to maintain the size.
+    # wp_i = np.append(wp_i, np.zeros(wp.size-ind.size))
+
+    # wp += wp_i
+    # ----------------------------------
+
+