Commit 66874533 authored by Gamelin Alexis

Various improvements on Beam

Fix a bug in Beam.distance_between_bunches for the last bunch when the first buckets of the filling pattern are empty.
Change Beam.distance_between_bunches to compute the array only at initialization, via Beam.update_distance_between_bunches.
Beam.mpi_share_distributions can now bin and share the bunch distributions in any dimension, not only "tau".
parent 3ead6b4a
@@ -460,6 +460,8 @@ class Beam:
     mpi_gather()
         Gather beam, all bunches of the different processors are sent to
         all processors. Rather slow
+    mpi_share_distributions()
+        Compute the bunch profile and share it between the different bunches.
     mpi_close()
         Call mpi_gather and switch off MPI parallelisation
     plot(var, option=None)
@@ -507,12 +509,22 @@ class Beam:
     @property
     def distance_between_bunches(self):
+        """Return an array which contains the distance to the next bunch in
+        units of the RF period (ring.T1)."""
+        return self._distance_between_bunches
+
+    def update_distance_between_bunches(self):
+        """Update the distance_between_bunches array."""
         filling_pattern = self.filling_pattern
         distance = np.zeros(filling_pattern.shape)
+        last_value = 0
+        # All bunches
         for index, value in enumerate(filling_pattern):
             if value == False:
                 pass
             elif value == True:
+                last_value = index
                 count = 1
                 for value2 in filling_pattern[index+1:]:
                     if value2 == False:
@@ -520,7 +532,17 @@ class Beam:
                     elif value2 == True:
                         break
                 distance[index] = count
-        return distance
+
+        # Last bunch case
+        count2 = 0
+        for index2, value2 in enumerate(filling_pattern):
+            if value2 == True:
+                break
+            if value2 == False:
+                count2 += 1
+        distance[last_value] += count2
+        self._distance_between_bunches = distance
 
     def init_beam(self, filling_pattern, current_per_bunch=1e-3,
                   mp_per_bunch=1e3, track_alive=True):
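
For reference, a minimal standalone sketch of the wraparound logic introduced above (not part of the commit; the `count += 1` branch hidden by the fold between the two hunks is assumed):

import numpy as np

def distance_to_next_bunch(filling_pattern):
    """Distance, in RF buckets, from each filled bucket to the next
    filled bucket, wrapping around the ring for the last bunch."""
    filling_pattern = np.asarray(filling_pattern, dtype=bool)
    distance = np.zeros(filling_pattern.shape)
    last_value = 0
    for index, value in enumerate(filling_pattern):
        if value:
            last_value = index
            count = 1
            for value2 in filling_pattern[index+1:]:
                if not value2:
                    count += 1
                else:
                    break
            distance[index] = count
    # Wrap around: the empty buckets at the start of the pattern belong
    # to the gap after the last bunch.
    count2 = 0
    for value2 in filling_pattern:
        if value2:
            break
        count2 += 1
    distance[last_value] += count2
    return distance

print(distance_to_next_bunch([False, True, True, False]))
# [0. 1. 3. 0.] -- bunch 2 wraps through buckets 3 and 0 back to bunch 1
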
@@ -574,6 +596,7 @@ class Beam:
         self.bunch_list = bunch_list
         self.update_filling_pattern()
+        self.update_distance_between_bunches()
 
         for bunch in self.not_empty:
             bunch.init_gaussian()
@@ -676,28 +699,46 @@ class Beam:
             self.mpi_switch = False
             self.mpi = None
 
-    def mpi_share_distributions(self):
-        """Share distribution between bunches"""
+    def mpi_share_distributions(self, dimensions="tau", n_bins=75):
+        """
+        Compute the bunch profile and share it between the different bunches.
+
+        Parameters
+        ----------
+        dimensions : str or list of str, optional
+            Dimensions in which the binning is done. The default is "tau".
+        n_bins : int or list of int, optional
+            Number of bins. The default is 75.
+        """
         if(self.mpi_switch == False):
             print("Error, mpi is not initialised.")
+        if isinstance(dimensions, str):
+            dimensions = [dimensions]
+        if isinstance(n_bins, int):
+            n_bins = np.ones((len(dimensions),), dtype=int)*n_bins
         bunch = self[self.mpi.bunch_num]
-        bins, sorted_index, profile, center = bunch.binning(n_bin=75)
-        self.mpi.bins_all = np.empty((len(self), len(bins)), dtype=np.float64)
-        self.mpi.comm.Allgather([bins, MPI.DOUBLE], [self.mpi.bins_all, MPI.DOUBLE])
-        self.mpi.center_all = np.empty((len(self), len(center)), dtype=np.float64)
-        self.mpi.comm.Allgather([center, MPI.DOUBLE], [self.mpi.center_all, MPI.DOUBLE])
-        self.mpi.profile_all = np.empty((len(self), len(profile)), dtype=np.int64)
-        self.mpi.comm.Allgather([profile, MPI.INT64_T], [self.mpi.profile_all, MPI.INT64_T])
         charge_per_mp_all = self.mpi.comm.allgather(bunch.charge_per_mp)
         self.mpi.charge_per_mp_all = charge_per_mp_all
-        self.mpi.sorted_index = sorted_index
+        for i in range(len(dimensions)):
+            dim = dimensions[i]
+            n_bin = n_bins[i]
+            bins, sorted_index, profile, center = bunch.binning(dimension=dim, n_bin=n_bin)
+            setattr(self.mpi, dim + "_center", np.empty((len(self), len(center)), dtype=np.float64))
+            self.mpi.comm.Allgather([center, MPI.DOUBLE], [getattr(self.mpi, dim + "_center"), MPI.DOUBLE])
+            setattr(self.mpi, dim + "_profile", np.empty((len(self), len(profile)), dtype=np.int64))
+            self.mpi.comm.Allgather([profile, MPI.INT64_T], [getattr(self.mpi, dim + "_profile"), MPI.INT64_T])
+            setattr(self.mpi, dim + "_sorted_index", sorted_index)
 
     def plot(self, var, option=None):
         """
...
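
The gather pattern used in mpi_share_distributions can be exercised on its own. Below is a minimal mpi4py sketch, not part of the commit, assuming one bunch per rank and a hypothetical per-rank coordinate array tau standing in for bunch.binning() (run with e.g. mpirun -n 4 python sketch.py):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
n_bin = 75

# Stand-in for one bunch per rank; np.histogram replaces bunch.binning().
tau = np.random.normal(size=1000)
profile, edges = np.histogram(tau, n_bin)
profile = profile.astype(np.int64)
center = (edges[1:] + edges[:-1]) / 2

# Allgather: every rank ends up with the histograms of all ranks,
# mirroring the mpi.tau_center / mpi.tau_profile attributes set above.
center_all = np.empty((comm.size, n_bin), dtype=np.float64)
comm.Allgather([center, MPI.DOUBLE], [center_all, MPI.DOUBLE])
profile_all = np.empty((comm.size, n_bin), dtype=np.int64)
comm.Allgather([profile, MPI.INT64_T], [profile_all, MPI.INT64_T])

Allgather is used rather than Gather followed by Bcast so that every rank holds all bunch profiles after a single collective call.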