Commit e0e90ccb authored by Alexis GAMELIN

Changes to allow for use w/o mpi4py

Beam.mpi_share_distributions is now moved to Mpi.share_distributions.
mpi4py is now only needed for multi-bunch tracking, not for single-bunch tracking.
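
For downstream code, the change is a call-site migration. A minimal sketch, assuming `beam` is a Beam instance with MPI enabled (the keyword arguments follow the signatures in the diff below):

    # Before: method on Beam, with mpi4py imported unconditionally
    beam.mpi_share_distributions(dimensions="tau", n_bin=75)

    # After: method on the Mpi helper, with the beam passed explicitly
    beam.mpi.share_distributions(beam, dimensions="tau", n_bin=75)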
parent 7c73c0ef
@@ -18,7 +18,6 @@ from mbtrack2.tracking.rf import CavityResonator
 from scipy.interpolate import interp1d
 from abc import ABCMeta
 from scipy.fft import rfft, rfftfreq
-from mpi4py import MPI
 
 class Monitor(Element, metaclass=ABCMeta):
     """
@@ -126,6 +125,7 @@ class Monitor(Element, metaclass=ABCMeta):
         self._file_name_storage.append(file_name + ".hdf5")
         if len(self._file_storage) == 0:
             if mpi_mode == True:
+                from mpi4py import MPI
                 f = hp.File(self.file_name, "w", libver='latest',
                             driver='mpio', comm=MPI.COMM_WORLD)
             else:
...
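
The monitors change shows the idiom used throughout this commit: mpi4py is imported inside the branch that needs it, so a missing module can only surface when MPI output is actually requested. A generic sketch of the pattern (the helper name and the non-MPI fallback are illustrative, not from the diff):

    import h5py as hp

    def open_monitor_file(file_name, mpi_mode=False):
        # Lazy import: a single-bunch run works without mpi4py installed.
        if mpi_mode:
            from mpi4py import MPI
            return hp.File(file_name, "w", libver='latest',
                           driver='mpio', comm=MPI.COMM_WORLD)
        return hp.File(file_name, "w", libver='latest')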
@@ -8,6 +8,7 @@ Module to handle parallel computation
 """
 
 import numpy as np
+from mpi4py import MPI
 
 class Mpi:
     """
@@ -51,10 +52,6 @@ class Mpi:
     """
     def __init__(self, filling_pattern):
-        try:
-            from mpi4py import MPI
-        except(ModuleNotFoundError):
-            print("Please install mpi4py module.")
         self.comm = MPI.COMM_WORLD
         self.rank = self.comm.Get_rank()
@@ -137,4 +134,46 @@ class Mpi:
         else:
             return max(self.table[:,0])
+
+    def share_distributions(self, beam, dimensions="tau", n_bin=75):
+        """
+        Compute the bunch profiles and share them between the different bunches.
+
+        Parameters
+        ----------
+        beam : Beam object
+        dimensions : str or list of str, optional
+            Dimensions in which the binning is done. The default is "tau".
+        n_bin : int or list of int, optional
+            Number of bins. The default is 75.
+        """
+        if(beam.mpi_switch == False):
+            print("Error, mpi is not initialised.")
+
+        if isinstance(dimensions, str):
+            dimensions = [dimensions]
+
+        if isinstance(n_bin, int):
+            n_bin = np.ones((len(dimensions),), dtype=int)*n_bin
+
+        bunch = beam[self.bunch_num]
+
+        charge_per_mp_all = self.comm.allgather(bunch.charge_per_mp)
+        self.charge_per_mp_all = charge_per_mp_all
+
+        for i in range(len(dimensions)):
+            dim = dimensions[i]
+            n = n_bin[i]
+            bins, sorted_index, profile, center = bunch.binning(dimension=dim, n_bin=n)
+            self.__setattr__(dim + "_center", np.empty((len(beam), len(center)), dtype=np.float64))
+            self.comm.Allgather([center, MPI.DOUBLE], [self.__getattribute__(dim + "_center"), MPI.DOUBLE])
+            self.__setattr__(dim + "_profile", np.empty((len(beam), len(profile)), dtype=np.int64))
+            self.comm.Allgather([profile, MPI.INT64_T], [self.__getattribute__(dim + "_profile"), MPI.INT64_T])
+            self.__setattr__(dim + "_sorted_index", sorted_index)
\ No newline at end of file
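
For context, a sketch of how the new method is typically driven, assuming one MPI rank per bunch and that Beam.mpi_init() sets up beam.mpi (an assumption about the surrounding mbtrack2 API, not shown in this diff); run e.g. with mpirun -n <n_bunch> python script.py:

    # Hypothetical driver, not part of this commit.
    beam.mpi_init()                  # assumed to set beam.mpi = Mpi(filling_pattern)
    beam.mpi.share_distributions(beam, dimensions="tau", n_bin=75)
    # After the Allgather calls, every rank holds the full beam data:
    profiles = beam.mpi.tau_profile  # shape (len(beam), 75), int64 counts per bin
    centers = beam.mpi.tau_center    # shape (len(beam), 75), float64 bin centers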
@@ -13,8 +13,6 @@ import seaborn as sns
 import pandas as pd
 from mbtrack2.tracking.parallel import Mpi
 from scipy.constants import c, m_e, m_p, e
-from mpi4py import MPI
 
 class Particle:
     """
@@ -702,47 +700,6 @@ class Beam:
         self.mpi_switch = False
         self.mpi = None
-
-    def mpi_share_distributions(self, dimensions="tau", n_bin=75):
-        """
-        Compute the bunch profile and share it between the different bunches.
-
-        Parameters
-        ----------
-        dimension : str or list of str, optional
-            Dimensions in which the binning is done. The default is "tau".
-        n_bin : int or list of int, optional
-            Number of bins. The default is 75.
-        """
-        if(self.mpi_switch == False):
-            print("Error, mpi is not initialised.")
-
-        if isinstance(dimensions, str):
-            dimensions = [dimensions]
-
-        if isinstance(n_bin, int):
-            n_bin = np.ones((len(dimensions),), dtype=int)*n_bin
-
-        bunch = self[self.mpi.bunch_num]
-
-        charge_per_mp_all = self.mpi.comm.allgather(bunch.charge_per_mp)
-        self.mpi.charge_per_mp_all = charge_per_mp_all
-
-        for i in range(len(dimensions)):
-            dim = dimensions[i]
-            n = n_bin[i]
-            bins, sorted_index, profile, center = bunch.binning(dimension=dim, n_bin=n)
-            self.mpi.__setattr__(dim + "_center", np.empty((len(self), len(center)), dtype=np.float64))
-            self.mpi.comm.Allgather([center, MPI.DOUBLE], [self.mpi.__getattribute__(dim + "_center"), MPI.DOUBLE])
-            self.mpi.__setattr__(dim + "_profile", np.empty((len(self), len(profile)), dtype=np.int64))
-            self.mpi.comm.Allgather([profile, MPI.INT64_T], [self.mpi.__getattribute__(dim + "_profile"), MPI.INT64_T])
-            self.mpi.__setattr__(dim + "_sorted_index", sorted_index)
 
     def plot(self, var, option=None):
         """
         Plot variables with respect to bunch number.
...
@@ -422,7 +422,7 @@ class CavityResonator():
         for j, bunch in enumerate(beam.not_empty):
             index = self.valid_bunch_index[j]
             if beam.mpi_switch:
-                beam.mpi_share_distributions(n_bin=n_bin)
+                beam.mpi.share_distributions(beam, n_bin=n_bin)
                 center[:,index] = beam.mpi.tau_center[j]
                 profile[:,index] = beam.mpi.tau_profile[j]
                 bin_length[index] = center[1, index]-center[0, index]
...