diff --git a/tracking/monitors/monitors.py b/tracking/monitors/monitors.py
index 6eb7bd9af6068b8d7af05993de5add1ec006d2f7..0d3b9a4cde683c0c301c793ed69d36bbc70a226c 100644
--- a/tracking/monitors/monitors.py
+++ b/tracking/monitors/monitors.py
@@ -18,7 +18,6 @@ from mbtrack2.tracking.rf import CavityResonator
 from scipy.interpolate import interp1d
 from abc import ABCMeta
 from scipy.fft import rfft, rfftfreq
-from mpi4py import MPI
 
 class Monitor(Element, metaclass=ABCMeta):
     """
@@ -126,6 +125,7 @@ class Monitor(Element, metaclass=ABCMeta):
                 self._file_name_storage.append(file_name + ".hdf5")
                 if len(self._file_storage) == 0:
                     if mpi_mode == True:
+                        from mpi4py import MPI
                         f = hp.File(self.file_name, "w", libver='latest', 
                              driver='mpio', comm=MPI.COMM_WORLD)
                     else:
diff --git a/tracking/parallel.py b/tracking/parallel.py
index 72f8758ed1d8d7e4e0a0bf7607f93192a6b3a185..6de53e785970fca7d2e991a7b84bed72346be3f4 100644
--- a/tracking/parallel.py
+++ b/tracking/parallel.py
@@ -8,6 +8,7 @@ Module to handle parallel computation
 """
 
 import numpy as np
+from mpi4py import MPI
 
 class Mpi:
     """
@@ -51,10 +52,6 @@ class Mpi:
     """
     
     def __init__(self, filling_pattern):
-        try:
-            from mpi4py import MPI
-        except(ModuleNotFoundError):
-            print("Please install mpi4py module.")
 
         self.comm = MPI.COMM_WORLD
         self.rank = self.comm.Get_rank()
@@ -136,5 +133,47 @@ class Mpi:
             return self.rank - 1
         else:
             return max(self.table[:,0])
+        
+    def share_distributions(self, beam, dimensions="tau", n_bin=75):
+        """
+        Compute the bunch profiles and share them between the different bunches.
+
+        Parameters
+        ----------
+        beam : Beam object
+        dimensions : str or list of str, optional
+            Dimensions in which the binning is done. The default is "tau".
+        n_bin : int or list of int, optional
+            Number of bins. The default is 75.
+
+        """
+        
+        if(beam.mpi_switch == False):
+            print("Error, mpi is not initialised.")
+            
+        if isinstance(dimensions, str):
+            dimensions = [dimensions]
+            
+        if isinstance(n_bin, int):
+            n_bin = np.ones((len(dimensions),), dtype=int)*n_bin
+            
+        bunch = beam[self.bunch_num]
+        
+        charge_per_mp_all = self.comm.allgather(bunch.charge_per_mp)
+        self.charge_per_mp_all = charge_per_mp_all
+            
+        for i in range(len(dimensions)):
+            
+            dim = dimensions[i]
+            n = n_bin[i]
+            bins, sorted_index, profile, center = bunch.binning(dimension=dim, n_bin=n)
+            
+            self.__setattr__(dim + "_center", np.empty((len(beam), len(center)), dtype=np.float64))
+            self.comm.Allgather([center,  MPI.DOUBLE], [self.__getattribute__(dim + "_center"), MPI.DOUBLE])
+            
+            self.__setattr__(dim + "_profile", np.empty((len(beam), len(profile)), dtype=np.int64))
+            self.comm.Allgather([profile,  MPI.INT64_T], [self.__getattribute__(dim + "_profile"), MPI.INT64_T])
+            
+            self.__setattr__(dim + "_sorted_index", sorted_index)
     
     
\ No newline at end of file
diff --git a/tracking/particles.py b/tracking/particles.py
index 6cb7c07d84fb9b2f5e90326d02bec7b1467af5b1..fc75fa34931457605ba18f8edf841fa1cf826c08 100644
--- a/tracking/particles.py
+++ b/tracking/particles.py
@@ -13,8 +13,6 @@ import seaborn as sns
 import pandas as pd
 from mbtrack2.tracking.parallel import Mpi
 from scipy.constants import c, m_e, m_p, e
-from mpi4py import MPI
-
 
 class Particle:
     """
@@ -702,47 +700,6 @@ class Beam:
         self.mpi_switch = False
         self.mpi = None
         
-    def mpi_share_distributions(self, dimensions="tau", n_bin=75):
-        """
-        Compute the bunch profile and share it between the different bunches.
-
-        Parameters
-        ----------
-        dimension : str or list of str, optional
-            Dimensions in which the binning is done. The default is "tau".
-        n_bin : int or list of int, optional
-            Number of bins. The default is 75.
-
-        """
-        
-        if(self.mpi_switch == False):
-            print("Error, mpi is not initialised.")
-            
-        if isinstance(dimensions, str):
-            dimensions = [dimensions]
-            
-        if isinstance(n_bin, int):
-            n_bin = np.ones((len(dimensions),), dtype=int)*n_bin
-            
-        bunch = self[self.mpi.bunch_num]
-        
-        charge_per_mp_all = self.mpi.comm.allgather(bunch.charge_per_mp)
-        self.mpi.charge_per_mp_all = charge_per_mp_all
-            
-        for i in range(len(dimensions)):
-            
-            dim = dimensions[i]
-            n = n_bin[i]
-            bins, sorted_index, profile, center = bunch.binning(dimension=dim, n_bin=n)
-            
-            self.mpi.__setattr__(dim + "_center", np.empty((len(self), len(center)), dtype=np.float64))
-            self.mpi.comm.Allgather([center,  MPI.DOUBLE], [self.mpi.__getattribute__(dim + "_center"), MPI.DOUBLE])
-            
-            self.mpi.__setattr__(dim + "_profile", np.empty((len(self), len(profile)), dtype=np.int64))
-            self.mpi.comm.Allgather([profile,  MPI.INT64_T], [self.mpi.__getattribute__(dim + "_profile"), MPI.INT64_T])
-            
-            self.mpi.__setattr__(dim + "_sorted_index", sorted_index)
-        
     def plot(self, var, option=None):
         """
         Plot variables with respect to bunch number.
diff --git a/tracking/rf.py b/tracking/rf.py
index a8f34862d539d5fe8defede2d8a820d92c8f1991..8274964655791f58c013ccc95372c9f378337638 100644
--- a/tracking/rf.py
+++ b/tracking/rf.py
@@ -422,7 +422,7 @@ class CavityResonator():
         for j, bunch in enumerate(beam.not_empty):
             index = self.valid_bunch_index[j]
             if beam.mpi_switch:
-                beam.mpi_share_distributions(n_bin=n_bin)
+                beam.mpi.share_distributions(beam, n_bin=n_bin)
                 center[:,index] = beam.mpi.tau_center[j]
                 profile[:,index] = beam.mpi.tau_profile[j]
                 bin_length[index] = center[1, index]-center[0, index]