diff --git a/mbtrack2/tracking/parallel.py b/mbtrack2/tracking/parallel.py
index 30f835b413adfd02aef6a9f4aeabf860f0fa0cc5..b71cf4d66d6f5661bee12c17a177f02c8cf689cb 100644
--- a/mbtrack2/tracking/parallel.py
+++ b/mbtrack2/tracking/parallel.py
@@ -48,13 +48,16 @@ class Mpi:
         Compute the bunch profiles and share it between the different bunches.
     share_means(beam)
         Compute the bunch means and share it between the different bunches.
+    share_stds(beam)
+        Compute the bunch standard deviations and share them between the
+        different bunches.
         
     References
     ----------
     [1] L. Dalcin, P. Kler, R. Paz, and A. Cosimo, Parallel Distributed 
     Computing using Python, Advances in Water Resources, 34(9):1124-1139, 2011.
-    """
     
+    """
     def __init__(self, filling_pattern):
         from mpi4py import MPI
         self.MPI = MPI
@@ -214,4 +217,29 @@ class Mpi:
         else:
             mean = np.zeros((6,), dtype=np.float64)
         self.comm.Allgather([mean, self.MPI.DOUBLE], [self.mean_all, self.MPI.DOUBLE])
+
+    def share_stds(self, beam):
+        """
+        Compute the bunch standard deviations and share them between the
+        different bunches. The result is stored in self.std_all.
+
+        Parameters
+        ----------
+        beam : Beam object
+
+        """
+        if not beam.mpi_switch:
+            print("Error, mpi is not initialised.")
+
+        bunch = beam[self.bunch_num]
+
+        # Gather the charge of every bunch on all ranks.
+        self.charge_all = self.comm.allgather(bunch.charge)
+        # Gather the 6D standard deviations; empty bunches contribute zeros.
+        self.std_all = np.empty((self.size, 6), dtype=np.float64)
+        if len(bunch) != 0:
+            std = bunch.std
+        else:
+            std = np.zeros((6,), dtype=np.float64)
+        self.comm.Allgather([std, self.MPI.DOUBLE], [self.std_all, self.MPI.DOUBLE])
                                 
\ No newline at end of file
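
Usage sketch (not part of the patch above): a minimal illustration of how the new share_stds method might be called from a tracking script. The names beam and beam.mpi are assumptions, based on mbtrack2 exposing its Mpi instance on the Beam object after MPI initialisation; the coordinate index 4 is assumed to be tau, following the 6D (x, xp, y, yp, tau, delta) ordering used in the gathered arrays.

    # Minimal usage sketch, assuming `beam` is an mbtrack2 Beam with MPI
    # initialised (beam.mpi_switch is True) and that its Mpi instance is
    # reachable as `beam.mpi`; both names are assumptions for illustration.
    import numpy as np

    beam.mpi.share_stds(beam)                   # gather each bunch's 6D std on every rank
    std_all = beam.mpi.std_all                  # shape (size, 6), one row per bunch
    charge_all = np.array(beam.mpi.charge_all)  # gathered bunch charges

    # Example: charge-weighted average of the bunch length spread, assuming
    # column 4 is tau; empty bunches contribute zero rows and zero charge.
    if charge_all.sum() > 0:
        mean_tau_std = np.average(std_all[:, 4], weights=charge_all)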