diff --git a/ArchiveExtractor.py b/ArchiveExtractor.py
index b703354405bfcdaa4b7bb443fb54275a5614714f..0299d2aaea2a6cc14e21dc76e3cfc5bad7674b71 100755
--- a/ArchiveExtractor.py
+++ b/ArchiveExtractor.py
@@ -127,20 +127,27 @@ def query_ADB_BetweenDates(attr,
         raise ValueError("Attribute '%s' is not archived in DB %s"%(attr, extractor))
 
     # Get its sampling period in seconds
-    try:
-        samplingPeriod = int(ADB.GetArchivingMode(attr)[1])*10**-3
+    req = ADB.GetArchivingMode(attr)
+    logger.debug("GetArchivingMode: %s"%str(req))
+
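+    # Dispatch on the archiving mode: req[0] is the mode name reported by the DB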
+    if req[0] == "MODE_P":
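+        # Periodic mode: req[1] is the sampling period in milliseconds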
+        samplingPeriod = int(req[1])*10**-3
         logger.debug("Attribute is sampled every %g seconds"%samplingPeriod)
 
-        # Evaluate the number of points
-        est_N = (dateStop-dateStart).total_seconds()/samplingPeriod
-        logger.debug("Which leads to %d points to extract."%est_N)
+    elif req[0] == "MODE_EVT":
+        logger.warning("Attribute is archived on event. Data chunks are sized assuming an estimated data rate of 0.1 Hz.")
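+        # No fixed sampling period for event-driven archiving: assume one event every 10 s (0.1 Hz)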
+        samplingPeriod = 10
+
+    else:
+        raise NotImplementedError("Archiving mode '%s' is not implemented"%req[0])
 
-    except ValueError:
-        logger.warning("Attribute has no sampling period. Maybe it's archived on event.")
-        logger.warning("Please note that this script does not cut acces into chunks for this type of attributes.")
 
-        # Temporary bypass estimation
-        est_N = 1
+    # Estimate the number of points to extract
+    est_N = (dateStop-dateStart).total_seconds()/samplingPeriod
+    logger.debug("Which leads to %d points to extract."%est_N)
 
     # If data chunk is too much, we need to cut it
     if est_N > Nmax: