diff --git a/ArchiveExtractor/Amenities.py b/ArchiveExtractor/Amenities.py
index fd48cd18d4831f69a6dac963b54d8b01e31a965c..fc3117bf1f4ed52b25e0d3acd95876bcc16c9a15 100644
--- a/ArchiveExtractor/Amenities.py
+++ b/ArchiveExtractor/Amenities.py
@@ -173,7 +173,11 @@ def _chunkerize(attribute, dateStart, dateStop, db, Nmax=100000):
     dx=int(info["max_dim_x"])
     if dx > 1:
         logger.debug("Attribute is a vector with max dimension = %s"%dx)
-        N=N*dx
+
+        # Quick fix: the declared max dimension is often not fully used. Cap it at 2048.
+        N=N*min(dx,2048)
+        if dx > 2048:
+            logger.warning("Attribute vector max dimension exceeds 2048; assuming at most 2048 elements when sizing chunks.")
 
     # If data chunk is too much, we need to cut it
     if N > Nmax:
diff --git a/ArchiveExtractor/Core.py b/ArchiveExtractor/Core.py
index e193cf85359dc06e907cd697da42247c8ba89a2e..d1c040900e265375f90849d690bd31701de08c7a 100644
--- a/ArchiveExtractor/Core.py
+++ b/ArchiveExtractor/Core.py
@@ -170,7 +170,8 @@ def _extract_vector(attribute, method, date1, date2, db):
         attrHist = ae._Extractors[{'H':0, 'T':1}[db]].attribute_history(name, N)
 
         # Transform to datetime - value arrays
-        _value = np.empty((N, int(info["max_dim_x"])), dtype=float)
+        mx = min(int(info["max_dim_x"]), 2048)  # Quick fix: cap the declared dimension at 2048
+        _value = np.empty((N, mx), dtype=float)
         _value[:] = np.nan
         _date = np.empty(N, dtype=object)
         for i_h in range(N):
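Not part of the patch itself: the sketch below illustrates why capping the declared
dimension at 2048 matters, assuming a hypothetical attribute whose max_dim_x is
declared far larger than the data it actually holds (all numbers are illustrative,
not taken from a real device).

import numpy as np

N = 10_000                    # number of history entries to fetch
declared_dx = 1_000_000       # hypothetical declared max_dim_x, rarely filled
mx = min(declared_dx, 2048)   # cap applied by the quick fix

uncapped_bytes = N * declared_dx * 8   # float64 buffer sized from the declared value
capped_bytes = N * mx * 8              # buffer sized after the cap
print(f"uncapped: {uncapped_bytes / 1e9:.0f} GB, capped: {capped_bytes / 1e6:.0f} MB")

# The capped buffer matches what Core.py now allocates before filling it with NaN:
_value = np.empty((N, mx), dtype=float)
_value[:] = np.nan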