diff --git a/ArchiveExtractor.py b/ArchiveExtractor.py
index cf26417e1a399a9af9019270b6784a382adef0fe..3617a182b5d07a027810fe3ad87e9d1adb495121 100755
--- a/ArchiveExtractor.py
+++ b/ArchiveExtractor.py
@@ -1,5 +1,5 @@
 """
-Python module for extracting attribute from Arhive Extractor Device.
+Python module for extracting attributes from the Archive Extractor Device.
 """
 import logging
 import datetime
@@ -519,13 +519,19 @@ def _extract_attribute(attribute, method, date1, date2, db):
     # =============
     # For now we handle multi dimension the same way as scalar, which will get only the first element
     if (attrtype=="scalar") or (attrtype=="multi"):
-        return _extract_scalar(attribute, method, date1, date2, db)
+        if info["data_type"] == '1':
+            # Boolean data type, quick fix
+            dtype=bool
+        else:
+            dtype=float
+
+        return _extract_scalar(attribute, method, date1, date2, db, dtype)
     if attrtype=="vector":
         return _extract_vector(attribute, method, date1, date2, db)
 
 
 ##---------------------------------------------------------------------------##
-def _extract_scalar(attribute, method, date1, date2, db):
+def _extract_scalar(attribute, method, date1, date2, db, dtype):
 
     # =====================
     if method == "nearest":
@@ -572,12 +578,12 @@ def _extract_scalar(attribute, method, date1, date2, db):
 
 
             # Transform to datetime - value arrays
-            _value = np.asarray(_value, dtype=float)
+            _value = np.asarray(_value, dtype=dtype)
             if len(_date) > 0:
                 _date = _ArrayTimeStampToDatetime(_date/1000.0)
 
             # Fabricate return pandas.Series
-            data.append(pd.Series(index=_date, data=_value,name=attribute))
+            data.append(pd.Series(index=_date, data=_value, name=attribute))
 
         # Concatenate chunks
         return pd.concat(data)
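
For reference (not part of the patch): a minimal sketch of why the element dtype is
threaded down to the Series constructor. The readings below are a hypothetical example
of a boolean attribute; casting through float would silently turn them into 0.0/1.0.

    import numpy as np
    import pandas as pd

    raw = [1, 0, 1]  # hypothetical readings of a boolean attribute
    print(pd.Series(np.asarray(raw, dtype=float)).dtype)  # float64 -> values 1.0 / 0.0
    print(pd.Series(np.asarray(raw, dtype=bool)).dtype)   # bool    -> values True / False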