diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 71348bd8c5555833196ea45d5239ec1d1201518c..4aa45078f7b5b0ca082627a0ab12a76fb4730ae1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,3 +1,6 @@
+variables:
+  GIT_SUBMODULE_STRATEGY: recursive
+
 before_script:
   - export DEBIAN_FRONTEND=noninteractive
   - apt-get -qq update
@@ -9,8 +12,6 @@ before_script:
   - export PYENV_ROOT="$HOME/.pyenv"
   - export PATH="$PYENV_ROOT/bin:$PATH"
   - eval "$(pyenv init -)"
-  - git clone https://git.ufz.de/rdm/dios ~/dios
-  - export PYTHONPATH="$HOME/dios":$PYTHONPATH
 
 
 test:python36:
@@ -41,3 +42,21 @@ test:python38:
     - pip install -r requirements.txt
     - python -m pytest test
     - python -m saqc --config ressources/data/config_ci.csv --data ressources/data/data.csv --outfile /tmp/test.csv
+
+# Build the HTML documentation with Sphinx
+pages:
+  stage: deploy
+  script:
+    - pyenv install 3.6.9
+    - pyenv shell 3.6.9
+    - pip install --upgrade pip
+    - pip install -r requirements.txt
+    - cd sphinx-doc/
+    - pip install -r requirements_sphinx.txt
+    - make html
+    - cp -r _build/html ../public
+  artifacts:
+    paths:
+      - public
+  only:
+    - develop
diff --git a/.gitmodules b/.gitmodules
index 68397aa969304a3dcc129f93735b949971fc9a89..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "dios"]
-	path = dios
-	url = https://git.ufz.de/rdm/dios.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a5fd5fe9f103e900ac7051000dafa55a49d2edc0..9120d74be275a9fcb709f60ecd752c3facfbe1b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,7 +67,6 @@ coming soon ...
 - added the data processing module `proc_functions`
 - `flagCrossValidation` implemented
 
-
 ## Bugfixes
 - `spikes_flagRaise` - overestimation of value courses average fixed
 - `spikes_flagRaise` - raise check window now closed on both sides
@@ -76,5 +75,7 @@ coming soon ...
 - renamed `spikes_oddWater` to `spikes_flagMultivarScores`
 - added STRAY auto treshing algorithm to `spikes_flagMultivarScores`
 - added "unflagging" - postprocess to `spikes_flagMultivarScores`
+- improved and extended masking
 
 ## Breaking Changes
+- register is now a decorator instead of a wrapper
diff --git a/dios b/dios
deleted file mode 160000
index e9a80225b02799fa668882149a39f4a734b4f280..0000000000000000000000000000000000000000
--- a/dios
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit e9a80225b02799fa668882149a39f4a734b4f280
diff --git a/docs/ConfigurationFiles.md b/docs/ConfigurationFiles.md
index b660206945cf20993f7c0d8ad9b6dc3a19fc8122..64743bb40d211db2a51adfb5ad07c64bbcbdd7be 100644
--- a/docs/ConfigurationFiles.md
+++ b/docs/ConfigurationFiles.md
@@ -1,8 +1,8 @@
 # Configuration Files
-The behaviour of SaQC is completely controlled by a text based configuration file.
+The behaviour of SaQC can be completely controlled by a text-based configuration file.
 
 ## Format
-SaQC expects its configuration files to be semicolon-separated text files with a
+SaQC expects configuration files to be semicolon-separated text files with a
 fixed header. Each row of the configuration file lists
 one variable and one or several test functions that are applied on the given variable.
 
@@ -12,11 +12,11 @@ one variable and one or several test functions that are applied on the given var
 The header names are basically fixed, but if you really insist in custom
 configuration headers have a look [here](saqc/core/config.py).
 
-| Name    | Data Type                                    | Description            | Optional |
+| Name    | Data Type                                    | Description            | Required |
 |---------|----------------------------------------------|------------------------|----------|
-| varname | string                                       | name of a variable     | no       |
-| test    | [function notation](#test-function-notation) | test function          | no       |
-| plot    | boolean (`True`/`False`)                     | plot the test's result | yes      |
+| varname | string                                       | name of a variable     | yes      |
+| test    | [function notation](#test-function-notation) | test function          | yes      |
+| plot    | boolean (`True`/`False`)                     | plot the test's result | no       |
 
 
 ### Test function notation
diff --git a/requirements.txt b/requirements.txt
index 5a8333fca668a0c43377f5ae0467bd4140c10209..db2e71d721c465a108648e0725c8c309e88689ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,26 @@
-attrs==19.3.0
+attrs==20.1.0
 Click==7.1.2
 cycler==0.10.0
+dios==0.6.0
 dtw==1.4.0
 kiwisolver==1.2.0
 importlib-metadata==1.7.0
 joblib==0.16.0
-llvmlite==0.31.0
+llvmlite==0.34.0
 mlxtend==0.17.3
-matplotlib==3.3.0
-more-itertools==8.4.0
-numba==0.48.0
+matplotlib==3.3.1
+more-itertools==8.5.0
+numba==0.51.1
 numpy==1.19.1
 outlier==0.2
 utils==1.0.1
 outlier-utils==0.0.3
 packaging==20.4
-pandas==1.0.1
+pandas==1.1.1
 pluggy==0.13.1
 pyparsing==2.4.7
 py==1.9.0
-pyarrow==1.0.0
+pyarrow==1.0.1
 pytest-lazy-fixture==0.6.3
 pytest==6.0.1
 python-dateutil==2.8.1
diff --git a/ressources/data/config_ci.csv b/ressources/data/config_ci.csv
index f613a9385629b662fe18293ee5ea5de015dbbb98..f631338ade105552e37c61d16ea72aab50dab106 100644
--- a/ressources/data/config_ci.csv
+++ b/ressources/data/config_ci.csv
@@ -1,6 +1,6 @@
 varname;test;plot
 SM2;harm_shift2Grid(freq="15Min");False
-SM1;flagRange(min=10, max=60);False
+'.*';flagRange(min=10, max=60);False
 SM2;flagMissing(nodata=NAN);False
 SM2;flagRange(min=10, max=60);False
 SM2;spikes_flagMad(window="30d", z=3.5);False
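The quoted `'.*'` varname above acts as a regular expression, so `flagRange` is applied to all matching variables. For reference, a minimal sketch of driving the same run from Python instead of the CLI call in the CI job (the import path of `SaQC` and the `read_csv` options are assumptions; the class itself lives in `saqc/core/core.py` in this diff):

```python
import pandas as pd

from saqc.core.core import SaQC          # assumption: import path of the SaQC class
from saqc.flagger import SimpleFlagger

# same inputs as the CI call to `python -m saqc`
data = pd.read_csv("ressources/data/data.csv", index_col=0, parse_dates=True)

saqc = SaQC(SimpleFlagger(), data)
data_result, flagger_result = saqc.readConfig("ressources/data/config_ci.csv").getResult()
```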
diff --git a/saqc/core/core.py b/saqc/core/core.py
index d53d2b5a137164f7df8cdb89ef591cc07da33d78..0350f2e6b9faf1ad8d0792b52fd60a93d66e3674 100644
--- a/saqc/core/core.py
+++ b/saqc/core/core.py
@@ -9,7 +9,7 @@ TODOS:
 
 import logging
 from copy import deepcopy
-from typing import List, Tuple
+from typing import Any, Dict, List
 
 import pandas as pd
 import dios
@@ -18,20 +18,26 @@ import timeit
 
 from saqc.lib.plotting import plotHook, plotAllHook
 from saqc.lib.tools import isQuoted
-from saqc.core.register import FUNC_MAP, SaQCFunc
 from saqc.core.reader import readConfig
 from saqc.flagger import BaseFlagger, CategoricalFlagger, SimpleFlagger, DmpFlagger
+from saqc.core.register import FUNC_MAP
 
 
 logger = logging.getLogger("SaQC")
 
 
-def _handleErrors(exc, func, field, policy):
+def _handleErrors(exc, func_dump, policy):
+    func_name = func_dump['func_name']
+    func_kws = func_dump['func_kws']
+    field = func_dump['field']
+    ctrl_kws = func_dump['ctrl_kws']
+    lineno = ctrl_kws['lineno']
+    func_expr = ctrl_kws['expr']
     msg = f"Execution failed. Variable: '{field}', "
-    if func.lineno is not None and func.expr is not None:
-        msg += f"Config line {func.lineno}: '{func.expr}', "
+    if lineno is not None and func_expr is not None:
+        msg += f"Config line {lineno}: '{func_expr}', "
     else:
-        msg += f"Function: {func.func.__name__ }(), parameters: '{func.kwargs}', "
+        msg += f"Function: {func_name}(), parameters: '{func_kws}', "
     msg += f"Exception:\n{type(exc).__name__}: {exc}"
 
     if policy == "ignore":
@@ -54,6 +60,9 @@ def _prepInput(flagger, data, flags):
             raise TypeError("data should not use MultiIndex")
         data = dios.to_dios(data)
 
+    if not hasattr(data.columns, "str"):
+        raise TypeError("expected dataframe columns of type string")
+
     if not isinstance(flagger, BaseFlagger):
         # NOTE: we should generate that list automatically,
         #       it won't ever be complete otherwise
@@ -103,7 +112,7 @@ class SaQC:
         self._flagger = self._initFlagger(data, flagger, flags)
         self._error_policy = error_policy
         # NOTE: will be filled by calls to `_wrap`
-        self._to_call: List[Tuple[str, SaQCFunc]] = []
+        self._to_call: List[Dict[str, Any]] = []  # todo fix the access everywhere
 
     def _initFlagger(self, data, flagger, flags):
         """ Init the internal flagger object.
@@ -115,9 +124,9 @@ class SaQC:
         # ensure all data columns
         merged = flagger.initFlags(data)
         if flags is not None:
-            merged = merged.merge(flagger.initFlags(flags=flags))
+            merged = merged.merge(flagger.initFlags(flags=flags), inplace=True)
         if flagger.initialized:
-            merged = merged.merge(flagger)
+            merged = merged.merge(flagger, inplace=True)
         return merged
 
     def readConfig(self, fname):
@@ -150,22 +159,27 @@ class SaQC:
         #       method instead of intermingling it with the computation
         data, flagger = self._data, self._flagger
 
-        for field, func in self._to_call:
-            logger.debug(f"processing: {field}, {func.__name__}, {func.kwargs}")
+        for func_dump in self._to_call:
+            func_name = func_dump['func_name']
+            func_kws = func_dump['func_kws']
+            field = func_dump['field']
+            plot = func_dump["ctrl_kws"]["plot"]
+            logger.debug(f"processing: {field}, {func_name}, {func_kws}")
 
             try:
                 t0 = timeit.default_timer()
-                data_result, flagger_result = func(data=data, flagger=flagger, field=field)
+                data_result, flagger_result = _saqcCallFunc(func_dump, data, flagger)
+
             except Exception as e:
                 t1 = timeit.default_timer()
-                logger.debug(f"{func.__name__} failed after {t1-t0} sec")
-                _handleErrors(e, func, field, self._error_policy)
+                logger.debug(f"{func_name} failed after {t1 - t0} sec")
+                _handleErrors(e, func_dump, self._error_policy)
                 continue
             else:
                 t1 = timeit.default_timer()
-                logger.debug(f"{func.__name__} finished after {t1-t0} sec")
+                logger.debug(f"{func_name} finished after {t1 - t0} sec")
 
-            if func.plot:
+            if plot:
                 plotHook(
                     data_old=data,
                     data_new=data_result,
@@ -173,16 +187,20 @@ class SaQC:
                     flagger_new=flagger_result,
                     sources=[],
                     targets=[field],
-                    plot_name=func.__name__,
+                    plot_name=func_name,
                 )
 
             data = data_result
             flagger = flagger_result
 
-        if any([func.plot for _, func in self._to_call]):
+        if any([fdump["ctrl_kws"]["plot"] for fdump in self._to_call]):
             plotAllHook(data, flagger)
 
-        return SaQC(flagger, data, nodata=self._nodata, error_policy=self._error_policy)
+        # This is much faster for big datasets than to throw everything into the constructor.
+        # Simply because of _initFlagger -> merge() -> mergeDios() over all columns.
+        new = SaQC(SimpleFlagger(), dios.DictOfSeries(), nodata=self._nodata, error_policy=self._error_policy)
+        new._flagger, new._data = flagger, data
+        return new
 
     def getResult(self):
         """
@@ -196,24 +214,46 @@ class SaQC:
         realization = self.evaluate()
         return realization._data, realization._flagger
 
-    def _wrap(self, func, lineno=None, expr=None):
-        def inner(field: str, *args, regex: bool = False, to_mask=None, **kwargs):
-
+    def _wrap(self, func_name, lineno=None, expr=None):
+        def inner(field: str, *args, regex: bool = False, to_mask=None, plot=False, inplace=False, **kwargs):
             fields = [field] if not regex else self._data.columns[self._data.columns.str.match(field)]
 
-            if func.__name__ in ("flagGeneric", "procGeneric"):
+            if func_name in ("flagGeneric", "procGeneric"):
                 # NOTE:
                 # We need to pass `nodata` to the generic functions
                 # (to implement stuff like `ismissing`). As we
                 # should not interfere with proper nodata attributes
                 # of other test functions (e.g. `flagMissing`) we
                 # special case the injection
-                kwargs["nodata"] = kwargs.get("nodata", self._nodata)
+                kwargs.setdefault('nodata', self._nodata)
+
+            # to_mask is a control keyword
+            ctrl_kws = {
+                **(FUNC_MAP[func_name]["ctrl_kws"]),
+                'to_mask': to_mask,
+                'plot': plot,
+                'inplace': inplace,
+                'lineno': lineno,
+                'expr': expr
+            }
+            func = FUNC_MAP[func_name]["func"]
+
+            func_dump = {
+                "func_name": func_name,
+                "func": func,
+                "func_args": args,
+                "func_kws": kwargs,
+                "ctrl_kws": ctrl_kws,
+            }
+
+            if inplace:
+                out = self
+            else:
+                out = self.copy()
 
-            out = deepcopy(self)
             for field in fields:
-                f = SaQCFunc(func, *args, lineno=lineno, expression=expr, to_mask=to_mask, **kwargs)
-                out._to_call.append((field, f))
+                dump_copy = {**func_dump, "field": field}
+                out._to_call.append(dump_copy)
             return out
 
         return inner
@@ -222,9 +262,97 @@ class SaQC:
         """
         All failing attribute accesses are redirected to
         __getattr__. We use this mechanism to make the
-        `RegisterFunc`s appear as `SaQC`-methods with
+        registered functions appear as `SaQC`-methods without
         actually implementing them.
         """
         if key not in FUNC_MAP:
             raise AttributeError(f"no such attribute: '{key}'")
-        return self._wrap(FUNC_MAP[key])
+        return self._wrap(key)
+
+    def copy(self):
+        return deepcopy(self)
+
+
+def _saqcCallFunc(func_dump, data, flagger):
+    func = func_dump['func']
+    func_name = func_dump['func_name']
+    func_args = func_dump['func_args']
+    func_kws = func_dump['func_kws']
+    field = func_dump['field']
+    ctrl_kws = func_dump['ctrl_kws']
+    to_mask = ctrl_kws['to_mask']
+    masking = ctrl_kws['masking']
+
+    if masking == 'all':
+        columns = data.columns
+    elif masking == 'none':
+        columns = []
+    elif masking == 'field':
+        columns = [field]
+    else:
+        raise ValueError(f"masking: {masking}")
+    to_mask = flagger.BAD if to_mask is None else to_mask
+
+    # NOTE:
+    # when assigning new variables to `data`, the respective
+    # field is missing in `flags`, so we add it if necessary in
+    # order to keep the columns from `data` and `flags` in sync.
+    # NOTE:
+    # Assigning a new variable to `flags` only is also possible;
+    # this case is handled here as well.
+    # NOTE:
+    # Any newly assigned column can safely be ignored by masking, thus
+    # this check comes after setting `columns`
+    if field not in flagger.getFlags():
+        flagger = flagger.merge(flagger.initFlags(data=pd.Series(name=field, dtype=np.float64)))
+
+    data_in, mask = _maskData(data, flagger, columns, to_mask)
+    data_result, flagger_result = func(data_in, field, flagger, *func_args, func_name=func_name, **func_kws)
+    data_result = _unmaskData(data, mask, data_result, flagger_result, to_mask)
+
+    return data_result, flagger_result
+
+
+def _maskData(data, flagger, columns, to_mask):
+    # TODO: this is heavily undertested
+    mask = flagger.isFlagged(field=columns, flag=to_mask, comparator='==')
+    data = data.copy()
+    for c in columns:
+        col_mask = mask[c].values
+        if np.any(col_mask):
+            col_data = data[c].values.astype(np.float64)
+            col_data[col_mask] = np.nan
+            data[c] = col_data
+    return data, mask
+
+
+def _unmaskData(data_old, mask_old, data_new, flagger_new, to_mask):
+    # TODO: this is heavily undertested
+
+    # NOTE:
+    # we only need to handle columns that were masked
+    # and are still present in the new data.
+    # This rules out:
+    #  - any newly assigned columns
+    #  - columns that were not masked, due to the masking-kw
+    columns = mask_old.columns.intersection(data_new.columns)
+    mask_new = flagger_new.isFlagged(field=columns, flag=to_mask, comparator="==")
+
+    for col in columns:
+        was_masked = mask_old[col]
+        is_masked = mask_new[col]
+
+        # if the index changed, we simply keep the new data.
+        # A test should use `register(masking='none')` if it changes
+        # the index but does not want all NaNs on flagged locations.
+        if was_masked.index.equals(is_masked.index):
+            mask = was_masked.values & is_masked.values & data_new[col].isna().values
+
+            # reapply the original values on masked positions that are still NaN
+            if np.any(mask):
+                data = np.where(mask, data_old[col].values, data_new[col].values)
+                data_new[col] = pd.Series(data=data, index=is_masked.index)
+
+    return data_new
+
+
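The `_maskData`/`_unmaskData` pair above implements the masking behaviour mentioned in the CHANGELOG: values flagged `to_mask` are hidden (set to NaN) from the test function and restored afterwards wherever the function left them untouched. A standalone sketch of that round trip with plain pandas/numpy, no flagger involved:

```python
import numpy as np
import pandas as pd

data = pd.Series([1.0, 2.0, 3.0, 4.0], name="SM2")
flagged = pd.Series([False, True, False, True])   # stands in for flagger.isFlagged(...)

# mask: hide flagged values from the test function
masked = data.copy()
masked[flagged.values] = np.nan

# ... the test function works on `masked` and leaves the NaNs untouched ...
result = masked.copy()

# unmask: restore the original values where they were masked and are still NaN
restore = flagged.values & result.isna().values
result[restore] = data[restore]

assert result.equals(data)
```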
diff --git a/saqc/core/reader.py b/saqc/core/reader.py
index 7f0ba93f3b106fb1caad3d90955146beff74704f..512f7f592f807c8ae1562a027e889781fe955a3c 100644
--- a/saqc/core/reader.py
+++ b/saqc/core/reader.py
@@ -33,7 +33,13 @@ def _handleComments(df):
     df.loc[df[F.VARNAME].str.startswith(COMMENT)] = EMPTY
 
     for col in df:
-        df[col] = df[col].str.split(COMMENT, expand=True).iloc[:, 0].str.strip()
+        try:
+            df[col] = df[col].str.split(COMMENT, expand=True).iloc[:, 0].str.strip()
+        except AttributeError:
+            # NOTE:
+            # if `df[col]` is not of string dtype, we know that
+            # there are no comments and the `.str` access fails
+            pass
 
     return df
 
@@ -79,7 +85,7 @@ def readConfig(fname):
 
     df[F.VARNAME] = df[F.VARNAME].replace(r"^\s*$", np.nan, regex=True)
     df[F.TEST] = df[F.TEST].replace(r"^\s*$", np.nan, regex=True)
-    df[F.PLOT] = df[F.PLOT].replace({"False": "", EMPTY: ""})
+    df[F.PLOT] = df[F.PLOT].replace({"False": "", EMPTY: "", np.nan: ""})
     df = df.astype({F.PLOT: bool})
     df = _parseConfig(df)
 
diff --git a/saqc/core/register.py b/saqc/core/register.py
index 2863f182556a0a0ca7cd2e0a20ae8c2f73e173b6..15a56868829750d0d0daeef58c05eb7d4bd525a6 100644
--- a/saqc/core/register.py
+++ b/saqc/core/register.py
@@ -1,211 +1,17 @@
 #!/usr/bin/env python
 
-from operator import itemgetter
-from inspect import signature, Parameter, _VAR_POSITIONAL, _VAR_KEYWORD
-from typing import Tuple, Dict, Generator, Any, Set
-
-import numpy as np
-import pandas as pd
-
-
-class Func:
-    """
-    This class is basically extends functool.partial` and in
-    fact, most of the constructor implementation is taken
-    directly from the python standard lib. Not messing with the
-    partial class directly proved to be an easier aproach though.
-
-    Besides the implementation of a partial like functionality
-    `Func` provides a couple of properties/methods used to check
-    the passed arguments before the actual function call happens.
-    """
-
-    def __init__(self, *args, **kwargs):
-        if len(args) < 1:
-            raise TypeError("'Func' takes at least one argument")
-        func, *args = args
-        if not callable(func):
-            raise TypeError("the first argument must be callable")
-
-        if isinstance(func, Func):
-            args = func.args + args
-            kwargs = {**func.kwargs, **kwargs}
-            func = func.func
-
-        self._signature = signature(func)
-        # NOTE:
-        # bind_partial comes with a validity check, so let's use it
-        self._signature.bind_partial(*args, **kwargs)
-
-        self.__name__ = func.__name__
-        self.func = func
-        self.args = args
-        self.kwargs = kwargs
-
-    def __repr__(self):
-        return f"{self.__class__.__name__}({self.__name__}, {self.args}, {self.kwargs})"
-
-    def __call__(self, *args, **kwargs):
-        keywords = {**self.kwargs, **kwargs}
-        return self.func(*self.args, *args, **keywords)
-
-    @property
-    def _parameters(self) -> Generator[Tuple[str, Parameter], None, None]:
-        """
-        yield all 'normal' parameters and their names, skipping
-        VAR_POSITIONALs (*args) and VAR_KEYWORDs (**kwargs) as
-        the don't help evaluating the correctness of the passed
-        arguments.
-        """
-        for k, v in self._signature.parameters.items():
-            if v.kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
-                continue
-            yield k, v
-
-    @property
-    def parameters(self) -> Tuple[str]:
-        """
-        return the names of all parameters, i.e. positional
-        and keyword arguments without varargs
-        """
-        return tuple(map(itemgetter(0), self._parameters))
-
-    @property
-    def optionals(self) -> Tuple[str]:
-        """
-        return the names of all optional parameters without varargs
-        """
-        return tuple(k for k, v in self._parameters if v.default is not Parameter.empty)
-
-    def _getPositionals(self):
-        """
-        return the names of all positional parameters without varargs
-        """
-        return tuple(k for k, v in self._parameters if v.default is Parameter.empty)
-
-    positionals = property(_getPositionals)
-
-    def addGlobals(self, globs: Dict[str, Any]):
-        """
-        Add the given key-value pairs to the function's global
-        scope. We abuse the __globals__ mechanism mainly to
-        make certain other functions (dis-)available within the
-        'Func' body.
-        """
-        self.func.__globals__.update(globs)
-        return self
-
-    def getUnbounds(self) -> Set[str]:
-        """
-        returns all the names of all unbound variables,
-        i.e. not yet `partialed` parameters
-        """
-        return set(self.positionals[len(self.args):]) - set(self.kwargs.keys())
-
-
-class RegisterFunc(Func):
-    """
-    This class acts as a simple wrapper around all registered
-    functions. Currently its sole purpose is to inject additional
-    call arguments
-    """
-
-    def __call__(self, *args, **kwargs):
-        # NOTE:
-        # injecting the function name into the
-        # keywords is sort of hacky
-        kwargs = {"func_name": self.__name__, **kwargs}
-        return super().__call__(*args, **kwargs)
-
-
-class SaQCFunc(Func):
-    """
-    This class represents all test-, process and horminzation functions
-    provided through `SaQC`. Every call to an `SaQC` object will be wrapped
-    with all its non-dynamic arguments.
-
-    `SaQCFunc`s are callable and expose the signature `data`, `field` and
-    `flagger`
-    """
-
-    # NOTE:
-    # we should formalize the function interface somehow, somewhere
-    _injectables = ("data", "field", "flagger")
-
-    def __init__(self, *args, plot=False, lineno=None, expression=None, to_mask=None, **kwargs):
-        super().__init__(*args, **kwargs)
-
-        unbound = self.getUnbounds()
-        if unbound:
-            raise TypeError(f"missing required arguments: {', '.join(unbound)}")
-
-        self.plot = plot
-        self.lineno = lineno
-        self.expr = expression
-        self.to_mask = to_mask
-
-    def _getPositionals(self) -> Tuple[int]:
-        """
-        Returns all positional (i.e. non-optional arguments)
-        without the `data`, `field` and `flagger`
-        """
-        positionals = super()._getPositionals()
-        return tuple(k for k in positionals if k not in self._injectables)
-
-    positionals = property(_getPositionals)
-
-    def __call__(self, data, field, flagger):
-        # NOTE:
-        # when assigning new variables to `data`, the respective
-        # field is missing in `flags`, so we add it if necessary in
-        # order to keep the columns from `data` and `flags` in sync
-        if field not in flagger.getFlags():
-            flagger = flagger.merge(flagger.initFlags(data=pd.Series(name=field)))
-
-        data_in = self._maskData(data, flagger)
-
-        data_result, flagger_result = self.func(data_in, field, flagger, *self.args, **self.kwargs)
-
-        data_result = self._unmaskData(data, flagger, data_result, flagger_result)
-
-        return data_result, flagger_result
-
-    def _maskData(self, data, flagger):
-        to_mask = flagger.BAD if self.to_mask is None else self.to_mask
-        mask = flagger.isFlagged(flag=to_mask, comparator='==')
-        data = data.copy()
-        data[mask] = np.nan
-        return data
-
-    def _unmaskData(self, data_old, flagger_old, data_new, flagger_new):
-        to_mask = flagger_old.BAD if self.to_mask is None else self.to_mask
-        mask_old = flagger_old.isFlagged(flag=to_mask, comparator="==")
-        mask_new = flagger_new.isFlagged(flag=to_mask, comparator="==")
-
-        for col, left in data_new.indexes.iteritems():
-            if col not in mask_old:
-                continue
-            right = mask_old[col].index
-            # NOTE: ignore columns with changed indices (assumption: harmonization)
-            if left.equals(right):
-                # NOTE: Don't overwrite data, that was masked, but is not considered
-                # flagged anymore and also respect newly set data on masked locations.
-                mask = mask_old[col] & mask_new[col] & data_new[col].isna()
-                data_new.loc[mask, col] = data_old.loc[mask, col]
-        return data_new
-
+from typing import Dict, Any
 
 # NOTE:
 # the global SaQC function store,
 # will be filled by calls to register
-FUNC_MAP: Dict[str, RegisterFunc] = {}
+FUNC_MAP: Dict[str, Any] = {}
+
 
+def register(masking='all'):
+    def inner(func):
+        ctrl_kws = dict(masking=masking)
+        FUNC_MAP[func.__name__] = dict(func=func, ctrl_kws=ctrl_kws)
+        return func
+    return inner
 
-def register(func, name=None):
-    if name is None:
-        name = func.__name__
-    else:
-        func.__name__ = name
-    func = RegisterFunc(func)
-    FUNC_MAP[name] = func
-    return func
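With the rewritten `register`, test functions are registered by decoration rather than by wrapping them in `RegisterFunc`/`SaQCFunc`. A minimal sketch of registering a hypothetical test function (the function name and body are made up; the signature follows the convention used throughout this diff):

```python
from saqc.core.register import register

@register(masking='field')
def flagAboveThreshold(data, field, flagger, thresh, **kwargs):
    # hypothetical example: flag every value of `field` exceeding `thresh`
    mask = data[field] > thresh
    flagger = flagger.setFlags(field, loc=mask, **kwargs)
    return data, flagger

# FUNC_MAP["flagAboveThreshold"] -> {"func": flagAboveThreshold, "ctrl_kws": {"masking": "field"}}
# and SaQC.__getattr__/_wrap exposes it as a SaQC method.
```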
diff --git a/saqc/core/visitor.py b/saqc/core/visitor.py
index 9ca875d802c507f16eea9bc89cb1fbdabd6f863d..f65d6e0e1f9de28c863391484a05535e516d0658 100644
--- a/saqc/core/visitor.py
+++ b/saqc/core/visitor.py
@@ -153,7 +153,7 @@ class ConfigFunctionParser(ast.NodeVisitor):
             raise NameError(f"unknown function '{func_name}'")
 
         self.generic_visit(node)
-        return FUNC_MAP[func_name]
+        return func_name
 
     def visit_keyword(self, node):
 
diff --git a/saqc/flagger/baseflagger.py b/saqc/flagger/baseflagger.py
index dd3016de9ee711cd7a763f71c10ba18e6fd0148c..2d86d9719096c9b1a9c3ceca756af0cf75003325 100644
--- a/saqc/flagger/baseflagger.py
+++ b/saqc/flagger/baseflagger.py
@@ -8,7 +8,7 @@ from abc import ABC, abstractmethod
 from typing import TypeVar, Union, Any, List, Optional
 
 import pandas as pd
-import dios.dios as dios
+import dios
 
 from saqc.lib.tools import assertScalar, mergeDios, toSequence, mutateIndex
 
@@ -64,22 +64,27 @@ class BaseFlagger(ABC):
         if data is not None:
             if not isinstance(data, diosT):
                 data = dios.DictOfSeries(data)
-            flags = data.copy()
-            flags[:] = self.UNFLAGGED
+
+            flags = dios.DictOfSeries(columns=data.columns)
+            for c in flags.columns:
+                flags[c] = pd.Series(self.UNFLAGGED, index=data[c].index)
+            flags = flags.astype(self.dtype)
         else:
-            if not isinstance(data, diosT):
+            if not isinstance(flags, diosT):
                 flags = dios.DictOfSeries(flags)
 
-        newflagger = self.copy()
-        newflagger._flags = flags.astype(self.dtype)
+        newflagger = self.copy(flags=flags)
         return newflagger
 
-    def rename(self, field: str, new_name: str):
-        newflagger = self.copy()
-        newflagger._flags.columns = mutateIndex(newflagger._flags.columns, field, new_name)
-        return newflagger
+    def rename(self, field: str, new_name: str, inplace=False):
+        if inplace:
+            out = self
+        else:
+            out = self.copy()
+        out._flags.columns = mutateIndex(out._flags.columns, field, new_name)
+        return out
 
-    def merge(self, other: BaseFlaggerT, join: str = "merge"):
+    def merge(self, other: BaseFlaggerT, subset: Optional[List] = None, join: str = "merge", inplace=False):
         """
         Merge the given flagger 'other' into self
         """
@@ -87,19 +92,25 @@ class BaseFlagger(ABC):
         if not isinstance(other, self.__class__):
             raise TypeError(f"flagger of type '{self.__class__}' needed")
 
-        newflagger = self.copy(flags=mergeDios(self.flags, other.flags, join=join))
-        return newflagger
+        if inplace:
+            self._flags = mergeDios(self._flags, other._flags, subset=subset, join=join)
+            return self
+        else:
+            return self.copy(flags=mergeDios(self._flags, other._flags, subset=subset, join=join))
 
-    def slice(self, field: FieldsT = None, loc: LocT = None, drop: FieldsT = None) -> BaseFlaggerT:
+    def slice(self, field: FieldsT = None, loc: LocT = None, drop: FieldsT = None, inplace=False) -> BaseFlaggerT:
         """ Return a potentially trimmed down copy of self. """
         if drop is not None:
             if field is not None:
                 raise TypeError("either 'field' or 'drop' can be given, but not both")
             field = self._flags.columns.drop(drop, errors="ignore")
-        flags = self.getFlags(field=field, loc=loc)
-        flags = dios.to_dios(flags)
-        newflagger = self.copy(flags=flags)
-        return newflagger
+        flags = self.getFlags(field=field, loc=loc).to_dios()
+
+        if inplace:
+            self._flags = flags
+            return self
+        else:
+            return self.copy(flags=flags)
 
     def getFlags(self, field: FieldsT = None, loc: LocT = None) -> PandasT:
         """ Return a potentially, to `loc`, trimmed down version of flags.
@@ -126,9 +137,11 @@ class BaseFlagger(ABC):
             field = slice(None) if field is None else self._check_field(field)
             indexer = (loc, field)
 
-        return self.flags.aloc[indexer]
+        # NOTE: this works around a bug in `dios.aloc`, which may return a shallow-copied dios if `slice(None)`
+        # is passed as row indexer, because pandas' `.loc` returns a shallow copy when a null-slice is passed to a Series.
+        return self._flags.copy().aloc[indexer]
 
-    def setFlags(self, field: str, loc: LocT = None, flag: FlagT = None, force: bool = False, **kwargs) -> BaseFlaggerT:
+    def setFlags(self, field: str, loc: LocT = None, flag: FlagT = None, force: bool = False, inplace=False, **kwargs) -> BaseFlaggerT:
         """Overwrite existing flags at loc.
 
         If `force=False` (default) only flags with a lower priority are overwritten,
@@ -146,17 +159,21 @@ class BaseFlagger(ABC):
             this = self.getFlags(field=field, loc=loc)
             row_indexer = this < flag
 
-        out = deepcopy(self)
+        if inplace:
+            out = self
+        else:
+            out = deepcopy(self)
+
         out._flags.aloc[row_indexer, field] = flag
         return out
 
-    def clearFlags(self, field: str, loc: LocT = None, **kwargs) -> BaseFlaggerT:
+    def clearFlags(self, field: str, loc: LocT = None, inplace=False, **kwargs) -> BaseFlaggerT:
         assertScalar("field", field, optional=False)
         if "force" in kwargs:
             raise ValueError("Keyword 'force' is not allowed here.")
         if "flag" in kwargs:
             raise ValueError("Keyword 'flag' is not allowed here.")
-        return self.setFlags(field=field, loc=loc, flag=self.UNFLAGGED, force=True, **kwargs)
+        return self.setFlags(field=field, loc=loc, flag=self.UNFLAGGED, force=True, inplace=inplace, **kwargs)
 
     def isFlagged(self, field=None, loc: LocT = None, flag: FlagT = None, comparator: str = ">") -> PandasT:
         """
@@ -211,9 +228,16 @@ class BaseFlagger(ABC):
         return flagged
 
     def copy(self, flags=None) -> BaseFlaggerT:
-        out = deepcopy(self)
-        if flags is not None:
+        if flags is None:
+            out = deepcopy(self)
+        else:
+            # if flags is given and self.flags is big,
+            # this hack will bring some speed improvement
+            saved = self._flags
+            self._flags = None
+            out = deepcopy(self)
             out._flags = flags
+            self._flags = saved
         return out
 
     def isValidFlag(self, flag: FlagT) -> bool:
@@ -241,12 +265,12 @@ class BaseFlagger(ABC):
         # https://git.ufz.de/rdm-software/saqc/issues/46
         failed = []
         if isinstance(field, str):
-            if field not in self.flags:
+            if field not in self._flags:
                 failed += [field]
         else:
             try:
                 for f in field:
-                    if f not in self.flags:
+                    if f not in self._flags:
                         failed += [f]
             # not iterable, probably a slice or
             # any indexer we dont have to check
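Most flagger methods now take an `inplace` keyword; both call styles return a flagger, but with `inplace=True` no copy is made. A minimal sketch, assuming the flagger API as it appears in this diff:

```python
import pandas as pd

from saqc.flagger import SimpleFlagger

flagger = SimpleFlagger().initFlags(data=pd.Series([1.0, 2.0, 3.0], name="SM2"))

copied = flagger.setFlags("SM2", flag=flagger.BAD)                  # returns a modified copy
mutated = flagger.setFlags("SM2", flag=flagger.BAD, inplace=True)   # modifies flagger itself

assert copied is not flagger
assert mutated is flagger
```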
diff --git a/saqc/flagger/dmpflagger.py b/saqc/flagger/dmpflagger.py
index 4778d384c421992f0d445984de683c3a12f13df9..0a0acd9e37fe6b7a5b52dc5382c1303fda61dd74 100644
--- a/saqc/flagger/dmpflagger.py
+++ b/saqc/flagger/dmpflagger.py
@@ -4,11 +4,11 @@
 import subprocess
 import json
 from copy import deepcopy
-from typing import TypeVar
+from typing import TypeVar, Optional, List
 
 import pandas as pd
 
-import dios.dios as dios
+import dios
 
 from saqc.flagger.categoricalflagger import CategoricalFlagger
 from saqc.lib.tools import assertScalar, mergeDios, mutateIndex
@@ -74,32 +74,38 @@ class DmpFlagger(CategoricalFlagger):
 
         # implicit set self._flags, and make deepcopy of self aka. DmpFlagger
         newflagger = super().initFlags(data=data, flags=flags)
-        newflagger._causes = newflagger.flags.astype(str)
-        newflagger._comments = newflagger.flags.astype(str)
+        newflagger._causes = newflagger._flags.astype(str)
+        newflagger._comments = newflagger._flags.astype(str)
         newflagger._causes[:], newflagger._comments[:] = "", ""
         return newflagger
 
-    def slice(self, field=None, loc=None, drop=None):
-        newflagger = super().slice(field=field, loc=loc, drop=drop)
-        flags = newflagger.flags
+    def slice(self, field=None, loc=None, drop=None, inplace=False):
+        newflagger = super().slice(field=field, loc=loc, drop=drop, inplace=inplace)
+        flags = newflagger._flags
         newflagger._causes = self._causes.aloc[flags, ...]
         newflagger._comments = self._comments.aloc[flags, ...]
         return newflagger
 
-    def rename(self, field: str, new_name: str):
-        newflagger = super().rename(field, new_name)
+    def rename(self, field: str, new_name: str, inplace=False):
+        newflagger = super().rename(field, new_name, inplace=inplace)
         newflagger._causes.columns = newflagger._flags.columns
         newflagger._comments.columns = newflagger._flags.columns
         return newflagger
 
-    def merge(self, other: DmpFlaggerT, join: str = "merge"):
+    def merge(self, other: DmpFlaggerT, subset: Optional[List] = None, join: str = "merge", inplace=False):
         assert isinstance(other, DmpFlagger)
-        out = super().merge(other, join)
-        out._causes = mergeDios(out._causes, other._causes, join=join)
-        out._comments = mergeDios(out._comments, other._comments, join=join)
-        return out
+        flags = mergeDios(self._flags, other._flags, subset=subset, join=join)
+        causes = mergeDios(self._causes, other._causes, subset=subset, join=join)
+        comments = mergeDios(self._comments, other._comments, subset=subset, join=join)
+        if inplace:
+            self._flags = flags
+            self._causes = causes
+            self._comments = comments
+            return self
+        else:
+            return self._construct_new(flags, causes, comments)
 
-    def setFlags(self, field, loc=None, flag=None, force=False, comment="", cause="", **kwargs):
+    def setFlags(self, field, loc=None, flag=None, force=False, comment="", cause="", inplace=False, **kwargs):
         assert "iloc" not in kwargs, "deprecated keyword, iloc"
         assertScalar("field", field, optional=False)
 
@@ -113,8 +119,20 @@ class DmpFlagger(CategoricalFlagger):
             this = self.getFlags(field=field, loc=loc)
             row_indexer = this < flag
 
-        out = deepcopy(self)
+        if inplace:
+            out = self
+        else:
+            out = deepcopy(self)
+
         out._flags.aloc[row_indexer, field] = flag
         out._causes.aloc[row_indexer, field] = cause
         out._comments.aloc[row_indexer, field] = comment
         return out
+
+    def _construct_new(self, flags, causes, comments) -> DmpFlaggerT:
+        new = DmpFlagger()
+        new.project_version = self.project_version
+        new._flags = flags
+        new._causes = causes
+        new._comments = comments
+        return new
diff --git a/saqc/funcs/breaks_detection.py b/saqc/funcs/breaks_detection.py
index d41db0f65b0de15f40c304e284443f24b17d51ac..3dd756860d25368cbe0c7456f14bd9195153e341 100644
--- a/saqc/funcs/breaks_detection.py
+++ b/saqc/funcs/breaks_detection.py
@@ -10,7 +10,7 @@ from saqc.core.register import register
 from saqc.lib.tools import retrieveTrustworthyOriginal
 
 
-@register
+@register(masking='field')
 def breaks_flagSpektrumBased(
     data,
     field,
diff --git a/saqc/funcs/constants_detection.py b/saqc/funcs/constants_detection.py
index aeb5750cfdfd982c21d2522630cf84755d729dde..1ba51474f3e4f95a9fcaf2f3a992215f91512e7c 100644
--- a/saqc/funcs/constants_detection.py
+++ b/saqc/funcs/constants_detection.py
@@ -9,7 +9,7 @@ from saqc.lib.ts_operators import varQC
 from saqc.lib.tools import retrieveTrustworthyOriginal
 
 
-@register
+@register(masking='field')
 def constants_flagBasic(data, field, flagger, thresh, window, **kwargs):
     """
     This functions flags plateaus/series of constant values of length `window` if
@@ -63,7 +63,7 @@ def constants_flagBasic(data, field, flagger, thresh, window, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def constants_flagVarianceBased(
     data, field, flagger, window="12h", thresh=0.0005, max_missing=None, max_consec_missing=None, **kwargs
 ):
diff --git a/saqc/funcs/data_modelling.py b/saqc/funcs/data_modelling.py
index 980b5f7b4fd0b206b995a02a976fc84d9ce9eba2..bc239817ac2b6ec795a40a52f4233178097aa346 100644
--- a/saqc/funcs/data_modelling.py
+++ b/saqc/funcs/data_modelling.py
@@ -13,7 +13,7 @@ from saqc.lib.ts_operators import (
 )
 
 
-@register
+@register(masking='field')
 def modelling_polyFit(data, field, flagger, winsz, polydeg, numba="auto", eval_flags=True, min_periods=0, **kwargs):
     """
     Function fits a polynomial model to the data and returns the residues.
@@ -186,7 +186,7 @@ def modelling_polyFit(data, field, flagger, winsz, polydeg, numba="auto", eval_f
     return data, flagger
 
 
-@register
+@register(masking='field')
 def modelling_rollingMean(data, field, flagger, winsz, eval_flags=True, min_periods=0, center=True, **kwargs):
     """
     Models the data with the rolling mean and returns the residues.
diff --git a/saqc/funcs/functions.py b/saqc/funcs/functions.py
index cb43ade7b8df0e8de07a837bf5227b2eb094a19d..a4b1b08a9ab3a7aa54bf0e4f6d63eda4b0238214 100644
--- a/saqc/funcs/functions.py
+++ b/saqc/funcs/functions.py
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 
 from functools import partial
+from inspect import signature
 
 import numpy as np
 import pandas as pd
@@ -11,13 +12,12 @@ import pywt
 import itertools
 import collections
 from mlxtend.evaluate import permutation_test
-import datetime
-from scipy.stats import linregress
 from scipy.cluster.hierarchy import linkage, fcluster
 
+
 from saqc.lib.tools import groupConsecutives, sesonalMask
 
-from saqc.core.register import register, Func
+from saqc.core.register import register
 from saqc.core.visitor import ENVIRONMENT
 from dios import DictOfSeries
 from typing import Any
@@ -36,12 +36,13 @@ def _execGeneric(flagger, data, func, field, nodata):
     # - field is only needed to translate 'this' parameters
     #    -> maybe we could do the translation on the tree instead
 
-    func = Func(func)
-    for k in func.parameters:
+    sig = signature(func)
+    args = []
+    for k, v in sig.parameters.items():
         k = field if k == "this" else k
         if k not in data:
             raise NameError(f"variable '{k}' not found")
-        func = Func(func, data[k])
+        args.append(data[k])
 
     globs = {
         "isflagged": partial(_dslIsFlagged, flagger),
@@ -53,11 +54,11 @@ def _execGeneric(flagger, data, func, field, nodata):
         "UNFLAGGED": flagger.UNFLAGGED,
         **ENVIRONMENT,
     }
-    func = func.addGlobals(globs)
-    return func()
+    func.__globals__.update(globs)
+    return func(*args)
 
 
-@register
+@register(masking='all')
 def procGeneric(data, field, flagger, func, nodata=np.nan, **kwargs):
     """
     generate/process data with generically defined functions.
@@ -126,7 +127,7 @@ def procGeneric(data, field, flagger, func, nodata=np.nan, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='all')
 def flagGeneric(data, field, flagger, func, nodata=np.nan, **kwargs):
     """
     a function to flag a data column by evaluation of a generic expression.
@@ -212,12 +213,14 @@ def flagGeneric(data, field, flagger, func, nodata=np.nan, **kwargs):
         raise TypeError(f"generic expression does not return a boolean array")
 
     if flagger.getFlags(field).empty:
-        flagger = flagger.merge(flagger.initFlags(data=pd.Series(name=field, index=mask.index)))
+        flagger = flagger.merge(
+            flagger.initFlags(
+                data=pd.Series(name=field, index=mask.index, dtype=np.float64)))
     flagger = flagger.setFlags(field, mask, **kwargs)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def flagRange(data, field, flagger, min, max, **kwargs):
     """
     Function flags values not covered by the closed interval [`min`, `max`].
@@ -251,13 +254,12 @@ def flagRange(data, field, flagger, min, max, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='all')
 def flagPattern(data, field, flagger, reference_field, method='dtw', partition_freq="days", partition_offset='0',
                 max_distance=0.03, normalized_distance=True, open_end=True, widths=(1, 2, 4, 8),
                 waveform='mexh', **kwargs):
     """
     Implementation of two pattern recognition algorithms:
-
     - Dynamic Time Warping (dtw) [1]
     - Pattern recognition via wavelets [2]
 
@@ -397,8 +399,7 @@ def flagPattern(data, field, flagger, reference_field, method='dtw', partition_f
     return data, flagger
 
 
-
-@register
+@register(masking='field')
 def flagMissing(data, field, flagger, nodata=np.nan, **kwargs):
     """
     The function flags all values indicating missing data.
@@ -433,7 +434,7 @@ def flagMissing(data, field, flagger, nodata=np.nan, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def flagSesonalRange(
     data, field, flagger, min, max, startmonth=1, endmonth=12, startday=1, endday=31, **kwargs,
 ):
@@ -487,19 +488,19 @@ def flagSesonalRange(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def clearFlags(data, field, flagger, **kwargs):
     flagger = flagger.clearFlags(field, **kwargs)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def forceFlags(data, field, flagger, flag, **kwargs):
-    flagger = flagger.clearFlags(field).setFlags(field, flag=flag, **kwargs)
+    flagger = flagger.clearFlags(field).setFlags(field, flag=flag, inplace=True, **kwargs)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def flagIsolated(
     data, field, flagger, gap_window, group_window, **kwargs,
 ):
@@ -561,7 +562,7 @@ def flagIsolated(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def flagDummy(data, field, flagger, **kwargs):
     """
     Function does nothing but returning data and flagger.
@@ -585,7 +586,7 @@ def flagDummy(data, field, flagger, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def flagForceFail(data, field, flagger, **kwargs):
     """
     Function raises a runtime error.
@@ -603,7 +604,7 @@ def flagForceFail(data, field, flagger, **kwargs):
     raise RuntimeError("Works as expected :D")
 
 
-@register
+@register(masking='field')
 def flagUnflagged(data, field, flagger, **kwargs):
     """
     Function sets the flagger.GOOD flag to all values flagged better then flagger.GOOD.
@@ -635,7 +636,7 @@ def flagUnflagged(data, field, flagger, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def flagGood(data, field, flagger, **kwargs):
     """
     Function sets the flagger.GOOD flag to all values flagged better then flagger.GOOD.
@@ -663,7 +664,7 @@ def flagGood(data, field, flagger, **kwargs):
     return flagUnflagged(data, field, flagger, **kwargs)
 
 
-@register
+@register(masking='field')
 def flagManual(data, field, flagger, mdata, mflag: Any = 1, method="plain", **kwargs):
     """
     Flag data by given, "manually generated" data.
@@ -790,7 +791,7 @@ def flagManual(data, field, flagger, mdata, mflag: Any = 1, method="plain", **kw
     return data, flagger
 
 
-@register
+@register(masking='all')
 def flagCrossScoring(data, field, flagger, fields, thresh, cross_stat='modZscore', **kwargs):
     """
     Function checks for outliers relatively to the "horizontal" input data axis.
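The rework of `_execGeneric` above drops the removed `Func` partial wrapper in favour of plain `inspect.signature` introspection: every parameter name of the generic function is resolved to a data column, with `this` translated to the currently processed field. A standalone sketch of that binding step (the helper name is hypothetical):

```python
from inspect import signature

def bindColumns(func, data, field):
    # resolve each parameter of a generic function to a data column,
    # translating the special name "this" to the processed field
    args = []
    for name in signature(func).parameters:
        name = field if name == "this" else name
        if name not in data:
            raise NameError(f"variable '{name}' not found")
        args.append(data[name])
    return args

# e.g. a config expression compiled to `lambda this, SM2: this > SM2`
# receives the processed column and the column "SM2", in that order.
```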
diff --git a/saqc/funcs/harm_functions.py b/saqc/funcs/harm_functions.py
index ee866d16a0bcca51d6a81105391f3cd1e1b55f50..b6205ddbd4470aad97eb73f0b97d0dd0f87b0ed0 100644
--- a/saqc/funcs/harm_functions.py
+++ b/saqc/funcs/harm_functions.py
@@ -19,7 +19,7 @@ from saqc.funcs.proc_functions import (
 
 logger = logging.getLogger("SaQC")
 
-@register
+@register(masking='none')
 def harm_shift2Grid(data, field, flagger, freq, method="nshift", to_drop=None, **kwargs):
     """
     A method to "regularize" data by shifting data points forward/backward to a regular timestamp.
@@ -81,7 +81,7 @@ def harm_shift2Grid(data, field, flagger, freq, method="nshift", to_drop=None, *
     return data, flagger
 
 
-@register
+@register(masking='none')
 def harm_aggregate2Grid(
     data, field, flagger, freq, value_func, flag_func=np.nanmax, method="nagg", to_drop=None, **kwargs
 ):
@@ -162,7 +162,7 @@ def harm_aggregate2Grid(
     return data, flagger
 
 
-@register
+@register(masking='none')
 def harm_linear2Grid(data, field, flagger, freq, to_drop=None, **kwargs):
     """
     A method to "regularize" data by interpolating linearly the data at regular timestamp.
@@ -211,7 +211,7 @@ def harm_linear2Grid(data, field, flagger, freq, to_drop=None, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='none')
 def harm_interpolate2Grid(
     data, field, flagger, freq, method, order=1, to_drop=None, **kwargs,
 ):
@@ -281,7 +281,7 @@ def harm_interpolate2Grid(
     return data, flagger
 
 
-@register
+@register(masking='none')
 def harm_deharmonize(data, field, flagger, method, to_drop=None, **kwargs):
     """
     The Function function "undoes" regularization, by regaining the original data and projecting the
diff --git a/saqc/funcs/proc_functions.py b/saqc/funcs/proc_functions.py
index 85ab704a09aaa6c520cde842f7a3107dab416e79..ac3b19499ca458d918b334fa5fd8a89e6c58d4b4 100644
--- a/saqc/funcs/proc_functions.py
+++ b/saqc/funcs/proc_functions.py
@@ -8,9 +8,8 @@ from saqc.lib.ts_operators import interpolateNANs, aggregate2Freq, shift2Freq, e
 from saqc.lib.tools import toSequence, mergeDios, dropper, mutateIndex
 import dios
 import functools
-import matplotlib.pyplot as plt
 from scipy.optimize import curve_fit
-import pickle
+from sklearn.linear_model import LinearRegression
 
 ORIGINAL_SUFFIX = "_original"
 
@@ -25,7 +24,7 @@ METHOD2ARGS = {
 }
 
 
-@register
+@register(masking='field')
 def proc_rollingInterpolateMissing(
     data, field, flagger, winsz, func=np.median, center=True, min_periods=0, interpol_flag="UNFLAGGED", **kwargs
 ):
@@ -94,7 +93,7 @@ def proc_rollingInterpolateMissing(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_interpolateMissing(
     data,
     field,
@@ -186,19 +185,21 @@ def proc_interpolateMissing(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_interpolateGrid(
-    data,
-    field,
-    flagger,
-    freq,
-    method,
-    inter_order=2,
-    to_drop=None,
-    downgrade_interpolation=False,
-    empty_intervals_flag=None,
-    **kwargs
-):
+        data,
+        field,
+        flagger,
+        freq,
+        method,
+        inter_order=2,
+        to_drop=None,
+        downgrade_interpolation=False,
+        empty_intervals_flag=None,
+        grid_field=None,
+        inter_limit=2,
+        **kwargs):
+
     """
     Function to interpolate the data at regular (equidistant) timestamps (or Grid points).
 
@@ -208,6 +209,11 @@ def proc_interpolateGrid(
     Note, that the function differs from proc_interpolateMissing, by returning a whole new data set, only containing
     samples at the interpolated, equidistant timestamps (of frequency "freq").
 
+    Note that it is also possible to interpolate irregular "grids" (with no frequency). In fact, any date index
+    can be the target of the interpolation. Just pass the field name of the variable holding the index
+    you want to interpolate to "grid_field". 'freq' is then used to determine the maximum gap size for
+    a grid point to be interpolated.
+
     Parameters
     ----------
     data : dios.DictOfSeries
@@ -235,6 +241,11 @@ def proc_interpolateGrid(
         A Flag, that you want to assign to those values resulting equidistant sample grid, that were not surrounded by
         valid (flagged) data in the original dataset and thus werent interpolated. Default automatically assigns
         flagger.BAD flag to those values.
+    grid_field : String, default None
+        Use the timestamps of another variable as the (not necessarily regular) "grid" to interpolate onto.
+    inter_limit : Integer, default 2
+        Maximum number of consecutive grid values allowed for interpolation. If set
+        to "n", chunks of "n" consecutive grid values won't be interpolated in the result.
 
     Returns
     -------
@@ -259,8 +270,8 @@ def proc_interpolateGrid(
     datcol.dropna(inplace=True)
     if datcol.empty:
         data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger)
+        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
         return data, flagger
     # account for annoying case of subsequent frequency aligned values, differing exactly by the margin
     # 2*freq:
@@ -274,9 +285,12 @@ def proc_interpolateGrid(
         spec_case_mask = spec_case_mask.tshift(-1, freq)
 
     # prepare grid interpolation:
-    grid_index = pd.date_range(
-        start=datcol.index[0].floor(freq), end=datcol.index[-1].ceil(freq), freq=freq, name=datcol.index.name
-    )
+    if grid_field is None:
+        grid_index = pd.date_range(start=datcol.index[0].floor(freq), end=datcol.index[-1].ceil(freq), freq=freq,
+                                   name=datcol.index.name)
+    else:
+        grid_index = data[grid_field].index
+
 
     aligned_start = datcol.index[0] == grid_index[0]
     aligned_end = datcol.index[-1] == grid_index[-1]
@@ -284,24 +298,40 @@ def proc_interpolateGrid(
 
     # do the interpolation
     inter_data, chunk_bounds = interpolateNANs(
-        datcol,
-        method,
-        order=inter_order,
-        inter_limit=2,
-        downgrade_interpolation=downgrade_interpolation,
-        return_chunk_bounds=True,
+        datcol, method, order=inter_order, inter_limit=inter_limit, downgrade_interpolation=downgrade_interpolation,
+        return_chunk_bounds=True
     )
 
-    # override falsely interpolated values:
-    inter_data[spec_case_mask.index] = np.nan
+    if grid_field is None:
+        # override falsely interpolated values:
+        inter_data[spec_case_mask.index] = np.nan
 
     # store interpolated grid
-    inter_data = inter_data.asfreq(freq)
+    inter_data = inter_data[grid_index]
     data[field] = inter_data
 
     # flags reshaping (dropping data drops):
     flagscol.drop(flagscol[drop_mask].index, inplace=True)
 
+    if grid_field is not None:
+        # only basic flag propagation is supported for custom grids (take the worst of preceding/succeeding)
+        preceding = flagscol.reindex(grid_index, method='ffill', tolerance=freq)
+        succeeding = flagscol.reindex(grid_index, method='bfill', tolerance=freq)
+        # check for gaps in the source data that are too large and drop the values interpolated across them
+        na_mask = preceding.isna() | succeeding.isna()
+        na_mask = na_mask[na_mask]
+        preceding.drop(na_mask.index, inplace=True)
+        succeeding.drop(na_mask.index, inplace=True)
+        inter_data.drop(na_mask.index, inplace=True)
+        data[field] = inter_data
+        mask = succeeding > preceding
+        preceding.loc[mask] = succeeding.loc[mask]
+        flagscol = preceding
+        flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(flagger_new)
+        return data, flagger
+
+    # for frequency-defined grids, max-aggregate the flags in every grid point's freq-ranged surrounding
     # hack ahead! Resampling with overlapping intervals:
     # 1. -> no rolling over categories allowed in pandas, so we translate manually:
     cats = pd.CategoricalIndex(flagger.dtype.categories, ordered=True)
@@ -331,17 +361,17 @@ def proc_interpolateGrid(
     if np.isnan(inter_data[-1]) and not aligned_end:
         chunk_bounds = chunk_bounds.append(pd.DatetimeIndex([inter_data.index[-1]]))
     chunk_bounds = chunk_bounds.unique()
-    flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, **kwargs)
+    flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
 
     # block chunk ends of interpolation
     flags_to_block = pd.Series(np.nan, index=chunk_bounds).astype(flagger_new.dtype)
-    flagger_new = flagger_new.setFlags(field, loc=chunk_bounds, flag=flags_to_block, force=True)
+    flagger_new = flagger_new.setFlags(field, loc=chunk_bounds, flag=flags_to_block, force=True, inplace=True)
 
-    flagger = flagger.slice(drop=field).merge(flagger_new)
+    flagger = flagger.slice(drop=field).merge(flagger_new, subset=[field], inplace=True)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_resample(
     data,
     field,
@@ -452,8 +482,8 @@ def proc_resample(
         # for consistency reasons - return empty data/flags column when there is no valid data left
         # after filtering.
         data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger)
+        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
         return data, flagger
 
     datcol = aggregate2Freq(
@@ -477,12 +507,12 @@ def proc_resample(
 
     # data/flags reshaping:
     data[field] = datcol
-    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, **kwargs)
-    flagger = flagger.slice(drop=field).merge(reshaped_flagger)
+    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+    flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_shift(data, field, flagger, freq, method, to_drop=None, empty_intervals_flag=None, **kwargs):
     """
     Function to shift data points to regular (equidistant) timestamps.
@@ -540,8 +570,8 @@ def proc_shift(data, field, flagger, freq, method, to_drop=None, empty_intervals
     datcol.dropna(inplace=True)
     if datcol.empty:
         data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger)
+        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
         return data, flagger
 
     flagscol.drop(drop_mask[drop_mask].index, inplace=True)
@@ -549,12 +579,12 @@ def proc_shift(data, field, flagger, freq, method, to_drop=None, empty_intervals
     datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
     flagscol = shift2Freq(flagscol, method, freq, fill_value=empty_intervals_flag)
     data[field] = datcol
-    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, **kwargs)
-    flagger = flagger.slice(drop=field).merge(reshaped_flagger)
+    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+    flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_transform(data, field, flagger, func, **kwargs):
     """
     Function to transform data columns with a transformation that maps series onto series of the same length.
@@ -588,7 +618,7 @@ def proc_transform(data, field, flagger, func, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_projectFlags(data, field, flagger, method, source, freq=None, to_drop=None, **kwargs):
 
     """
@@ -720,7 +750,7 @@ def proc_projectFlags(data, field, flagger, method, source, freq=None, to_drop=N
     return data, flagger
 
 
-@register
+@register(masking='none')
 def proc_fork(data, field, flagger, suffix=ORIGINAL_SUFFIX, **kwargs):
     """
     The function generates a copy of the data "field" and inserts it under the name field + suffix into the existing
@@ -749,13 +779,13 @@ def proc_fork(data, field, flagger, suffix=ORIGINAL_SUFFIX, **kwargs):
 
     fork_field = str(field) + suffix
     fork_dios = dios.DictOfSeries({fork_field: data[field]})
-    fork_flagger = flagger.slice(drop=data.columns.drop(field)).rename(field, fork_field)
+    fork_flagger = flagger.slice(drop=data.columns.drop(field)).rename(field, fork_field, inplace=True)
     data = mergeDios(data, fork_dios)
     flagger = flagger.merge(fork_flagger)
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_drop(data, field, flagger, **kwargs):
     """
     The function drops field from the data dios and the flagger.
@@ -784,7 +814,7 @@ def proc_drop(data, field, flagger, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def proc_rename(data, field, flagger, new_name, **kwargs):
     """
     The function renames field to new name (in both, the flagger and the data).
@@ -841,7 +871,7 @@ def _drift_fit(x, shift_target, cal_mean):
     return dataFit, dataShift
 
 
-@register
+@register(masking='all')
 def proc_seefoExpDriftCorrecture(data, field, flagger, maint_data_field, cal_mean=5, flag_maint_period=False, **kwargs):
     """
     The function fits an exponential model to chunks of data[field].
@@ -932,4 +962,37 @@ def proc_seefoExpDriftCorrecture(data, field, flagger, maint_data_field, cal_mea
         to_flag = to_flag.drop(to_flag[: maint_data.index[0]].index)
         to_flag = to_flag[to_flag.isna()]
         flagger = flagger.setFlags(field, loc=to_flag, **kwargs)
+
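+    # write the drift-corrected values back into the data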
+    data[field] = to_correct
+
     return data, flagger
+
+
+@register(masking='all')
+def proc_seefoLinearDriftCorrecture(data, field, flagger, x_field, y_field, **kwargs):
+    """
+    Train a linear model that predicts data[y_field] from data[x_field] via a least squares fit: y = x_1 * x + x_0.
+
+    The data[field] is then corrected via:
+
+    data[field] = data[field] * x_1 + x_0
+
+    Note that data[x_field] and data[y_field] must be of equal length.
+    (You will usually also want them to be sampled at the same timestamps.)
+
+    Parameters
+    ----------
+    x_field : String
+        Field name of the x data.
+    y_field : String
+        Field name of the y data.
+
+    """
+    data = data.copy()
+    datcol = data[field]
+    reg = LinearRegression()
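+    # ordinary least squares fit of y = x_1 * x + x_0 (LinearRegression is assumed to be imported from sklearn.linear_model at module level)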
+    reg.fit(data[x_field].values.reshape(-1, 1), data[y_field].values)
+    datcol = (datcol * reg.coef_[0]) + reg.intercept_
+    data[field] = datcol
+    return data, flagger
+
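+# A minimal usage sketch (the field names "sensor" and "reference" are hypothetical,
+# not part of the module): correct "sensor" against a co-sampled "reference" series.
+#
+#   data, flagger = proc_seefoLinearDriftCorrecture(
+#       data, field="sensor", flagger=flagger, x_field="sensor", y_field="reference"
+#   )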
diff --git a/saqc/funcs/soil_moisture_tests.py b/saqc/funcs/soil_moisture_tests.py
index d36f735e24cb4a543d04627f9000db3794ac429e..424b618b87be57d9b7d0a8b63a44f89cb2ce13dc 100644
--- a/saqc/funcs/soil_moisture_tests.py
+++ b/saqc/funcs/soil_moisture_tests.py
@@ -14,7 +14,7 @@ from saqc.core.register import register
 from saqc.lib.tools import retrieveTrustworthyOriginal
 
 
-@register
+@register(masking='field')
 def sm_flagSpikes(
     data,
     field,
@@ -96,7 +96,7 @@ def sm_flagSpikes(
     )
 
 
-@register
+@register(masking='field')
 def sm_flagBreaks(
     data,
     field,
@@ -184,7 +184,7 @@ def sm_flagBreaks(
     )
 
 
-@register
+@register(masking='all')
 def sm_flagFrost(data, field, flagger, soil_temp_variable, window="1h", frost_thresh=0, **kwargs):
 
     """
@@ -248,7 +248,7 @@ def sm_flagFrost(data, field, flagger, soil_temp_variable, window="1h", frost_th
     return data, flagger
 
 
-@register
+@register(masking='all')
 def sm_flagPrecipitation(
     data,
     field,
@@ -383,7 +383,8 @@ def sm_flagPrecipitation(
     flagger = flagger.setFlags(field, loc=invalid_indices.index, **kwargs)
     return data, flagger
 
-@register
+
+@register(masking='field')
 def sm_flagConstants(
     data,
     field,
@@ -523,7 +524,7 @@ def sm_flagConstants(
     return data, flagger
 
 
-@register
+@register(masking='all')
 def sm_flagRandomForest(data, field, flagger, references, window_values: int, window_flags: int, path: str, **kwargs):
     """
     This Function uses pre-trained machine-learning model objects for flagging of a specific variable. The model is
diff --git a/saqc/funcs/spikes_detection.py b/saqc/funcs/spikes_detection.py
index 89d245ae7f93a84d5d681b0ac971b8aefc157ad1..fbfb4289be2ae51c46ced5e6017526961da18ed4 100644
--- a/saqc/funcs/spikes_detection.py
+++ b/saqc/funcs/spikes_detection.py
@@ -16,10 +16,10 @@ from saqc.lib.tools import (
     offset2seconds,
     slidingWindowIndices,
     findIndex,
+    toSequence
 )
 from outliers import smirnov_grubbs
 
-
 def _stray(
     val_frame,
     partition_freq=None,
@@ -28,6 +28,8 @@ def _stray(
     n_neighbors=10,
     iter_start=0.5,
     alpha=0.05,
+    trafo=lambda x: x
 ):
     """
     Find outliers in multi dimensional observations.
@@ -97,6 +99,7 @@ def _stray(
     for _, partition in partitions:
         if partition.empty | (partition.shape[0] < partition_min):
             continue
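+        # apply the (optional) value transformation to the partition before scoring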
+        partition = partition.apply(trafo)
         sample_size = partition.shape[0]
         nn_neighbors = min(n_neighbors, max(sample_size, 2))
         resids = kNNfunc(partition.values, n_neighbors=nn_neighbors - 1, algorithm="ball_tree")
@@ -180,7 +183,7 @@ def _expFit(val_frame, scoring_method="kNNMaxGap", n_neighbors=10, iter_start=0.
     elif bin_frac in ["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"]:
         binz = np.histogram_bin_edges(resids, bins=bin_frac)
     else:
-        raise ValueError("Cant interpret {} as an binning technique.".format(bin_frac))
+        raise ValueError(f"Can't interpret {bin_frac} as a binning technique.")
 
     binzenters = np.array([0.5 * (binz[i] + binz[i + 1]) for i in range(len(binz) - 1)])
     # inititialize full histogram:
@@ -233,7 +236,8 @@ def _expFit(val_frame, scoring_method="kNNMaxGap", n_neighbors=10, iter_start=0.
 
 
 def _reduceMVflags(
-    val_frame, fields, flagger, to_flag_frame, reduction_range, reduction_drop_flagged=False, reduction_thresh=3.5
+    val_frame, fields, flagger, to_flag_frame, reduction_range, reduction_drop_flagged=False, reduction_thresh=3.5,
+        reduction_min_periods=1
 ):
     """
     Function called by "spikes_flagMultivarScores" to reduce the number of false positives that result from
@@ -266,21 +270,30 @@ def _reduceMVflags(
     ----------
     [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
     """
+
     to_flag_frame[:] = False
     to_flag_index = to_flag_frame.index
     for var in fields:
         for index in enumerate(to_flag_index):
             index_slice = slice(index[1] - pd.Timedelta(reduction_range), index[1] + pd.Timedelta(reduction_range))
 
-            test_slice = val_frame[var][index_slice]
+            test_slice = val_frame[var][index_slice].dropna()
+            # check whether the value under test is sufficiently centered within the test slice:
+            first_valid = test_slice.first_valid_index()
+            last_valid = test_slice.last_valid_index()
+            min_range = pd.Timedelta(reduction_range)/4
+            polydeg = 2
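+            # fall back to a constant (degree 0) fit below if the tested value lies too close to either edge of the slice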
+            if ((pd.Timedelta(index[1] - first_valid) < min_range) |
+                (pd.Timedelta(last_valid - index[1]) < min_range)):
+                polydeg = 0
             if reduction_drop_flagged:
-                test_slice = test_slice.drop(to_flag_index, errors="ignore")
-            if not test_slice.empty:
-                x = test_slice.index.values.astype(float)
+                test_slice = test_slice.drop(to_flag_index, errors='ignore')
+            if test_slice.shape[0] >= reduction_min_periods:
+                x = (test_slice.index.values.astype(float))
                 x_0 = x[0]
-                x = (x - x_0) / 10 ** 12
-                polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=2)
-                testval = poly.polyval((float(index[1].to_numpy()) - x_0) / 10 ** 12, polyfitted)
+                x = (x - x_0)/10**12
+                polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=polydeg)
+                testval = poly.polyval((float(index[1].to_numpy()) - x_0)/10**12, polyfitted)
                 testval = val_frame[var][index[1]] - testval
                 resids = test_slice.values - poly.polyval(x, polyfitted)
                 med_resids = np.median(resids)
@@ -294,7 +307,7 @@ def _reduceMVflags(
     return to_flag_frame
 
 
-@register
+@register(masking='all')
 def spikes_flagMultivarScores(
     data,
     field,
@@ -313,6 +326,7 @@ def spikes_flagMultivarScores(
     reduction_range=None,
     reduction_drop_flagged=False,
     reduction_thresh=3.5,
+    reduction_min_periods=1,
     **kwargs,
 ):
     """
@@ -445,42 +459,40 @@ def spikes_flagMultivarScores(
     """
 
     # data transformation/extraction
+    data = data.copy()
+    fields = toSequence(fields)
     val_frame = data[fields]
     val_frame = val_frame.loc[val_frame.index_of("shared")].to_df()
     val_frame.dropna(inplace=True)
-    val_frame = val_frame.apply(trafo)
-
-    if threshing == "stray":
-        to_flag_index = _stray(
-            val_frame,
-            partition_freq=stray_partition,
-            partition_min=stray_partition_min,
-            scoring_method=scoring_method,
-            n_neighbors=n_neighbors,
-            iter_start=iter_start,
-        )
+    if val_frame.empty:
+        return data, flagger
+
+    if threshing == 'stray':
+        to_flag_index = _stray(val_frame,
+                               partition_freq=stray_partition,
+                               partition_min=stray_partition_min,
+                               scoring_method=scoring_method,
+                               n_neighbors=n_neighbors,
+                               iter_start=iter_start,
+                               trafo=trafo)
 
     else:
-        to_flag_index = _expFit(
-            val_frame,
-            scoring_method=scoring_method,
-            n_neighbors=n_neighbors,
-            iter_start=iter_start,
-            alpha=alpha,
-            bin_frac=expfit_binning,
-        )
+        val_frame = val_frame.apply(trafo)
+        to_flag_index = _expFit(val_frame,
+                                scoring_method=scoring_method,
+                                n_neighbors=n_neighbors,
+                                iter_start=iter_start,
+                                alpha=alpha,
+                                bin_frac=expfit_binning)
 
     to_flag_frame = pd.DataFrame({var_name: True for var_name in fields}, index=to_flag_index)
     if post_reduction:
-        to_flag_frame = _reduceMVflags(
-            val_frame,
-            fields,
-            flagger,
-            to_flag_frame,
-            reduction_range,
-            reduction_drop_flagged=reduction_drop_flagged,
-            reduction_thresh=reduction_thresh,
-        )
+        val_frame = data[toSequence(fields)].to_df()
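+        # note: the reduction step works on the original (untransformed) values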
+        to_flag_frame = _reduceMVflags(val_frame, fields, flagger, to_flag_frame, reduction_range,
+                                       reduction_drop_flagged=reduction_drop_flagged,
+                                       reduction_thresh=reduction_thresh,
+                                       reduction_min_periods=reduction_min_periods)
+
 
     for var in fields:
         to_flag_ind = to_flag_frame.loc[:, var]
@@ -490,7 +502,7 @@ def spikes_flagMultivarScores(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def spikes_flagRaise(
     data,
     field,
@@ -651,7 +663,7 @@ def spikes_flagRaise(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def spikes_flagSlidingZscore(
     data, field, flagger, window, offset, count=1, polydeg=1, z=3.5, method="modZ", **kwargs,
 ):
@@ -798,7 +810,7 @@ def spikes_flagSlidingZscore(
     return data, flagger
 
 
-@register
+@register(masking='field')
 def spikes_flagMad(data, field, flagger, window, z=3.5, **kwargs):
 
     """
@@ -857,7 +869,7 @@ def spikes_flagMad(data, field, flagger, window, z=3.5, **kwargs):
     return data, flagger
 
 
-@register
+@register(masking='field')
 def spikes_flagBasic(data, field, flagger, thresh=7, tolerance=0, window="15min", **kwargs):
     """
     A basic outlier test that is designed to work for harmonized and not harmonized data.
@@ -962,7 +974,7 @@ def spikes_flagBasic(data, field, flagger, thresh=7, tolerance=0, window="15min"
     return data, flagger
 
 
-@register
+@register(masking='field')
 def spikes_flagSpektrumBased(
     data,
     field,
@@ -1113,6 +1125,7 @@ def spikes_flagSpektrumBased(
     return data, flagger
 
 
+@register(masking='field')
 def spikes_flagGrubbs(data, field, flagger, winsz, alpha=0.05, min_periods=8, check_lagged=False, **kwargs):
     """
     The function flags values that are regarded outliers due to the grubbs test.
diff --git a/saqc/lib/plotting.py b/saqc/lib/plotting.py
index 9b780d70198b3e6085b6fda21f5524c7d9c619a6..0a9ac3066895e89575a84232dbef171d85ca12f4 100644
--- a/saqc/lib/plotting.py
+++ b/saqc/lib/plotting.py
@@ -5,7 +5,7 @@ import logging
 
 import numpy as np
 import pandas as pd
-import dios.dios as dios
+import dios
 import matplotlib.pyplot as plt
 from typing import List, Dict, Optional
 from saqc.flagger import BaseFlagger
diff --git a/saqc/lib/tools.py b/saqc/lib/tools.py
index c654694eedca4e8e06325ba4a62fb465a0f0d62f..d8383172f5db724609445f0ad2b07ea55fae0a9d 100644
--- a/saqc/lib/tools.py
+++ b/saqc/lib/tools.py
@@ -7,13 +7,14 @@ from typing import Sequence, Union, Any, Iterator
 import numpy as np
 import numba as nb
 import pandas as pd
-
+import logging
 import dios
-import inspect
+
 
 # from saqc.flagger import BaseFlagger
 from saqc.lib.types import T
 
+logger = logging.getLogger("SaQC")
 
 def assertScalar(name, value, optional=False):
     if (not np.isscalar(value)) and (value is not None) and (optional is True):
@@ -303,12 +304,18 @@ def groupConsecutives(series: pd.Series) -> Iterator[pd.Series]:
         start = stop
 
 
-def mergeDios(left, right, join="merge"):
+def mergeDios(left, right, subset=None, join="merge"):
     # use dios.merge() as soon as it implemented
     # see https://git.ufz.de/rdm/dios/issues/15
 
     merged = left.copy()
-    shared_cols = left.columns.intersection(right.columns)
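+    # if ``subset`` is given, only those columns of ``right`` are merged in or appended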
+    if subset is not None:
+        right_subset_cols = right.columns.intersection(subset)
+    else:
+        right_subset_cols = right.columns
+
+    shared_cols = left.columns.intersection(right_subset_cols)
+
     for c in shared_cols:
         l, r = left[c], right[c]
         if join == "merge":
@@ -323,7 +330,7 @@ def mergeDios(left, right, join="merge"):
             l, r = l.align(r, join=join)
         merged[c] = l.combine_first(r)
 
-    newcols = right.columns.difference(merged.columns)
+    newcols = right_subset_cols.difference(left.columns)
     for c in newcols:
         merged[c] = right[c].copy()
 
@@ -349,3 +356,4 @@ def mutateIndex(index, old_name, new_name):
     index = index.drop(index[pos])
     index = index.insert(pos, new_name)
     return index
+
diff --git a/saqc/lib/types.py b/saqc/lib/types.py
index 0a6b9643bc59c3669f33e5c4f1c332dc225982c1..facebe59987a4d3a74352b7146a0ab2320f8a73f 100644
--- a/saqc/lib/types.py
+++ b/saqc/lib/types.py
@@ -5,7 +5,7 @@ from typing import TypeVar, Union
 
 import numpy as np
 import pandas as pd
-import dios.dios as dios
+import dios
 
 T = TypeVar("T")
 ArrayLike = TypeVar("ArrayLike", np.ndarray, pd.Series, pd.DataFrame)
diff --git a/setup.py b/setup.py
index 28dc64c8ba650fb232fca9cd5eff288ca746dc13..c152bfee6feab1ff885576b1e047d10c7b7b3426 100644
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,7 @@ setup(
         "pyarrow",
         "python-intervals",
         "astor",
+        "dios"
     ],
     license="GPLv3",
     entry_points={"console_scripts": ["saqc=saqc.__main__:main"],},
diff --git a/sphinx-doc/.gitignore b/sphinx-doc/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..16f05fe5bcb986845d46c131c0e10447581670b2
--- /dev/null
+++ b/sphinx-doc/.gitignore
@@ -0,0 +1,6 @@
+
+_api/
+_build/
+_static/
+*.automodsumm
+_static/*
\ No newline at end of file
diff --git a/sphinx-doc/FlagFunctions.rst b/sphinx-doc/FlagFunctions.rst
new file mode 100644
index 0000000000000000000000000000000000000000..584d0dc5f48d0434a02d86ecbf884e47188d7236
--- /dev/null
+++ b/sphinx-doc/FlagFunctions.rst
@@ -0,0 +1,7 @@
+
+Functions
+=========
+
+.. automodapi:: saqc.funcs
+   :skip: register
+
diff --git a/sphinx-doc/Makefile b/sphinx-doc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..efdfe91d931f29bf94f8b1f08c1b6f3d0661c8ab
--- /dev/null
+++ b/sphinx-doc/Makefile
@@ -0,0 +1,27 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile clean
+
+# clean sphinx generated stuff
+clean:
+	rm -rf _build _static _api
+	rm -f *.automodsumm
+	mkdir _static
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
diff --git a/sphinx-doc/conf.py b/sphinx-doc/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..77bdd67bc2a821b18f35e06557be2ea665b6a3c2
--- /dev/null
+++ b/sphinx-doc/conf.py
@@ -0,0 +1,111 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+sys.path.insert(0, os.path.abspath('..'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'SaQC'
+copyright = '2020, Bert Palm, David Schäfer, Peter Lünenschloß, Lennart Schmidt, Juliane Geller'
+author = 'Bert Palm, David Schäfer, Peter Lünenschloß, Lennart Schmidt, Juliane Geller'
+
+# The full version, including alpha/beta/rc tags
+release = 'develop'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    # "sphinx.ext.doctest",
+    # "sphinx.ext.extlinks",
+    # "sphinx.ext.todo",
+    # "sphinx.ext.intersphinx",
+    # "sphinx.ext.coverage",
+    # "sphinx.ext.mathjax",
+    # "sphinx.ext.ifconfig",
+    "sphinx.ext.autosectionlabel",
+
+    # link source code
+    "sphinx.ext.viewcode",
+
+    # add support for NumPy-style docstrings
+    "sphinx.ext.napoleon",
+
+    # Doc a whole module
+    # see https://sphinx-automodapi.readthedocs.io/en/latest/
+    'sphinx_automodapi.automodapi',
+    # 'sphinx_automodapi.smart_resolver',
+    # see https://sphinxcontrib-fulltoc.readthedocs.io/en/latest/
+    'sphinxcontrib.fulltoc',
+
+    # Markdown sources support
+    # https://recommonmark.readthedocs.io/en/latest/
+    'recommonmark',
+    # https://github.com/ryanfox/sphinx-markdown-tables
+    'sphinx_markdown_tables',
+]
+
+
+# -- Params of the extensions ------------------------------------------------
+
+numpydoc_show_class_members = False
+
+automodsumm_inherited_members = True
+# write out the files generated by automodapi, mainly for debugging
+automodsumm_writereprocessed = True
+
+automodapi_inheritance_diagram = False
+automodapi_toctreedirnm = '_api'
+autosectionlabel_prefix_document = True
+
+
+# -- Other options -----------------------------------------------------------
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+source_suffix = ['.rst', '.md']
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = "nature"
+
+# use pandas theme
+# html_theme = "pydata_sphinx_theme"
+
+
+# html_theme_options = {
+# }
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
\ No newline at end of file
diff --git a/sphinx-doc/flagger.rst b/sphinx-doc/flagger.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d8536aa3e39c53d92aa11b9057b92ceef84b7535
--- /dev/null
+++ b/sphinx-doc/flagger.rst
@@ -0,0 +1,11 @@
+
+Flagger
+=======
+
+.. automodapi:: saqc.flagger
+   :include-all-objects:
+   :no-heading:
+
+
+
+
diff --git a/sphinx-doc/index.rst b/sphinx-doc/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c8d40fbd90e200486bdc243c67445ccd0e690015
--- /dev/null
+++ b/sphinx-doc/index.rst
@@ -0,0 +1,31 @@
+.. SaQC documentation master file, created by
+   sphinx-quickstart on Mon Aug 17 12:11:29 2020.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to SaQC's documentation!
+================================
+
+SaQC is a tool for cleaning rubbish out of your data.
+
+.. toctree::
+   :hidden:
+
+   Repository <https://git.ufz.de/rdm-software/saqc>
+
+.. toctree::
+   :maxdepth: 2
+
+   flagger
+
+.. toctree::
+   :maxdepth: 2
+
+   FlagFunctions
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/sphinx-doc/make.bat b/sphinx-doc/make.bat
new file mode 100644
index 0000000000000000000000000000000000000000..6247f7e231716482115f34084ac61030743e0715
--- /dev/null
+++ b/sphinx-doc/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/sphinx-doc/requirements_sphinx.txt b/sphinx-doc/requirements_sphinx.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f00da706d00d72a8d9a6c0e73595311889863c1d
--- /dev/null
+++ b/sphinx-doc/requirements_sphinx.txt
@@ -0,0 +1,68 @@
+alabaster==0.7.12
+appdirs==1.4.4
+astor==0.8.1
+attrs==20.1.0
+Babel==2.8.0
+black==20.8b1
+certifi==2020.6.20
+chardet==3.0.4
+Click==7.0
+commonmark==0.9.1
+cycler==0.10.0
+decorator==4.4.2
+dios==0.6.0
+docutils==0.16
+dtw==1.4.0
+idna==2.10
+imagesize==1.2.0
+importlib-metadata==1.5.0
+Jinja2==2.11.2
+joblib==0.14.1
+kiwisolver==1.1.0
+llvmlite==0.34.0
+Markdown==3.2.2
+MarkupSafe==1.1.1
+matplotlib==3.3.1
+mlxtend==0.17.2
+more-itertools==8.5.0
+numba==0.51.1
+numpy==1.19.1
+outlier==0.2
+outlier-utils==0.0.3
+packaging==20.1
+pandas==1.1.1
+pathspec==0.8.0
+pluggy==0.13.1
+py==1.8.1
+pyarrow==1.0.1
+Pygments==2.6.1
+pyparsing==2.4.6
+pytest==5.3.5
+pytest-lazy-fixture==0.6.3
+python-dateutil==2.8.1
+python-intervals==1.10.0
+pytz==2019.3
+PyWavelets==1.1.1
+recommonmark==0.6.0
+regex==2020.7.14
+requests==2.24.0
+scikit-learn==0.22.1
+scipy==1.4.1
+six==1.14.0
+snowballstemmer==2.0.0
+Sphinx==3.2.1
+sphinx-automodapi==0.12
+sphinx-markdown-tables==0.0.15
+sphinxcontrib-applehelp==1.0.2
+sphinxcontrib-devhelp==1.0.2
+sphinxcontrib-fulltoc==1.2.0
+sphinxcontrib-htmlhelp==1.0.3
+sphinxcontrib-jsmath==1.0.1
+sphinxcontrib-qthelp==1.0.3
+sphinxcontrib-serializinghtml==1.1.4
+toml==0.10.1
+typed-ast==1.4.1
+urllib3==1.25.10
+utils==1.0.1
+wcwidth==0.1.8
+zipp==2.2.0
diff --git a/test/core/test_core.py b/test/core/test_core.py
index 21a9624a3d84e2c435a988e00d0fe1deed2be976..f6753980ab7a70fc3c90a6fe21cae37b7c761278 100644
--- a/test/core/test_core.py
+++ b/test/core/test_core.py
@@ -21,7 +21,7 @@ logging.disable(logging.CRITICAL)
 OPTIONAL = [False, True]
 
 
-@register
+@register(masking='field')
 def flagAll(data, field, flagger, **kwargs):
     # NOTE: remember to rename flag -> flag_values
     return data, flagger.setFlags(field=field, flag=flagger.BAD)
@@ -38,10 +38,10 @@ def flags(flagger, data, optional):
         return flagger.initFlags(data[data.columns[::2]])._flags
 
 
-@pytest.mark.skip(reason="does not make sense anymore")
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_errorHandling(data, flagger):
-    @register
+
+    @register(masking='field')
     def raisingFunc(data, field, flagger, **kwargs):
         raise TypeError
 
@@ -51,6 +51,9 @@ def test_errorHandling(data, flagger):
         # NOTE: should not fail, that's all we are testing here
         SaQC(flagger, data, error_policy=policy).raisingFunc(var1).getResult()
 
+    with pytest.raises(TypeError):
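+        # the "raise" policy should propagate the TypeError raised inside raisingFunc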
+        SaQC(flagger, data, error_policy='raise').raisingFunc(var1).getResult()
+
 
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_duplicatedVariable(flagger):
@@ -79,7 +82,7 @@ def test_assignVariable(flagger):
     pdata, pflagger = SaQC(flagger, data).flagAll(var1).flagAll(var2).getResult()
     pflags = pflagger.getFlags()
 
-    assert (pflags.columns == [var1, var2]).all()
+    assert (set(pflags.columns) == {var1, var2})
     assert pflagger.isFlagged(var2).empty
 
 
@@ -99,108 +102,6 @@ def test_dtypes(data, flagger, flags):
     assert dict(flags.dtypes) == dict(pflags.dtypes)
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_masking(data, flagger):
-    """
-    test if flagged values are exluded during the preceding tests
-    """
-    flagger = flagger.initFlags(data)
-    flags = flagger.getFlags()
-    var1 = 'var1'
-    mn = min(data[var1])
-    mx = max(data[var1]) / 2
-
-    qc = SaQC(flagger, data)
-    qc = qc.flagRange(var1, mn, mx)
-    # min is not considered because its the smalles possible value.
-    # if masking works, `data > max` will be masked,
-    # so the following will deliver True for in range (data < max),
-    # otherwise False, like an inverse range-test
-    qc = qc.procGeneric("dummy", func=lambda var1: var1 >= mn)
-
-    pdata, pflagger = qc.getResult()
-    out_of_range = pflagger.isFlagged(var1)
-    in_range = ~out_of_range
-
-    assert (pdata.loc[out_of_range, "dummy"] == False).all()
-    assert (pdata.loc[in_range, "dummy"] == True).all()
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_masking_UnmaskingOnDataChange(data, flagger):
-    """ test if (un)masking works as expected on data-change.
-
-    If the data change in the func, unmasking should respect this changes and
-    should not reapply original data, instead take the new data (and flags) as is.
-    Also if flags change, the data should be taken as is.
-    """
-    FILLER = -9999
-
-    @register
-    def changeData(data, field, flagger, **kwargs):
-        mask = data.isna()
-        data.aloc[mask] = FILLER
-        return data, flagger
-
-    @register
-    def changeFlags(data, field, flagger, **kwargs):
-        mask = data.isna()
-        flagger = flagger.setFlags(field, loc=mask[field], flag=flagger.UNFLAGGED, force=True)
-        return data, flagger
-
-    var = data.columns[0]
-    var_data = data[var]
-    mn, mx = var_data.max() * .25, var_data.max() * .75
-    range_mask = (var_data < mn) | (var_data > mx)
-
-    qc = SaQC(flagger, data)
-    qc = qc.flagRange(var, mn, mx)
-    qcD = qc.changeData(var)
-    qcF = qc.changeFlags(var)
-
-    data, flagger = qcD.getResult()
-    assert (data[var][range_mask] == FILLER).all(axis=None)
-    # only flags change so the data should be still NaN, because
-    # the unmasking was disabled, but the masking indeed was happening
-    data, flagger = qcF.getResult()
-    assert data[var][range_mask].isna().all(axis=None)
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_shapeDiffUnmasking(data, flagger):
-    """ test if (un)masking works as expected on index-change.
-
-    If the index of data (and flags) change in the func, the unmasking,
-    should not reapply original data, instead take the new data (and flags) as is.
-    """
-
-    FILLER = -1111
-
-    @register
-    def pseudoHarmo(data, field, flagger, **kwargs):
-        index = data[field].index.to_series()
-        index.iloc[-len(data[field])//2:] += pd.Timedelta("7.5Min")
-
-        data[field] = pd.Series(data=FILLER, index=index)
-
-        flags = flagger.getFlags()
-        flags[field] = pd.Series(data=flags[field].values, index=index)
-
-        flagger = flagger.initFlags(flags=flags)
-        return data, flagger
-
-    var = data.columns[0]
-    var_data = data[var]
-    mn, mx = var_data.max() * .25, var_data.max() * .75
-
-    qc = SaQC(flagger, data)
-    qc = qc.flagRange(var, mn, mx)
-    qc = qc.pseudoHarmo(var)
-
-    data, flagger = qc.getResult()
-    assert (data[var] == FILLER).all(axis=None)
-
-
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_plotting(data, flagger):
     """
diff --git a/test/core/test_masking.py b/test/core/test_masking.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe332f28bcb0eec835057213d92f5c465909007a
--- /dev/null
+++ b/test/core/test_masking.py
@@ -0,0 +1,119 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import logging
+
+import pytest
+import pandas as pd
+
+from saqc import SaQC, register
+from test.common import initData, TESTFLAGGER
+
+
+logging.disable(logging.CRITICAL)
+
+
+@pytest.fixture
+def data():
+    return initData(3)
+
+
+@pytest.mark.parametrize("flagger", TESTFLAGGER)
+def test_masking(data, flagger):
+    """
+    test if values flagged in preceding tests are excluded from subsequent tests
+    """
+    flagger = flagger.initFlags(data)
+    var1 = 'var1'
+    mn = min(data[var1])
+    mx = max(data[var1]) / 2
+
+    qc = SaQC(flagger, data)
+    qc = qc.flagRange(var1, mn, mx)
+    # min is not considered because it is the smallest possible value.
+    # if masking works, `data > max` will be masked,
+    # so the following will deliver True for in range (data < max),
+    # otherwise False, like an inverse range-test
+    qc = qc.procGeneric("dummy", func=lambda var1: var1 >= mn)
+
+    pdata, pflagger = qc.getResult()
+    out_of_range = pflagger.isFlagged(var1)
+    in_range = ~out_of_range
+
+    assert (pdata.loc[out_of_range, "dummy"] == False).all()
+    assert (pdata.loc[in_range, "dummy"] == True).all()
+
+
+@pytest.mark.parametrize("flagger", TESTFLAGGER)
+def test_masking_UnmaskingOnDataChange(data, flagger):
+    """ test if (un)masking works as expected on data-change.
+
+    If the data change in the func, unmasking should respect these changes and
+    should not reapply the original data, but instead take the new data (and flags) as is.
+    Also, if only the flags change, the data should be taken as is.
+    """
+    FILLER = -9999
+
+    @register(masking='all')
+    def changeData(data, field, flagger, **kwargs):
+        mask = data.isna()
+        data.aloc[mask] = FILLER
+        return data, flagger
+
+    @register(masking='all')
+    def changeFlags(data, field, flagger, **kwargs):
+        mask = data.isna()
+        flagger = flagger.setFlags(field, loc=mask[field], flag=flagger.UNFLAGGED, force=True)
+        return data, flagger
+
+    var = data.columns[0]
+    var_data = data[var]
+    mn, mx = var_data.max() * .25, var_data.max() * .75
+    range_mask = (var_data < mn) | (var_data > mx)
+
+    qc = SaQC(flagger, data)
+    qc = qc.flagRange(var, mn, mx)
+    qcD = qc.changeData(var)
+    qcF = qc.changeFlags(var)
+
+    data, flagger = qcD.getResult()
+    assert (data[var][range_mask] == FILLER).all(axis=None)
+    # only the flags change, so the data should still be NaN, because
+    # unmasking was disabled, but masking did take place
+    data, flagger = qcF.getResult()
+    assert data[var][range_mask].isna().all(axis=None)
+
+
+@pytest.mark.parametrize("flagger", TESTFLAGGER)
+def test_shapeDiffUnmasking(data, flagger):
+    """ test if (un)masking works as expected on index-change.
+
+    If the index of the data (and flags) changes in the func, the unmasking
+    should not reapply the original data, but instead take the new data (and flags) as is.
+    """
+
+    FILLER = -1111
+
+    @register(masking='none')
+    def pseudoHarmo(data, field, flagger, **kwargs):
+        index = data[field].index.to_series()
+        index.iloc[-len(data[field])//2:] += pd.Timedelta("7.5Min")
+
+        data[field] = pd.Series(data=FILLER, index=index)
+
+        flags = flagger.getFlags()
+        flags[field] = pd.Series(data=flags[field].values, index=index)
+
+        flagger = flagger.initFlags(flags=flags)
+        return data, flagger
+
+    var = data.columns[0]
+    var_data = data[var]
+    mn, mx = var_data.max() * .25, var_data.max() * .75
+
+    qc = SaQC(flagger, data)
+    qc = qc.flagRange(var, mn, mx)
+    qc = qc.pseudoHarmo(var)
+
+    data, flagger = qc.getResult()
+    assert (data[var] == FILLER).all(axis=None)
diff --git a/test/core/test_reader.py b/test/core/test_reader.py
index cb9bd717290c831ae3ae6b6dd38b1c350a220dba..fffb6418b5bacf56e26be7b1c6f04bce8a438fa7 100644
--- a/test/core/test_reader.py
+++ b/test/core/test_reader.py
@@ -6,16 +6,14 @@ from pathlib import Path
 import pytest
 import numpy as np
 import pandas as pd
+import dios
 
-from dios.dios import DictOfSeries
 from saqc.core.config import Fields as F
 from test.common import initData, writeIO
 
 from saqc.core.core import SaQC
 from saqc.flagger import SimpleFlagger
-from saqc.funcs.functions import flagRange, flagDummy
-from saqc.core.register import FUNC_MAP, register, SaQCFunc
-import dios
+from saqc.core.register import FUNC_MAP, register
 
 
 @pytest.fixture
@@ -31,28 +29,10 @@ def test_packagedConfig():
     data_path = path / "data.csv"
 
     data = pd.read_csv(data_path, index_col=0, parse_dates=True,)
-    saqc = SaQC(SimpleFlagger(), DictOfSeries(data)).readConfig(config_path)
+    saqc = SaQC(SimpleFlagger(), dios.DictOfSeries(data)).readConfig(config_path)
     data, flagger = saqc.getResult()
 
 
-def test_configDefaults(data):
-    var1, var2, var3, *_ = data.columns
-
-    header = f"{F.VARNAME};{F.TEST};{F.PLOT}"
-    tests = [
-        (f"{var2};flagRange(min=3, max=6);True", SaQCFunc(flagRange, min=3, max=6, plot=True, lineno=2)),
-        (f"{var3};flagDummy()", SaQCFunc(flagDummy, plot=False, lineno=2)),
-    ]
-
-    for config, expected in tests:
-        fobj = writeIO(header + "\n" + config)
-        saqc = SaQC(SimpleFlagger(), data).readConfig(fobj)
-        result = [func for _, func in saqc._to_call][0]
-        assert result.kwargs == expected.kwargs
-        assert result.lineno == expected.lineno
-        assert result.plot == expected.plot
-
-
 def test_variableRegex(data):
 
     header = f"{F.VARNAME};{F.TEST};{F.PLOT}"
@@ -67,7 +47,7 @@ def test_variableRegex(data):
     for regex, expected in tests:
         fobj = writeIO(header + "\n" + f"{regex} ; flagDummy()")
         saqc = SaQC(SimpleFlagger(), data).readConfig(fobj)
-        result = [field for field, _ in saqc._to_call]
+        result = [f["field"] for f in saqc._to_call]
         assert np.all(result == expected)
 
 
@@ -80,9 +60,9 @@ def test_inlineComments(data):
     pre2        ; flagDummy() # test ; False # test
     """
     saqc = SaQC(SimpleFlagger(), data).readConfig(writeIO(config))
-    result = [func for _, func in saqc._to_call][0]
-    assert result.plot == False
-    assert result.func == FUNC_MAP["flagDummy"].func
+    func_dump = saqc._to_call[0]
+    assert func_dump["ctrl_kws"]["plot"] is False
+    assert func_dump["func"] == FUNC_MAP["flagDummy"]["func"]
 
 
 def test_configReaderLineNumbers(data):
@@ -98,7 +78,7 @@ def test_configReaderLineNumbers(data):
     SM1         ; flagDummy()
     """
     saqc = SaQC(SimpleFlagger(), data).readConfig(writeIO(config))
-    result = [func.lineno for _, func in saqc._to_call]
+    result = [f["ctrl_kws"]["lineno"] for f in saqc._to_call]
     expected = [3, 4, 5, 9]
     assert result == expected
 
@@ -139,7 +119,7 @@ def test_configChecks(data):
     for test, expected in tests:
         fobj = writeIO(header + "\n" + test)
         with pytest.raises(expected):
-            SaQC(SimpleFlagger(), data).readConfig(fobj)
+            SaQC(SimpleFlagger(), data).readConfig(fobj).getResult()
 
 
 def test_supportedArguments(data):
@@ -149,7 +129,7 @@ def test_supportedArguments(data):
 
     # TODO: necessary?
 
-    @register
+    @register(masking='field')
     def func(data, field, flagger, kwarg, **kwargs):
         return data, flagger
 
diff --git a/test/flagger/test_flagger.py b/test/flagger/test_flagger.py
index 94753bf7d5a4c63109e5d23621366edd23bf7b16..ee803ad2baf3ebec29e7f98f499fb640a2afeb82 100644
--- a/test/flagger/test_flagger.py
+++ b/test/flagger/test_flagger.py
@@ -5,7 +5,7 @@ import numpy as np
 import pandas as pd
 from pandas.api.types import is_bool_dtype
 
-import dios.dios as dios
+import dios
 
 from test.common import TESTFLAGGER, initData
 
diff --git a/test/funcs/conftest.py b/test/funcs/conftest.py
index ab891b92e1340d2c5469e51242252acc66264faa..32017f6b002e30cd3c3bd50ab74854d026b56d4e 100644
--- a/test/funcs/conftest.py
+++ b/test/funcs/conftest.py
@@ -2,7 +2,7 @@ import pytest
 import numpy as np
 import pandas as pd
 
-from dios.dios import DictOfSeries
+from dios import DictOfSeries
 
 
 @pytest.fixture
diff --git a/test/funcs/test_data_modelling.py b/test/funcs/test_data_modelling.py
index 9b576cb2e8463d336116878264104320e8f06ddf..9cfe75ff74c2059ac73df43dc802c0b06e1cd3cb 100644
--- a/test/funcs/test_data_modelling.py
+++ b/test/funcs/test_data_modelling.py
@@ -7,8 +7,8 @@
 import pytest
 
 import numpy as np
+import dios
 
-from dios import dios
 from test.common import TESTFLAGGER
 
 from saqc.funcs.data_modelling import modelling_polyFit, modelling_rollingMean
diff --git a/test/funcs/test_generic_config_functions.py b/test/funcs/test_generic_config_functions.py
index 4f4f759903f54f11d96a58720500c1a25c037630..0b36b0f531f396e1f170eff08fa74e27c29a631b 100644
--- a/test/funcs/test_generic_config_functions.py
+++ b/test/funcs/test_generic_config_functions.py
@@ -7,7 +7,7 @@ import pytest
 import numpy as np
 import pandas as pd
 
-from dios.dios import DictOfSeries
+from dios import DictOfSeries
 
 from test.common import TESTFLAGGER, TESTNODATA, initData, writeIO
 from saqc.core.visitor import ConfigFunctionParser
@@ -269,7 +269,7 @@ def test_callableArgumentsUnary(data):
 
     window = 5
 
-    @register
+    @register(masking='field')
     def testFuncUnary(data, field, flagger, func, **kwargs):
         data[field] = data[field].rolling(window=window).apply(func)
         return data, flagger.initFlags(data=data)
@@ -301,7 +301,7 @@ def test_callableArgumentsBinary(data):
     flagger = SimpleFlagger()
     var1, var2 = data.columns[:2]
 
-    @register
+    @register(masking='field')
     def testFuncBinary(data, field, flagger, func, **kwargs):
         data[field] = func(data[var1], data[var2])
         return data, flagger.initFlags(data=data)
diff --git a/test/funcs/test_harm_funcs.py b/test/funcs/test_harm_funcs.py
index facd238316fda544740047ca07f086203030647e..d8825f9689c4c7108b53e9dc22772d203449ab04 100644
--- a/test/funcs/test_harm_funcs.py
+++ b/test/funcs/test_harm_funcs.py
@@ -7,8 +7,8 @@ import pytest
 
 import numpy as np
 import pandas as pd
+import dios
 
-from dios import dios
 from test.common import TESTFLAGGER
 
 from saqc.funcs.harm_functions import (
diff --git a/test/funcs/test_proc_functions.py b/test/funcs/test_proc_functions.py
index 98e462c73d4c0f88ab957fd2e7afdf22619926cd..604c1a5032218ab3e6d567adff571162538a648b 100644
--- a/test/funcs/test_proc_functions.py
+++ b/test/funcs/test_proc_functions.py
@@ -14,6 +14,7 @@ from saqc.funcs.proc_functions import (
     proc_resample,
     proc_transform,
     proc_rollingInterpolateMissing,
+    proc_interpolateGrid
 )
 from saqc.lib.ts_operators import linearInterpolation, polynomialInterpolation
 
@@ -83,3 +84,14 @@ def test_resample(course_5, flagger):
     assert ~np.isnan(data1[field].iloc[0])
     assert np.isnan(data1[field].iloc[1])
     assert np.isnan(data1[field].iloc[2])
+
+@pytest.mark.parametrize("flagger", TESTFLAGGER)
+def test_interpolateGrid(course_5, course_3, flagger):
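+    # smoke test: interpolation onto a custom grid ("grid_field") should run without raising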
+    data, _ = course_5()
+    data_grid, characteristics = course_3()
+    data['grid'] = data_grid.to_df()
+    flagger = flagger.initFlags(data)
+    dataInt, *_ = proc_interpolateGrid(data, 'data', flagger, '1h', 'time', grid_field='grid', inter_limit=10)
+
+