diff --git a/saqc/core/register.py b/saqc/core/register.py
index ce88dc4bc665d00cd563a7d14ea1f32a6a22b605..39c1d4b14c0820f989e82672fecd1c9c9e4099c0 100644
--- a/saqc/core/register.py
+++ b/saqc/core/register.py
@@ -49,7 +49,8 @@ def register(masking: MaskingStrT = "all", module: Optional[str] = None):
         # executed if a register-decorated function is called,
         # nevertheless if it is called plain or via `SaQC.func`.
         @wraps(func)
-        def callWrapper(*args, **kwargs):
+        def callWrapper(data, field, flagger, *args, **kwargs):
+            args = data, field, flagger, *args
             args, kwargs, old_state = _preCall(func, args, kwargs, masking, func_name)
             result = func(*args, **kwargs)
             return _postCall(result, old_state)
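# A self-contained sketch, not part of the patch (plain Python, hypothetical names), of the
# argument repacking the new callWrapper performs: data, field and flagger are now bound by
# name, so positional and keyword call styles both reach _preCall as one positional tuple.
def call_wrapper_sketch(data, field, flagger, *args, **kwargs):
    args = data, field, flagger, *args   # same repacking as callWrapper above
    return args, kwargs

a1, _ = call_wrapper_sketch("data", "x", "flagger", 5, thresh=2.0)
a2, _ = call_wrapper_sketch(data="data", field="x", flagger="flagger", thresh=2.0)
assert a1[:3] == a2[:3] == ("data", "x", "flagger")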
diff --git a/saqc/flagger/flags.py b/saqc/flagger/flags.py
index 099e3c924b8535fec4cbb21b3080462cfa6bddf0..d40544a9586714a4b70e7104d888d49ab35f4ac1 100644
--- a/saqc/flagger/flags.py
+++ b/saqc/flagger/flags.py
@@ -36,6 +36,12 @@ class _HistAccess:
     def __setitem__(self, key: str, value: Union[History, pd.DataFrame]):
         if not isinstance(value, History):
             value = History(value)
+
+        if not isinstance(value, History):
+            raise TypeError(f"expected History, got {type(value).__name__}")
+
+        History._validate_hist_with_mask(value.hist, value.mask)
+
         self.obj._data[key] = value
         self.obj._cache.pop(key, None)
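# A usage sketch, not part of the patch (it assumes Flags/History behave as they are used
# elsewhere in this diff): assigning a plain DataFrame to flags.history[field] wraps it in a
# History and validates hist against mask, so malformed input fails loudly instead of being
# stored silently.
import pandas as pd
from dios import DictOfSeries
from saqc.constants import UNFLAGGED, BAD
from saqc.flagger import initFlagsLike, History

data = DictOfSeries({"x": pd.Series([1.0, 2.0], index=pd.date_range("2021-01-01", periods=2))})
flags = initFlagsLike(data)
df = pd.DataFrame({0: [UNFLAGGED, BAD]}, index=data["x"].index, dtype=float)
flags.history["x"] = df            # wrapped into a History, then validated
flags.history["x"] = History(df)   # already a History - validated directly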
 
@@ -339,7 +345,9 @@ def initFlagsLike(
     return Flags(result)
 
 
-def applyFunctionOnHistory(flags: Flags, column, hist_func, hist_kws, mask_func, mask_kws, last_column=None):
+def applyFunctionOnHistory(
+        flags: Flags, column, hist_func, hist_kws, mask_func, mask_kws, last_column=None, func_handle_df=False
+):
     """
     Apply function on history.
 
@@ -355,6 +363,7 @@ def applyFunctionOnHistory(flags: Flags, column, hist_func, hist_kws, mask_func,
     mask_func :
     mask_kws :
     last_column :
+    func_handle_df :
 
     Returns
     -------
@@ -363,18 +372,57 @@ def applyFunctionOnHistory(flags: Flags, column, hist_func, hist_kws, mask_func,
     flags = flags.copy()
     history = flags.history[column]
     new_history = History()
-    for pos in history.columns:
-        new_history.hist[pos] = hist_func(history.hist[pos], **hist_kws)
-        new_history.mask[pos] = mask_func(history.mask[pos], **mask_kws)
 
+    if func_handle_df:
+        new_history.hist = hist_func(history.hist, **hist_kws)
+        new_history.mask = mask_func(history.mask, **mask_kws)
+
+    else:
+        for pos in history.columns:
+            new_history.hist[pos] = hist_func(history.hist[pos], **hist_kws)
+            new_history.mask[pos] = mask_func(history.mask[pos], **mask_kws)
+
+    # handle unstable state
     if last_column is None:
         new_history.mask.iloc[:, -1:] = True
     else:
+        if isinstance(last_column, str) and last_column == 'dummy':
+            last_column = pd.Series(UNTOUCHED, index=new_history.index, dtype=float)
+
         new_history.append(last_column, force=True)
 
+    # ensure a boolean mask
+    new_history.mask = new_history.mask.fillna(False).astype(bool)
+
     flags.history[column] = new_history
     return flags
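# A usage sketch, not part of the patch (synthetic data, hypothetical resampling lambdas -
# the real call sites are in interpolation.py and resampling.py further down): with the
# default func_handle_df=False the functions are applied column by column; last_column='dummy'
# appends an all-UNTOUCHED column instead of force-keeping the previous last one.
import pandas as pd
from dios import DictOfSeries
from saqc.flagger import initFlagsLike
from saqc.flagger.flags import applyFunctionOnHistory

idx = pd.date_range("2021-01-01", periods=6, freq="5min")
flags = initFlagsLike(DictOfSeries({"x": pd.Series(1.0, index=idx)}))

flags = applyFunctionOnHistory(
    flags, "x",
    hist_func=lambda s: s.resample("10min").max(), hist_kws={},
    mask_func=lambda s: s.resample("10min").max(), mask_kws={},
    last_column="dummy",
)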
 
 
+def appendHistory(flags: Flags, column: str, append_hist: History):
+    """
+    Append the columns of another history to the history of a given column.
+    Specialized helper for use in the deharmonisation context.
+
+    Parameters
+    ----------
+    flags : Flags
+        The flags container to operate on (a modified copy is returned).
+    column : str
+        Name of the column whose history is extended.
+    append_hist : History
+        History object whose columns are appended, if not already present.
+
+    Returns
+    -------
+    flags : Flags
+    """
+    flags = flags.copy()
+    new_history = flags.history[column]
+    for app_k in [k for k in append_hist.columns if k not in new_history.columns]:
+        new_history.hist[app_k] = append_hist.hist[app_k]
+        new_history.mask[app_k] = append_hist.mask[app_k]
+    flags.history[column] = new_history
+    return flags
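# A usage sketch, not part of the patch (hypothetical field names): history columns of the
# second field that the first field does not have yet are copied over, existing columns
# stay untouched.
import pandas as pd
from dios import DictOfSeries
from saqc.flagger import initFlagsLike
from saqc.flagger.flags import appendHistory

idx = pd.date_range("2021-01-01", periods=4, freq="10min")
data = DictOfSeries({"x": pd.Series(1.0, index=idx), "x_orig": pd.Series(1.0, index=idx)})
flags = initFlagsLike(data)
flags = appendHistory(flags, "x", flags.history["x_orig"])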
+
 # for now we keep this name
 Flagger = Flags
diff --git a/saqc/flagger/history.py b/saqc/flagger/history.py
index 72a573bd1e6fca6f0f4c330a68b659b0afdad39b..2acc8f22e7785ad0fc9d00a9b088cfecad9cae51 100644
--- a/saqc/flagger/history.py
+++ b/saqc/flagger/history.py
@@ -333,13 +333,14 @@ class History:
     # validation
     #
 
-    def _validate_hist_with_mask(self, obj: pd.DataFrame, mask: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
+    @staticmethod
+    def _validate_hist_with_mask(obj: pd.DataFrame, mask: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
         """
         check type, columns, index, dtype and if the mask fits the obj.
         """
 
         # check hist
-        self._validate_hist(obj)
+        History._validate_hist(obj)
 
         # check mask
         if not isinstance(mask, pd.DataFrame):
@@ -360,7 +361,8 @@ class History:
 
         return obj, mask
 
-    def _validate_hist(self, obj: pd.DataFrame) -> pd.DataFrame:
+    @staticmethod
+    def _validate_hist(obj: pd.DataFrame) -> pd.DataFrame:
         """
         check type, columns, dtype of obj.
         """
@@ -379,7 +381,8 @@ class History:
 
         return obj
 
-    def _validate_value(self, obj: pd.Series) -> pd.Series:
+    @staticmethod
+    def _validate_value(obj: pd.Series) -> pd.Series:
         """
         index is not checked !
         """
diff --git a/saqc/funcs/breaks.py b/saqc/funcs/breaks.py
index 107c3c3e7f55e4b547d68ace40a4181cef2f72c5..6c394e3e75d12ad3a3a20c114b10164e2e3a03b2 100644
--- a/saqc/funcs/breaks.py
+++ b/saqc/funcs/breaks.py
@@ -53,7 +53,6 @@ def flagMissing(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values may have changed relatively to the flagger input.
     """
-
     datacol = data[field]
     if np.isnan(nodata):
         mask = datacol.isna()
@@ -114,7 +113,6 @@ def flagIsolated(
     --------
     :py:func:`flagMissing`
     """
-
     gap_window = pd.tseries.frequencies.to_offset(gap_window)
     group_window = pd.tseries.frequencies.to_offset(group_window)
 
@@ -166,8 +164,7 @@ def flagJumps(
         Minimum number of periods that have to be present in a window of size `winsz`, so that
         the mean value obtained from that window is regarded valid.
     """
-
-    data, flagger = assignChangePointCluster(
+    return assignChangePointCluster(
         data, field, flagger,
         stat_func=lambda x, y: np.abs(np.mean(x) - np.mean(y)),
         thresh_func=lambda x, y: thresh,
@@ -179,4 +176,3 @@ def flagJumps(
         **kwargs
     )
 
-    return data, flagger
diff --git a/saqc/funcs/changepoints.py b/saqc/funcs/changepoints.py
index 200711da4b87d2382cc27b3298c179f1be5c29e3..7025ad71228801a2dc0a4b4af5cf7db4e4eaaf79 100644
--- a/saqc/funcs/changepoints.py
+++ b/saqc/funcs/changepoints.py
@@ -27,12 +27,12 @@ def flagChangePoints(
         thresh_func: Callable[[np.ndarray, np.ndarray], float],
         bwd_window: FreqString,
         min_periods_bwd: IntegerWindow,
-        fwd_window: Optional[FreqString]=None,
-        min_periods_fwd: Optional[IntegerWindow]=None,
-        closed: Literal["right", "left", "both", "neither"]="both",
-        try_to_jit: bool=True,  # TODO rm, not a user decision
-        reduce_window: FreqString=None,
-        reduce_func: Callable[[np.ndarray, np.ndarray], int]=lambda x, _: x.argmax(),
+        fwd_window: Optional[FreqString] = None,
+        min_periods_fwd: Optional[IntegerWindow] = None,
+        closed: Literal["right", "left", "both", "neither"] = "both",
+        try_to_jit: bool = True,  # TODO rm, not a user decision
+        reduce_window: FreqString = None,
+        reduce_func: Callable[[np.ndarray, np.ndarray], int] = lambda x, _: x.argmax(),
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -85,8 +85,7 @@ def flagChangePoints(
     -------
 
     """
-
-    data, flagger = assignChangePointCluster(
+    return assignChangePointCluster(
         data, field, flagger, stat_func=stat_func, thresh_func=thresh_func,
         bwd_window=bwd_window, min_periods_bwd=min_periods_bwd,
         fwd_window=fwd_window, min_periods_fwd=min_periods_fwd, closed=closed,
@@ -94,7 +93,6 @@ def flagChangePoints(
         reduce_func=reduce_func, flag_changepoints=True, model_by_resids=False,
         assign_cluster=False, **kwargs
     )
-    return data, flagger
 
 
 @register(masking='field', module="changepoints")
@@ -104,18 +102,17 @@ def assignChangePointCluster(
         thresh_func: Callable[[np.array, np.array], float],
         bwd_window: str,
         min_periods_bwd: int,
-        fwd_window: str=None,
-        min_periods_fwd: Optional[int]=None,
-        closed: Literal["right", "left", "both", "neither"]="both",
-        try_to_jit: bool=True,  # TODO: rm, not a user decision
-        reduce_window: str=None,
-        reduce_func: Callable[[np.ndarray, np.ndarray], float]=lambda x, _: x.argmax(),
-        model_by_resids: bool=False,
-        flag_changepoints: bool=False,
-        assign_cluster: bool=True,
+        fwd_window: str = None,
+        min_periods_fwd: Optional[int] = None,
+        closed: Literal["right", "left", "both", "neither"] = "both",
+        try_to_jit: bool = True,  # TODO: rm, not a user decision
+        reduce_window: str = None,
+        reduce_func: Callable[[np.ndarray, np.ndarray], float] = lambda x, _: x.argmax(),
+        model_by_resids: bool = False,
+        flag_changepoints: bool = False,
+        assign_cluster: bool = True,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
     """
     Assigns label to the data, aiming to reflect continous regimes of the processes the data is assumed to be
     generated by.
@@ -209,10 +206,13 @@ def assignChangePointCluster(
             try_to_jit = False
             logging.warning('Could not jit passed statistic - omitting jitting!')
 
+    args = data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, check_len
+
     if try_to_jit:
-        stat_arr, thresh_arr = _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, check_len)
+        stat_arr, thresh_arr = _slidingWindowSearchNumba(*args)
     else:
-        stat_arr, thresh_arr = _slidingWindowSearch(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, check_len)
+        stat_arr, thresh_arr = _slidingWindowSearch(*args)
+
     result_arr = stat_arr > thresh_arr
 
     if model_by_resids:
@@ -251,7 +251,7 @@ def assignChangePointCluster(
 def _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, num_val):
     stat_arr = np.zeros(num_val)
     thresh_arr = np.zeros(num_val)
-    for win_i in numba.prange(0, num_val-1):
+    for win_i in numba.prange(0, num_val - 1):
         x = data_arr[bwd_start[win_i]:split[win_i]]
         y = data_arr[split[win_i]:fwd_end[win_i]]
         stat_arr[win_i] = stat_func(x, y)
@@ -262,7 +262,7 @@ def _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, th
 def _slidingWindowSearch(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, num_val):
     stat_arr = np.zeros(num_val)
     thresh_arr = np.zeros(num_val)
-    for win_i in range(0, num_val-1):
+    for win_i in range(0, num_val - 1):
         x = data_arr[bwd_start[win_i]:split[win_i]]
         y = data_arr[split[win_i]:fwd_end[win_i]]
         stat_arr[win_i] = stat_func(x, y)
diff --git a/saqc/funcs/constants.py b/saqc/funcs/constants.py
index 34392fd96f0b5ca58b5e4e00e9124544ae8b0f4b..02327498f6e7e474b76f7df4d6653d4c1fc2b96c 100644
--- a/saqc/funcs/constants.py
+++ b/saqc/funcs/constants.py
@@ -41,7 +41,7 @@ def flagConstants(
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
     field : str
-        The fieldname of the column, holding the data-to-be-flagged.
+        Name of the column, holding the data-to-be-flagged.
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.
     thresh : float
@@ -57,10 +57,9 @@ def flagConstants(
         The flagger object, holding flags and additional informations related to `data`.
         Flags values may have changed, relatively to the flagger input.
     """
-
-    d = data[field]
     if not isinstance(window, str):
         raise TypeError('window must be offset string.')
+    d = data[field]
 
     # min_periods=2 ensures that at least two non-nan values are present
     # in each window and also min() == max() == d[i] is not possible.
@@ -89,7 +88,6 @@ def flagByVariance(
         max_consec_missing: int=None,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
     """
     Function flags plateaus/series of constant values. Any interval of values y(t),..y(t+n) is flagged, if:
 
@@ -125,21 +123,27 @@ def flagByVariance(
         The flagger object, holding flags and additional informations related to `data`.
         Flags values may have changed, relatively to the flagger input.
     """
-
     dataseries = data[field]
-    data_rate = getFreqDelta(dataseries.index)
 
-    if not data_rate:
+    delta = getFreqDelta(dataseries.index)
+    if not delta:
         raise IndexError('Timeseries irregularly sampled!')
+
     if max_missing is None:
         max_missing = np.inf
+
     if max_consec_missing is None:
         max_consec_missing = np.inf
-    min_periods = int(np.ceil(pd.Timedelta(window) / pd.Timedelta(data_rate)))
 
-    plateaus = dataseries.rolling(window=window, min_periods=min_periods).apply(
-        lambda x: True if varQC(x, max_missing, max_consec_missing) <= thresh else np.nan, raw=False,
-    )
+    min_periods = int(np.ceil(pd.Timedelta(window) / pd.Timedelta(delta)))
+
+    def var_below_thresh(s: pd.Series):
+        if varQC(s, max_missing, max_consec_missing) <= thresh:
+            return True
+        return np.nan
+
+    rolling = dataseries.rolling(window=window, min_periods=min_periods)
+    plateaus = rolling.apply(var_below_thresh, raw=False)
 
     # are there any candidates for beeing flagged plateau-ish
     if plateaus.sum() == 0:
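# A plain-pandas sketch, not part of the patch (synthetic data), of the True/NaN
# rolling-apply pattern used by var_below_thresh above: qualifying windows map to True
# (counted by .sum()), all others to NaN and drop out of later masking.
import numpy as np
import pandas as pd

s = pd.Series([1.0, 1.0, 1.0, 5.0, 9.0], index=pd.date_range("2021-01-01", periods=5, freq="1H"))

def low_variance(win: pd.Series):
    return True if win.var() <= 0.1 else np.nan

plateaus = s.rolling("3H", min_periods=2).apply(low_variance, raw=False)
print(int(plateaus.sum()))   # number of low-variance windows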
diff --git a/saqc/funcs/curvefit.py b/saqc/funcs/curvefit.py
index 9623d49d10691eaf0116cd27f9587976d3718d89..fc04cbeac17e63b7ed7a25c7d079fb2a9564ca62 100644
--- a/saqc/funcs/curvefit.py
+++ b/saqc/funcs/curvefit.py
@@ -8,24 +8,24 @@ from typing_extensions import Literal
 import numpy as np
 import pandas as pd
 
-
 from dios import DictOfSeries
 
 from saqc.core.register import register
 
 from saqc.lib.tools import getFreqDelta
 from saqc.flagger import Flagger
-from saqc.lib.ts_operators import polyRollerIrregular, polyRollerNumba, polyRoller, polyRollerNoMissingNumba, polyRollerNoMissing
+from saqc.lib.ts_operators import polyRollerIrregular, polyRollerNumba, polyRoller, polyRollerNoMissingNumba, \
+    polyRollerNoMissing
 
 
 @register(masking='field', module="curvefit")
 def fitPolynomial(data: DictOfSeries, field: str, flagger: Flagger,
                   winsz: Union[int, str],
                   polydeg: int,
-                  numba: Literal[True, False, "auto"]="auto",
-                  eval_flags: bool=True,
-                  min_periods: int=0,
-                  return_residues: bool=False,
+                  numba: Literal[True, False, "auto"] = "auto",
+                  eval_flags: bool = True,
+                  min_periods: int = 0,
+                  return_residues: bool = False,
                   **kwargs) -> Tuple[DictOfSeries, Flagger]:
     """
     Function fits a polynomial model to the data and returns the fitted data curve.
@@ -100,7 +100,6 @@ def fitPolynomial(data: DictOfSeries, field: str, flagger: Flagger,
     flagger : saqc.flagger.Flagger
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values may have changed relatively to the flagger input.
-
     """
     # TODO: some (rater large) parts are functional similar to saqc.funcs.rolling.roll
     if data[field].empty:
@@ -127,8 +126,8 @@ def fitPolynomial(data: DictOfSeries, field: str, flagger: Flagger,
         temp = residues.copy()
         for k in centers_iloc.iteritems():
             residues.iloc[k[1]] = temp[k[0]]
-        residues[residues.index[0] : residues.index[centers_iloc[0]]] = np.nan
-        residues[residues.index[centers_iloc[-1]] : residues.index[-1]] = np.nan
+        residues[residues.index[0]: residues.index[centers_iloc[0]]] = np.nan
+        residues[residues.index[centers_iloc[-1]]: residues.index[-1]] = np.nan
     else:
         if isinstance(winsz, str):
             winsz = pd.Timedelta(winsz) // regular
@@ -200,4 +199,3 @@ def fitPolynomial(data: DictOfSeries, field: str, flagger: Flagger,
         flagger[field] = worst
 
     return data, flagger
-
diff --git a/saqc/funcs/drift.py b/saqc/funcs/drift.py
index dc2265e8a984a2de0e06c236b11b74151a0a1164..14417c3f0c6ca97fc65282d41e045df55fc77716 100644
--- a/saqc/funcs/drift.py
+++ b/saqc/funcs/drift.py
@@ -21,7 +21,6 @@ from saqc.lib.tools import detectDeviants
 from saqc.lib.types import FreqString, ColumnName, CurveFitter, TimestampColumnName
 from saqc.lib.ts_operators import expModelFunc
 
-
 LinkageString = Literal["single", "complete", "average", "weighted", "centroid", "median", "ward"]
 
 
@@ -33,9 +32,9 @@ def flagDriftFromNorm(
         fields: Sequence[ColumnName],
         segment_freq: FreqString,
         norm_spread: float,
-        norm_frac: float=0.5,
-        metric: Callable[[np.ndarray, np.ndarray], float]=lambda x, y: pdist(np.array([x, y]), metric='cityblock') / len(x),
-        linkage_method: LinkageString="single",
+        norm_frac: float = 0.5,
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(np.array([x, y]), metric='cityblock') / len(x),
+        linkage_method: LinkageString = "single",
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -149,7 +148,7 @@ def flagDriftFromReference(
         fields: Sequence[ColumnName],
         segment_freq: FreqString,
         thresh: float,
-        metric: Callable[[np.ndarray, np.ndarray], float]=lambda x, y: pdist(np.array([x, y]), metric='cityblock') / len(x),
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(np.array([x, y]), metric='cityblock') / len(x),
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -193,7 +192,6 @@ def flagDriftFromReference(
     That is, why, the "averaged manhatten metric" is set as the metric default, since it corresponds to the
     averaged value distance, two timeseries have (as opposed by euclidean, for example).
     """
-
     data_to_flag = data[fields].to_df()
     data_to_flag.dropna(inplace=True)
 
@@ -227,13 +225,11 @@ def flagDriftFromScaledNorm(
         fields_scale2: Sequence[ColumnName],
         segment_freq: FreqString,
         norm_spread: float,
-        norm_frac: float=0.5,
-        metric: Callable[[np.ndarray, np.ndarray], float]=lambda x, y: pdist(np.array([x, y]), metric='cityblock') / len(x),
-        linkage_method: LinkageString="single",
+        norm_frac: float = 0.5,
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(np.array([x, y]), metric='cityblock') / len(x),
+        linkage_method: LinkageString = "single",
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
-
     """
     The function linearly rescales one set of variables to another set of variables with a different scale and then
     flags value courses that significantly deviate from a group of normal value courses.
@@ -298,7 +294,6 @@ def flagDriftFromScaledNorm(
     Introduction to Hierarchical clustering:
         [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
     """
-
     fields = list(fields_scale1) + list(fields_scale2)
     data_to_flag = data[fields].to_df()
     data_to_flag.dropna(inplace=True)
@@ -343,8 +338,8 @@ def correctExponentialDrift(
         field: ColumnName,
         flagger: Flagger,
         maint_data_field: ColumnName,
-        cal_mean: int=5,
-        flag_maint_period: bool=False,
+        cal_mean: int = 5,
+        flag_maint_period: bool = False,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -420,7 +415,7 @@ def correctExponentialDrift(
     for k in range(0, maint_data.shape[0] - 1):
         # assign group numbers for the timespans in between one maintenance ending and the beginning of the next
         # maintenance time itself remains np.nan assigned
-        drift_frame.loc[maint_data.values[k] : pd.Timestamp(maint_data.index[k + 1]), "drift_group"] = k
+        drift_frame.loc[maint_data.values[k]: pd.Timestamp(maint_data.index[k + 1]), "drift_group"] = k
 
     # define target values for correction
     drift_grouper = drift_frame.groupby("drift_group")
@@ -453,8 +448,9 @@ def correctRegimeAnomaly(
         flagger: Flagger,
         cluster_field: ColumnName,
         model: CurveFitter,
-        regime_transmission: Optional[FreqString]=None,
-        x_date: bool=False
+        regime_transmission: Optional[FreqString] = None,
+        x_date: bool = False,
+        **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
     Function fits the passed model to the different regimes in data[field] and tries to correct
@@ -500,7 +496,6 @@ def correctRegimeAnomaly(
     flagger : saqc.flagger.Flagger
         The flagger object, holding flags and additional Informations related to `data`.
     """
-
     cluster_ser = data[cluster_field]
     unique_successive = pd.unique(cluster_ser.values)
     data_ser = data[field]
@@ -514,10 +509,10 @@ def correctRegimeAnomaly(
     for label, regime in regimes:
         if x_date is False:
             # get seconds data:
-            xdata = (regime.index - regime.index[0]).to_numpy(dtype=float)*10**(-9)
+            xdata = (regime.index - regime.index[0]).to_numpy(dtype=float) * 10 ** (-9)
         else:
             # get seconds from epoch data
-            xdata = regime.index.to_numpy(dtype=float)*10**(-9)
+            xdata = regime.index.to_numpy(dtype=float) * 10 ** (-9)
         ydata = regime.values
         valid_mask = ~np.isnan(ydata)
         if regime_transmission is not None:
@@ -532,7 +527,8 @@ def correctRegimeAnomaly(
         x_mask[label] = valid_mask
 
     first_normal = unique_successive > 0
-    first_valid = np.array([~pd.isna(para_dict[unique_successive[i]]).any() for i in range(0, unique_successive.shape[0])])
+    first_valid = np.array(
+        [~pd.isna(para_dict[unique_successive[i]]).any() for i in range(0, unique_successive.shape[0])])
     first_valid = np.where(first_normal & first_valid)[0][0]
     last_valid = 1
 
@@ -542,7 +538,7 @@ def correctRegimeAnomaly(
             xdata = x_dict[unique_successive[k]]
             ypara = para_dict[unique_successive[k]]
             if k > 0:
-                target_para = para_dict[unique_successive[k-last_valid]]
+                target_para = para_dict[unique_successive[k - last_valid]]
             else:
                 # first regime has no "last valid" to its left, so we use first valid to the right:
                 target_para = para_dict[unique_successive[k + first_valid]]
@@ -568,11 +564,10 @@ def correctOffset(
         normal_spread: float,
         search_winsz: FreqString,
         min_periods: int,
-        regime_transmission: Optional[FreqString]=None,
+        regime_transmission: Optional[FreqString] = None,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
-
     Parameters
     ----------
     data : dios.DictOfSeries
@@ -607,7 +602,6 @@ def correctOffset(
         The flagger object, holding flags and additional Informations related to `data`.
 
     """
-
     data, flagger = copy(data, field, flagger, field + '_CPcluster')
     data, flagger = assignChangePointCluster(
         data, field + '_CPcluster', flagger,
@@ -662,9 +656,9 @@ def flagRegimeAnomaly(
         flagger: Flagger,
         cluster_field: ColumnName,
         norm_spread: float,
-        linkage_method: LinkageString="single",
-        metric: Callable[[np.ndarray, np.ndarray], float]=lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
-        norm_frac: float=0.5,
+        linkage_method: LinkageString = "single",
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
+        norm_frac: float = 0.5,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -710,9 +704,7 @@ def flagRegimeAnomaly(
     flagger : saqc.flagger.Flagger
         The flagger object, holding flags and additional informations related to `data`.
         Flags values may have changed, relatively to the flagger input.
-
     """
-
     return assignRegimeAnomaly(
         data, field, flagger,
         cluster_field,
@@ -733,11 +725,11 @@ def assignRegimeAnomaly(
         flagger: Flagger,
         cluster_field: ColumnName,
         norm_spread: float,
-        linkage_method: LinkageString="single",
-        metric: Callable[[np.array, np.array], float]=lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
-        norm_frac: float=0.5,
-        set_cluster: bool=True,
-        set_flags: bool=False,
+        linkage_method: LinkageString = "single",
+        metric: Callable[[np.array, np.array], float] = lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
+        norm_frac: float = 0.5,
+        set_cluster: bool = True,
+        set_flags: bool = False,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -792,9 +784,7 @@ def assignRegimeAnomaly(
     flagger : saqc.flagger.Flagger
         The flagger object, holding flags and additional informations related to `data`.
         Flags values may have changed, relatively to the flagger input.
-
     """
-
     series = data[cluster_field]
     cluster = np.unique(series)
     cluster_dios = DictOfSeries({i: data[field][series == i] for i in cluster})
diff --git a/saqc/funcs/interpolation.py b/saqc/funcs/interpolation.py
index 5f59e21898986c9aeef24a4e0e81f511581183a7..c5d4f0768441b57bb52cd3873f2900abdeced9da 100644
--- a/saqc/funcs/interpolation.py
+++ b/saqc/funcs/interpolation.py
@@ -10,49 +10,57 @@ import pandas as pd
 from dios import DictOfSeries
 
 from saqc.constants import *
-from saqc.core.register import register
+from saqc.core.register import register, isflagged
 from saqc.flagger import Flagger
-
-from saqc.lib.tools import toSequence, evalFreqStr, getDropMask
+from saqc.flagger.flags import applyFunctionOnHistory
 from saqc.lib.ts_operators import interpolateNANs
 
+_SUPPORTED_METHODS = Literal[
+    "linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+    "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"
+]
+
 
 @register(masking='field', module="interpolation")
 def interpolateByRolling(
         data: DictOfSeries, field: str, flagger: Flagger,
         winsz: Union[str, int],
-        func: Callable[[pd.Series], float]=np.median,
-        center: bool=True,
-        min_periods: int=0,
+        func: Callable[[pd.Series], float] = np.median,
+        center: bool = True,
+        min_periods: int = 0,
         flag: float = UNFLAGGED,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
-    Interpolates missing values (nan values present in the data) by assigning them the aggregation result of
-    a window surrounding them.
-
-    Note, that in the current implementation, center=True can only be used with integer window sizes - furthermore
-    note, that integer window sizes can yield screwed aggregation results for not-harmonized or irregular data.
+    Interpolates nan-values in the data by assigning them the aggregation result of the window surrounding them.
 
     Parameters
     ----------
     data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
+        The data container.
+
     field : str
-        The fieldname of the column, holding the data-to-be-interpolated.
+        Name of the column, holding the data-to-be-interpolated.
+
     flagger : saqc.flagger.Flagger
-        A flagger object, holding flags and additional Informations related to `data`.
+        A flagger object, holding flags and additional Information related to `data`.
+
     winsz : int, str
-        The size of the window, the aggregation is computed from. Either counted in periods number (Integer passed),
-        or defined by a total temporal extension (offset String passed).
+        The size of the window the aggregation is computed from. An integer defines the number of periods to be used,
+        a string is interpreted as an offset (see `pandas.rolling` for more information).
+        Integer windows may result in skewed aggregations if called on non-harmonized or irregular data.
+
     func : Callable
         The function used for aggregation.
+
     center : bool, default True
-        Wheather or not the window, the aggregation is computed of, is centered around the value to be interpolated.
+        Center the window around the value. Can only be used with integer windows, otherwise it is silently ignored.
+
     min_periods : int
         Minimum number of valid (not np.nan) values that have to be available in a window for its aggregation to be
         computed.
-    flag : float, default UNFLAGGED
+
+    flag : float or None, default UNFLAGGED
         Flag that is to be inserted for the interpolated values. If ``None`` no flags are set.
 
     Returns
@@ -77,7 +85,7 @@ def interpolateByRolling(
         rolled = roller.apply(func)
 
     na_mask = datcol.isna()
-    interpolated = na_mask & ~rolled.isna()
+    interpolated = na_mask & rolled.notna()
     datcol[na_mask] = rolled[na_mask]
     data[field] = datcol
 
@@ -92,47 +100,49 @@ def interpolateInvalid(
         data: DictOfSeries,
         field: str,
         flagger: Flagger,
-        method: Literal["linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric", "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"],
-        inter_order: int=2,
-        inter_limit: int=2,
-        downgrade_interpolation: bool=False,
-        not_interpol_flags=None,
+        method: _SUPPORTED_METHODS,
+        inter_order: int = 2,
+        inter_limit: int = 2,
+        downgrade_interpolation: bool = False,
         flag: float = UNFLAGGED,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
     """
     Function to interpolate nan values in the data.
 
     There are available all the interpolation methods from the pandas.interpolate method and they are applicable by
     the very same key words, that you would pass to the ``pd.Series.interpolate``'s method parameter.
 
-    Note, that the `inter_limit` keyword really restricts the interpolation to chunks, not containing more than
-    `inter_limit` successive nan entries.
-
     Parameters
     ----------
     data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
+        The data container.
+
     field : str
-        The fieldname of the column, holding the data-to-be-interpolated.
+        Name of the column, holding the data-to-be-interpolated.
+
     flagger : saqc.flagger.Flagger
-        A flagger object, holding flags and additional Informations related to `data`.
+        A flagger object, holding flags and additional Information related to `data`.
+
     method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
-        The interpolation method you want to apply.
+        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}
+        The interpolation method to use.
+
     inter_order : int, default 2
         If there your selected interpolation method can be performed at different 'orders' - here you pass the desired
         order.
+
     inter_limit : int, default 2
-        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated.
+        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated. This restricts the
+        interpolation to chunks containing no more than `inter_limit` successive nan entries.
+
     flag : float or None, default UNFLAGGED
-        Flag that is to be inserted for the interpolated values. If ``None`` no flags are set.
+        Flag that is set for interpolated values. If ``None``, no flags are set at all.
+
     downgrade_interpolation : bool, default False
-        If interpolation can not be performed at `inter_order`, because not enough values are present or the order
-        is not implemented for the passed method, automatically try to interpolate at ``inter_order-1``.
-    not_interpol_flags : None
-        deprecated
+        If `True` and the interpolation cannot be performed at the given order, retry with a lower order.
+        This can happen because the chosen ``method`` does not support the passed ``inter_order``, or
+        simply because not enough values are present in an interval.
 
     Returns
     -------
@@ -143,22 +153,15 @@ def interpolateInvalid(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values may have changed relatively to the flagger input.
     """
-
-    data = data.copy()
     inter_data = interpolateNANs(
         data[field],
         method,
         order=inter_order,
         inter_limit=inter_limit,
-        downgrade_interpolation=downgrade_interpolation,
-        return_chunk_bounds=False,
+        downgrade_interpolation=downgrade_interpolation
     )
     interpolated = data[field].isna() & inter_data.notna()
 
-    # TODO: remove with version 2.0
-    if not_interpol_flags is not None:
-        raise ValueError("'not_interpol_flags' is deprecated")
-
     if flag is not None:
         flagger[interpolated, field] = flag
 
@@ -166,77 +169,67 @@ def interpolateInvalid(
     return data, flagger
 
 
-@register(masking='field', module="interpolation")
+def _resampleOverlapping(data: pd.Series, freq: str, fill_value):
+    """TODO: docstring needed"""
+    dtype = data.dtype
+    end = data.index[-1].ceil(freq)
+    data = data.resample(freq).max()
+    data = data.combine(data.shift(1, fill_value=fill_value), max)
+    if end not in data:
+        data.loc[end] = fill_value
+    return data.fillna(fill_value).astype(dtype)
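# A plain-pandas sketch, not part of the patch (synthetic data), of the "overlapping
# resample" trick above: after resampling to freq, every bin is merged with its predecessor,
# so each grid point also sees the preceding interval.
import pandas as pd

s = pd.Series(
    [1.0, 3.0, 2.0],
    index=pd.to_datetime(["2021-01-01 00:04", "2021-01-01 00:11", "2021-01-01 00:17"]),
)
res = s.resample("10min").max()
overlapping = res.combine(res.shift(1, fill_value=0.0), max)
# 00:00 -> 1.0, 00:10 -> 3.0 (max of its own bin and the preceding one)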
+
+
+@register(masking='none', module="interpolation")
 def interpolateIndex(
         data: DictOfSeries,
         field: str,
         flagger: Flagger,
         freq: str,
-        method: Literal["linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric", "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"],
-        inter_order: int=2,
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
-        downgrade_interpolation: bool=False,
-        empty_intervals_flag: Any=None,
-        grid_field: str=None,
-        inter_limit: int=2,
-        freq_check: Optional[Literal["check", "auto"]]=None,  # TODO: rm not a user decision
+        method: _SUPPORTED_METHODS,
+        inter_order: int = 2,
+        inter_limit: int = 2,
+        downgrade_interpolation: bool = False,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
     """
     Function to interpolate the data at regular (equidistant) timestamps (or Grid points).
 
     Note, that the interpolation will only be calculated, for grid timestamps that have a preceding AND a succeeding
     valid data value within "freq" range.
 
-    Note, that the function differs from proc_interpolateMissing, by returning a whole new data set, only containing
-    samples at the interpolated, equidistant timestamps (of frequency "freq").
-
-    Note, it is possible to interpolate unregular "grids" (with no frequencies). In fact, any date index
-    can be target of the interpolation. Just pass the field name of the variable, holding the index
-    you want to interpolate, to "grid_field". 'freq' is then use to determine the maximum gap size for
-    a grid point to be interpolated.
-
     Parameters
     ----------
     data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
+        The data container.
+
     field : str
-        The fieldname of the column, holding the data-to-be-interpolated.
+        Name of the column, holding the data-to-be-interpolated.
+
     flagger : saqc.flagger.Flagger
-        A flagger object, holding flags and additional Informations related to `data`.
+        A flagger object, holding flags and additional Information related to `data`.
+
     freq : str
         An Offset String, interpreted as the frequency of
         the grid you want to interpolate your data at.
+
     method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
         "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
         The interpolation method you want to apply.
-    inter_order : integer, default 2
+
+    inter_order : int, default 2
         If there your selected interpolation method can be performed at different 'orders' - here you pass the desired
         order.
-    to_drop : {None, str, List[str]}, default None
-        Flags that refer to values you want to drop before interpolation - effectively excluding grid points from
-        interpolation, that are only surrounded by values having a flag in them, that is listed in drop flags. Default
-        results in the flaggers *BAD* flag to be the drop_flag.
+
+    inter_limit : int, default 2
+        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated. This restricts the
+        interpolation to chunks containing no more than `inter_limit` successive nan entries.
+
     downgrade_interpolation : bool, default False
-        If interpolation can not be performed at `inter_order` - (not enough values or not implemented at this order) -
-        automatically try to interpolate at order `inter_order` :math:`- 1`.
-    empty_intervals_flag : str, default None
-        A Flag, that you want to assign to those values in the resulting equidistant sample grid, that were not
-        surrounded by valid data in the original dataset, and thus were not interpolated. Default automatically assigns
-        ``BAD`` flag to those values.
-    grid_field : String, default None
-        Use the timestamp of another variable as (not necessarily regular) "grid" to be interpolated.
-    inter_limit : Integer, default 2
-        Maximum number of consecutive Grid values allowed for interpolation. If set
-        to *n*, chunks of *n* and more consecutive grid values, where there is no value in between, wont be
-        interpolated.
-    freq_check : {None, 'check', 'auto'}, default None
-
-        * ``None``: do not validate frequency-string passed to `freq`
-        * ``'check'``: estimate frequency and log a warning if estimate miss matchs frequency string passed to 'freq', or
-          if no uniform sampling rate could be estimated
-        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
+        If `True` and the interpolation cannot be performed at the given order, retry with a lower order.
+        This can happen because the chosen ``method`` does not support the passed ``inter_order``, or
+        simply because not enough values are present in an interval.
+
 
     Returns
     -------
@@ -247,123 +240,49 @@ def interpolateIndex(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values and shape may have changed relatively to the flagger input.
     """
-    raise NotImplementedError("currently not available - rewrite needed")
-
-    datcol = data[field]
-    datcol = datcol.copy()
-    flagscol = flagger.getFlags(field)
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-
-    if empty_intervals_flag is None:
-        empty_intervals_flag = BAD
-
-    drop_mask = getDropMask(field, to_drop, flagger, BAD)
-    drop_mask |= flagscol.isna()
-    drop_mask |= datcol.isna()
-    datcol[drop_mask] = np.nan
-    datcol.dropna(inplace=True)
-
-    if datcol.empty:
-        data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
+    if data[field].empty:
         return data, flagger
 
-    # account for annoying case of subsequent frequency aligned values,
-    # which differ exactly by the margin of 2*freq
-    spec_case_mask = datcol.index.to_series()
-    spec_case_mask = spec_case_mask - spec_case_mask.shift(1)
-    spec_case_mask = spec_case_mask == 2 * pd.Timedelta(freq)
-    spec_case_mask = spec_case_mask[spec_case_mask]
-    spec_case_mask = spec_case_mask.resample(freq).asfreq().dropna()
+    datcol = data[field].copy()
+
+    start, end = datcol.index[0].floor(freq), datcol.index[-1].ceil(freq)
+    grid_index = pd.date_range(start=start, end=end, freq=freq, name=datcol.index.name)
+
+    flagged = isflagged(flagger[field], kwargs['to_mask'])
+
+    # drop all points that hold no relevant grid information
+    datcol = datcol[~flagged].dropna()
 
-    if not spec_case_mask.empty:
-        spec_case_mask = spec_case_mask.tshift(-1, freq)
+    # account for annoying case of subsequent frequency aligned values,
+    # that differ exactly by the margin of 2*freq
+    gaps = datcol.index[1:] - datcol.index[:-1] == 2 * pd.Timedelta(freq)
+    gaps = datcol.index[1:][gaps]
+    gaps = gaps.intersection(grid_index).shift(-1, freq)
 
     # prepare grid interpolation:
-    if grid_field is None:
-        start, end = datcol.index[0].floor(freq), datcol.index[-1].ceil(freq)
-        grid_index = pd.date_range(start=start, end=end, freq=freq, name=datcol.index.name)
-    else:
-        grid_index = data[grid_field].index
-
-    aligned_start = datcol.index[0] == grid_index[0]
-    aligned_end = datcol.index[-1] == grid_index[-1]
-    datcol = datcol.reindex(datcol.index.join(grid_index, how="outer",))
-
-    # do the interpolation
-    inter_data, chunk_bounds = interpolateNANs(
+    datcol = datcol.reindex(datcol.index.union(grid_index))
+
+    # do the grid interpolation
+    inter_data = interpolateNANs(
         data=datcol,
         method=method,
         order=inter_order,
         inter_limit=inter_limit,
         downgrade_interpolation=downgrade_interpolation,
-        return_chunk_bounds=True
     )
 
     # override falsely interpolated values:
-    if grid_field is None:
-        inter_data[spec_case_mask.index] = np.nan
+    inter_data[gaps] = np.nan
 
     # store interpolated grid
-    inter_data = inter_data[grid_index]
-    data[field] = inter_data
-
-    # flags reshaping (dropping data drops):
-    flagscol.drop(flagscol[drop_mask].index, inplace=True)
-
-    if grid_field is not None:
-        # only basic flag propagation supported for custom grids (take worst from preceeding/succeeding)
-        preceeding = flagscol.reindex(grid_index, method='ffill', tolerance=freq)
-        succeeding = flagscol.reindex(grid_index, method='bfill', tolerance=freq)
-        # check for too big gaps in the source data and drop the values interpolated in those too big gaps
-        na_mask = preceeding.isna() | succeeding.isna()
-        na_mask = na_mask[na_mask]
-        preceeding.drop(na_mask.index, inplace=True)
-        succeeding.drop(na_mask.index, inplace=True)
-        inter_data.drop(na_mask.index, inplace=True)
-        data[field] = inter_data
-        mask = succeeding > preceeding
-        preceeding.loc[mask] = succeeding.loc[mask]
-        flagscol = preceeding
-        flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(flagger_new)
-        return data, flagger
+    data[field] = inter_data[grid_index]
+
+    # do the reshaping on the history
+    flagger = applyFunctionOnHistory(
+        flagger, field,
+        hist_func=_resampleOverlapping, hist_kws=dict(freq=freq, fill_value=UNFLAGGED),
+        mask_func=_resampleOverlapping, mask_kws=dict(freq=freq, fill_value=False),
+        last_column='dummy'
+    )
 
-    # for freq defined grids, max-aggregate flags of every grid points freq-ranged surrounding
-    # hack ahead! Resampling with overlapping intervals:
-    # 1. -> no rolling over categories allowed in pandas, so we translate manually:
-    cats = pd.CategoricalIndex(flagger.dtype.categories, ordered=True)
-    cats_dict = {cats[i]: i for i in range(0, len(cats))}
-    flagscol = flagscol.replace(cats_dict)
-    # 3. -> combine resample+rolling to resample with overlapping intervals:
-    flagscol = flagscol.resample(freq).max()
-    initial = flagscol[0]
-    flagscol = flagscol.rolling(2, center=True, closed="neither").max()
-    flagscol[0] = initial
-    cats_dict = {num: key for (key, num) in cats_dict.items()}
-    flagscol = flagscol.astype(int, errors="ignore").replace(cats_dict)
-    flagscol[flagscol.isna()] = empty_intervals_flag
-
-    # we might miss the flag for interpolated data grids last entry (if we miss it - the datapoint is always nan
-    # - just settling a convention here(resulting GRID should start BEFORE first valid data entry and range to AFTER
-    # last valid data)):
-    if inter_data.shape[0] > flagscol.shape[0]:
-        flagscol = flagscol.append(pd.Series(empty_intervals_flag, index=[datcol.index[-1]]))
-
-    # Additional consistency operation: we have to block first/last interpolated datas flags - since they very
-    # likely represent chunk starts/ends (except data start and or end timestamp were grid-aligned before Grid
-    # interpolation already.)
-    if np.isnan(inter_data[0]) and not aligned_start:
-        chunk_bounds = chunk_bounds.insert(0, inter_data.index[0])
-    if np.isnan(inter_data[-1]) and not aligned_end:
-        chunk_bounds = chunk_bounds.append(pd.DatetimeIndex([inter_data.index[-1]]))
-    chunk_bounds = chunk_bounds.unique()
-    flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-
-    # block chunk ends of interpolation
-    flags_to_block = pd.Series(np.nan, index=chunk_bounds).astype(flagger_new.dtype)
-    flagger_new = flagger_new.setFlags(field, loc=chunk_bounds, flag=flags_to_block, force=True, inplace=True)
-
-    flagger = flagger.slice(drop=field).merge(flagger_new, subset=[field], inplace=True)
     return data, flagger
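# A usage sketch, not part of the patch (synthetic data, and assuming the plain functional
# call style that register.py's wrapper comment says is supported): interpolate "x" onto a
# regular 10min grid; the flag history is reshaped via applyFunctionOnHistory as shown above.
import pandas as pd
from dios import DictOfSeries
from saqc.constants import BAD
from saqc.flagger import initFlagsLike
from saqc.funcs.interpolation import interpolateIndex

idx = pd.to_datetime(["2021-01-01 00:03", "2021-01-01 00:12", "2021-01-01 00:27"])
data = DictOfSeries({"x": pd.Series([1.0, 2.0, 4.0], index=idx)})
flagger = initFlagsLike(data)
data, flagger = interpolateIndex(data, "x", flagger, freq="10min", method="time", to_mask=BAD)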
diff --git a/saqc/funcs/outliers.py b/saqc/funcs/outliers.py
index ab486487d692419535bc2b88bac3107a001cfa68..189995cc58a7c2410bb228b0419e2b6f2c65e9a3 100644
--- a/saqc/funcs/outliers.py
+++ b/saqc/funcs/outliers.py
@@ -29,14 +29,14 @@ import saqc.lib.ts_operators as ts_ops
 
 @register(masking='field', module="outliers")
 def flagByStray(
-    data: DictOfSeries,
-    field: ColumnName,
-    flagger: Flagger,
-    partition_freq: Optional[Union[IntegerWindow, FreqString]]=None,
-    partition_min: int=11,
-    iter_start: float=0.5,
-    alpha: float=0.05,
-    **kwargs
+        data: DictOfSeries,
+        field: ColumnName,
+        flagger: Flagger,
+        partition_freq: Optional[Union[IntegerWindow, FreqString]] = None,
+        partition_min: int = 11,
+        iter_start: float = 0.5,
+        alpha: float = 0.05,
+        **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
     Flag outliers in 1-dimensional (score) data with the STRAY Algorithm.
@@ -132,11 +132,11 @@ def _evalStrayLabels(
         field: str,
         flagger: Flagger,
         fields: Sequence[str],
-        reduction_range: Optional[str]=None,
-        reduction_drop_flagged: bool=False,
-        reduction_thresh: float=3.5,
-        reduction_min_periods: int=1,
-        at_least_one: bool=True,
+        reduction_range: Optional[str] = None,
+        reduction_drop_flagged: bool = False,  # TODO: still a case ?
+        reduction_thresh: float = 3.5,
+        reduction_min_periods: int = 1,
+        at_least_one: bool = True,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -197,7 +197,7 @@ def _evalStrayLabels(
             # check, wheather value under test is sufficiently centered:
             first = test_slice.first_valid_index()
             last = test_slice.last_valid_index()
-            min_range = pd.Timedelta(reduction_range)/4
+            min_range = pd.Timedelta(reduction_range) / 4
 
             if pd.Timedelta(index[1] - first) < min_range or pd.Timedelta(last - index[1]) < min_range:
                 polydeg = 0
@@ -213,11 +213,11 @@ def _evalStrayLabels(
 
             x = (test_slice.index.values.astype(float))
             x_0 = x[0]
-            x = (x - x_0)/10**12
+            x = (x - x_0) / 10 ** 12
 
             polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=polydeg)
 
-            testval = poly.polyval((float(index[1].to_numpy()) - x_0)/10**12, polyfitted)
+            testval = poly.polyval((float(index[1].to_numpy()) - x_0) / 10 ** 12, polyfitted)
             testval = val_frame[var][index[1]] - testval
 
             resids = test_slice.values - poly.polyval(x, polyfitted)
@@ -316,7 +316,7 @@ def _expFit(val_frame, scoring_method="kNNMaxGap", n_neighbors=10, iter_start=0.
     upper_tail_index = int(np.floor(0.5 * hist_argmax + 0.5 * iter_max_bin_index))
     resids_tail_index = findIndex(resids, binz[upper_tail_index], 0)
     upper_tail_hist, bins = np.histogram(
-        resids[resids_tail_index:iter_index], bins=binz[upper_tail_index : iter_max_bin_index + 1]
+        resids[resids_tail_index:iter_index], bins=binz[upper_tail_index: iter_max_bin_index + 1]
     )
 
     while (test_val < crit_val) & (iter_index < resids.size - 1):
@@ -331,7 +331,7 @@ def _expFit(val_frame, scoring_method="kNNMaxGap", n_neighbors=10, iter_start=0.
             upper_tail_hist[-1] += 1
             iter_max_bin_index = new_iter_max_bin_index
             upper_tail_index_new = int(np.floor(0.5 * hist_argmax + 0.5 * iter_max_bin_index))
-            upper_tail_hist = upper_tail_hist[upper_tail_index_new - upper_tail_index :]
+            upper_tail_hist = upper_tail_hist[upper_tail_index_new - upper_tail_index:]
             upper_tail_index = upper_tail_index_new
 
         # fitting
@@ -355,18 +355,18 @@ def flagMVScores(
         field: ColumnName,
         flagger: Flagger,
         fields: Sequence[ColumnName],
-        trafo: Callable[[pd.Series], pd.Series]=lambda x: x,
-        alpha: float=0.05,
-        n_neighbors: int=10,
-        scoring_func: Callable[[pd.Series], float]=np.sum,
-        iter_start: float=0.5,
-        stray_partition: Optional[Union[IntegerWindow, FreqString]]=None,
-        stray_partition_min: int=11,
-        trafo_on_partition: bool=True,
-        reduction_range: Optional[FreqString]=None,
-        reduction_drop_flagged: bool=False,
-        reduction_thresh: float=3.5,
-        reduction_min_periods: int=1,
+        trafo: Callable[[pd.Series], pd.Series] = lambda x: x,
+        alpha: float = 0.05,
+        n_neighbors: int = 10,
+        scoring_func: Callable[[pd.Series], float] = np.sum,
+        iter_start: float = 0.5,
+        stray_partition: Optional[Union[IntegerWindow, FreqString]] = None,
+        stray_partition_min: int = 11,
+        trafo_on_partition: bool = True,
+        reduction_range: Optional[FreqString] = None,
+        reduction_drop_flagged: bool = False,  # TODO: still a case ?
+        reduction_thresh: float = 3.5,
+        reduction_min_periods: int = 1,
         **kwargs,
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -509,11 +509,11 @@ def flagRaise(
         thresh: float,
         raise_window: FreqString,
         intended_freq: FreqString,
-        average_window: Optional[FreqString]=None,
-        mean_raise_factor: float=2.,
-        min_slope: Optional[float]=None,
-        min_slope_weight: float=0.8,
-        numba_boost: bool=True,  # TODO: rm, not a user decision
+        average_window: Optional[FreqString] = None,
+        mean_raise_factor: float = 2.,
+        min_slope: Optional[float] = None,
+        min_slope_weight: float = 0.8,
+        numba_boost: bool = True,  # TODO: rm, not a user decision
         **kwargs,
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -624,23 +624,23 @@ def flagRaise(
     # "unflag" values of insufficient deviation to their predecessors
     if min_slope is not None:
         w_mask = (
-            pd.Series(dataseries.index).diff().dt.total_seconds() / intended_freq.total_seconds()
-        ) > min_slope_weight
+                         pd.Series(dataseries.index).diff().dt.total_seconds() / intended_freq.total_seconds()
+                 ) > min_slope_weight
         slope_mask = np.abs(dataseries.diff()) < min_slope
         to_unflag = raise_series.notna() & w_mask.values & slope_mask
         raise_series[to_unflag] = np.nan
 
     # calculate and apply the weighted mean weights (pseudo-harmonization):
     weights = (
-        pd.Series(dataseries.index).diff(periods=2).shift(-1).dt.total_seconds() / intended_freq.total_seconds() / 2
+            pd.Series(dataseries.index).diff(periods=2).shift(-1).dt.total_seconds() / intended_freq.total_seconds() / 2
     )
 
     weights.iloc[0] = 0.5 + (dataseries.index[1] - dataseries.index[0]).total_seconds() / (
-        intended_freq.total_seconds() * 2
+            intended_freq.total_seconds() * 2
     )
 
     weights.iloc[-1] = 0.5 + (dataseries.index[-1] - dataseries.index[-2]).total_seconds() / (
-        intended_freq.total_seconds() * 2
+            intended_freq.total_seconds() * 2
     )
 
     weights[weights > 1.5] = 1.5
@@ -669,7 +669,7 @@ def flagRaise(
 
 @register(masking='field', module="outliers")
 def flagMAD(
-        data: DictOfSeries, field: ColumnName, flagger: Flagger, window: FreqString, z: float=3.5, **kwargs
+        data: DictOfSeries, field: ColumnName, flagger: Flagger, window: FreqString, z: float = 3.5, **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
     The function represents an implementation of the modyfied Z-score outlier detection method.
@@ -733,8 +733,8 @@ def flagOffset(
         thresh: float,
         tolerance: float,
         window: Union[IntegerWindow, FreqString],
-        rel_thresh: Optional[float]=None,
-        numba_kickin: int=200000,  # TODO: rm, not a user decision
+        rel_thresh: Optional[float] = None,
+        numba_kickin: int = 200000,  # TODO: rm, not a user decision
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -798,7 +798,7 @@ def flagOffset(
 
     # using reverted series - because ... long story.
     ind = dataseries.index
-    rev_ind = ind[0] + ((ind[-1]-ind)[::-1])
+    rev_ind = ind[0] + ((ind[-1] - ind)[::-1])
     map_i = pd.Series(ind, index=rev_ind)
     dataseries = pd.Series(dataseries.values, index=rev_ind)
 
@@ -887,9 +887,9 @@ def flagByGrubbs(
         field: ColumnName,
         flagger: Flagger,
         winsz: Union[FreqString, IntegerWindow],
-        alpha: float=0.05,
-        min_periods: int=8,
-        check_lagged: bool=False,
+        alpha: float = 0.05,
+        min_periods: int = 8,
+        check_lagged: bool = False,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -992,8 +992,8 @@ def flagRange(
         data: DictOfSeries,
         field: ColumnName,
         flagger: Flagger,
-        min: float=-np.inf,
-        max: float=np.inf,
+        min: float = -np.inf,
+        max: float = np.inf,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -1035,7 +1035,7 @@ def flagCrossStatistic(
         flagger: Flagger,
         fields: Sequence[ColumnName],
         thresh: float,
-        cross_stat: Literal["modZscore", "Zscore"]="modZscore",
+        cross_stat: Literal["modZscore", "Zscore"] = "modZscore",
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
diff --git a/saqc/funcs/resampling.py b/saqc/funcs/resampling.py
index d8469586e2a9898cb653df8a3fc30dcd775ab85c..b5d2a109f8fae80565387a2cac71179efba18606 100644
--- a/saqc/funcs/resampling.py
+++ b/saqc/funcs/resampling.py
@@ -12,17 +12,17 @@ import pandas as pd
 from dios import DictOfSeries
 
 from saqc.constants import *
-from saqc.core.register import register
+from saqc.core.register import register, isflagged
 from saqc.flagger import Flagger, initFlagsLike, History
 from saqc.funcs.tools import copy, drop, rename
-from saqc.funcs.interpolation import interpolateIndex
-from saqc.lib.tools import getDropMask, evalFreqStr
+from saqc.funcs.interpolation import interpolateIndex, _SUPPORTED_METHODS
+from saqc.lib.tools import evalFreqStr, getFreqDelta
 from saqc.lib.ts_operators import shift2Freq, aggregate2Freq
-from saqc.flagger.flags import applyFunctionOnHistory
+from saqc.flagger.flags import applyFunctionOnHistory, appendHistory
+from saqc.lib.rolling import customRoller
 
 logger = logging.getLogger("SaQC")
 
-
 METHOD2ARGS = {
     "inverse_fshift": ("backward", pd.Timedelta),
     "inverse_bshift": ("forward", pd.Timedelta),
@@ -41,9 +41,8 @@ def aggregate(
         flagger: Flagger,
         freq: str,
         value_func,
-        flag_func: Callable[[pd.Series], float]=np.nanmax,
-        method: Literal["fagg", "bagg", "nagg"]="nagg",
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
+        flag_func: Callable[[pd.Series], float] = np.nanmax,
+        method: Literal["fagg", "bagg", "nagg"] = "nagg",
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -74,24 +73,26 @@ def aggregate(
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the column, holding the data-to-be-regularized.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.freq
+
     freq : str
         The sampling frequency the data is to be aggregated (resampled) at.
+
     value_func : Callable
         The function you want to use for aggregation.
+
     flag_func : Callable
         The function you want to aggregate the flags with. It should be capable of operating on the flags dtype
         (usually ordered categorical).
+
     method : {'fagg', 'bagg', 'nagg'}, default 'nagg'
         Specifies which intervals to be aggregated for a certain timestamp. (preceeding, succeeding or
         "surrounding" interval). See description above for more details.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before aggregation - effectively excluding values that are flagged
-        with a flag in to_drop from the aggregation process. Default results in BAD
-        values being dropped initially.
 
     Returns
     -------
@@ -104,20 +105,9 @@ def aggregate(
     """
 
     data, flagger = copy(data, field, flagger, field + '_original')
-    data, flagger = resample(
-        data,
-        field,
-        flagger,
-        freq,
-        agg_func=value_func,
-        flag_agg_func=flag_func,
-        method=method,
-        empty_intervals_flag=UNFLAGGED,
-        to_drop=to_drop,
-        all_na_2_empty=True,
-        **kwargs,
+    return resample(
+        data, field, flagger, freq=freq, agg_func=value_func, flag_agg_func=flag_func, method=method, **kwargs
     )
-    return data, flagger
 
 
 @register(masking='none', module="resampling")
@@ -126,7 +116,6 @@ def linear(
         field: str,
         flagger: Flagger,
         freq: str,
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -148,16 +137,15 @@ def linear(
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the column, holding the data-to-be-regularized.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.freq
+
     freq : str
         An offset string. The frequency of the grid you want to interpolate your data at.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
-        with a flag in to_drop from the interpolation process. Default results in BAD
-        values being dropped initially.
 
     Returns
     -------
@@ -170,10 +158,7 @@ def linear(
     """
 
     data, flagger = copy(data, field, flagger, field + '_original')
-    data, flagger = interpolateIndex(
-        data, field, flagger, freq, "time", to_drop=to_drop, empty_intervals_flag=UNFLAGGED, **kwargs
-    )
-    return data, flagger
+    return interpolateIndex(data, field, flagger, freq, "time", **kwargs)
 
 
 @register(masking='none', module="resampling")
@@ -182,9 +167,8 @@ def interpolate(
         field: str,
         flagger: Flagger,
         freq: str,
-        method: Literal["linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric", "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"],
-        order: int=1,
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
+        method: _SUPPORTED_METHODS,
+        order: int = 1,
         **kwargs,
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -212,22 +196,23 @@ def interpolate(
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the column, holding the data-to-be-regularized.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.freq
+
     freq : str
         An offset string. The frequency of the grid you want to interpolate your data at.
+
     method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
+        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}
         The interpolation method you want to apply.
+
     order : int, default 1
         If your selected interpolation method can be performed at different *orders* - here you pass the desired
         order.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
-        with a flag in `to_drop` from the interpolation process. Default results in ``BAD``
-        values being dropped initially.
 
     Returns
     -------
@@ -240,18 +225,7 @@ def interpolate(
     """
 
     data, flagger = copy(data, field, flagger, field + '_original')
-    data, flagger = interpolateIndex(
-        data,
-        field,
-        flagger,
-        freq,
-        method=method,
-        inter_order=order,
-        to_drop=to_drop,
-        empty_intervals_flag=UNFLAGGED,
-        **kwargs,
-    )
-    return data, flagger
+    return interpolateIndex(data, field, flagger, freq, method=method, inter_order=order, **kwargs)
 
 
 @register(masking='none', module="resampling")
@@ -259,8 +233,11 @@ def mapToOriginal(
         data: DictOfSeries,
         field: str,
         flagger: Flagger,
-        method: Literal["inverse_fagg", "inverse_bagg", "inverse_nagg", "inverse_fshift", "inverse_bshift", "inverse_nshift", "inverse_interpolation"],
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
+        method: Literal[
+            "inverse_fagg", "inverse_bagg", "inverse_nagg",
+            "inverse_fshift", "inverse_bshift", "inverse_nshift",
+            "inverse_interpolation"
+        ],
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -305,18 +282,17 @@ def mapToOriginal(
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the column, holding the data-to-be-deharmonized.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.freq
+
     method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift',
             'inverse_interpolation'}
         The method used for projection of regularized flags onto original flags. See description above for more
         details.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
-        with a flag in to_drop from the interpolation process. Default results in BAD
-        values being dropped initially.
 
     Returns
     -------
@@ -327,12 +303,10 @@ def mapToOriginal(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values and shape may have changed relatively to the flagger input.
     """
-
     newfield = str(field) + '_original'
-    data, flagger = reindexFlags(data, newfield, flagger, method, source=field, to_drop=to_drop, **kwargs)
+    data, flagger = reindexFlags(data, newfield, flagger, method, source=field, to_mask=False)
     data, flagger = drop(data, field, flagger)
-    data, flagger = rename(data, newfield, flagger, field)
-    return data, flagger
+    return rename(data, newfield, flagger, field)
 
 
 @register(masking='none', module="resampling")
@@ -341,70 +315,43 @@ def shift(
         field: str,
         flagger: Flagger,
         freq: str,
-        method: Literal["fshift", "bshift", "nshift"]="nshift",
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
-        empty_intervals_flag: Optional[str]=None,
-        freq_check: Optional[Literal["check", "auto"]]=None,  # TODO: not a user decision
-        **kwargs
-) -> Tuple[DictOfSeries, Flagger]:
-
-    data, flagger = copy(data, field, flagger, field + '_original')
-    data, flagger = _shift(
-        data, field, flagger, freq, method=method, to_drop=to_drop,
-        empty_intervals_flag=empty_intervals_flag, freq_check=freq_check, **kwargs
-    )
-    return data, flagger
-
-
-def _shift(
-        data: DictOfSeries,
-        field: str,
-        flagger: Flagger,
-        freq: str,
-        method: Literal["fshift", "bshift", "nshift"]="nshift",
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
-        empty_intervals_flag: float = UNFLAGGED,
-        freq_check: Optional[Literal["check", "auto"]]=None,
+        method: Literal["fshift", "bshift", "nshift"] = "nshift",
+        freq_check: Optional[Literal["check", "auto"]] = None,  # TODO: not a user decision
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
-    Function to shift data points to regular (equidistant) timestamps.
-    Values get shifted according to the keyword passed to the `method` parameter.
-
-    * ``'nshift'``: every grid point gets assigned the nearest value in its range. (range = +/- 0.5 * `freq`)
-    * ``'bshift'``:  every grid point gets assigned its first succeeding value - if there is one available in the
-      succeeding sampling interval.
-    * ``'fshift'``:  every grid point gets assigned its ultimately preceeding value - if there is one available in
-      the preceeding sampling interval.
-
-    Note: all data nans get excluded defaultly from shifting. If `to_drop` is ``None``, - all *BAD* flagged values get
-    excluded as well.
+    Function to shift data and flags to a regular (equidistant) timestamp grid, according to ``method``.
 
     Parameters
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the column, holding the data-to-be-shifted.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.
+
     freq : str
         An frequency Offset String that will be interpreted as the sampling rate you want the data to be shifted to.
-    method: {'fshift', 'bshift', 'nshift'}, default 'nshift'
-        Specifies if datapoints get propagated forwards, backwards or to the nearest grid timestamp. See function
-        description for more details.
-    empty_intervals_flag : float, default UNFLAGGED
-        The Flag, that is assigned to grid points, if no values are available to be shifted to.
-    to_drop : {None, str, List[str]}, default None
-        Flags that refer to values you want to drop before shifting - effectively, excluding values that are flagged
-        with a flag in to_drop from the shifting process. Default - to_drop = None  - results in BAD
-        values being dropped initially.
+
+    method : {'fshift', 'bshift', 'nshift'}, default 'nshift'
+        Specifies how misaligned data-points get propagated to a grid timestamp.
+        The following choices are available:
+
+        * 'nshift' : every grid point gets assigned the nearest value in its range. (range = +/- 0.5 * `freq`)
+        * 'bshift' : every grid point gets assigned its first succeeding value, if one is available in
+          the succeeding sampling interval.
+        * 'fshift' : every grid point gets assigned its last preceding value, if one is available in
+          the preceding sampling interval.
+
     freq_check : {None, 'check', 'auto'}, default None
 
-        * ``None``: do not validate frequency-string passed to `freq`
-        * ``'check'``: estimate frequency and log a warning if estimate miss matches frequency string passed to `freq`,
+        * ``None`` : do not validate frequency-string passed to `freq`
+        * 'check' : estimate the frequency and log a warning if the estimate mismatches the frequency string passed to `freq`,
           or if no uniform sampling rate could be estimated
-        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
+        * 'auto' : estimate frequency and use estimate. (Ignores `freq` parameter.)
 
     Returns
     -------
@@ -415,61 +362,64 @@ def _shift(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values and shape may have changed relatively to the flagger input.
     """
-    data = data.copy()
-    datcol = data[field]
-    flagscol = flagger[field]
+    data, flagger = copy(data, field, flagger, field + '_original')
+    return _shift(data, field, flagger, freq, method=method, freq_check=freq_check, **kwargs)
 
-    drop_mask = getDropMask(field, to_drop, flagger, BAD)
-    drop_mask |= datcol.isna()
-    datcol[drop_mask] = np.nan
-    datcol.dropna(inplace=True)
-    flagscol.drop(drop_mask[drop_mask].index, inplace=True)
 
-    # create a dummys
-    if datcol.empty:
-        datcol = pd.Series([], index=pd.DatetimeIndex([]), name=field)
-        flagscol = pd.Series([], index=pd.DatetimeIndex([]), name=field)
+def _shift(
+        data: DictOfSeries,
+        field: str,
+        flagger: Flagger,
+        freq: str,
+        method: Literal["fshift", "bshift", "nshift"] = "nshift",
+        freq_check: Optional[Literal["check", "auto"]] = None,
+        **kwargs
+) -> Tuple[DictOfSeries, Flagger]:
+    """
+    Function to shift data points to regular (equidistant) timestamps.
 
-        # clear the past
-        flagger.history[field] = flagger.history[field].reindex(datcol.index)
-        flagger[field] = flagscol
+    See Also
+    --------
+    shift : public wrapper of this function, holding the full docstring
+    """
+    flagged = isflagged(flagger[field], kwargs['to_mask'])
+    datcol = data[field]
+    datcol[flagged] = np.nan
+    freq = evalFreqStr(freq, freq_check, datcol.index)
 
-    # do the shift, we need to process the history manually
-    else:
-        freq = evalFreqStr(freq, freq_check, datcol.index)
-        datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
+    # do the shift
+    datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
 
-        # after next 3 lines we leave history in unstable state
-        # but the following append will fix this
-        history = flagger.history[field]
-        history.hist = shift2Freq(history.hist, method, freq, fill_value=UNTOUCHED)
-        history.mask = shift2Freq(history.mask, method, freq, fill_value=False)
+    # do the shift on the history
+    history = flagger.history[field]
+    history.hist = shift2Freq(history.hist, method, freq, fill_value=UNTOUCHED)
+    history.mask = shift2Freq(history.mask, method, freq, fill_value=False)
 
-        flagscol = shift2Freq(flagscol, method, freq, fill_value=empty_intervals_flag)
-        history.append(flagscol, force=True)
-        flagger.history[field] = history
+    # The last two lines left the history in an unstable state. We also want to
+    # append a dummy column that represents the 'shift' in the history.
+    # Conveniently, that append also fixes the unstable state.
+    dummy = pd.Series(UNTOUCHED, index=datcol.index, dtype=float)
+    history.append(dummy, force=True)
 
+    flagger.history[field] = history
     data[field] = datcol
     return data, flagger
 
 
-@register(masking='field', module="resampling")
+@register(masking='none', module="resampling")
 def resample(
         data: DictOfSeries,
         field: str,
         flagger: Flagger,
         freq: str,
-        agg_func: Callable[[pd.Series], pd.Series]=np.mean,
-        method: Literal["fagg", "bagg", "nagg"]="bagg",
-        max_invalid_total_d: Optional[int]=None,
-        max_invalid_consec_d: Optional[int]=None,
-        max_invalid_consec_f: Optional[int]=None,
-        max_invalid_total_f: Optional[int]=None,
-        flag_agg_func: Callable[[pd.Series], float]=max,
-        empty_intervals_flag: float = BAD,
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
-        all_na_2_empty: bool=False,
-        freq_check: Optional[Literal["check", "auto"]]=None,
+        agg_func: Callable[[pd.Series], pd.Series] = np.mean,
+        method: Literal["fagg", "bagg", "nagg"] = "bagg",
+        max_invalid_total_d: Optional[int] = None,
+        max_invalid_consec_d: Optional[int] = None,
+        max_invalid_consec_f: Optional[int] = None,
+        max_invalid_total_f: Optional[int] = None,
+        flag_agg_func: Callable[[pd.Series], float] = max,
+        freq_check: Optional[Literal["check", "auto"]] = None,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -497,45 +447,48 @@ def resample(
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the column, holding the data-to-be-resampled.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.
+
     freq : str
         An Offset String, that will be interpreted as the frequency you want to resample your data with.
+
     agg_func : Callable
         The function you want to use for aggregation.
+
     method: {'fagg', 'bagg', 'nagg'}, default 'bagg'
         Specifies which intervals to be aggregated for a certain timestamp. (preceding, succeeding or
         "surrounding" interval). See description above for more details.
+
     max_invalid_total_d : {None, int}, default None
         Maximum number of invalid (nan) datapoints, allowed per resampling interval. If max_invalid_total_d is
         exceeded, the interval gets resampled to nan. By default (``np.inf``), there is no bound to the number of nan
         values in an interval and only intervals containing ONLY nan values or those, containing no values at all,
         get projected onto nan
+
     max_invalid_consec_d : {None, int}, default None
         Maximum number of consecutive invalid (nan) data points, allowed per resampling interval.
         If max_invalid_consec_d is exceeded, the interval gets resampled to nan. By default (np.inf),
         there is no bound to the number of consecutive nan values in an interval and only intervals
         containing ONLY nan values, or those containing no values at all, get projected onto nan.
+
     max_invalid_total_f : {None, int}, default None
         Same as `max_invalid_total_d`, only applying for the flags. The flag regarded as "invalid" value,
         is the one passed to empty_intervals_flag (default=``BAD``).
         Also this is the flag assigned to invalid/empty intervals.
+
     max_invalid_consec_f : {None, int}, default None
         Same as `max_invalid_total_f`, only applying onto flags. The flag regarded as "invalid" value, is the one passed
         to empty_intervals_flag. Also this is the flag assigned to invalid/empty intervals.
+
     flag_agg_func : Callable, default: max
         The function you want to aggregate the flags with. It should be capable of operating on the flags dtype
         (usually ordered categorical).
-    empty_intervals_flag : float, default BAD
-        A Flag, that you want to assign to invalid intervals. Invalid are those intervals, that contain nan values only,
-        or no values at all. Furthermore the empty_intervals_flag is the flag, serving as "invalid" identifyer when
-        checking for `max_total_invalid_f` and `max_consec_invalid_f patterns`.
-    to_drop : {None, str, List[str]}, default None
-        Flags that refer to values you want to drop before resampling - effectively excluding values that are flagged
-        with a flag in to_drop from the resampling process - this means that they also will not be counted in the
-        the `max_consec`/`max_total evaluation`. `to_drop` = ``None`` results in NO flags being dropped initially.
+
     freq_check : {None, 'check', 'auto'}, default None
 
         * ``None``: do not validate frequency-string passed to `freq`
@@ -552,81 +505,104 @@ def resample(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values and shape may have changed relatively to the flagger input.
     """
-
-    data = data.copy()
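+    # values considered flagged with respect to `to_mask` (passed via **kwargs) are
+    # excluded from the aggregation by setting them to NaN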
+    flagged = isflagged(flagger[field], kwargs['to_mask'])
     datcol = data[field]
-    flagscol = flagger[field]
-
-    drop_mask = getDropMask(field, to_drop, flagger, [])
-    datcol.drop(datcol[drop_mask].index, inplace=True)
+    datcol[flagged] = np.nan
     freq = evalFreqStr(freq, freq_check, datcol.index)
-    flagscol.drop(flagscol[drop_mask].index, inplace=True)
 
-    # create a dummys
-    if all_na_2_empty and datcol.dropna().empty:
-
-        datcol = pd.Series([], index=pd.DatetimeIndex([]), name=field)
-        flagscol = pd.Series([], index=pd.DatetimeIndex([]), name=field)
+    datcol = aggregate2Freq(
+        datcol,
+        method,
+        freq,
+        agg_func,
+        fill_value=np.nan,
+        max_invalid_total=max_invalid_total_d,
+        max_invalid_consec=max_invalid_consec_d,
+    )
 
-        # clear the past
-        flagger.history[field] = flagger.history[field].reindex(datcol.index)
-        flagger[field] = flagscol
+    kws = dict(
+        method=method,
+        freq=freq,
+        agg_func=flag_agg_func,
+        fill_value=UNTOUCHED,
+        max_invalid_total=max_invalid_total_f,
+        max_invalid_consec=max_invalid_consec_f,
+    )
 
-    # do the resampling
-    else:
-        datcol = aggregate2Freq(
-            datcol,
-            method,
-            freq,
-            agg_func,
-            fill_value=np.nan,
-            max_invalid_total=max_invalid_total_d,
-            max_invalid_consec=max_invalid_consec_d,
-        )
-
-        flagscol = aggregate2Freq(
-            flagscol,
-            method,
-            freq,
-            flag_agg_func,
-            fill_value=empty_intervals_flag,
-            max_invalid_total=max_invalid_total_f,
-            max_invalid_consec=max_invalid_consec_f,
-        )
-
-        kws = dict(
-            method=method,
-            freq=freq,
-            agg_func=flag_agg_func,
-            fill_value=UNTOUCHED,
-            max_invalid_total=max_invalid_total_f,
-            max_invalid_consec=max_invalid_consec_f,
-        )
-
-        flagger = applyFunctionOnHistory(
-            flagger, field,
-            hist_func=aggregate2Freq, hist_kws=kws,
-            mask_func=aggregate2Freq, mask_kws=kws,
-            last_column=flagscol
-        )
+    flagger = applyFunctionOnHistory(
+        flagger, field,
+        hist_func=aggregate2Freq, hist_kws=kws,
+        mask_func=aggregate2Freq, mask_kws=kws,
+        last_column='dummy'
+    )
 
     data[field] = datcol
     return data, flagger
 
 
-@register(masking='field', module="resampling")
+def _getChunkBounds(target: pd.Series, flagscol: pd.Series, freq: str):
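+    """
+    Return a boolean mask over the index of `flagscol`, marking positions whose
+    nearest `target` value within `freq` (looking backward or forward) is missing
+    or NaN. These positions lie at the bounds of interpolation chunks and are meant
+    to be ignored when projecting flags back onto the original index.
+    """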
+    chunk_end = target.reindex(flagscol.index, method='bfill', tolerance=freq)
+    chunk_start = target.reindex(flagscol.index, method='ffill', tolerance=freq)
+    ignore_flags = (chunk_end.isna() | chunk_start.isna())
+    return ignore_flags
+
+
+def _inverseInterpolation(source: pd.Series, target: pd.Series, freq: str, chunk_bounds) -> pd.Series:
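+    """
+    Project a column of regularized flags (`source`) back onto the original index
+    (`target.index`): every original timestamp receives the maximum of the next and
+    the previous regular value within `freq`. Positions listed in `chunk_bounds`
+    are blanked beforehand, so they do not propagate.
+    """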
+    source = source.copy()
+    if len(chunk_bounds) > 0:
+        source[chunk_bounds] = np.nan
+    backprojected = source.reindex(target.index, method="bfill", tolerance=freq)
+    fwrdprojected = source.reindex(target.index, method="ffill", tolerance=freq)
+    return pd.concat([backprojected, fwrdprojected], axis=1).max(axis=1)
+
+
+def _inverseAggregation(
+        source: Union[pd.Series, pd.DataFrame],
+        target: Union[pd.Series, pd.DataFrame],
+        freq: str,
+        method: str,
+):
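+    """
+    Project aggregated flags back onto the original index (`target.index`) by a
+    plain reindex of `source` with the given fill `method` and a tolerance of `freq`.
+    """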
+    return source.reindex(target.index, method=method, tolerance=freq)
+
+
+def _inverseShift(source: pd.Series, target: pd.Series, drop_mask: pd.Series,
+                  freq: str, method: str, fill_value) -> pd.Series:
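+    """
+    Project shifted flags (`source`) back onto the original index (`target.index`):
+    positions marked in `drop_mask` are set aside, the remaining original timestamps
+    are matched against the nearest shifted timestamp within `freq` (`merge_asof`
+    in the given `method` direction), the dropped positions are re-inserted and
+    remaining gaps are filled with `fill_value`.
+    """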
+    dtype = source.dtype
+
+    target_drops = target[drop_mask]
+    target = target[~drop_mask]
+    flags_merged = pd.merge_asof(
+        source,
+        target.index.to_series(name='pre_index'),
+        left_index=True,
+        right_index=True,
+        tolerance=freq,
+        direction=method,
+    )
+    flags_merged.dropna(subset=["pre_index"], inplace=True)
+    flags_merged = flags_merged.set_index(["pre_index"]).squeeze()
+    target[flags_merged.index] = flags_merged.values
+
+    # reinsert drops
+    source = target.reindex(target.index.union(target_drops.index))
+    source.loc[target_drops.index] = target_drops.values
+
+    return source.fillna(fill_value).astype(dtype, copy=False)
+
+
+@register(masking='none', module="resampling")
 def reindexFlags(
         data: DictOfSeries,
         field: str,
         flagger: Flagger,
-        method: Literal["inverse_fagg", "inverse_bagg", "inverse_nagg", "inverse_fshift", "inverse_bshift", "inverse_nshift"],
+        method: Literal[
+            "inverse_fagg", "inverse_bagg", "inverse_nagg",
+            "inverse_fshift", "inverse_bshift", "inverse_nshift"
+        ],
         source: str,
-        freq: Optional[str]=None,
-        to_drop: Optional[Union[Any, Sequence[Any]]]=None,
-        freq_check: Optional[Literal["check", "auto"]]=None,
+        freq: Optional[str] = None,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
     """
     The Function projects flags of "source" onto flags of "field". Wherever the "field" flags are "better" then the
     source flags projected on them, they get overridden with this associated source flag value.
@@ -663,25 +639,22 @@ def reindexFlags(
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
         The fieldname of the data column, you want to project the source-flags onto.
+
     flagger : saqc.flagger.Flagger
         A flagger object, holding flags and additional Informations related to `data`.
+
     method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift'}
         The method used for projection of source flags onto field flags. See description above for more details.
+
     source : str
         The source source of flags projection.
+
     freq : {None, str},default None
         The freq determines the projection range for the projection method. See above description for more details.
         Defaultly (None), the sampling frequency of source is used.
-    to_drop : {None, str, List[str]}, default None
-        Flags referring to values that are to drop before flags projection. Relevant only when projecting with an
-        inverted shift method. Defaultly BAD is listed.
-    freq_check : {None, 'check', 'auto'}, default None
-        - None: do not validate frequency-string passed to `freq`
-        - 'check': estimate frequency and log a warning if estimate miss matchs frequency string passed to 'freq', or
-            if no uniform sampling rate could be estimated
-        - 'auto': estimate frequency and use estimate. (Ignores `freq` parameter.)
 
     Returns
     -------
@@ -691,105 +664,43 @@ def reindexFlags(
         The flagger object, holding flags and additional Informations related to `data`.
         Flags values and shape may have changed relatively to the flagger input.
     """
+    flagscol = flagger[source]
 
-    # TODO: This needs a refactoring
-    raise NotImplementedError("currently not available - rewrite needed")
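+    # if no projection range is given, fall back to the sampling interval of the
+    # source index, if a regular one can be inferred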
+    if freq is None:
+        freq = getFreqDelta(flagscol.index)
+        if freq is None and not method == 'match':
+            raise ValueError('To project irregularly sampled data, either use method="match", or pass custom '
+                             'projection range to freq parameter')
 
-    flagscol, metacols = flagger.getFlags(source, full=True)
-    if flagscol.empty:
-        return data, flagger
     target_datcol = data[field]
-    target_flagscol, target_metacols = flagger.getFlags(field, full=True)
-
-    if (freq is None) and (method != "match"):
-        freq_check = 'auto'
-
-    freq = evalFreqStr(freq, freq_check, flagscol.index)
+    target_flagscol = flagger[field]
+    dummy = pd.Series(np.nan, target_flagscol.index)
 
     if method[-13:] == "interpolation":
-        backprojected = flagscol.reindex(target_flagscol.index, method="bfill", tolerance=freq)
-        fwrdprojected = flagscol.reindex(target_flagscol.index, method="ffill", tolerance=freq)
-        b_replacement_mask = (backprojected > target_flagscol) & (backprojected >= fwrdprojected)
-        f_replacement_mask = (fwrdprojected > target_flagscol) & (fwrdprojected > backprojected)
-        target_flagscol.loc[b_replacement_mask] = backprojected.loc[b_replacement_mask]
-        target_flagscol.loc[f_replacement_mask] = fwrdprojected.loc[f_replacement_mask]
-
-        backprojected_meta = {}
-        fwrdprojected_meta = {}
-        for meta_key in target_metacols.keys():
-            backprojected_meta[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method='bfill',
-                                                                      tolerance=freq)
-            fwrdprojected_meta[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method='ffill',
-                                                                      tolerance=freq)
-            target_metacols[meta_key].loc[b_replacement_mask] = backprojected_meta[meta_key].loc[b_replacement_mask]
-            target_metacols[meta_key].loc[f_replacement_mask] = fwrdprojected_meta[meta_key].loc[f_replacement_mask]
-
-    if method[-3:] == "agg" or method == "match":
-        # Aggregation - Inversion
+        ignore = _getChunkBounds(target_datcol, flagscol, freq)
+        func = _inverseInterpolation
+        func_kws = dict(freq=freq, chunk_bounds=ignore, target=dummy)
+        mask_kws = {**func_kws, 'chunk_bounds': []}
+
+    elif method[-3:] == "agg" or method == "match":
         projection_method = METHOD2ARGS[method][0]
         tolerance = METHOD2ARGS[method][1](freq)
-        flagscol = flagscol.reindex(target_flagscol.index, method=projection_method, tolerance=tolerance)
-        replacement_mask = flagscol > target_flagscol
-        target_flagscol.loc[replacement_mask] = flagscol.loc[replacement_mask]
-        for meta_key in target_metacols.keys():
-            metacols[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method=projection_method,
-                                                            tolerance=tolerance)
-            target_metacols[meta_key].loc[replacement_mask] = metacols[meta_key].loc[replacement_mask]
-
-    if method[-5:] == "shift":
-        # NOTE: although inverting a simple shift seems to be a less complex operation, it has quite some
-        # code assigned to it and appears to be more verbose than inverting aggregation -
-        # that owes itself to the problem of BAD/invalid values blocking a proper
-        # shift inversion and having to be outsorted before shift inversion and re-inserted afterwards.
-        #
-        # starting with the dropping and its memorization:
-
-        drop_mask = getDropMask(field, to_drop, flagger, BAD)
-        drop_mask |= target_datcol.isna()
-        target_flagscol_drops = target_flagscol[drop_mask]
-        target_flagscol.drop(drop_mask[drop_mask].index, inplace=True)
-
-        # shift inversion
+        func = _inverseAggregation
+        func_kws = dict(freq=tolerance, method=projection_method, target=dummy)
+        mask_kws = func_kws
+
+    elif method[-5:] == "shift":
+        drop_mask = (target_datcol.isna() | isflagged(target_flagscol, kwargs['to_mask']))
         projection_method = METHOD2ARGS[method][0]
         tolerance = METHOD2ARGS[method][1](freq)
-        flags_merged = pd.merge_asof(
-            flagscol,
-            pd.Series(target_flagscol.index.values, index=target_flagscol.index, name="pre_index"),
-            left_index=True,
-            right_index=True,
-            tolerance=tolerance,
-            direction=projection_method,
-        )
-        flags_merged.dropna(subset=["pre_index"], inplace=True)
-        flags_merged = flags_merged.set_index(["pre_index"]).squeeze()
-
-        # write flags to target
-        replacement_mask = flags_merged > target_flagscol.loc[flags_merged.index]
-        target_flagscol.loc[replacement_mask[replacement_mask].index] = flags_merged.loc[replacement_mask]
-
-        # reinsert drops
-        target_flagscol = target_flagscol.reindex(target_flagscol.index.join(target_flagscol_drops.index, how="outer"))
-        target_flagscol.loc[target_flagscol_drops.index] = target_flagscol_drops.values
-
-        for meta_key in target_metacols.keys():
-            target_metadrops = target_metacols[meta_key][drop_mask]
-            target_metacols[meta_key].drop(drop_mask[drop_mask].index, inplace=True)
-            meta_merged = pd.merge_asof(
-                metacols[meta_key],
-                pd.Series(target_metacols[meta_key].index.values, index=target_metacols[meta_key].index,
-                          name="pre_index"),
-                left_index=True,
-                right_index=True,
-                tolerance=tolerance,
-                direction=projection_method,
-            )
-            meta_merged.dropna(subset=["pre_index"], inplace=True)
-            meta_merged = meta_merged.set_index(["pre_index"]).squeeze()
-            # reinsert drops
-            target_metacols[meta_key][replacement_mask[replacement_mask].index] = meta_merged[replacement_mask]
-            target_metacols[meta_key] = target_metacols[meta_key].reindex(
-                target_metacols[meta_key].index.join(target_metadrops.index, how="outer"))
-            target_metacols[meta_key].loc[target_metadrops.index] = target_metadrops.values
-
-    flagger = flagger.setFlags(field, flag=target_flagscol, with_extra=True, **target_metacols, **kwargs)
+        func = _inverseShift
+        kws = dict(freq=tolerance, method=projection_method, drop_mask=drop_mask, target=dummy)
+        func_kws = {**kws, 'fill_value': UNTOUCHED}
+        mask_kws = {**kws, 'fill_value': False}
+
+    else:
+        raise ValueError(f"unknown method {method}")
+
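+    # project the source history with the chosen inverse function and append the
+    # resulting history columns to the history of `field`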
+    tmp_flagger = applyFunctionOnHistory(flagger, source, func, func_kws, func, mask_kws, last_column=dummy)
+    flagger = appendHistory(flagger, field, tmp_flagger.history[source])
     return data, flagger
diff --git a/saqc/funcs/residues.py b/saqc/funcs/residues.py
index 16684a43f19cd1635c6e0174d6f879e0811916d1..6abcfd2d64c6e5880511f6c48e3d9eec0d3b8167 100644
--- a/saqc/funcs/residues.py
+++ b/saqc/funcs/residues.py
@@ -21,9 +21,9 @@ def calculatePolynomialResidues(
         flagger: Flagger,
         winsz: Union[str, int],
         polydeg: int,
-        numba: Literal[True, False, "auto"]="auto",
-        eval_flags: bool=True,
-        min_periods: Optional[int]=0,
+        numba: Literal[True, False, "auto"] = "auto",  # TODO: rm, not a user decision
+        eval_flags: bool = True,  # TODO: not valid anymore; if still needed, maybe assign the user-passed ``flag``?
+        min_periods: Optional[int] = 0,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
@@ -117,13 +117,13 @@ def calculateRollingResidues(
         field: str,
         flagger: Flagger,
         winsz: Union[str, int],
-        func: Callable[[np.ndarray], np.ndarray]=np.mean,
-        eval_flags: bool=True,
-        min_periods: Optional[int]=0,
-        center: bool=True,
+        func: Callable[[np.ndarray], np.ndarray] = np.mean,
+        eval_flags: bool = True,
+        min_periods: Optional[int] = 0,
+        center: bool = True,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
+    """ TODO: docstring needed"""
     return roll(
         data, field, flagger,
         winsz=winsz,
@@ -134,4 +134,3 @@ def calculateRollingResidues(
         return_residues=True,
         **kwargs
     )
-
diff --git a/saqc/funcs/rolling.py b/saqc/funcs/rolling.py
index a0740e511bcb88caa294db6160c6b7b667d4e230..6a40c93c2cd0cf02b249800d8e657fc9b8e811d6 100644
--- a/saqc/funcs/rolling.py
+++ b/saqc/funcs/rolling.py
@@ -20,55 +20,55 @@ def roll(
         flagger: Flagger,
         winsz: Union[str, int],
         func: Callable[[pd.Series], float]=np.mean,
-        eval_flags: bool=True,
+        eval_flags: bool=True,  # TODO: not applicable anymore
         min_periods: int=0,
         center: bool=True,
-        return_residues=False,
+        return_residues=False,  # TODO: this should not be public, a wrapper would be better
         **kwargs
 ):
     """
-        Models the data with the rolling mean and returns the residues.
-
-        Note, that the residues will be stored to the `field` field of the input data, so that the data that is modelled
-        gets overridden.
-
-        Parameters
-        ----------
-        data : dios.DictOfSeries
-            A dictionary of pandas.Series, holding all the data.
-        field : str
-            The fieldname of the column, holding the data-to-be-modelled.
-        flagger : saqc.flagger.Flagger
-            A flagger object, holding flags and additional Informations related to `data`.
-        winsz : {int, str}
-            The size of the window you want to roll with. If an integer is passed, the size
-            refers to the number of periods for every fitting window. If an offset string is passed,
-            the size refers to the total temporal extension.
-            For regularly sampled timeseries, the period number will be casted down to an odd number if
-            center = True.
-        func : Callable[np.array, float], default np.mean
-            Function to apply on the rolling window and obtain the curve fit value.
-        eval_flags : bool, default True
-            Wheather or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
-            flag present in the interval, the data for its calculation was obtained from.
-            Currently not implemented in combination with not-harmonized timeseries.
-        min_periods : int, default 0
-            The minimum number of periods, that has to be available in every values fitting surrounding for the mean
-            fitting to be performed. If there are not enough values, np.nan gets assigned. Default (0) results in fitting
-            regardless of the number of values present.
-        center : bool, default True
-            Wheather or not to center the window the mean is calculated of around the reference value. If False,
-            the reference value is placed to the right of the window (classic rolling mean with lag.)
-
-        Returns
-        -------
-        data : dios.DictOfSeries
-            A dictionary of pandas.Series, holding all the data.
-            Data values may have changed relatively to the data input.
-        flagger : saqc.flagger.Flagger
-            The flagger object, holding flags and additional Informations related to `data`.
-            Flags values may have changed relatively to the flagger input.
-        """
+    Models the data with the rolling mean and returns the residues.
+
+    Note that the residues will be stored in the `field` column of the input data, so that the data that is modelled
+    gets overridden.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-modelled.
+    flagger : saqc.flagger.Flagger
+        A flagger object, holding flags and additional Informations related to `data`.
+    winsz : {int, str}
+        The size of the window you want to roll with. If an integer is passed, the size
+        refers to the number of periods for every fitting window. If an offset string is passed,
+        the size refers to the total temporal extension.
+        For regularly sampled timeseries, the period number will be cast down to an odd number if
+        center = True.
+    func : Callable[np.array, float], default np.mean
+        Function to apply on the rolling window and obtain the curve fit value.
+    eval_flags : bool, default True
+        Whether or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
+        flag present in the interval the data for its calculation was obtained from.
+        Currently not implemented in combination with not-harmonized timeseries.
+    min_periods : int, default 0
+        The minimum number of periods that has to be available in every value's fitting surrounding for the mean
+        fitting to be performed. If there are not enough values, np.nan gets assigned. Default (0) results in fitting
+        regardless of the number of values present.
+    center : bool, default True
+        Whether or not to center the window, over which the mean is calculated, around the reference value. If False,
+        the reference value is placed at the right end of the window (classic rolling mean with lag).
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relatively to the data input.
+    flagger : saqc.flagger.Flagger
+        The flagger object, holding flags and additional Informations related to `data`.
+        Flags values may have changed relatively to the flagger input.
+    """
 
     data = data.copy()
     to_fit = data[field]
diff --git a/saqc/funcs/scores.py b/saqc/funcs/scores.py
index 1a49f4e1e101da726dfe70ad42e00d32f022fd72..1f5d0456ebdf284097b7739eb719f6656115a201 100644
--- a/saqc/funcs/scores.py
+++ b/saqc/funcs/scores.py
@@ -21,19 +21,20 @@ def assignKNNScore(
         field: str,
         flagger: Flagger,
         fields: Sequence[str],
-        n_neighbors: int=10,
-        trafo: Callable[[pd.Series], pd.Series]=lambda x: x,
-        trafo_on_partition: bool=True,
-        scoring_func: Callable[[pd.Series], float]=np.sum,
-        target_field: str='kNN_scores',
-        partition_freq: Union[float, str]=np.inf,
-        partition_min: int=2,
-        kNN_algorithm: Literal["ball_tree", "kd_tree", "brute", "auto"]='ball_tree',
-        metric: str='minkowski',
-        p: int=2,
+        n_neighbors: int = 10,
+        trafo: Callable[[pd.Series], pd.Series] = lambda x: x,
+        trafo_on_partition: bool = True,
+        scoring_func: Callable[[pd.Series], float] = np.sum,
+        target_field: str = 'kNN_scores',
+        partition_freq: Union[float, str] = np.inf,
+        partition_min: int = 2,
+        kNN_algorithm: Literal["ball_tree", "kd_tree", "brute", "auto"] = 'ball_tree',
+        metric: str = 'minkowski',
+        p: int = 2,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
     """
+    TODO: docstring needs a rework
     Score datapoints by an aggregation of the dictances to their k nearest neighbors.
 
     The function is a wrapper around the NearestNeighbors method from pythons sklearn library (See reference [1]).
diff --git a/saqc/funcs/transformation.py b/saqc/funcs/transformation.py
index fdc99abbffc0dbacc95551d3c6ff7870598d9bff..6a176b4a905a8fc2c811f7aa90e816ca4692f727 100644
--- a/saqc/funcs/transformation.py
+++ b/saqc/funcs/transformation.py
@@ -18,10 +18,9 @@ def transform(
         field: str,
         flagger: Flagger,
         func: Callable[[pd.Series], pd.Series],
-        partition_freq: Optional[Union[float, str]]=None,
+        partition_freq: Optional[Union[float, str]] = None,
         **kwargs
 ) -> Tuple[DictOfSeries, Flagger]:
-
     """
     Function to transform data columns with a transformation that maps series onto series of the same length.
 
@@ -75,5 +74,3 @@ def transform(
 
     data[field] = val_ser
     return data, flagger
-
-
diff --git a/saqc/lib/tools.py b/saqc/lib/tools.py
index dec366b9828d8de410d3f9c45ddab9b8a88699fa..6edfb047146bcd8240dbbb686773e748f272b3af 100644
--- a/saqc/lib/tools.py
+++ b/saqc/lib/tools.py
@@ -308,18 +308,6 @@ def isQuoted(string):
     return bool(re.search(r"'.*'|\".*\"", string))
 
 
-# TODO: GL167
-def getDropMask(field, to_drop, flagger, default):
-    drop_mask = pd.Series(False, index=flagger[field].index)
-    if to_drop is None:
-        to_drop = default
-    to_drop = toSequence(to_drop)
-    if len(to_drop) > 0:
-        # drop_mask |= flagger.isFlagged(field, flag=to_drop)
-        drop_mask |= flagger[field] == to_drop
-    return drop_mask
-
-
 def mutateIndex(index, old_name, new_name):
     pos = index.get_loc(old_name)
     index = index.drop(index[pos])
diff --git a/saqc/lib/ts_operators.py b/saqc/lib/ts_operators.py
index 8f1f86cefc00506fcea75e62abb700d279370e87..37d8253ab55dc575669ae7c656e065599220c6b1 100644
--- a/saqc/lib/ts_operators.py
+++ b/saqc/lib/ts_operators.py
@@ -7,6 +7,7 @@ The module gathers all kinds of timeseries tranformations.
 import logging
 
 import re
+from typing import Union
 
 import pandas as pd
 import numpy as np
@@ -179,7 +180,7 @@ def meanQC(data, max_nan_total=np.inf, max_nan_consec=np.inf):
     return np.nanmean(data[~validationTrafo(data.isna(), max_nan_total, max_nan_consec)])
 
 
-def interpolateNANs(data, method, order=2, inter_limit=2, downgrade_interpolation=False, return_chunk_bounds=False):
+def interpolateNANs(data, method, order=2, inter_limit=2, downgrade_interpolation=False):
     """
     The function interpolates nan-values (and nan-grids) in timeseries data. It can be passed all the method keywords
     from the pd.Series.interpolate method and will than apply this very methods. Note, that the inter_limit keyword
@@ -198,18 +199,12 @@ def interpolateNANs(data, method, order=2, inter_limit=2, downgrade_interpolatio
     :param downgrade_interpolation:  Boolean. Default False. If True:
                                     If a data chunk not contains enough values for interpolation of the order "order",
                                     the highest order possible will be selected for that chunks interpolation.
-    :param return_chunk_bounds:     Boolean. Default False. If True:
-                                    Additionally to the interpolated data, the start and ending points of data chunks
-                                    not containing no series consisting of more then "inter_limit" nan values,
-                                    are calculated and returned.
-                                    (This option fits requirements of the "interpolateNANs" functions use in the
-                                    context of saqc harmonization mainly.)
 
     :return:
     """
     inter_limit = int(inter_limit)
     data = pd.Series(data).copy()
-    gap_mask = (data.rolling(inter_limit, min_periods=0).apply(lambda x: np.sum(np.isnan(x)), raw=True)) != inter_limit
+    gap_mask = data.isna().rolling(inter_limit, min_periods=0).sum() != inter_limit
 
     if inter_limit == 2:
         gap_mask = gap_mask & gap_mask.shift(-1, fill_value=True)
@@ -218,13 +213,6 @@ def interpolateNANs(data, method, order=2, inter_limit=2, downgrade_interpolatio
             gap_mask.replace(True, np.nan).fillna(method="bfill", limit=inter_limit).replace(np.nan, True).astype(bool)
         )
 
-    if return_chunk_bounds:
-        # start end ending points of interpolation chunks have to be memorized to block their flagging:
-        chunk_switches = gap_mask.astype(int).diff()
-        chunk_starts = chunk_switches[chunk_switches == -1].index
-        chunk_ends = chunk_switches[(chunk_switches.shift(-1) == 1)].index
-        chunk_bounds = chunk_starts.join(chunk_ends, how="outer", sort=True)
-
     pre_index = data.index
     data = data[gap_mask]
 
@@ -260,18 +248,18 @@ def interpolateNANs(data, method, order=2, inter_limit=2, downgrade_interpolatio
         data = data.squeeze(axis=1)
         data.name = dat_name
     data = data.reindex(pre_index)
-    if return_chunk_bounds:
-        return data, chunk_bounds
-    else: return data
+
+    return data
 
 
 def aggregate2Freq(
-    data, method, freq, agg_func, fill_value=np.nan, max_invalid_total=None, max_invalid_consec=None
+    data: pd.Series, method, freq, agg_func, fill_value=np.nan, max_invalid_total=None, max_invalid_consec=None
 ):
-    # The function aggregates values to an equidistant frequency grid with agg_func.
-    # Timestamps that have no values projected on them, get "fill_value" assigned. Also,
-    # "fill_value" serves as replacement for "invalid" intervals
-
+    """
+    The function aggregates values to an equidistant frequency grid with agg_func.
+    Timestamps that get no values projected onto them are filled with the fill-value, which
+    also serves as a replacement for "invalid" intervals.
+    """
     methods = {
         "nagg": lambda seconds_total: (seconds_total/2, "left", "left"),
         "bagg": lambda _: (0, "left", "left"),
@@ -323,9 +311,11 @@ def aggregate2Freq(
     return data
 
 
-def shift2Freq(data, method, freq, fill_value=np.nan):
-    # shift timestamps backwards/forwards in order to allign them with an equidistant
-    # frequencie grid.
+def shift2Freq(data: Union[pd.Series, pd.DataFrame], method: str, freq: str, fill_value):
+    """
+    Shift timestamps backwards/forwards in order to align them with an equidistant
+    frequency grid. Resulting NaNs are replaced with the fill-value.
+    """
 
     methods = {
         "fshift": lambda freq: ("ffill", pd.Timedelta(freq)),
diff --git a/tests/common.py b/tests/common.py
index 3e70ff349fb2ae082ef4a3b14b00c89f65a86a97..e225b64102b7848ba128315322bb2e6f3d756499 100644
--- a/tests/common.py
+++ b/tests/common.py
@@ -42,3 +42,49 @@ def writeIO(content):
     return f
 
 
+def checkDataFlaggerInvariants(data, flagger, field, identical=True):
+    """
+    Check all invariants that must hold at any point for
+        * field
+        * data
+        * flagger
+        * data[field]
+        * flagger[field]
+        * data[field].index
+        * flagger[field].index
+        * between data and flagger
+        * between data[field] and flagger[field]
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        data container
+    flagger : Flags
+        flags container
+    field : str
+        the field in question
+    identical : bool, default True
+        whether to check indexes of data and flagger to be
+        identical (True, default) of just for equality.
+    """
+    assert isinstance(data, dios.DictOfSeries)
+    assert isinstance(flagger, Flagger)
+
+    # all columns in data are in flagger
+    assert data.columns.difference(flagger.columns).empty
+
+    # ------------------------------------------------------------------------
+    # below here, we just check on and with field
+    # ------------------------------------------------------------------------
+    assert field in data
+    assert field in flagger
+
+    assert flagger[field].dtype == float
+
+    # `pd.Index.identical` also checks index attributes like `freq`
+    if identical:
+        assert data[field].index.identical(flagger[field].index)
+    else:
+        assert data[field].index.equals(flagger[field].index)
+
+
diff --git a/tests/funcs/test_harm_funcs.py b/tests/funcs/test_harm_funcs.py
index 4f12a41d2a6629c685abe89ff33ce49d884f2b62..052d5f8daf7c818d66bbcf9c3b6870e6ef616b5b 100644
--- a/tests/funcs/test_harm_funcs.py
+++ b/tests/funcs/test_harm_funcs.py
@@ -1,15 +1,13 @@
 #! /usr/bin/env python
 # -*- coding: utf-8 -*-
 
-
-# see test/functs/fixtures.py for global fixtures "course_..."
 import pytest
 import numpy as np
 import pandas as pd
 import dios
 
-from tests.common import TESTFLAGGER
-
+from saqc.flagger import Flagger, initFlagsLike
+from saqc.constants import BAD, UNFLAGGED
 from saqc.funcs.resampling import (
     linear,
     interpolate,
@@ -18,9 +16,7 @@ from saqc.funcs.resampling import (
     mapToOriginal,
 )
 
-RESHAPERS = ["nshift", "fshift", "bshift", "nagg", "bagg", "fagg", "interpolation"]
-
-INTERPOLATIONS = ["time", "polynomial"]
+from tests.common import checkDataFlaggerInvariants
 
 
 @pytest.fixture
@@ -39,184 +35,177 @@ def data():
     return data
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-@pytest.mark.parametrize("reshaper", RESHAPERS)
-def test_harmSingleVarIntermediateFlagging(data, flagger, reshaper):
-    flagger = flagger.initFlags(data)
-    # make pre harm copies:
-    pre_data = data.copy()
-    pre_flags = flagger.getFlags()
+@pytest.mark.parametrize('func, kws', [
+    ('linear', dict()),
+    ('shift', dict(method="nshift")),
+    ('interpolate', dict(method="spline")),
+    ('aggregate', dict(value_func=np.nansum, method="nagg")),
+])
+def test_wrapper(data, func, kws):
+    field = 'data'
     freq = "15min"
-    assert len(data.columns) == 1
-    field = data.columns[0]
-    data, flagger = linear(data, "data", flagger, freq)
-    # flag something bad
-    flagger = flagger.setFlags("data", loc=data[field].index[3:4])
-    data, flagger = mapToOriginal(data, "data", flagger, method="inverse_" + reshaper)
-    d = data[field]
-    if reshaper == "nagg":
-        assert flagger.isFlagged(loc=d.index[3:7]).squeeze().all()
-        assert (~flagger.isFlagged(loc=d.index[0:3]).squeeze()).all()
-        assert (~flagger.isFlagged(loc=d.index[7:]).squeeze()).all()
-    if reshaper == "nshift":
-        assert (flagger.isFlagged().squeeze() == [False, False, False, False, True, False, False, False, False]).all()
-    if reshaper == "bagg":
-        assert flagger.isFlagged(loc=d.index[5:7]).squeeze().all()
-        assert (~flagger.isFlagged(loc=d.index[0:5]).squeeze()).all()
-        assert (~flagger.isFlagged(loc=d.index[7:]).squeeze()).all()
-    if reshaper == "bshift":
-        assert (flagger.isFlagged().squeeze() == [False, False, False, False, False, True, False, False, False]).all()
-    if reshaper == "fagg":
-        assert flagger.isFlagged(loc=d.index[3:5]).squeeze().all()
-        assert (~flagger.isFlagged(loc=d.index[0:3]).squeeze()).all()
-        assert (~flagger.isFlagged(loc=d.index[5:]).squeeze()).all()
-    if reshaper == "fshift":
-        assert (flagger.isFlagged().squeeze() == [False, False, False, False, True, False, False, False, False]).all()
-
-    flags = flagger.getFlags()
-    assert pre_data[field].equals(data[field])
-    assert len(data[field]) == len(flags[field])
-    assert (pre_flags[field].index == flags[field].index).all()
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_harmSingleVarInterpolations(data, flagger):
-    flagger = flagger.initFlags(data)
-    field = data.columns[0]
-    pre_data = data[field]
-    pre_flags = flagger.getFlags(field)
-    tests = [
-        (
-            "nagg",
-            "15Min",
-            pd.Series(
-                data=[-87.5, -25.0, 0.0, 37.5, 50.0],
-                index=pd.date_range("2011-01-01 00:00:00", "2011-01-01 01:00:00", freq="15min"),
-            ),
-        ),
-        (
-            "nagg",
-            "30Min",
-            pd.Series(
-                data=[-87.5, -25.0, 87.5],
-                index=pd.date_range("2011-01-01 00:00:00", "2011-01-01 01:00:00", freq="30min"),
-            ),
-        ),
-        (
-            "bagg",
-            "15Min",
-            pd.Series(
-                data=[-50.0, -37.5, -37.5, 12.5, 37.5, 50.0],
-                index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15min"),
-            ),
-        ),
-        (
-            "bagg",
-            "30Min",
-            pd.Series(
-                data=[-50.0, -75.0, 50.0, 50.0],
-                index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30min"),
-            ),
-        ),
-    ]
-
-    for interpolation, freq, expected in tests:
-        data_harm, flagger_harm = aggregate(
-            data, field, flagger, freq, value_func=np.sum, method=interpolation
-        )
-        assert data_harm[field].equals(expected)
-        data_deharm, flagger_deharm = mapToOriginal(
-            data_harm, "data", flagger_harm, method="inverse_" + interpolation
-        )
-        assert data_deharm[field].equals(pre_data)
-        assert flagger_deharm.getFlags([field]).squeeze().equals(pre_flags)
-
-    tests = [
-        (
-            "fshift",
-            "15Min",
-            pd.Series(
-                data=[np.nan, -37.5, -25.0, 0.0, 37.5, 50.0],
-                index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15Min"),
-            ),
-        ),
-        (
-            "fshift",
-            "30Min",
-            pd.Series(
-                data=[np.nan, -37.5, 0.0, 50.0],
-                index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30Min"),
-            ),
-        ),
-        (
-            "bshift",
-            "15Min",
-            pd.Series(
-                data=[-50.0, -37.5, -25.0, 12.5, 37.5, 50.0],
-                index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15Min"),
-            ),
-        ),
-        (
-            "bshift",
-            "30Min",
-            pd.Series(
-                data=[-50.0, -37.5, 12.5, 50.0],
-                index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30Min"),
-            ),
-        ),
-        (
-            "nshift",
-            "15min",
-            pd.Series(
-                data=[np.nan, -37.5, -25.0, 12.5, 37.5, 50.0],
-                index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15Min"),
-            ),
-        ),
-        (
-            "nshift",
-            "30min",
-            pd.Series(
-                data=[np.nan, -37.5, 12.5, 50.0],
-                index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30Min"),
-            ),
-        ),
-    ]
-
-    for interpolation, freq, expected in tests:
-        data_harm, flagger_harm = shift(data, field, flagger, freq, method=interpolation)
-        assert data_harm[field].equals(expected)
-        data_deharm, flagger_deharm = mapToOriginal(
-            data_harm, "data", flagger_harm, method="inverse_" + interpolation
-        )
-        assert data_deharm[field].equals(pre_data)
-        assert flagger_deharm.getFlags([field]).squeeze().equals(pre_flags)
-
-
-@pytest.mark.parametrize("method", INTERPOLATIONS)
+    flagger = initFlagsLike(data)
+
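+    # look the function under test up by name on the saqc.funcs namespace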
+    import saqc
+    func = getattr(saqc.funcs, func)
+    data, flagger = func(data, field, flagger, freq, **kws)
+
+    # check minimal requirements
+    checkDataFlaggerInvariants(data, flagger, field)
+    assert data[field].index.freq == pd.Timedelta(freq)
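+    # (checkDataFlaggerInvariants is assumed to verify, among other things, that
+    #  data[field] and flagger[field] still share the same index after resampling)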
+
+
+@pytest.mark.parametrize("method", ["time", "polynomial"])
 def test_gridInterpolation(data, method):
     freq = "15min"
-    data = data.squeeze()
-    field = data.name
+    field = 'data'
+    data = data[field]
     data = (data * np.sin(data)).append(data.shift(1, "2h")).shift(1, "3s")
     data = dios.DictOfSeries(data)
-    flagger = TESTFLAGGER[0].initFlags(data)
+    flagger = initFlagsLike(data)
 
     # we are just testing if the interpolation gets passed to the series without causing an error:
+    res = interpolate(data, field, flagger, freq, method=method, downcast_interpolation=True)
 
-    interpolate(data, field, flagger, freq, method=method, downcast_interpolation=True)
     if method == "polynomial":
-        interpolate(data, field, flagger, freq, order=2, method=method, downcast_interpolation=True)
-        interpolate(data, field, flagger, freq, order=10, method=method, downcast_interpolation=True)
+        res = interpolate(data, field, flagger, freq, order=2, method=method, downcast_interpolation=True)
+        res = interpolate(data, field, flagger, freq, order=10, method=method, downcast_interpolation=True)
 
+    # check minimal requirements
+    rdata, rflagger = res
+    checkDataFlaggerInvariants(rdata, rflagger, field, identical=False)
+    assert rdata[field].index.freq == pd.Timedelta(freq)
+
+
+@pytest.mark.parametrize('func, kws', [
+    ('linear', dict()),
+    ('shift', dict(method="nshift")),
+    ('interpolate', dict(method="spline")),
+    ('aggregate', dict(value_func=np.nansum, method="nagg")),
+])
+def test_flagsSurviveReshaping(func, kws):
+    """
+    flagging -> reshaping -> test (the flags were reshaped correctly as well)
+    """
+    pass
+
+
+def test_flagsSurviveInverseReshaping():
+    """
+    inverse reshaping -> flagging -> test (flags also was reshaped correctly)"""
+    pass
+
+
+def test_flagsSurviveBackprojection():
+    """
+    flagging -> reshaping -> inverse reshaping -> test (flags == original-flags)
+    """
+    pass
+
+
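+# A minimal sketch (commented out, assumptions only) of how the round-trip checks in the
+# stubs above could be filled in. It reuses `linear` / `mapToOriginal` exactly as they are
+# exercised further down in this file; "inverse_nshift" is just one of the back-projection
+# methods tested below and the asserted flag positions are illustrative, not verified here:
+#
+#   def test_flagsSurviveBackprojectionSketch(data):
+#       flagger = initFlagsLike(data)
+#       pre_flagger = flagger.copy()
+#       data_h, flagger_h = linear(data, 'data', flagger, freq="15min")
+#       flagger_h[data_h['data'].index[3:4], 'data'] = BAD
+#       data_o, flagger_o = mapToOriginal(data_h, 'data', flagger_h, method="inverse_nshift")
+#       assert flagger_o['data'].index.equals(pre_flagger['data'].index)
+#       assert (flagger_o['data'] > UNFLAGGED).any()
+
+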
+@pytest.mark.parametrize("reshaper", ["nshift", "fshift", "bshift", "nagg", "bagg", "fagg", "interpolation"])
+def test_harmSingleVarIntermediateFlagging(data, reshaper):
+    flagger = initFlagsLike(data)
+    field = 'data'
+
+    pre_data = data.copy()
+    pre_flagger = flagger.copy()
+
+    data, flagger = linear(data, field, flagger, freq="15min")
+    checkDataFlaggerInvariants(data, flagger, field, identical=True)
+    assert data[field].index.freq == pd.Timedelta('15min')
+
+    # flag something bad
+    flagger[data[field].index[3:4], field] = BAD
+    data, flagger = mapToOriginal(data, field, flagger, method="inverse_" + reshaper)
+
+    assert len(data[field]) == len(flagger[field])
+    assert data[field].equals(pre_data[field])
+    assert flagger[field].index.equals(pre_flagger[field].index)
+
+    if 'agg' in reshaper:
+        if reshaper == "nagg":
+            start, end = 3, 7
+        elif reshaper == "fagg":
+            start, end = 3, 5
+        elif reshaper == "bagg":
+            start, end = 5, 7
+        else:
+            raise NotImplementedError('untested test case')
+
+        assert all(flagger[field].iloc[start:end] > UNFLAGGED)
+        assert all(flagger[field].iloc[:start] == UNFLAGGED)
+        assert all(flagger[field].iloc[end:] == UNFLAGGED)
+
+    elif 'shift' in reshaper:
+        if reshaper == "nshift":
+            exp = [False, False, False, False, True, False, False, False, False]
+        elif reshaper == "fshift":
+            exp = [False, False, False, False, True, False, False, False, False]
+        elif reshaper == "bshift":
+            exp = [False, False, False, False, False, True, False, False, False]
+        else:
+            raise NotImplementedError('untested test case')
+
+        flagged = flagger[field] > UNFLAGGED
+        assert all(flagged == exp)
+
+    elif reshaper == 'interpolation':
+        pytest.skip('no testcase for interpolation')
+
+    else:
+        raise NotImplementedError('untested test case')
+
+
+@pytest.mark.parametrize(
+    'params, expected',
+    [
+        (("nagg", "15Min"), pd.Series(data=[-87.5, -25.0, 0.0, 37.5, 50.0], index=pd.date_range("2011-01-01 00:00:00", "2011-01-01 01:00:00", freq="15min"))),
+        (("nagg", "30Min"), pd.Series(data=[-87.5, -25.0, 87.5], index=pd.date_range("2011-01-01 00:00:00", "2011-01-01 01:00:00", freq="30min"))),
+        (("bagg", "15Min"), pd.Series(data=[-50.0, -37.5, -37.5, 12.5, 37.5, 50.0], index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15min"))),
+        (("bagg", "30Min"), pd.Series(data=[-50.0, -75.0, 50.0, 50.0], index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30min"))),
+    ])
+def test_harmSingleVarInterpolationAgg(data, params, expected):
+    flagger = initFlagsLike(data)
+    field = 'data'
+
+    pre_data = data.copy()
+    pre_flagger = flagger.copy()
+    method, freq = params
+
+    data_harm, flagger_harm = aggregate(data, field, flagger, freq, value_func=np.sum, method=method)
+    checkDataFlaggerInvariants(data_harm, flagger_harm, field, identical=True)
+    assert data_harm[field].index.freq == pd.Timedelta(freq)
+    assert data_harm[field].equals(expected)
+
+    data_deharm, flagger_deharm = mapToOriginal(data_harm, field, flagger_harm, method="inverse_" + method)
+    checkDataFlaggerInvariants(data_deharm, flagger_deharm, field, identical=True)
+    assert data_deharm[field].equals(pre_data[field])
+    assert flagger_deharm[field].equals(pre_flagger[field])
+
+
+@pytest.mark.parametrize(
+    'params, expected',
+    [
+        (("bshift", "15Min"), pd.Series(data=[-50.0, -37.5, -25.0, 12.5, 37.5, 50.0], index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15Min"))),
+        (("fshift", "15Min"), pd.Series(data=[np.nan, -37.5, -25.0, 0.0, 37.5, 50.0], index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15Min"))),
+        (("nshift", "15min"), pd.Series(data=[np.nan, -37.5, -25.0, 12.5, 37.5, 50.0], index=pd.date_range("2010-12-31 23:45:00", "2011-01-01 01:00:00", freq="15Min"))),
+        (("bshift", "30Min"), pd.Series(data=[-50.0, -37.5, 12.5, 50.0], index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30Min"))),
+        (("fshift", "30Min"), pd.Series(data=[np.nan, -37.5, 0.0, 50.0], index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30Min"))),
+        (("nshift", "30min"), pd.Series(data=[np.nan, -37.5, 12.5, 50.0], index=pd.date_range("2010-12-31 23:30:00", "2011-01-01 01:00:00", freq="30Min"))),
+    ])
+def test_harmSingleVarInterpolationShift(data, params, expected):
+    flagger = initFlagsLike(data)
+    field = 'data'
+    pre_data = data.copy()
+    pre_flagger = flagger.copy()
+    method, freq = params
+
+    data_harm, flagger_harm = shift(data, field, flagger, freq, method=method)
+    assert data_harm[field].equals(expected)
+
+    data_deharm, flagger_deharm = mapToOriginal(data_harm, field, flagger_harm, method="inverse_" + method)
+    assert data_deharm[field].equals(pre_data[field])
+    assert flagger_deharm[field].equals(pre_flagger[field])
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_wrapper(data, flagger):
-    # we are only testing, whether the wrappers do pass processing:
-    field = data.columns[0]
-    freq = "15min"
-    flagger = flagger.initFlags(data)
 
-    linear(data, field, flagger, freq, to_drop=None)
-    aggregate(data, field, flagger, freq, value_func=np.nansum, method="nagg", to_drop=None)
-    shift(data, field, flagger, freq, method="nshift", to_drop=None)
-    interpolate(data, field, flagger, freq, method="spline")
diff --git a/tests/funcs/test_modelling.py b/tests/funcs/test_modelling.py
index 6d99d5786e6a2d3e420aa793a7e0c2baf1dde2f8..248c122460a653a24829577b2f81b1bdbb01740b 100644
--- a/tests/funcs/test_modelling.py
+++ b/tests/funcs/test_modelling.py
@@ -6,23 +6,21 @@
 
 import dios
 
+from saqc import BAD, UNFLAGGED
+from saqc.flagger import initFlagsLike
 from saqc.funcs.tools import mask
 from saqc.funcs.residues import calculatePolynomialResidues, calculateRollingResidues
 
 from tests.fixtures import *
-from tests.common import TESTFLAGGER
 
-TF = TESTFLAGGER[:1]
 
-
-@pytest.mark.parametrize("flagger", TF)
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_2")])
-def test_modelling_polyFit_forRegular(dat, flagger):
+def test_modelling_polyFit_forRegular(dat):
     data, _ = dat(freq="10min", periods=30, initial_level=0, final_level=100, out_val=-100)
     # add some nice sine distortion
     data = data + 10 * np.sin(np.arange(0, len(data.indexes[0])))
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     result1, _ = calculatePolynomialResidues(data, "data", flagger, 11, 2, numba=False)
     result2, _ = calculatePolynomialResidues(data, "data", flagger, 11, 2, numba=True)
     assert (result1["data"] - result2["data"]).abs().max() < 10 ** -10
@@ -35,39 +33,49 @@ def test_modelling_polyFit_forRegular(dat, flagger):
     assert result5["data"].iloc[10:19].isna().all()
 
 
-@pytest.mark.parametrize("flagger", TF)
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_2")])
-def test_modelling_rollingMean_forRegular(dat, flagger):
+def test_modelling_rollingMean_forRegular(dat):
     data, _ = dat(freq="10min", periods=30, initial_level=0, final_level=100, out_val=-100)
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     calculateRollingResidues(data, "data", flagger, 5, func=np.mean, eval_flags=True, min_periods=0, center=True)
     calculateRollingResidues(data, "data", flagger, 5, func=np.mean, eval_flags=True, min_periods=0, center=False)
 
 
-@pytest.mark.parametrize("flagger", TF)
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_1")])
-def test_modelling_mask(dat, flagger):
+def test_modelling_mask(dat):
     data, _ = dat()
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
-    data_seasonal, flagger_seasonal = mask(data, "data", flagger, mode='periodic', period_start="20:00",
-                                           period_end="40:00", include_bounds=False)
-    flaggs = flagger_seasonal._flags["data"]
-    assert flaggs[np.logical_and(20 <= flaggs.index.minute, 40 >= flaggs.index.minute)].isna().all()
-    data_seasonal, flagger_seasonal = mask(data, "data", flagger, mode='periodic', period_start="15:00:00",
-                                           period_end="02:00:00")
-    flaggs = flagger_seasonal._flags["data"]
-    assert flaggs[np.logical_and(15 <= flaggs.index.hour, 2 >= flaggs.index.hour)].isna().all()
-    data_seasonal, flagger_seasonal = mask(data, "data", flagger, mode='periodic', period_start="03T00:00:00",
-                                           period_end="10T00:00:00")
-    flaggs = flagger_seasonal._flags["data"]
-    assert flaggs[np.logical_and(3 <= flaggs.index.hour, 10 >= flaggs.index.hour)].isna().all()
+    flagger = initFlagsLike(data)
+    field = "data"
+
+    # set flags everywhere to test unflagging
+    flagger[:, field] = BAD
+
+    common = dict(data=data, field=field, flagger=flagger, mode='periodic')
+    data_seasonal, flagger_seasonal = mask(**common, period_start="20:00", period_end="40:00", include_bounds=False)
+    flags = flagger_seasonal[field]
+    m = (20 <= flags.index.minute) & (flags.index.minute <= 40)
+    assert all(flagger_seasonal[field][m] == UNFLAGGED)
+    assert all(data_seasonal[field][m].isna())
+
+    data_seasonal, flagger_seasonal = mask(**common, period_start="15:00:00", period_end="02:00:00")
+    flags = flagger_seasonal[field]
+    m = (15 <= flags.index.hour) & (flags.index.hour <= 2)
+    assert all(flagger_seasonal[field][m] == UNFLAGGED)
+    assert all(data_seasonal[field][m].isna())
+
+    data_seasonal, flagger_seasonal = mask(**common, period_start="03T00:00:00", period_end="10T00:00:00")
+    flags = flagger_seasonal[field]
+    m = (3 <= flags.index.hour) & (flags.index.hour <= 10)
+    assert all(flagger_seasonal[field][m] == UNFLAGGED)
+    assert all(data_seasonal[field][m].isna())
 
     mask_ser = pd.Series(False, index=data["data"].index)
     mask_ser[::5] = True
     data["mask_ser"] = mask_ser
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     data_masked, flagger_masked = mask(data, "data", flagger, mode='mask_var', mask_var="mask_ser")
-    flaggs = flagger_masked._flags["data"]
-    assert flaggs[data_masked['mask_ser']].isna().all()
+    m = mask_ser
+    assert all(flagger_masked[field][m] == UNFLAGGED)
+    assert all(data_masked[field][m].isna())
diff --git a/tests/funcs/test_proc_functions.py b/tests/funcs/test_proc_functions.py
index d7cada07858fc9f9e1d8d11d70f6785313295e7e..d9d137359ead0d1b286a330d72523c4b90fa6bac 100644
--- a/tests/funcs/test_proc_functions.py
+++ b/tests/funcs/test_proc_functions.py
@@ -7,6 +7,7 @@
 import dios
 
 from saqc.constants import *
+from saqc.flagger import initFlagsLike
 from saqc.funcs.transformation import transform
 from saqc.funcs.drift import correctOffset
 from saqc.funcs.interpolation import interpolateByRolling, interpolateInvalid, interpolateIndex
@@ -14,15 +15,13 @@ from saqc.funcs.resampling import resample
 from saqc.lib.ts_operators import linearInterpolation, polynomialInterpolation
 
 from tests.fixtures import *
-from tests.common import TESTFLAGGER
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_rollingInterpolateMissing(course_5, flagger):
+def test_rollingInterpolateMissing(course_5):
     data, characteristics = course_5(periods=10, nan_slice=[5, 6])
     field = data.columns[0]
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     dataInt, *_ = interpolateByRolling(
         data, field, flagger, 3, func=np.median, center=True, min_periods=0, interpol_flag=UNFLAGGED
     )
@@ -35,12 +34,11 @@ def test_rollingInterpolateMissing(course_5, flagger):
     assert dataInt[field][characteristics["missing"]].isna().all()
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_interpolateMissing(course_5, flagger):
+def test_interpolateMissing(course_5):
     data, characteristics = course_5(periods=10, nan_slice=[5])
     field = data.columns[0]
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     dataLin, *_ = interpolateInvalid(data, field, flagger, method="linear")
     dataPoly, *_ = interpolateInvalid(data, field, flagger, method="polynomial")
     assert dataLin[field][characteristics["missing"]].notna().all()
@@ -54,12 +52,11 @@ def test_interpolateMissing(course_5, flagger):
     assert dataLin3[field][characteristics["missing"]].notna().all()
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_transform(course_5, flagger):
+def test_transform(course_5):
     data, characteristics = course_5(periods=10, nan_slice=[5, 6])
     field = data.columns[0]
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     data1, *_ = transform(data, field, flagger, func=linearInterpolation)
     assert data1[field][characteristics["missing"]].isna().all()
     data1, *_ = transform(data, field, flagger, func=lambda x: linearInterpolation(x, inter_limit=3))
@@ -70,35 +67,32 @@ def test_transform(course_5, flagger):
     assert data1[field][characteristics["missing"]].notna().all()
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_resample(course_5, flagger):
+def test_resample(course_5):
     data, characteristics = course_5(freq="1min", periods=30, nan_slice=[1, 11, 12, 22, 24, 26])
     field = data.columns[0]
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     data1, *_ = resample(data, field, flagger, "10min", np.mean, max_invalid_total_d=2, max_invalid_consec_d=1)
     assert ~np.isnan(data1[field].iloc[0])
     assert np.isnan(data1[field].iloc[1])
     assert np.isnan(data1[field].iloc[2])
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_interpolateGrid(course_5, course_3, flagger):
+def test_interpolateGrid(course_5, course_3):
     data, _ = course_5()
     data_grid, characteristics = course_3()
     data['grid'] = data_grid.to_df()
     # data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
+    flagger = initFlagsLike(data)
     dataInt, *_ = interpolateIndex(data, 'data', flagger, '1h', 'time', grid_field='grid', inter_limit=10)
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_offsetCorrecture(flagger):
+def test_offsetCorrecture():
     data = pd.Series(0, index=pd.date_range('2000', freq='1d', periods=100), name='dat')
     data.iloc[30:40] = -100
     data.iloc[70:80] = 100
     data = dios.DictOfSeries(data)
-    flagger = flagger.initFlags(data)
-    data, flagger = correctOffset(data, 'dat', flagger, 40, 20, '3d', 1)
+    flagger = initFlagsLike(data)
+    data, _ = correctOffset(data, 'dat', flagger, 40, 20, '3d', 1)
     assert (data == 0).all()[0]