diff --git a/README.md b/README.md
index 35c04b031c760a99ab2068823cf0d97a89129622..7e79a734077672dc28fb0691fac071d71e3ff684 100644
--- a/README.md
+++ b/README.md
@@ -60,10 +60,10 @@ The content of such a configuration could look like this:
 ```
 varname    ; test                                
 #----------;------------------------------------
-SM2        ; harm_shift2Grid(freq="15Min")       
+SM2        ; shiftToFreq(freq="15Min")       
 SM2        ; flagMissing(nodata=NAN)             
 'SM(1|2)+' ; flagRange(min=10, max=60)           
-SM2        ; spikes_flagMad(window="30d", z=3.5)
+SM2        ; flagMad(window="30d", z=3.5)
 ```
 
 As soon as the basic inputs, a dataset and the configuration file are
@@ -81,14 +81,16 @@ The following snippet implements the same configuration given above through
 the Python-API:
 
 ```python
+import numpy as np
+import saqc.funcs.outliers
 from saqc import SaQC, SimpleFlagger
 
-saqc = (SaQC(SimpleFlagger(), data)
-        .harm_shift2Grid("SM2", freq="15Min")
+saqc = (SaQC(SimpleFlagger(), data)
+        .shiftToFreq("SM2", freq="15Min")
         .flagMissing("SM2", nodata=np.nan)
         .flagRange("SM(1|2)+", regex=True, min=10, max=60)
-        .spikes_flagMad("SM2", window="30d", z=3.5))
-        
+        .flagMad("SM2", window="30d", z=3.5))
+
+
 data, flagger = saqc.getResult()
 ```
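+
+The `data` object used above can be any `pandas.DataFrame` (or `dios.DictOfSeries`) with a
+datetime index; a minimal sketch for obtaining it (the file name here is just a placeholder) could be:
+
+```python
+import pandas as pd
+
+# hypothetical input file, providing the variables SM1 and SM2 with a datetime index
+data = pd.read_csv("soil_moisture.csv", index_col=0, parse_dates=True)
+```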
 
diff --git "a/aggregate\n\n" "b/aggregate\n\n"
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md
index c034992821df5a9fa9f310748d801ccfeeb3c7f3..88c775cb86af6b781aa3f621c328b7048fb816b1 100644
--- a/docs/GettingStarted.md
+++ b/docs/GettingStarted.md
@@ -129,7 +129,7 @@ and paste the following lines into it:
 	
 	varname;test;plot
 	SM2;flagRange(min=10, max=60);False
-	SM2;spikes_flagMad(window="30d", z=3.5);True
+	SM2;flagMad(window="30d", z=3.5);True
 
 These lines illustrate how different quality control tests can be specified for
 different variables by following the pattern:
@@ -188,10 +188,10 @@ So, what do we see here?
 
 * The plot shows the data as well as the quality flags that were set by the
   tests for the variable `SM2`, as defined in the config-file
-* Following our definition in the config-file, first the `range`-test that flags
+* Following our definition in the config-file, first the `flagRange`-test that flags
   all values outside the range [10,60] was executed and after that,
-  the `spikes_simpleMad`-test to identify spikes in the data
-* In the config, we set the plotting option to `True` for `spikes_simpleMad`,
+  the `flagMad`-test to identify spikes in the data
+* In the config, we set the plotting option to `True` for `flagMad`,
   only. Thus, the plot aggregates all preceeding tests (here: `range`) to black
   points and highlights the flags of the selected test as red points.
 
@@ -226,7 +226,7 @@ range-test:
 	
 	varname;test;plot
 	SM2;flagRange(min=-20, max=60);False
-	SM2;spikes_flagMad(window="30d", z=3.5);True
+	SM2;flagMad(window="30d", z=3.5);True
 Rerunning SaQC as above produces the following plot:
 
 ![Changing the config](../ressources/images/example_plot_2.png "Changing the config")
@@ -245,8 +245,8 @@ something like this:
 	varname;test;plot
 	SM1;flagRange(min=10, max=60);False
 	SM2;flagRange(min=10, max=60);False
-	SM1;spikes_flagMad(window="15d", z=3.5);True
-	SM2;spikes_flagMad(window="30d", z=3.5);True
+	SM1;flagMad(window="15d", z=3.5);True
+	SM2;flagMad(window="30d", z=3.5);True
 
 which gives you separate plots for each line where the plotting option is set to
 `True` as well as one summary "data plot" that depicts the joint flags from all
@@ -265,8 +265,8 @@ series. Also, you can write your own tests using a python-based
 [extension language](docs/GenericFunctions.md). This would look like this:
 
 	varname;test;plot
-	SM2;harm_shift2Grid(freq="15Min");False
-	SM2;flagGeneric(func=(SM2 < 30));True
+	SM2;shiftToFreq(freq="15Min");False
+	SM2;generic(func=(SM2 < 30));True
 
 The above executes an internal framework that harmonizes the timestamps of SM2
 to a 15min-grid (see data below). Further information about this routine can be
diff --git a/ressources/data/config.csv b/ressources/data/config.csv
index e1e00c54e99c0e31b5a95c1ac759ac9063fe5fae..c8f7f803de8e32aafba102654ee9112aaa3e659d 100644
--- a/ressources/data/config.csv
+++ b/ressources/data/config.csv
@@ -1,6 +1,6 @@
 varname    ; test                                ; plot
 #----------;-------------------------------------;------
-SM2        ; harm_shift2Grid(freq="15Min")       ; False
+SM2        ; shift(freq="15Min")                 ; False
 SM2        ; flagMissing(nodata=NAN)             ; False
 'SM(1|2)+' ; flagRange(min=10, max=60)           ; False
-SM2        ; spikes_flagMad(window="30d", z=3.5) ; True
+SM2        ; flagMAD(window="30d", z=3.5)        ; True
diff --git a/ressources/data/config_ci.csv b/ressources/data/config_ci.csv
index f631338ade105552e37c61d16ea72aab50dab106..74ddfbae40943f3bbcc75e58fbab4379e39815c4 100644
--- a/ressources/data/config_ci.csv
+++ b/ressources/data/config_ci.csv
@@ -1,7 +1,7 @@
 varname;test;plot
-SM2;harm_shift2Grid(freq="15Min");False
+SM2;shift(freq="15Min");False
 '.*';flagRange(min=10, max=60);False
 SM2;flagMissing(nodata=NAN);False
 SM2;flagRange(min=10, max=60);False
-SM2;spikes_flagMad(window="30d", z=3.5);False
-Dummy;flagGeneric(func=(isflagged(SM1) | isflagged(SM2)))
+SM2;flagMAD(window="30d", z=3.5);False
+Dummy;flag(func=(isflagged(SM1) | isflagged(SM2)))
diff --git a/ressources/machine_learning/data/soil_moisture_mwe.feather b/ressources/machine_learning/data/soil_moisture_mwe.feather
deleted file mode 100644
index b17f4e7371c2fdba0e7555b8e2c030634ed899f6..0000000000000000000000000000000000000000
--- a/ressources/machine_learning/data/soil_moisture_mwe.feather
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d0abca6000efcb966bdb46800cf99acc6e925d3a8f8de8f13e3d53c8215e63eb
-size 13563392
diff --git a/ressources/machine_learning/models/testmodel_0.2.pkl b/ressources/machine_learning/models/testmodel_0.2.pkl
deleted file mode 100644
index 00b3bc956da02123ecdd6df7f13dc252b91b79b0..0000000000000000000000000000000000000000
--- a/ressources/machine_learning/models/testmodel_0.2.pkl
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9f661a9cb4a6092f31ca018e7e283dc1170a140d64c921e544ec902159289a2b
-size 29718964
diff --git a/ressources/machine_learning/train_machine_learning.py b/ressources/machine_learning/train_machine_learning.py
deleted file mode 100644
index 13f80d21e0cc7fd7096e1704913b603816de2f00..0000000000000000000000000000000000000000
--- a/ressources/machine_learning/train_machine_learning.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import pandas as pd
-import numpy as np
-import random  # for random sampling of training/test
-from sklearn.ensemble import RandomForestClassifier
-from sklearn.metrics import recall_score, precision_score, classification_report
-import joblib  # for saving of model objects
-import os
-import time
-import datetime
-
-###--------------------
-### EXAMPLE PARAMETRIZATION:
-###--------------------
-
-# pd.options.mode.chained_assignment = None  # default='warn'
-# data = pd.read_feather("data/sm/02_data.feather")
-# data = data.reset_index()#data.index has to be reset as I use row nos only for indexing
-#
-# ### Reflagging
-# index_manual = data.Flag == "Manual"
-# data["FlagMan"] = index_manual.astype("int")# True/False as 0 or 1
-# index_auto = data.Flag.str.contains("Auto")
-# data["flag_bin"] = index_auto.astype("int")# True/False as 0 or 1
-#
-# field = "Target"
-# references = ["Var1","Var2"]
-# window_values = 20
-# window_flags = 20
-# modelname="name"
-# path = "models/"
-# sensor_field="SensorID"
-# group_field = "GroupVar"
-
-
-def trainML(
-    data,
-    field,
-    references,
-    sensor_field: str,
-    group_field: str,
-    window_values: int,
-    window_flags: int,
-    path: str,
-    modelname: str,
-    testratio: float,
-    **kwargs
-):
-
-    """This Function trains machine-learning models to reproduce manual flags that were set for a specific variable. Inputs to the training script are the timeseries of the
-    respective target variable at multiple sensors, the automatic flags that were assigned by SaQC as well as multiple reference series.
-    Internally, context information for each point is gathered in form of moving windows to improve the flagging algorithm. By default, the
-    information of the previous and preceeding timestep of each data point t are gathered: For the target and reference series, this refers to the gradient of t+/-1 with respect to t. For
-    the automatic flgs, this denotes whether an automatic flag was set at t+/-1.
-    Next, according to user inputs of window_flags and window_values, the number of flags
-    and the mean gradient within the specified moving windows is calculated, both for t+windowsize and t-windowsize. The moving window calculations are executed for each sensor, seperately,
-    and multiple models are trained, one for each level a grouping variable that can be defined by the user. The model objects that can be used for future flagging are stored
-    along with log-files that give information on the training process, e.g. models`accuracy on training and test. The algorithm used is randomForest at default parameters.
-    For usage of the model inside the SaQC-pipeline, see "machinelearning" in the function reference.
-
-
-    :param data:                        The pandas dataframe holding the data of the target variable at multiple sensors in long format, i.e. concatenated row-wise.
-                                        Along with this, there should be columns with the respective series of reference variables and a column of quality flags. The latter
-                                        should contain both automatic and manual flags.
-    :param field:                       Fieldname of the field in data that is to be flagged
-    :param references:                  A list of strings, denoting the fieldnames of the data series that should be used as reference variables
-    :parameters sensor_field:           A string denoting the fieldname of unique sensor-IDs
-    :parameter group_field:             A string denoting the fieldname of the grouping variable. For each level of this variable, a seperate model will be trained.
-    :param window_values:               An integer, denoting the window size that is used to derive the gradients of both the field- and reference-series inside the moving window
-    :param window_flags:                An integer, denoting the window size that is used to count the surrounding automatic flags that have been set before
-    :param path:                        A string denoting the path to the folder where the model objects along with log-files should be saved to
-    :param modelname:                   A string denoting the name of the model. The name is used for naming of the model objects as well as log-files. Naming will
-                                        be: 'modelname'_'value of group_field'.pkl
-    :param testratio                    A float denoting the ratio of the test- vs. training-set to be drawn from the data, e.g. 0.3
-    """
-
-    def _refCalc(reference, window_values):
-        # Helper function for calculation of moving window values
-        outdata = dios.DictOfSeries()
-        name = reference.name
-        # derive gradients from reference series
-        outdata[name + "_Dt_1"] = reference - reference.shift(1)  # gradient t vs. t-1
-        outdata[name + "_Dt1"] = reference - reference.shift(-1)  # gradient t vs. t+1
-        # moving mean of gradients var1 and var2 before/after
-        outdata[name + "_Dt_" + str(window_values)] = (
-            outdata[name + "_Dt_1"].rolling(window_values, center=False).mean()
-        )  # mean gradient t to t-window
-        outdata[name + "_Dt" + str(window_values)] = (
-            outdata[name + "_Dt_1"].iloc[::-1].rolling(window_values, center=False).mean()[::-1]
-        )  # mean gradient t to t+window
-        return outdata
-
-    randomseed = 36
-    ### Prepare data, i.e. compute moving windows
-    print("Computing time-lags")
-    # save original row index for merging into original dataframe, as NAs will be introduced
-    data = data.rename(columns={"index": "RowIndex"})
-    # define Test/Training
-    data = data.assign(TeTr="Tr")
-    # create empty df for training data
-    traindata = dios.DictOfSeries()
-    # calculate windows
-    for sensor_id in data[sensor_field].unique():
-        print(sensor_id)
-        sensordf = data.loc[data[sensor_field] == sensor_id]
-        index_test = sensordf.RowIndex.sample(
-            n=int(testratio * len(sensordf)), random_state=randomseed
-        )  # draw random sample
-        sensordf.TeTr[index_test] = "Te"  # assign test samples
-
-        sensordf["flag_bin_t_1"] = sensordf["flag_bin"] - sensordf["flag_bin"].shift(1)  # Flag at t-1
-        sensordf["flag_bin_t1"] = sensordf["flag_bin"] - sensordf["flag_bin"].shift(-1)  # Flag at t+1
-        sensordf["flag_bin_t_" + str(window_flags)] = (
-            sensordf["flag_bin"].rolling(window_flags + 1, center=False).sum()
-        )  # n Flags in interval t to t-window_flags
-        sensordf["flag_bin_t" + str(window_flags)] = (
-            sensordf["flag_bin"].iloc[::-1].rolling(window_flags + 1, center=False).sum()[::-1]
-        )  # n Flags in interval t to t+window_flags
-        # forward-orientation not possible, so right-orientation on reversed data an reverse result
-
-        # Add context information for field+references
-        for i in [field] + references:
-            sensordf = pd.concat([sensordf, _refCalc(reference=sensordf[i], window_values=window_values),], axis=1,)
-
-        # write back into new dataframe
-        traindata = traindata.append(sensordf)
-
-    # remove rows that contain NAs (new ones occured during predictor calculation)
-    traindata = traindata.dropna(axis=0, how="any")
-
-    ################
-    ### FIT Model
-    ################
-    n_cores = os.getenv("NSLOTS", 1)
-    print("MODEL TRAINING ON " + str(n_cores) + " CORES")
-
-    # make column in "traindata" to store predictions
-    traindata = traindata.assign(PredMan=0)
-    outinfo_df = []
-    resultfile = open(os.path.join(os.getcwd(), path, modelname + "_resultfile.txt"), "w")
-    starttime = time.time()
-    # For each category of groupvar, fit a separate model
-
-    for groupvar in traindata[group_field].unique():
-        resultfile.write("GROUPVAR: " + str(groupvar) + "\n")
-        print("GROUPVAR: " + str(groupvar))
-        print("TRAINING MODEL...")
-        # drop unneeded columns
-        groupdata = traindata[traindata[group_field] == groupvar].drop(
-            columns=["Time", "RowIndex", "Flag", "flag_bin", "PredMan", group_field, sensor_field,]
-        )
-        forest = RandomForestClassifier(n_estimators=500, random_state=randomseed, oob_score=True, n_jobs=-1)
-        X_tr = groupdata.drop(columns=["TeTr", "FlagMan"])[groupdata.TeTr == "Tr"]
-        Y_tr = groupdata.FlagMan[groupdata.TeTr == "Tr"]
-        forest.fit(y=Y_tr, X=X_tr)
-        # save model object
-        joblib.dump(forest, os.path.join(path, modelname + "_" + str(groupvar) + ".pkl"))
-        # retrieve training predictions
-        print("PREDICTING...")
-        preds_tr = (
-            forest.oob_decision_function_[:, 1] > forest.oob_decision_function_[:, 0]
-        )  # training, derive from OOB class votes
-        preds_tr = preds_tr.astype("int")
-
-        # get test predictions
-        X_te = groupdata.drop(columns=["TeTr", "FlagMan"])[groupdata.TeTr == "Te"]
-        Y_te = groupdata.FlagMan[groupdata.TeTr == "Te"]
-        preds_te = forest.predict(X_te)  # test
-
-        # Collect info on model run (n datapoints, share of flags, Test/Training accuracy...)
-        outinfo = [
-            groupvar,
-            groupdata.shape[0],
-            len(preds_tr),
-            len(preds_te),
-            sum(groupdata.FlagMan[groupdata.TeTr == "Tr"]) / len(preds_tr) * 100,
-            sum(groupdata.FlagMan[groupdata.TeTr == "Te"]) / len(preds_te) * 100,
-            recall_score(Y_tr, preds_tr),
-            recall_score(Y_te, preds_te),
-            precision_score(Y_tr, preds_tr),
-            precision_score(Y_te, preds_te),
-        ]
-        resultfile.write("TRAINING RECALL:" + "\n")
-        resultfile.write(
-            str(recall_score(groupdata.FlagMan[groupdata.TeTr == "Tr"], preds_tr)) + "\n"
-        )  # Training error (Out-of-Bag)
-        resultfile.write("TEST RECALL:" + "\n")
-        resultfile.write(
-            str(recall_score(groupdata.FlagMan[groupdata.TeTr == "Te"], preds_te)) + "\n" + "\n"
-        )  # Test error
-        outinfo_df.append(outinfo)
-        # save back to dataframe
-        traindata.PredMan[(traindata.TeTr == "Tr") & (traindata[group_field] == groupvar)] = preds_tr
-        traindata.PredMan[(traindata.TeTr == "Te") & (traindata[group_field] == groupvar)] = preds_te
-
-    endtime = time.time()
-    print("TIME ELAPSED: " + str(datetime.timedelta(seconds=endtime - starttime)) + " hours")
-    outinfo_df = dios.DictOfSeries.from_records(
-        outinfo_df,
-        columns=[
-            group_field,
-            "n",
-            "n_Tr",
-            "n_Te",
-            "Percent_Flags_Tr",
-            "Percent_Flags_Te",
-            "Recall_Tr",
-            "Recall_Te",
-            "Precision_Tr",
-            "Precision_Te",
-        ],
-    )
-    outinfo_df = outinfo_df.assign(Modelname=modelname)
-    resultfile.write(str(outinfo_df))
-    outinfo_df.to_csv(os.path.join(path, modelname + "_outinfo.csv"), index=False)
-    resultfile.close()
-
-    # write results back into original "data" dataframe
-    data = data.assign(PredMan=np.nan)
-    data.PredMan[traindata.RowIndex] = traindata.PredMan  # based on RowIndex as NAs were created in traindata
-    data.to_feather(os.path.join(path, modelname + "_data_preds.feather"))
-
-
-trainML(
-    data, field, references, sensor_field, group_field, window_values, window_flags, path, modelname, 0.3,
-)
diff --git a/saqc/core/core.py b/saqc/core/core.py
index 13b7a7087c29012ef486d3a62c1897006570727f..5e0c0cf80308eefcf20529a08e281a038d0c546e 100644
--- a/saqc/core/core.py
+++ b/saqc/core/core.py
@@ -21,7 +21,7 @@ import inspect
 from saqc.lib.plotting import plotHook, plotAllHook
 from saqc.flagger import BaseFlagger, CategoricalFlagger, SimpleFlagger, DmpFlagger
 from saqc.core.register import FUNC_MAP
-from saqc.funcs.proc_functions import proc_copy
+from saqc.funcs.tools import copy
 
 
 logger = logging.getLogger("SaQC")
@@ -298,7 +298,7 @@ def _saqcCallFunc(func_dump, data, flagger):
     masking = func_dump.ctrl.masking
 
     if (target != field) and (func_dump.regex is False):
-        data, flagger = proc_copy(data, field, flagger, target)
+        data, flagger = copy(data, field, flagger, target)
         field = target
 
     if masking == 'all':
diff --git a/saqc/funcs/__init__.py b/saqc/funcs/__init__.py
index e5c5153cbfef7a22c55982abbf7ae8cb369ffe74..407cf5adc1c1185efede09c174178c98d026f2e0 100644
--- a/saqc/funcs/__init__.py
+++ b/saqc/funcs/__init__.py
@@ -3,11 +3,19 @@
 
 # imports needed to make the functions register themself
 from saqc.core.register import register
-from saqc.funcs.functions import *
-from saqc.funcs.breaks_detection import *
-from saqc.funcs.constants_detection import *
-from saqc.funcs.soil_moisture_tests import *
-from saqc.funcs.spikes_detection import *
-from saqc.funcs.harm_functions import *
-from saqc.funcs.modelling import *
-from saqc.funcs.proc_functions import *
+from saqc.funcs.breaks import *
+from saqc.funcs.changepoints import *
+from saqc.funcs.constants import *
+from saqc.funcs.curvefit import *
+from saqc.funcs.drift import *
+from saqc.funcs.generic import *
+from saqc.funcs.interpolation import *
+from saqc.funcs.outliers import *
+from saqc.funcs.pattern import *
+from saqc.funcs.resampling import *
+from saqc.funcs.residues import *
+from saqc.funcs.rolling import *
+from saqc.funcs.scores import *
+from saqc.funcs.tools import *
+from saqc.funcs.transformation import *
+from saqc.funcs.flagtools import *
diff --git a/saqc/funcs/breaks.py b/saqc/funcs/breaks.py
new file mode 100644
index 0000000000000000000000000000000000000000..8111d47612ee624478b0f4ff98c808931e906d93
--- /dev/null
+++ b/saqc/funcs/breaks.py
@@ -0,0 +1,172 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+import dios
+import numpy as np
+import pandas as pd
+
+
+from saqc.lib.tools import groupConsecutives
+from saqc.funcs.changepoints import assignChangePointCluster
+from saqc.core.register import register
+
+
+@register(masking='field')
+def flagMissing(data, field, flagger, nodata=np.nan, **kwargs):
+    """
+    The function flags all values indicating missing data.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    nodata : any, default np.nan
+        A value that defines missing data.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+
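+    # values equal to nodata are flagged; NaN cannot be matched with "==", so that case needs isna()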
+    datacol = data[field]
+    if np.isnan(nodata):
+        mask = datacol.isna()
+    else:
+        mask = datacol == nodata
+
+    flagger = flagger.setFlags(field, loc=mask, **kwargs)
+    return data, flagger
+
+
+@register(masking='field')
+def flagIsolated(data, field, flagger, gap_window, group_window, **kwargs):
+    """
+    The function flags arbitrarily large groups of values, if they are surrounded by sufficiently
+    large data gaps. A gap is defined as a group of missing and/or flagged values.
+
+    A series of values x_k,x_(k+1),...,x_(k+n), with associated timestamps t_k,t_(k+1),...,t_(k+n),
+    is considered to be isolated, if:
+
+    1. t_(k+n) - t_k < `group_window`
+    2. None of the x_j with 0 < t_k - t_j < `gap_window`, is both valid and unflagged (preceding gap).
+    3. None of the x_j with 0 < t_j - t_(k+n) < `gap_window`, is both valid and unflagged (succeeding gap).
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    gap_window : str
+        The minimum size (as an offset string) of the gap before and after a group of valid values for that
+        group to be considered isolated. See conditions (2) and (3).
+    group_window : str
+        The maximum temporal extension (as an offset string) a group may have to actually be flagged as an
+        isolated group, given that it is isolated by gaps of size `gap_window`. See condition (1).
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+
+    gap_window = pd.tseries.frequencies.to_offset(gap_window)
+    group_window = pd.tseries.frequencies.to_offset(group_window)
+
+    col = data[field].mask(flagger.isFlagged(field))
+    mask = col.isnull()
+
+    flags = pd.Series(data=0, index=col.index, dtype=bool)
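+    # iterate over consecutive runs of valid/missing values; a run of valid values gets flagged,
+    # if it is short enough and the gap windows on both sides contain no valid, unflagged value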
+    for srs in groupConsecutives(mask):
+        if np.all(~srs):
+            start = srs.index[0]
+            stop = srs.index[-1]
+            if stop - start <= group_window:
+                left = mask[start - gap_window: start].iloc[:-1]
+                if left.all():
+                    right = mask[stop: stop + gap_window].iloc[1:]
+                    if right.all():
+                        flags[start:stop] = True
+
+    flagger = flagger.setFlags(field, flags, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='field')
+def flagJumps(data, field, flagger, thresh, winsz, min_periods=1, **kwargs):
+    """
+    Flag jumps and drops in the data, i.e. datapoints, where the mean level of the data significantly changes.
+
+    The detection is based on a sliding twin window search: for every timestamp, the mean of a backwards facing
+    and the mean of a forward facing window, both of temporal extension `winsz`, are compared. Timestamps where
+    the absolute difference of the two means exceeds `thresh` indicate a change point and get flagged.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    thresh : float
+        The threshold the difference of the window means has to exceed, for a jump to be flagged.
+    winsz : str
+        The temporal extension of the backwards and the forward facing window (an offset string).
+    min_periods : int, default 1
+        Minimum number of periods that have to be present in a window, for the mean comparison to be performed.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+
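+    # thin wrapper around assignChangePointCluster: the test statistic is the absolute difference of the
+    # window means, compared against the constant threshold `thresh`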
+    data, flagger = assignChangePointCluster(data, field, flagger,
+                                             stat_func=lambda x, y: np.abs(np.mean(x) - np.mean(y)),
+                                             thresh_func=lambda x, y: thresh,
+                                             bwd_window=winsz,
+                                             min_periods_bwd=min_periods,
+                                             flag_changepoints=True,
+                                             _model_by_resids=False,
+                                             _assign_cluster=False)
+
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/breaks_detection.py b/saqc/funcs/breaks_detection.py
deleted file mode 100644
index d6da3d33d97f98bb2377244622c709f6487a5f8a..0000000000000000000000000000000000000000
--- a/saqc/funcs/breaks_detection.py
+++ /dev/null
@@ -1,251 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import pandas as pd
-import dios
-
-from scipy.signal import savgol_filter
-
-from saqc.core.register import register
-from saqc.lib.tools import retrieveTrustworthyOriginal, detectDeviants
-
-
-@register(masking='all')
-def breaks_flagRegimeAnomaly(data, field, flagger, cluster_field, norm_spread, linkage_method='single',
-                             metric=lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
-                             norm_frac=0.5, set_cluster=True, set_flags=True, **kwargs):
-    """
-    A function to flag values belonging to an anomalous regime regarding modelling regimes of field.
-
-    "Normality" is determined in terms of a maximum spreading distance, regimes must not exceed in respect
-    to a certain metric and linkage method.
-
-    In addition, only a range of regimes is considered "normal", if it models more then `norm_frac` percentage of
-    the valid samples in "field".
-
-    Note, that you must detect the regime changepoints prior to calling this function.
-
-    Note, that it is possible to perform hypothesis tests for regime equality by passing the metric
-    a function for p-value calculation and selecting linkage method "complete".
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    cluster_field : str
-        The name of the column in data, holding the cluster labels for the samples in field. (has to be indexed
-        equal to field)
-    norm_spread : float
-        A threshold denoting the valuelevel, up to wich clusters a agglomerated.
-    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
-        The linkage method used for hierarchical (agglomerative) clustering of the variables.
-    metric : Callable[[numpy.array, numpy.array], float], default lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y))
-        A metric function for calculating the dissimilarity between 2 regimes. Defaults to just the difference in mean.
-    norm_frac : float
-        Has to be in [0,1]. Determines the minimum percentage of samples,
-        the "normal" group has to comprise to be the normal group actually.
-    set_cluster : bool, default True
-        If True, all data, considered "anormal", gets assigned a negative clusterlabel.
-    set_flags : bool, default True
-        Wheather or not to flag abnormal values (do not flag them, if you want to correct them
-        afterwards, becasue flagged values usually are not visible in further tests.).
-
-    kwargs
-
-    Returns
-    -------
-
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    """
-
-    clusterser = data[cluster_field]
-    cluster = np.unique(clusterser)
-    cluster_dios = dios.DictOfSeries({i: data[field][clusterser == i] for i in cluster})
-    plateaus = detectDeviants(cluster_dios, metric, norm_spread, norm_frac, linkage_method, 'samples')
-
-    if set_flags:
-        for p in plateaus:
-            flagger = flagger.setFlags(field, loc=cluster_dios.iloc[:, p].index, **kwargs)
-
-    if set_cluster:
-        for p in plateaus:
-            if cluster[p] > 0:
-                clusterser[clusterser == cluster[p]] = -cluster[p]
-
-    data[cluster_field] = clusterser
-    return data, flagger
-
-
-@register(masking='field')
-def breaks_flagSpektrumBased(
-    data,
-    field,
-    flagger,
-    thresh_rel=0.1,
-    thresh_abs=0.01,
-    first_der_factor=10,
-    first_der_window="12h",
-    scnd_der_ratio_range=0.05,
-    scnd_der_ratio_thresh=10,
-    smooth=True,
-    smooth_window=None,
-    smooth_poly_deg=2,
-    **kwargs
-):
-
-    """
-    The Function is a generalization of the Spectrum based break flagging mechanism as presented in:
-
-    The function flags breaks (jumps/drops) in input measurement series by evaluating its derivatives.
-    A measurement y_t is flagged a, break, if:
-
-    (1) y_t is changing relatively to its preceeding value by at least (100*`rel_change_rate_min`) percent
-    (2) y_(t-1) is difffering from its preceeding value, by a margin of at least `thresh_abs`
-    (3) Absolute first derivative |(y_t)'| has to be at least `first_der_factor` times as big as the arithmetic middle
-        over all the first derivative values within a 2 times `first_der_window_size` hours window, centered at t.
-    (4) The ratio of the second derivatives at t and t+1 has to be "aproximately" 1.
-        ([1-`scnd_der_ration_margin_1`, 1+`scnd_ratio_margin_1`])
-    (5) The ratio of the second derivatives at t+1 and t+2 has to be larger than `scnd_der_ratio_margin_2`
-
-    NOTE 1: As no reliable statement about the plausibility of the meassurements before and after the jump is possible,
-    only the jump itself is flagged. For flagging constant values following upon a jump, use a flagConstants test.
-
-    NOTE 2: All derivatives in the reference publication are obtained by applying a Savitzky-Golay filter to the data
-    before differentiating.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    thresh_rel : float, default 0.1
-        Float in [0,1]. See (1) of function description above to learn more
-    thresh_abs : float, default 0.01
-        Float > 0. See (2) of function descritpion above to learn more.
-    first_der_factor : float, default 10
-        Float > 0. See (3) of function descritpion above to learn more.
-    first_der_window_range : str, default '12h'
-        Offset string. See (3) of function description to learn more.
-    scnd_der_ratio_margin_1 : float, default 0.05
-        Float in [0,1]. See (4) of function descritpion above to learn more.
-    scnd_der_ratio_margin_2 : float, default 10
-        Float in [0,1]. See (5) of function descritpion above to learn more.
-    smooth : bool, default True
-        Method for obtaining dataseries' derivatives.
-        * False: Just take series step differences (default)
-        * True: Smooth data with a Savitzky Golay Filter before differentiating.
-    smooth_window : {None, str}, default 2
-        Effective only if `smooth` = True
-        Offset string. Size of the filter window, used to calculate the derivatives.
-    smooth_poly_deg : int, default 2
-        Effective only, if `smooth` = True
-        Polynomial order, used for smoothing with savitzk golay filter.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    References
-    ----------
-    The Function is a generalization of the Spectrum based break flagging mechanism as presented in:
-
-    [1] Dorigo,W. et al.: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-
-    Find a brief mathematical description of the function here:
-
-    [2] https://git.ufz.de/rdm-software/saqc/-/blob/testfuncDocs/docs/funcs
-        /FormalDescriptions.md#breaks_flagspektrumbased
-    """
-
-    # retrieve data series input at its original sampling rate
-    dataseries, data_rate = retrieveTrustworthyOriginal(data, field, flagger)
-
-    if smooth_window is None:
-        smooth_window = 3 * pd.Timedelta(data_rate)
-    else:
-        smooth_window = pd.Timedelta(smooth_window)
-
-    # relative - change - break criteria testing:
-    abs_change = np.abs(dataseries.shift(+1) - dataseries)
-    breaks = (abs_change > thresh_abs) & (abs_change / dataseries > thresh_rel)
-    breaks = breaks[breaks]
-
-    # First derivative criterion
-    smoothing_periods = int(np.ceil((smooth_window.seconds / data_rate.n)))
-    if smoothing_periods % 2 == 0:
-        smoothing_periods += 1
-
-    for brake in breaks.index:
-        # slice out slice-to-be-filtered (with some safety extension of 12 times the data rate)
-        slice_start = brake - pd.Timedelta(first_der_window) - smoothing_periods * pd.Timedelta(data_rate)
-        slice_end = brake + pd.Timedelta(first_der_window) + smoothing_periods * pd.Timedelta(data_rate)
-        data_slice = dataseries[slice_start:slice_end]
-
-        # obtain first derivative:
-        if smooth is True:
-            first_deri_series = pd.Series(
-                data=savgol_filter(data_slice, window_length=smoothing_periods, polyorder=smooth_poly_deg, deriv=1,),
-                index=data_slice.index,
-            )
-        else:
-            first_deri_series = data_slice.diff()
-
-        # condition constructing and testing:
-        test_slice = first_deri_series[brake - pd.Timedelta(first_der_window) : brake + pd.Timedelta(first_der_window)]
-
-        test_sum = abs((test_slice.sum() * first_der_factor) / test_slice.size)
-
-        if abs(first_deri_series[brake]) > test_sum:
-            # second derivative criterion:
-            slice_start = brake - 12 * pd.Timedelta(data_rate)
-            slice_end = brake + 12 * pd.Timedelta(data_rate)
-            data_slice = data_slice[slice_start:slice_end]
-
-            # obtain second derivative:
-            if smooth is True:
-                second_deri_series = pd.Series(
-                    data=savgol_filter(
-                        data_slice, window_length=smoothing_periods, polyorder=smooth_poly_deg, deriv=2,
-                    ),
-                    index=data_slice.index,
-                )
-            else:
-                second_deri_series = data_slice.diff().diff()
-
-            # criterion evaluation:
-            first_second = (
-                (1 - scnd_der_ratio_range)
-                < abs((second_deri_series.shift(+1)[brake] / second_deri_series[brake]))
-                < 1 + scnd_der_ratio_range
-            )
-
-            second_second = abs(second_deri_series[brake] / second_deri_series.shift(-1)[brake]) > scnd_der_ratio_thresh
-
-            if (~first_second) | (~second_second):
-                breaks[brake] = False
-
-        else:
-            breaks[brake] = False
-
-    flagger = flagger.setFlags(field, breaks, **kwargs)
-
-    return data, flagger
diff --git a/saqc/funcs/changepoints.py b/saqc/funcs/changepoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..a048f8142bca372f73c65853d3f3f609163f0ac6
--- /dev/null
+++ b/saqc/funcs/changepoints.py
@@ -0,0 +1,251 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+import numpy as np
+import numba
+
+from saqc.core.register import register
+from saqc.lib.tools import customRoller
+import logging
+
+logger = logging.getLogger("SaQC")
+
+
+@register(masking='field')
+def flagChangePoints(data, field, flagger, stat_func, thresh_func, bwd_window, min_periods_bwd,
+                     fwd_window=None, min_periods_fwd=None, closed='both', try_to_jit=True,
+                     reduce_window=None, reduce_func=lambda x, y: x.argmax(), flag_changepoints=False,
+                     _model_by_resids=False, _assign_cluster=True, **kwargs):
+    """
+    Flag datapoints, where the parametrization of the process generating the data significantly changes.
+
+    The change points detection is based on a sliding window search.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    stat_func : Callable[numpy.array, numpy.array]
+        A function that assigns a value to every twin window. Left window content will be passed to the first
+        argument, right window content to the second.
+    thresh_func : Callable[numpy.array, numpy.array]
+        A function that determines the value level, exceeding which qualifies a timestamp's stat_func value as
+        denoting a changepoint.
+    bwd_window : str
+        The left (backwards facing) windows temporal extension (freq-string).
+    min_periods_bwd : {str, int}
+        Minimum number of periods that have to be present in a backwards facing window, for a changepoint test to be
+        performed.
+    fwd_window : {None, str}, default None
+        The right (forward facing) windows temporal extension (freq-string).
+    min_periods_fwd : {None, str, int}, default None
+        Minimum number of periods that have to be present in a forward facing window, for a changepoint test to be
+        performed.
+    closed : {'right', 'left', 'both', 'neither'}, default 'both'
+        Determines the closure of the sliding windows.
+    reduce_window : {None, False, str}, default None
+        The sliding window search is not an exact changepoint search method: usually a whole "region" of change
+        around a changepoint is detected, rather than a single point. If `reduce_window` is not False, for every
+        window of size `reduce_window` only the value at index `reduce_func(x, y)` is kept and the others are
+        dropped. If `reduce_window` is None, the reduction window size equals the twin window size the
+        changepoints have been detected with.
+    reduce_func : Callable[numpy.array, numpy.array], default lambda x, y: x.argmax()
+        A function that must return an index value upon input of two arrays x and y.
+        First input parameter will hold the result from the stat_func evaluation for every
+        reduction window. Second input parameter holds the result from the thresh_func evaluation.
+        The default reduction function just selects the value that maximizes the stat_func.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+
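+    # delegate to assignChangePointCluster, but only set flags - no cluster labels are written to the data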
+    data, flagger = assignChangePointCluster(data, field, flagger, stat_func=stat_func, thresh_func=thresh_func,
+                                             bwd_window=bwd_window, min_periods_bwd=min_periods_bwd,
+                                             fwd_window=fwd_window, min_periods_fwd=min_periods_fwd, closed=closed,
+                                             try_to_jit=try_to_jit, reduce_window=reduce_window,
+                                             reduce_func=reduce_func, flag_changepoints=True, _model_by_resids=False,
+                                             _assign_cluster=False)
+    return data, flagger
+
+
+@register(masking='field')
+def assignChangePointCluster(data, field, flagger, stat_func, thresh_func, bwd_window, min_periods_bwd,
+                             fwd_window=None, min_periods_fwd=None, closed='both', try_to_jit=True,
+                             reduce_window=None, reduce_func=lambda x, y: x.argmax(), flag_changepoints=False,
+                             _model_by_resids=False, _assign_cluster=True, **kwargs):
+    """
+    Assigns labels to the data, aiming to reflect continuous regimes of the process the data is assumed to be
+    generated by.
+    The regime change points detection is based on a sliding window search.
+
+    Note, that the cluster labels will be stored to the `field` field of the input data, so that the data that is
+    clustered gets overridden.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-clustered.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    stat_func : Callable[numpy.array, numpy.array]
+        A function that assigns a value to every twin window. Left window content will be passed to the first
+        argument, right window content to the second.
+    thresh_func : Callable[numpy.array, numpy.array]
+        A function that determines the value level, exceeding which qualifies a timestamp's stat_func value as
+        denoting a changepoint.
+    bwd_window : str
+        The left (backwards facing) windows temporal extension (freq-string).
+    min_periods_bwd : {str, int}
+        Minimum number of periods that have to be present in a backwards facing window, for a changepoint test to be
+        performed.
+    fwd_window : {None, str}, default None
+        The right (forward facing) windows temporal extension (freq-string).
+    min_periods_fwd : {None, str, int}, default None
+        Minimum number of periods that have to be present in a forward facing window, for a changepoint test to be
+        performed.
+    closed : {'right', 'left', 'both', 'neither'}, default 'both'
+        Determines the closure of the sliding windows.
+    reduce_window : {None, False, str}, default None
+        The sliding window search is not an exact changepoint search method: usually a whole "region" of change
+        around a changepoint is detected, rather than a single point. If `reduce_window` is not False, for every
+        window of size `reduce_window` only the value at index `reduce_func(x, y)` is kept and the others are
+        dropped. If `reduce_window` is None, the reduction window size equals the twin window size the
+        changepoints have been detected with.
+    reduce_func : Callable[numpy.array, numpy.array], default lambda x, y: x.argmax()
+        A function that must return an index value upon input of two arrays x and y.
+        First input parameter will hold the result from the stat_func evaluation for every
+        reduction window. Second input parameter holds the result from the thresh_func evaluation.
+        The default reduction function just selects the value that maximizes the stat_func.
+    flag_changepoints : bool, default False
+        If True, the points where a change in the data modelling regime is detected get flagged bad.
+    _model_by_resids : bool, default False
+        If True, the data is replaced by the stat_funcs results instead of regime labels.
+    _assign_cluster : bool, default True
+        Is set to False, if called by a function that only wants to calculate flags.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input (cluster labels are written to `field`).
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+    data = data.copy()
+    data_ser = data[field].dropna()
+    center = False
+    var_len = data_ser.shape[0]
+    if fwd_window is None:
+        fwd_window = bwd_window
+    if min_periods_fwd is None:
+        min_periods_fwd = min_periods_bwd
+    if reduce_window is None:
+        reduce_window = f"{int(pd.Timedelta(bwd_window).total_seconds() + pd.Timedelta(fwd_window).total_seconds())}s"
+
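+    # compute the integer window bounds of the backward and the forward facing window for every sample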
+    roller = customRoller(data_ser, window=bwd_window)
+    bwd_start, bwd_end = roller.window.get_window_bounds(len(data_ser), min_periods=min_periods_bwd, closed=closed)
+
+    roller = customRoller(data_ser, window=fwd_window, forward=True)
+    fwd_start, fwd_end = roller.window.get_window_bounds(len(data_ser), min_periods=min_periods_fwd, closed=closed)
+
+    min_mask = ~((fwd_end - fwd_start <= min_periods_fwd) | (bwd_end - bwd_start <= min_periods_bwd))
+    fwd_end = fwd_end[min_mask]
+    split = bwd_end[min_mask]
+    bwd_start = bwd_start[min_mask]
+    masked_index = data_ser.index[min_mask]
+    check_len = len(fwd_end)
+    data_arr = data_ser.values
+
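+    # try to jit-compile the user supplied functions with numba; fall back to the plain python search
+    # if numba cannot type them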
+    if try_to_jit:
+        jit_sf = numba.jit(stat_func, nopython=True)
+        jit_tf = numba.jit(thresh_func, nopython=True)
+        try:
+            jit_sf(data_arr[bwd_start[0]:bwd_end[0]], data_arr[fwd_start[0]:fwd_end[0]])
+            jit_tf(data_arr[bwd_start[0]:bwd_end[0]], data_arr[fwd_start[0]:fwd_end[0]])
+            stat_func = jit_sf
+            thresh_func = jit_tf
+            try_to_jit = True
+        except numba.core.errors.TypingError:
+            try_to_jit = False
+            logger.warning('Could not jit passed statistic - omitting jitting!')
+
+    if try_to_jit:
+        stat_arr, thresh_arr = _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func,
+                                                    check_len)
+    else:
+        stat_arr, thresh_arr = _slidingWindowSearch(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func,
+                                                    check_len)
+    result_arr = stat_arr > thresh_arr
+
+    if _model_by_resids:
+        residues = pd.Series(np.nan, index=data[field].index)
+        residues[masked_index] = stat_arr
+        data[field] = residues
+        flagger = flagger.setFlags(field, flag=flagger.UNFLAGGED, force=True, **kwargs)
+        return data, flagger
+
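+    # the sliding window search typically marks a whole region of change around a changepoint;
+    # optionally reduce every such region to a single detection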
+    det_index = masked_index[result_arr]
+    detected = pd.Series(True, index=det_index)
+    if reduce_window is not False:
+        l = detected.shape[0]
+        roller = customRoller(detected, window=reduce_window)
+        start, end = roller.window.get_window_bounds(num_values=l, min_periods=1, closed='both', center=True)
+
+        detected = _reduceCPCluster(stat_arr[result_arr], thresh_arr[result_arr], start, end, reduce_func, l)
+        det_index = det_index[detected]
+
+    if _assign_cluster:
+        cluster = pd.Series(False, index=data[field].index)
+        cluster[det_index] = True
+        cluster = cluster.cumsum()
+        # (better to start cluster labels with number one)
+        cluster += 1
+        data[field] = cluster
+        flagger = flagger.setFlags(field, flag=flagger.UNFLAGGED, force=True, **kwargs)
+
+    if flag_changepoints:
+        flagger = flagger.setFlags(field, loc=det_index)
+    return data, flagger
+
+
+@numba.jit(parallel=True, nopython=True)
+def _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, num_val):
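+    # evaluate stat_func and thresh_func on the (backward window, forward window) pair of every split point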
+    stat_arr = np.zeros(num_val)
+    thresh_arr = np.zeros(num_val)
+    for win_i in numba.prange(0, num_val-1):
+        x = data_arr[bwd_start[win_i]:split[win_i]]
+        y = data_arr[split[win_i]:fwd_end[win_i]]
+        stat_arr[win_i] = stat_func(x, y)
+        thresh_arr[win_i] = thresh_func(x, y)
+    return stat_arr, thresh_arr
+
+
+def _slidingWindowSearch(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, num_val):
+    stat_arr = np.zeros(num_val)
+    thresh_arr = np.zeros(num_val)
+    for win_i in range(0, num_val-1):
+        x = data_arr[bwd_start[win_i]:split[win_i]]
+        y = data_arr[split[win_i]:fwd_end[win_i]]
+        stat_arr[win_i] = stat_func(x, y)
+        thresh_arr[win_i] = thresh_func(x, y)
+    return stat_arr, thresh_arr
+
+
+def _reduceCPCluster(stat_arr, thresh_arr, start, end, obj_func, num_val):
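+    # keep only one detection per reduction window (the position selected by obj_func) and drop the rest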
+    out_arr = np.zeros(shape=num_val, dtype=bool)
+    for win_i in numba.prange(0, num_val):
+        s, e = start[win_i], end[win_i]
+        x = stat_arr[s:e]
+        y = thresh_arr[s:e]
+        pos = s + obj_func(x, y) + 1
+        out_arr[s:e] = False
+        out_arr[pos] = True
+    return out_arr
\ No newline at end of file
diff --git a/saqc/funcs/constants_detection.py b/saqc/funcs/constants.py
similarity index 97%
rename from saqc/funcs/constants_detection.py
rename to saqc/funcs/constants.py
index d402056901b32bd78997f89fb2671305f7dfe2ae..a00ee55a2db6082b5da862b1ff894fba89258aec 100644
--- a/saqc/funcs/constants_detection.py
+++ b/saqc/funcs/constants.py
@@ -10,7 +10,7 @@ from saqc.lib.tools import retrieveTrustworthyOriginal, customRoller
 
 
 @register(masking='field')
-def constants_flagBasic(data, field, flagger, thresh, window, **kwargs):
+def flagConstants(data, field, flagger, thresh, window, **kwargs):
     """
     This functions flags plateaus/series of constant values of length `window` if
     their maximum total change is smaller than thresh.
@@ -65,7 +65,7 @@ def constants_flagBasic(data, field, flagger, thresh, window, **kwargs):
 
 
 @register(masking='field')
-def constants_flagVarianceBased(
+def flagByVariance(
     data, field, flagger, window="12h", thresh=0.0005, max_missing=None, max_consec_missing=None, **kwargs
 ):
 
diff --git a/saqc/funcs/curvefit.py b/saqc/funcs/curvefit.py
new file mode 100644
index 0000000000000000000000000000000000000000..03289a39d007b3103cc9e067370796f122ed01f3
--- /dev/null
+++ b/saqc/funcs/curvefit.py
@@ -0,0 +1,189 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+import numpy as np
+import pandas as pd
+
+
+from saqc.core.register import register
+from saqc.lib.ts_operators import polyRollerIrregular, polyRollerNumba, polyRoller, polyRollerNoMissingNumba, \
+    polyRollerNoMissing
+
+
+@register(masking='field')
+def fitPolynomial(data, field, flagger, winsz, polydeg, numba="auto", eval_flags=True, min_periods=0,
+                  _return_residues=False, **kwargs):
+    """
+    Function fits a polynomial model to the data and returns the fitted data curve.
+
+    The fit is calculated by fitting a polynomial of degree `polydeg` to a data slice
+    of size `winsz`, that has x at its center.
+
+    Note, that the resulting fit is stored to the `field` field of the input data, so that the original data, the
+    polynomial is fitted to, gets overridden.
+
+    Note, that, if data[field] is not aligned to an equidistant frequency grid, the window size passed
+    has to be an offset string. Also, the numba boost options don't apply for irregularly sampled
+    timeseries.
+
+    Note, that calculating the residues tends to be quite costly, because a function fitting is performed for every
+    sample. To improve performance, consider the following possibilities:
+
+    In case your data is sampled at an equidistant frequency grid:
+
+    (1) If you know your data to have no significant number of missing values, or if you do not want to
+        calculate residues for windows containing missing values anyway, performance can be increased by setting
+        min_periods=winsz.
+
+    (2) If your data consists of more than around 200000 samples, setting numba=True will boost the
+        calculations up to a factor of 5 (for sample sizes > 300000) - however, for lower sample sizes,
+        numba will slow down the calculations, also up to a factor of 5, for sample sizes < 50000.
+        By default (numba='auto'), numba is set to True, if the data sample size exceeds 200000.
+
+    in case your data is not sampled at an equidistant frequency grid:
+
+    (1) Harmonization/resampling of your data will have a noticeable impact on the polyfit's performance - since
+        the numba boost doesn't apply for irregularly sampled data in the current implementation.
+
+    Note, that in the current implementation, the initial and final winsz/2 values do not get fitted.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-modelled.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    winsz : {str, int}
+        The size of the window you want to use for fitting. If an integer is passed, the size
+        refers to the number of periods for every fitting window. If an offset string is passed,
+        the size refers to the total temporal extension. The window will be centered around the value-to-be-fitted.
+        For regularly sampled timeseries, the period number will be cast down to an odd number if it is
+        even.
+    polydeg : int
+        The degree of the polynomial used for fitting
+    numba : {True, False, "auto"}, default "auto"
+        Wheather or not to apply numbas just-in-time compilation onto the poly fit function. This will noticably
+        increase the speed of calculation, if the sample size is sufficiently high.
+        If "auto" is selected, numba compatible fit functions get applied for data consisiting of > 200000 samples.
+    eval_flags : bool, default True
+        Wheather or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
+        flag present in the interval, the data for its calculation was obtained from.
+    min_periods : {int, np.nan}, default 0
+        The minimum number of periods, that has to be available in every values fitting surrounding for the polynomial
+        fit to be performed. If there are not enough values, np.nan gets assigned. Default (0) results in fitting
+        regardless of the number of values present (results in overfitting for too sparse intervals). To automatically
+        set the minimum number of periods to the number of values in an offset defined window size, pass np.nan.
+    _return_residues : bool, default False
+        Internal parameter. Makes the method return the residues instead of the fit.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the flagger input.
+
+    """
+    if data[field].empty:
+        return data, flagger
+    data = data.copy()
+    to_fit = data[field]
+    flags = flagger.getFlags(field)
+    i = to_fit.index
+    # checking if index is regular here (index.freqstr property is not reliable)
+    if not pd.date_range(i[0], i[-1], len(i)).equals(i):
+        if isinstance(winsz, int):
+            raise NotImplementedError("Integer-based window size is not supported for non-harmonized sample series.")
+        # get interval centers
+        centers = np.floor((to_fit.rolling(pd.Timedelta(winsz) / 2, closed="both", min_periods=min_periods).count()))
+        centers = centers.drop(centers[centers.isna()].index)
+        centers = centers.astype(int)
+        residues = to_fit.rolling(pd.Timedelta(winsz), closed="both", min_periods=min_periods).apply(
+            polyRollerIrregular, args=(centers, polydeg)
+        )
+
+        def center_func(x, y=centers):
+            pos = x.index[int(len(x) - y[x.index[-1]])]
+            return y.index.get_loc(pos)
+
+        centers_iloc = centers.rolling(winsz, closed="both").apply(center_func, raw=False).astype(int)
+        temp = residues.copy()
+        for k in centers_iloc.iteritems():
+            residues.iloc[k[1]] = temp[k[0]]
+        residues[residues.index[0] : residues.index[centers_iloc[0]]] = np.nan
+        residues[residues.index[centers_iloc[-1]] : residues.index[-1]] = np.nan
+    else:
+        if isinstance(winsz, str):
+            winsz = int(np.floor(pd.Timedelta(winsz) / pd.Timedelta(to_fit.index.freqstr)))
+        if winsz % 2 == 0:
+            winsz = int(winsz - 1)
+        if numba == "auto":
+            if to_fit.shape[0] < 200000:
+                numba = False
+            else:
+                numba = True
+
+        val_range = np.arange(0, winsz)
+        center_index = int(np.floor(winsz / 2))
+        if min_periods < winsz:
+            if min_periods > 0:
+                to_fit = to_fit.rolling(winsz, min_periods=min_periods, center=True).apply(
+                    lambda x, y: x[y], raw=True, args=(center_index,)
+                )
+
+            # we need a missing value marker that is not nan, because nan values don't get passed by pandas' rolling
+            # method
+            miss_marker = to_fit.min()
+            miss_marker = np.floor(miss_marker - 1)
+            na_mask = to_fit.isna()
+            to_fit[na_mask] = miss_marker
+            if numba:
+                residues = to_fit.rolling(winsz).apply(
+                    polyRollerNumba,
+                    args=(miss_marker, val_range, center_index, polydeg),
+                    raw=True,
+                    engine="numba",
+                    engine_kwargs={"no_python": True},
+                )
+                # due to a tiny bug - rolling with center=True doesn't work when using the numba engine.
+                residues = residues.shift(-int(center_index))
+            else:
+                residues = to_fit.rolling(winsz, center=True).apply(
+                    polyRoller, args=(miss_marker, val_range, center_index, polydeg), raw=True
+                )
+            residues[na_mask] = np.nan
+        else:
+            # we only fit fully populated intervals:
+            if numba:
+                residues = to_fit.rolling(winsz).apply(
+                    polyRollerNoMissingNumba,
+                    args=(val_range, center_index, polydeg),
+                    engine="numba",
+                    engine_kwargs={"no_python": True},
+                    raw=True,
+                )
+                # due to a tiny bug - rolling with center=True doesn't work when using the numba engine.
+                residues = residues.shift(-int(center_index))
+            else:
+                residues = to_fit.rolling(winsz, center=True).apply(
+                    polyRollerNoMissing, args=(val_range, center_index, polydeg), raw=True
+                )
+
+    if _return_residues:
+        residues = residues - to_fit
+
+    data[field] = residues
+    if eval_flags:
+        num_cats, codes = flags.factorize()
+        num_cats = pd.Series(num_cats, index=flags.index).rolling(winsz, center=True, min_periods=min_periods).max()
+        nan_samples = num_cats[num_cats.isna()]
+        num_cats.drop(nan_samples.index, inplace=True)
+        to_flag = pd.Series(codes[num_cats.astype(int)], index=num_cats.index)
+        to_flag = to_flag.align(nan_samples)[0]
+        to_flag[nan_samples.index] = flags[nan_samples.index]
+        flagger = flagger.setFlags(field, to_flag.values, **kwargs)
+
+    return data, flagger
\ No newline at end of file
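
The centered rolling polynomial fit that `fitPolynomial` performs for regularly sampled data can be illustrated with plain pandas/numpy. The following is a minimal standalone sketch on synthetic data with an illustrative window and degree, not a call into the saqc API; the saqc function additionally handles missing values, the numba engine and flag propagation:

```python
import numpy as np
import pandas as pd

# synthetic, regularly sampled series (illustrative only)
idx = pd.date_range("2020-01-01", periods=100, freq="10min")
series = pd.Series(np.sin(np.linspace(0, 10, 100)), index=idx)

winsz, polydeg = 11, 2          # odd window length and polynomial degree
x = np.arange(winsz)
center = winsz // 2

def poly_center_fit(vals):
    # fit a polynomial to the window and evaluate it at the window center
    coefs = np.polyfit(x, vals, polydeg)
    return np.polyval(coefs, center)

fit = series.rolling(winsz, center=True).apply(poly_center_fit, raw=True)
residues = series - fit         # residual curve (what `_return_residues` refers to, up to sign)
```
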
diff --git a/saqc/funcs/drift.py b/saqc/funcs/drift.py
new file mode 100644
index 0000000000000000000000000000000000000000..71061240b4c43fffb25b60b527efb3bd6a7c93aa
--- /dev/null
+++ b/saqc/funcs/drift.py
@@ -0,0 +1,720 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+import functools
+
+import dios
+import numpy as np
+import pandas as pd
+import scipy
+from scipy import stats
+from scipy.optimize import curve_fit
+
+
+from saqc.core.register import register
+from saqc.funcs.resampling import shift
+from saqc.funcs.changepoints import assignChangePointCluster
+from saqc.funcs.tools import drop, copy
+from saqc.lib.tools import detectDeviants
+from saqc.lib.ts_operators import expModelFunc
+
+
+@register(masking='all')
+def flagDriftFromNorm(data, field, flagger, fields, segment_freq, norm_spread, norm_frac=0.5,
+                      metric=lambda x, y: scipy.spatial.distance.pdist(np.array([x, y]),
+                                                                       metric='cityblock') / len(x),
+                      linkage_method='single', **kwargs):
+    """
+    The function flags value courses that significantly deviate from a group of normal value courses.
+
+    "Normality" is determined in terms of a maximum spreading distance that members of a normal group must not exceed.
+    In addition, a group is only considered "normal" if it contains more than `norm_frac` percent of the
+    variables in "fields".
+
+    See the Notes section for a more detailed presentation of the algorithm.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        A dummy parameter.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    fields : list of str
+        List of fieldnames in data, determining which variables are to be included into the flagging process.
+    segment_freq : str
+        An offset string, determining the size of the separate data chunks that the algorithm is applied on
+        piecewise.
+    norm_spread : float
+        A parameter limiting the maximum "spread" of the timeseries allowed in the "normal" group. See the Notes
+        section for more details.
+    norm_frac : float, default 0.5
+        Has to be in [0,1]. Determines the minimum percentage of variables the "normal" group has to comprise to
+        actually be considered the normal group. The higher the value, the more stable the algorithm will be with
+        respect to false positives. Also, the behaviour is undefined if this value is below 0.5.
+    metric : Callable[[numpy.array, numpy.array], float]
+        A distance function. It should be a function of two 1-dimensional arrays and return a float scalar value.
+        This value is interpreted as the distance of the two input arrays. The default is the averaged Manhattan metric.
+        See the Notes section to get an idea of why this could be a good choice.
+    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+        The linkage method used for hierarchical (agglomerative) clustering of the timeseries.
+        See the Notes section for more details.
+        The keyword gets passed on to scipy.hierarchy.linkage. See its documentation to learn more about the different
+        keywords (References [1]).
+        See wikipedia for an introduction to hierarchical clustering (References [2]).
+    kwargs
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the input flagger.
+
+    Notes
+    -----
+    The following steps are performed for every data "segment" of length `segment_freq` in order to find the
+    "abnormal" data:
+
+    1. Calculate the distances :math:`d(x_i, x_j)` for all :math:`x_i` in parameter `fields` (with :math:`d`
+       denoting the distance function passed to the parameter `metric`).
+    2. Calculate a dendrogram with a hierarchical linkage algorithm, specified by the parameter `linkage_method`.
+    3. Flatten the dendrogram at the level where the agglomeration costs exceed the value given by the parameter
+       `norm_spread`.
+    4. Check if there is a cluster containing more than `norm_frac` percent of the variables in `fields`.
+
+        1. if yes: flag all the variables that are not in that cluster (inside the segment)
+        2. if no: flag nothing
+
+    The main parameter giving control over the algorithm's behaviour is the `norm_spread` parameter, which determines
+    the maximum spread of a normal group by limiting the costs a cluster agglomeration must not exceed in any
+    linkage step.
+    For singleton clusters, these costs just equal half the distance the timeseries in the clusters have to
+    each other. So, no two timeseries can be clustered together that are more than
+    2*`norm_spread` distant from each other.
+    When timeseries get clustered together, the new cluster's distance to all the other timeseries/clusters is
+    calculated according to the linkage method specified by `linkage_method`. By default, it is the minimum distance
+    the members of the clusters have to each other.
+    With that in mind, it is advisable to choose a distance function that can be well interpreted in the units
+    dimension of the measurement and whose interpretation is invariant over the length of the timeseries.
+    That is why the "averaged Manhattan metric" is set as the metric default, since it corresponds to the
+    averaged value distance two timeseries have (as opposed to, for example, the Euclidean distance).
+
+    References
+    ----------
+    Documentation of the underlying hierarchical clustering algorithm:
+        [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
+    Introduction to Hierarchical clustering:
+        [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
+    """
+
+    data_to_flag = data[fields].to_df()
+    data_to_flag.dropna(inplace=True)
+    segments = data_to_flag.groupby(pd.Grouper(freq=segment_freq))
+    for segment in segments:
+        if segment[1].shape[0] <= 1:
+            continue
+        drifters = detectDeviants(segment[1], metric, norm_spread, norm_frac, linkage_method, 'variables')
+
+        for var in drifters:
+            flagger = flagger.setFlags(fields[var], loc=segment[1].index, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='all')
+def flagDriftFromReference(data, field, flagger, fields, segment_freq, thresh,
+                      metric=lambda x, y: scipy.spatial.distance.pdist(np.array([x, y]),
+                                                                    metric='cityblock')/len(x),
+                       **kwargs):
+    """
+    The function flags value courses that deviate from a reference course by a margin exceeding a certain threshold.
+
+    The deviation is measured by the distance function passed to parameter metric.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The reference variable, the deviation from which determines the flagging.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    fields : list of str
+        List of fieldnames in data, determining which variables are to be included into the flagging process.
+    segment_freq : str
+        An offset string, determining the size of the separate data chunks that the algorithm is applied on
+        piecewise.
+    thresh : float
+        The maximum threshold by which normal variables may deviate from the reference variable.
+    metric : Callable[[numpy.array, numpy.array], float]
+        A distance function. It should be a function of two 1-dimensional arrays and return a float scalar value.
+        This value is interpreted as the distance of the two input arrays. The default is the averaged Manhattan metric.
+        See the Notes section to get an idea of why this could be a good choice.
+    kwargs
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the input flagger.
+
+    Notes
+    -----
+    It is advisable to choose a distance function that can be well interpreted in the units
+    dimension of the measurement and whose interpretation is invariant over the length of the timeseries.
+    That is why the "averaged Manhattan metric" is set as the metric default, since it corresponds to the
+    averaged value distance two timeseries have (as opposed to, for example, the Euclidean distance).
+    """
+
+    data_to_flag = data[fields].to_df()
+    data_to_flag.dropna(inplace=True)
+    if field not in fields:
+        fields.append(field)
+    var_num = len(fields)
+    segments = data_to_flag.groupby(pd.Grouper(freq=segment_freq))
+
+    for segment in segments:
+
+        if segment[1].shape[0] <= 1:
+            continue
+        for i in range(var_num):
+            dist = metric(segment[1].iloc[:, i].values, segment[1].loc[:, field].values)
+            if dist > thresh:
+                flagger = flagger.setFlags(fields[i], loc=segment[1].index, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='all')
+def flagDriftFromScaledNorm(data, field, flagger, fields_scale1, fields_scale2, segment_freq, norm_spread, norm_frac=0.5,
+                            metric=lambda x, y: scipy.spatial.distance.pdist(np.array([x, y]),
+                                                                                    metric='cityblock')/len(x),
+                            linkage_method='single', **kwargs):
+
+
+    """
+    The function linearly rescales one set of variables to another set of variables with a different scale and then
+    flags value courses that significantly deviate from a group of normal value courses.
+
+    The two sets of variables can be linearly scaled one to another and hence the scaling transformation is performed
+    via linear regression: a linear regression is performed on each pair of variables, giving a slope and an intercept.
+    The transformation is then calculated as the median of all the calculated slopes and intercepts.
+
+    Once the transformation is performed, the function flags those values that deviate from a group of normal values.
+    "Normality" is determined in terms of a maximum spreading distance that members of a normal group must not exceed.
+    In addition, a group is only considered "normal" if it contains more than `norm_frac` percent of the
+    variables in "fields".
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        A dummy parameter.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    fields_scale1 : list of str
+        List of fieldnames in data to be included into the flagging process, which are scaled according to scaling
+        scheme 1.
+    fields_scale2 : list of str
+        List of fieldnames in data to be included into the flagging process, which are scaled according to scaling
+        scheme 2.
+    segment_freq : str
+        An offset string, determining the size of the separate data chunks that the algorithm is applied on
+        piecewise.
+    norm_spread : float
+        A parameter limiting the maximum "spread" of the timeseries allowed in the "normal" group. See the Notes
+        section for more details.
+    norm_frac : float, default 0.5
+        Has to be in [0,1]. Determines the minimum percentage of variables the "normal" group has to comprise to
+        actually be considered the normal group. The higher the value, the more stable the algorithm will be with
+        respect to false positives. Also, the behaviour is undefined if this value is below 0.5.
+    metric : Callable[[numpy.array, numpy.array], float]
+        A distance function. It should be a function of two 1-dimensional arrays and return a float scalar value.
+        This value is interpreted as the distance of the two input arrays. The default is the averaged Manhattan metric.
+        See the Notes section to get an idea of why this could be a good choice.
+    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+        The linkage method used for hierarchical (agglomerative) clustering of the timeseries.
+        See the Notes section for more details.
+        The keyword gets passed on to scipy.hierarchy.linkage. See its documentation to learn more about the different
+        keywords (References [1]).
+        See wikipedia for an introduction to hierarchical clustering (References [2]).
+    kwargs
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the input flagger.
+
+    References
+    ----------
+    Documentation of the underlying hierarchical clustering algorithm:
+        [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
+    Introduction to Hierarchical clustering:
+        [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
+    """
+
+    fields = fields_scale1 + fields_scale2
+    data_to_flag = data[fields].to_df()
+    data_to_flag.dropna(inplace=True)
+
+    convert_slope = []
+    convert_intercept = []
+
+    for field1 in fields_scale1:
+        for field2 in fields_scale2:
+            slope, intercept, r_value, p_value, std_err = stats.linregress(data_to_flag[field1], data_to_flag[field2])
+            convert_slope.append(slope)
+            convert_intercept.append(intercept)
+
+    factor_slope = np.median(convert_slope)
+    factor_intercept = np.median(convert_intercept)
+
+    dat = dios.DictOfSeries()
+    for field1 in fields_scale1:
+        dat[field1] = factor_intercept + factor_slope * data_to_flag[field1]
+    for field2 in fields_scale2:
+        dat[field2] = data_to_flag[field2]
+
+    dat_to_flag = dat[fields].to_df()
+
+    segments = dat_to_flag.groupby(pd.Grouper(freq=segment_freq))
+    for segment in segments:
+        if segment[1].shape[0] <= 1:
+            continue
+        drifters = detectDeviants(segment[1], metric, norm_spread, norm_frac, linkage_method, 'variables')
+        for var in drifters:
+            flagger = flagger.setFlags(fields[var], loc=segment[1].index, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='all')
+def correctExponentialDrift(data, field, flagger, maint_data_field, cal_mean=5, flag_maint_period=False,
+                            check_maint='1h', **kwargs):
+    """
+    The function fits an exponential model to chunks of data[field].
+    It is assumed that between maintenance events, there is a drift effect shifting the measurements in a way that
+    can be described by the model M:
+
+    M(t, a, b, c) = a + b * exp(c * t)
+
+    The values y_0 and y_1, describing the mean value directly after the last maintenance event (y_0) and
+    directly before the next maintenance event (y_1), impose the following additional conditions on the drift model:
+
+    M(0, a, b, c) = y0
+    M(1, a, b, c) = y1
+
+    Solving these equations, one obtains the one-parameter model
+
+    M_drift(t, c) = y0 + [(y1 - y0) / (exp(c) - 1)] * (exp(c * t) - 1)
+
+    for every data chunk in between two maintenance events.
+
+    After having found the optimal parameter c*, the correction is performed by bending the fitted curve M_drift(t, c*)
+    in a way that it matches y2 at t=1 (with y2 being the mean value observed directly after the end of the next
+    maintenance event).
+    This bent curve is given by:
+
+    M_shift(t, c*) = M(t, y0, [(y2 - y0) / (exp(c*) - 1)], c*)
+
+    And the new values at t are computed via:
+
+    new_vals(t) = old_vals(t) + M_shift(t) - M_drift(t)
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column, you want to correct.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional Informations related to `data`.
+    maint_data_field : str
+        The fieldname of the datacolumn holding the maintenance information.
+        The maintenance data is expected to have the following form:
+        the series' timestamps themselves represent the beginnings of the
+        maintenance events, whereas the values represent the endings of the maintenance intervals.
+    cal_mean : int, default 5
+        The number of values the mean is computed over, for obtaining the value level directly after and
+        directly before a maintenance event. These values are needed for the shift calibration (see the description above).
+    flag_maint_period : bool, default False
+        Whether or not to flag the values obtained during maintenance as BAD.
+    check_maint : bool, default True
+        Whether or not to check if the reported maintenance intervals are plausible.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the flagger input.
+    """
+
+
+    # 1: extract fit intervals:
+    if data[maint_data_field].empty:
+        return data, flagger
+    data = data.copy()
+    to_correct = data[field]
+    maint_data = data[maint_data_field]
+    drift_frame = pd.DataFrame({"drift_group": np.nan, to_correct.name: to_correct.values}, index=to_correct.index)
+
+    # group the drift frame
+    for k in range(0, maint_data.shape[0] - 1):
+        # assign group numbers for the timespans in between one maintenance ending and the beginning of the next
+        # maintenance time itself remains np.nan assigned
+        drift_frame.loc[maint_data.values[k] : pd.Timestamp(maint_data.index[k + 1]), "drift_group"] = k
+    drift_grouper = drift_frame.groupby("drift_group")
+    # define target values for correction
+    shift_targets = drift_grouper.aggregate(lambda x: x[:cal_mean].mean()).shift(-1)
+
+    for k, group in drift_grouper:
+        dataSeries = group[to_correct.name]
+        dataFit, dataShiftTarget = _drift_fit(dataSeries, shift_targets.loc[k, :][0], cal_mean)
+        dataFit = pd.Series(dataFit, index=group.index)
+        dataShiftTarget = pd.Series(dataShiftTarget, index=group.index)
+        dataShiftVektor = dataShiftTarget - dataFit
+        shiftedData = dataSeries + dataShiftVektor
+        to_correct[shiftedData.index] = shiftedData
+
+    if flag_maint_period:
+        to_flag = drift_frame["drift_group"]
+        to_flag = to_flag.drop(to_flag[: maint_data.index[0]].index)
+        to_flag = to_flag[to_flag.isna()]
+        flagger = flagger.setFlags(field, loc=to_flag, **kwargs)
+
+    data[field] = to_correct
+
+    return data, flagger
+
+
+@register(masking='all')
+def correctRegimeAnomaly(data, field, flagger, cluster_field, model, regime_transmission=None, x_date=False):
+    """
+    Function fits the passed model to the different regimes in data[field] and tries to correct
+    those values that have been assigned a negative label by data[cluster_field].
+
+    Currently, the only correction mode supported is the "parameter propagation."
+
+    This means, any regime :math:`z`, labeled negatively and being modeled by the parameters p, gets corrected via:
+
+    :math:`z_{correct} = z + (m(p^*) - m(p))`,
+
+    where :math:`p^*` denotes the parameter set belonging to the fit of the nearest not-negatively labeled cluster.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column, you want to correct.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    cluster_field : str
+        A string denoting the field in data holding the cluster labels for the data you want to correct.
+    model : Callable
+        The model function to be fitted to the regimes.
+        It must be a function of the form :math:`f(x, *p)`, where :math:`x` is the ``numpy.array`` holding the
+        independent variables and :math:`p` are the model parameters that are to be obtained by fitting.
+        Depending on the `x_date` parameter, independent variable x will either be the timestamps
+        of every regime transformed to seconds from epoch, or it will be just seconds, counting the regimes length.
+    regime_transmission : {None, str}, default None
+        If an offset string is passed, a data chunk of length `regime_transmission` right at the
+        start and right at the end of every regime is ignored when fitting the model. This is to account for the
+        unreliability of data near the changepoints of regimes.
+    x_date : bool, default False
+        If True, use "seconds from epoch" as x input to the model func, instead of "seconds from regime start".
+
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger
+        The flagger object, holding flags and additional information related to `data`.
+    """
+
+    cluster_ser = data[cluster_field]
+    unique_successive = pd.unique(cluster_ser.values)
+    data_ser = data[field]
+    regimes = data_ser.groupby(cluster_ser)
+    para_dict = {}
+    x_dict = {}
+    x_mask = {}
+    if regime_transmission is not None:
+        # get seconds
+        regime_transmission = pd.Timedelta(regime_transmission).total_seconds()
+    for label, regime in regimes:
+        if x_date is False:
+            # get seconds data:
+            xdata = (regime.index - regime.index[0]).to_numpy(dtype=float)*10**(-9)
+        else:
+            # get seconds from epoch data
+            xdata = regime.index.to_numpy(dtype=float)*10**(-9)
+        ydata = regime.values
+        valid_mask = ~np.isnan(ydata)
+        if regime_transmission is not None:
+            valid_mask &= (xdata > xdata[0] + regime_transmission)
+            valid_mask &= (xdata < xdata[-1] - regime_transmission)
+        try:
+            p, pcov = curve_fit(model, xdata[valid_mask], ydata[valid_mask])
+        except (RuntimeError, ValueError):
+            p = np.array([np.nan])
+        para_dict[label] = p
+        x_dict[label] = xdata
+        x_mask[label] = valid_mask
+
+    first_normal = unique_successive > 0
+    first_valid = np.array([~pd.isna(para_dict[unique_successive[i]]).any() for i in range(0, unique_successive.shape[0])])
+    first_valid = np.where(first_normal & first_valid)[0][0]
+    last_valid = 1
+
+    for k in range(0, unique_successive.shape[0]):
+        if (unique_successive[k] < 0) & (not pd.isna(para_dict[unique_successive[k]]).any()):
+            ydata = data_ser[regimes.groups[unique_successive[k]]].values
+            xdata = x_dict[unique_successive[k]]
+            ypara = para_dict[unique_successive[k]]
+            if k > 0:
+                target_para = para_dict[unique_successive[k-last_valid]]
+            else:
+                # first regime has no "last valid" to its left, so we use first valid to the right:
+                target_para = para_dict[unique_successive[k + first_valid]]
+            y_shifted = ydata + (model(xdata, *target_para) - model(xdata, *ypara))
+            data_ser[regimes.groups[unique_successive[k]]] = y_shifted
+            if k > 0:
+                last_valid += 1
+        elif pd.isna(para_dict[unique_successive[k]]).any() & (k > 0):
+            last_valid += 1
+        else:
+            last_valid = 1
+
+    data[field] = data_ser
+    return data, flagger
+
+
+@register(masking='all')
+def correctOffset(data, field, flagger, max_mean_jump, normal_spread, search_winsz, min_periods,
+                  regime_transmission=None):
+    """
+    Corrects offsets in the mean of data[field]: mean changepoints are detected, regimes deviating from the normal
+    group are identified, and those regimes are shifted by a constant towards the normal level.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column, you want to correct.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional Informations related to `data`.
+    max_mean_jump : float
+        When searching for changepoints in mean, this is the threshold a mean difference in the
+        sliding window search must exceed to trigger changepoint detection.
+    normal_spread : float
+        Threshold denoting the maximum absolute difference regimes may have in their means
+        to still form the "normal group" of values.
+    search_winsz : str
+        Size of the adjacent windows that are used to search for the mean changepoints.
+    min_periods : int
+        Minimum number of periods a search window has to contain, for the result of the changepoint
+        detection to be considered valid.
+    regime_transmission : {None, str}, default None
+        If an offset string is passed, a data chunk of length `regime_transmission` right after the
+        start and right before the end of any regime is ignored when calculating a regime's mean for the data correction.
+        This is to account for the unreliability of data near the changepoints of regimes.
+
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger
+        The flagger object, holding flags and additional information related to `data`.
+
+    """
+
+    data, flagger = copy(data, field, flagger, field + '_CPcluster')
+    data, flagger = assignChangePointCluster(data, field + '_CPcluster', flagger,
+                                             lambda x, y: np.abs(np.mean(x) - np.mean(y)),
+                                             lambda x, y: max_mean_jump,
+                                             bwd_window=search_winsz,
+                                             min_periods_bwd=min_periods)
+    data, flagger = assignRegimeAnomaly(data, field, flagger, field + '_CPcluster', normal_spread)
+    data, flagger = correctRegimeAnomaly(data, field, flagger, field + '_CPcluster',
+                                         lambda x, p1: np.array([p1] * x.shape[0]),
+                                         regime_transmission=regime_transmission)
+    data, flagger = drop(data, field + '_CPcluster', flagger)
+
+    return data, flagger
+
+
+def _drift_fit(x, shift_target, cal_mean):
+    x_index = x.index - x.index[0]
+    x_data = x_index.total_seconds().values
+    x_data = x_data / x_data[-1]
+    y_data = x.values
+    origin_mean = np.mean(y_data[:cal_mean])
+    target_mean = np.mean(y_data[-cal_mean:])
+
+    def modelWrapper(x, c, a=origin_mean, target_mean=target_mean):
+        # final fitted curves val = target mean
+        b = (target_mean - a) / (np.exp(c) - 1)
+        return expModelFunc(x, a, b, c)
+
+    dataFitFunc = functools.partial(modelWrapper, a=origin_mean, target_mean=target_mean)
+
+    try:
+        fitParas, _ = curve_fit(dataFitFunc, x_data, y_data, bounds=([0], [np.inf]))
+        dataFit = dataFitFunc(x_data, fitParas[0])
+        b_val = (shift_target - origin_mean) / (np.exp(fitParas[0]) - 1)
+        dataShiftFunc = functools.partial(expModelFunc, a=origin_mean, b=b_val, c=fitParas[0])
+        dataShift = dataShiftFunc(x_data)
+    except RuntimeError:
+        dataFit = np.array([0] * len(x_data))
+        dataShift = np.array([0] * len(x_data))
+
+    return dataFit, dataShift
+
+
+@register(masking='all')
+def flagRegimeAnomaly(data, field, flagger, cluster_field, norm_spread, linkage_method='single',
+                       metric=lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
+                       norm_frac=0.5, **kwargs):
+    """
+    A function to flag values belonging to an anomalous regime among the modelled regimes of `field`.
+
+    "Normality" is determined in terms of a maximum spreading distance that regimes must not exceed with respect
+    to a certain metric and linkage method.
+
+    In addition, a range of regimes is only considered "normal" if it comprises more than `norm_frac` percent of
+    the valid samples in `field`.
+
+    Note that you must detect the regime changepoints prior to calling this function.
+
+    Note that it is possible to perform hypothesis tests for regime equality by passing the metric
+    a function for p-value calculation and selecting the linkage method "complete".
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    cluster_field : str
+        The name of the column in data, holding the cluster labels for the samples in field (has to be indexed
+        equally to field).
+    norm_spread : float
+        A threshold denoting the value level up to which clusters are agglomerated.
+    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+        The linkage method used for hierarchical (agglomerative) clustering of the variables.
+    metric : Callable[[numpy.array, numpy.array], float], default lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y))
+        A metric function for calculating the dissimilarity between 2 regimes. Defaults to just the difference in mean.
+    norm_frac : float
+        Has to be in [0,1]. Determines the minimum percentage of samples
+        the "normal" group has to comprise to actually be considered the normal group.
+
+    Returns
+    -------
+
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the flagger input.
+
+    """
+
+    data, flagger = assignRegimeAnomaly(data, field, flagger, cluster_field, norm_spread,
+                                        linkage_method=linkage_method, metric=metric, norm_frac=norm_frac,
+                                        _set_cluster=False, _set_flags=True, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='all')
+def assignRegimeAnomaly(data, field, flagger, cluster_field, norm_spread, linkage_method='single',
+                        metric=lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y)),
+                        norm_frac=0.5, _set_cluster=True, _set_flags=False, **kwargs):
+    """
+    A function to detect values belonging to an anomalous regime among the modelled regimes of `field`.
+
+    The function marks anomalous regimes by setting their cluster labels to negative values.
+
+    "Normality" is determined in terms of a maximum spreading distance that regimes must not exceed with respect
+    to a certain metric and linkage method.
+
+    In addition, a range of regimes is only considered "normal" if it comprises more than `norm_frac` percent of
+    the valid samples in `field`.
+
+    Note that you must detect the regime changepoints prior to calling this function. (They are expected to be stored
+    in the parameter `cluster_field`.)
+
+    Note that it is possible to perform hypothesis tests for regime equality by passing the metric
+    a function for p-value calculation and selecting the linkage method "complete".
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    cluster_field : str
+        The name of the column in data, holding the cluster labels for the samples in field (has to be indexed
+        equally to field).
+    norm_spread : float
+        A threshold denoting the value level up to which clusters are agglomerated.
+    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+        The linkage method used for hierarchical (agglomerative) clustering of the variables.
+    metric : Callable[[numpy.array, numpy.array], float], default lambda x, y: np.abs(np.nanmean(x) - np.nanmean(y))
+        A metric function for calculating the dissimilarity between 2 regimes. Defaults to just the difference in mean.
+    norm_frac : float
+        Has to be in [0,1]. Determines the minimum percentage of samples
+        the "normal" group has to comprise to actually be considered the normal group.
+    _set_cluster : bool, default True
+        If True, all data considered "anomalous" gets assigned a negative cluster label. This option
+        is present for further use (correction) of the anomaly information.
+    _set_flags : bool, default False
+        Whether or not to flag anomalous values (do not flag them if you want to correct them
+        afterwards, because flagged values usually are not visible in further tests).
+
+    Returns
+    -------
+
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the flagger input.
+
+    """
+
+    clusterser = data[cluster_field]
+    cluster = np.unique(clusterser)
+    cluster_dios = dios.DictOfSeries({i: data[field][clusterser == i] for i in cluster})
+    plateaus = detectDeviants(cluster_dios, metric, norm_spread, norm_frac, linkage_method, 'samples')
+
+    if _set_flags:
+        for p in plateaus:
+            flagger = flagger.setFlags(field, loc=cluster_dios.iloc[:, p].index, **kwargs)
+
+    if _set_cluster:
+        for p in plateaus:
+            if cluster[p] > 0:
+                clusterser[clusterser == cluster[p]] = -cluster[p]
+
+    data[cluster_field] = clusterser
+    return data, flagger
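
The clustering step shared by `flagDriftFromNorm`, `flagDriftFromScaledNorm` and `assignRegimeAnomaly` (via `detectDeviants`) can be sketched with scipy directly. The snippet below is only an illustration of the idea, with made-up data and an illustrative threshold playing the role of `norm_spread`; the `norm_frac` check and the per-segment grouping of the saqc functions are omitted:

```python
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
segment = pd.DataFrame(0.1 * rng.standard_normal((500, 4)),
                       columns=["SM1", "SM2", "SM3", "SM4"])
segment["SM4"] += 5                                # one variable drifted away

# averaged manhattan ("cityblock") distance between all pairs of variables
dists = pdist(segment.values.T, metric="cityblock") / len(segment)

# hierarchical clustering, flattened at the spreading threshold
Z = linkage(dists, method="single")
labels = fcluster(Z, t=1.0, criterion="distance")  # t plays the role of norm_spread

# variables outside the largest ("normal") cluster are the drift candidates
counts = np.bincount(labels)
normal_label = counts.argmax()
drifters = [c for c, lab in zip(segment.columns, labels) if lab != normal_label]
print(drifters)                                    # -> ['SM4']
```
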
diff --git a/saqc/funcs/flagtools.py b/saqc/funcs/flagtools.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5b7f6ab7acd715c75eab0f30ed592a390864bca
--- /dev/null
+++ b/saqc/funcs/flagtools.py
@@ -0,0 +1,253 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import dios
+import numpy as np
+import pandas as pd
+
+
+from saqc.core.register import register
+from dios import DictOfSeries
+from typing import Any
+
+
+@register(masking='field')
+def clearFlags(data, field, flagger, **kwargs):
+    flagger = flagger.clearFlags(field, **kwargs)
+    return data, flagger
+
+
+@register(masking='field')
+def forceFlags(data, field, flagger, flag, **kwargs):
+    flagger = flagger.clearFlags(field).setFlags(field, flag=flag, inplace=True, **kwargs)
+    return data, flagger
+
+
+@register(masking='field')
+def flagDummy(data, field, flagger, **kwargs):
+    """
+    Function does nothing but return the passed data and flagger.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+    """
+    return data, flagger
+
+
+@register(masking='field')
+def flagForceFail(data, field, flagger, **kwargs):
+    """
+    Function raises a runtime error.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+
+    """
+    raise RuntimeError("Works as expected :D")
+
+
+@register(masking='field')
+def flagUnflagged(data, field, flagger, **kwargs):
+    """
+    Function sets the flagger.GOOD flag to all values flagged better than flagger.GOOD.
+    If there is an entry 'flag' in the kwargs dictionary passed, the
+    function sets the kwargs['flag'] flag to all values flagged better than kwargs['flag'].
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    kwargs : Dict
+        If kwargs contains a 'flag' entry, kwargs['flag'] is set; if no entry 'flag' is present,
+        'flagger.GOOD' is set.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+    """
+
+    flag = kwargs.pop('flag', flagger.GOOD)
+    flagger = flagger.setFlags(field, flag=flag, **kwargs)
+    return data, flagger
+
+
+@register(masking='field')
+def flagGood(data, field, flagger, **kwargs):
+    """
+    Function sets the flagger.GOOD flag to all values flagged better than flagger.GOOD.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+
+    """
+    kwargs.pop('flag', None)
+    return flagUnflagged(data, field, flagger, **kwargs)
+
+
+@register(masking='field')
+def flagManual(data, field, flagger, mdata, mflag: Any = 1, method="plain", **kwargs):
+    """
+    Flag data by given, "manually generated" data.
+
+    The data is flagged at locations where `mdata` is equal to a provided flag (`mflag`).
+    The format of mdata can be an indexed object, like pd.Series, pd.DataFrame or dios.DictOfSeries,
+    but it can also be a plain list- or array-like.
+    How indexed mdata is aligned to data is specified via the `method` parameter.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    mdata : {pd.Series, pd.DataFrame, DictOfSeries, str}
+        The "manually generated" data.
+    mflag : scalar
+        The flag that indicates data points in `mdata` whose projection onto data should be flagged.
+    method : {'plain', 'ontime', 'left-open', 'right-open'}, default plain
+        Defines how mdata is projected on data. Except for the 'plain' method, the methods assume mdata to have an
+        index.
+
+        * 'plain': mdata must have the same length as data and is projected one-to-one on data.
+        * 'ontime': works only with indexed mdata. mdata entries are matched with data entries that have the same index.
+        * 'right-open': mdata defines intervals, values are to be projected on.
+          The intervals are defined by any two consecutive timestamps t_1 and t_2 in mdata.
+          The value at t_1 gets projected onto all data timestamps t with t_1 <= t < t_2.
+        * 'left-open': like 'right-open', but the projected interval now covers all t with t_1 < t <= t_2.
+
+    Returns
+    -------
+    data, flagger: original data, modified flagger
+
+    Examples
+    --------
+    An example for mdata
+    >>> mdata = pd.Series([1,0,1], index=pd.to_datetime(['2000-02', '2000-03', '2001-05']))
+    >>> mdata
+    2000-02-01    1
+    2000-03-01    0
+    2001-05-01    1
+    dtype: int64
+
+    On *daily* data, with the 'ontime' method, only the provided timestamps are used.
+    Bear in mind that only exact timestamps apply; any offset will result in ignoring
+    the timestamp.
+    >>> _, fl = flagManual(data, field, flagger, mdata, mflag=1, method='ontime')
+    >>> fl.isFlagged(field)
+    2000-01-31    False
+    2000-02-01    True
+    2000-02-02    False
+    2000-02-03    False
+    ..            ..
+    2000-02-29    False
+    2000-03-01    True
+    2000-03-02    False
+    Freq: D, dtype: bool
+
+    With the 'right-open' method, the mdata is forward filled:
+    >>> _, fl = flagManual(data, field, flagger, mdata, mflag=1, method='right-open')
+    >>> fl.isFlagged(field)
+    2000-01-31    False
+    2000-02-01    True
+    2000-02-02    True
+    ..            ..
+    2000-02-29    True
+    2000-03-01    False
+    2000-03-02    False
+    Freq: D, dtype: bool
+
+    With the 'left-open' method, backward filling is used:
+    >>> _, fl = flagManual(data, field, flagger, mdata, mflag=1, method='left-open')
+    >>> fl.isFlagged(field)
+    2000-01-31    False
+    2000-02-01    False
+    2000-02-02    True
+    ..            ..
+    2000-02-29    True
+    2000-03-01    True
+    2000-03-02    False
+    Freq: D, dtype: bool
+    """
+    dat = data[field]
+    if isinstance(mdata, str):
+        # todo import path type in mdata, use
+        #  s = pd.read_csv(mdata, index_col=N, usecol=[N,N,..]) <- use positional
+        #  use a list-arg in config to get the columns
+        #  at last, fall throug to next checks
+        raise NotImplementedError("giving a path is currently not supported")
+
+    if isinstance(mdata, (pd.DataFrame, DictOfSeries)):
+        mdata = mdata[field]
+
+    hasindex = isinstance(mdata, (pd.Series, pd.DataFrame, DictOfSeries))
+    if not hasindex and method != "plain":
+        raise ValueError("mdata has no index")
+
+    if method == "plain":
+        if hasindex:
+            mdata = mdata.to_numpy()
+        if len(mdata) != len(dat):
+            raise ValueError("mdata must have the same length as data")
+        mdata = pd.Series(mdata, index=dat.index)
+    elif method == "ontime":
+        pass  # reindex will do the job later
+    elif method in ["left-open", "right-open"]:
+        mdata = mdata.reindex(dat.index.union(mdata.index))
+
+        # -->)[t0-->)[t1--> (ffill)
+        if method == "right-open":
+            mdata = mdata.ffill()
+
+        # <--t0](<--t1](<-- (bfill)
+        if method == "left-open":
+            mdata = mdata.bfill()
+    else:
+        raise ValueError(method)
+
+    mask = mdata == mflag
+    mask = mask.reindex(dat.index).fillna(False)
+    flagger = flagger.setFlags(field=field, loc=mask, **kwargs)
+    return data, flagger
\ No newline at end of file
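
The interval projection that `flagManual` performs for the 'right-open' and 'left-open' methods boils down to a reindex plus forward/backward fill. Below is a minimal sketch with made-up data, showing only the pandas mechanics used above, not the saqc API itself:

```python
import pandas as pd

dat = pd.Series(range(6), index=pd.date_range("2000-01-31", periods=6, freq="D"))
mdata = pd.Series([1, 0], index=pd.to_datetime(["2000-02-01", "2000-02-03"]))
mflag = 1

joined = mdata.reindex(dat.index.union(mdata.index))

right_open = joined.ffill()   # value at t_1 holds for t_1 <= t < t_2
left_open = joined.bfill()    # value at t_1 holds for all t up to and including t_1

# boolean mask of the data points to flag (here for the 'right-open' variant)
mask = (right_open == mflag).reindex(dat.index).fillna(False)
print(mask)
```
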
diff --git a/saqc/funcs/functions.py b/saqc/funcs/functions.py
deleted file mode 100644
index c93731500a732099cfaf71a4b37d6e753ab50644..0000000000000000000000000000000000000000
--- a/saqc/funcs/functions.py
+++ /dev/null
@@ -1,1005 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from functools import partial
-from inspect import signature
-
-import dios
-import numpy as np
-import pandas as pd
-import scipy
-import itertools
-import collections
-import numba
-from mlxtend.evaluate import permutation_test
-from scipy import stats
-from scipy.cluster.hierarchy import linkage, fcluster
-
-
-from saqc.lib.tools import groupConsecutives, detectDeviants
-from saqc.lib.tools import groupConsecutives, seasonalMask
-from saqc.funcs.proc_functions import proc_fork, proc_drop, proc_projectFlags
-from saqc.funcs.modelling import modelling_mask
-
-from saqc.core.register import register
-from saqc.core.visitor import ENVIRONMENT
-from dios import DictOfSeries
-from typing import Any
-
-
-def _dslIsFlagged(flagger, var, flag=None, comparator=">="):
-    """
-    helper function for `flagGeneric`
-    """
-    return flagger.isFlagged(var.name, flag=flag, comparator=comparator)
-
-
-def _execGeneric(flagger, data, func, field, nodata):
-    # TODO:
-    # - check series.index compatibility
-    # - field is only needed to translate 'this' parameters
-    #    -> maybe we could do the translation on the tree instead
-
-    sig = signature(func)
-    args = []
-    for k, v in sig.parameters.items():
-        k = field if k == "this" else k
-        if k not in data:
-            raise NameError(f"variable '{k}' not found")
-        args.append(data[k])
-
-    globs = {
-        "isflagged": partial(_dslIsFlagged, flagger),
-        "ismissing": lambda var: ((var == nodata) | pd.isnull(var)),
-        "mask": lambda cond: data[cond.name].mask(cond),
-        "this": field,
-        "NODATA": nodata,
-        "GOOD": flagger.GOOD,
-        "BAD": flagger.BAD,
-        "UNFLAGGED": flagger.UNFLAGGED,
-        **ENVIRONMENT,
-    }
-    func.__globals__.update(globs)
-    return func(*args)
-
-
-@register(masking='all')
-def procGeneric(data, field, flagger, func, nodata=np.nan, **kwargs):
-    """
-    generate/process data with generically defined functions.
-
-    The functions can depend on on any of the fields present in data.
-
-    Formally, what the function does, is the following:
-
-    1.  Let F be a Callable, depending on fields f_1, f_2,...f_K, (F = F(f_1, f_2,...f_K))
-        Than, for every timestamp t_i that occurs in at least one of the timeseries data[f_j] (outer join),
-        The value v_i is computed via:
-        v_i = data([f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]), if all data[f_j][t_i] do exist
-        v_i = `nodata`, if at least one of the data[f_j][t_i] is missing.
-    2.  The result is stored to data[field] (gets generated if not present)
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, where you want the result from the generic expressions processing to be written to.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    func : Callable
-        The data processing function with parameter names that will be
-        interpreted as data column entries.
-        See the examples section to learn more.
-    nodata : any, default np.nan
-        The value that indicates missing/invalid data
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        The shape of the data may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        The flags shape may have changed relatively to the input flagger.
-
-    Examples
-    --------
-    Some examples on what to pass to the func parameter:
-    To compute the sum of the variables "temperature" and "uncertainty", you would pass the function:
-
-    >>> lambda temperature, uncertainty: temperature + uncertainty
-
-    You also can pass numpy and pandas functions:
-
-    >>> lambda temperature, uncertainty: np.round(temperature) * np.sqrt(uncertainty)
-
-    """
-    data[field] = _execGeneric(flagger, data, func, field, nodata).squeeze()
-    # NOTE:
-    # The flags to `field` will be (re-)set to UNFLAGGED
-    # That leads to the following problem:
-    # flagger.merge merges the given flaggers, if
-    # `field` did already exist before the call to `procGeneric`
-    # but with a differing index, we end up with:
-    # len(data[field]) != len(flagger.getFlags(field))
-    # see: test/funcs/test_generic_functions.py::test_procGenericMultiple
-
-    # TODO:
-    # We need a way to simply overwrite a given flagger column, maybe
-    # an optional keyword to merge ?
-    flagger = flagger.merge(flagger.initFlags(data[field]))
-    return data, flagger
-
-
-@register(masking='all')
-def flagGeneric(data, field, flagger, func, nodata=np.nan, **kwargs):
-    """
-    a function to flag a data column by evaluation of a generic expression.
-
-    The expression can depend on any of the fields present in data.
-
-    Formally, what the function does, is the following:
-
-    Let X be an expression, depending on fields f_1, f_2,...f_K, (X = X(f_1, f_2,...f_K))
-    Than for every timestamp t_i in data[field]:
-    data[field][t_i] is flagged if X(data[f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]) is True.
-
-    Note, that all value series included in the expression to evaluate must be labeled identically to field.
-
-    Note, that the expression is passed in the form of a Callable and that this callables variable names are
-    interpreted as actual names in the data header. See the examples section to get an idea.
-
-    Note, that all the numpy functions are available within the generic expressions.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, where you want the result from the generic expressions evaluation to be projected
-        to.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    func : Callable
-        The expression that is to be evaluated is passed in the form of a callable, with parameter names that will be
-        interpreted as data column entries. The Callable must return a boolean array-like.
-        See the examples section to learn more.
-    nodata : any, default np.nan
-        The value that indicates missing/invalid data
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-
-    Examples
-    --------
-    Some examples on what to pass to the func parameter:
-    To flag the variable `field`, if the sum of the variables
-    "temperature" and "uncertainty" is below zero, you would pass the function:
-
-    >>> lambda temperature, uncertainty: temperature + uncertainty < 0
-
-    There is the reserved name 'this', that always refers to `field`. So, to flag `field` if it is negative, you can
-    also pass:
-
-    >>> lambda this: this < 0
-
-    If you want to make the flagging depend on flags already present in the data, you can use the built-in
-    ``isflagged`` method. For example, to flag 'temperature' if 'level' is flagged, you would use:
-
-    >>> lambda level: isflagged(level)
-
-    You can furthermore specify a flagging level you want to compare the flags to. For example, to flag
-    'temperature' if 'level' is flagged at a level named 'doubtfull' or worse, use:
-
-    >>> lambda level: isflagged(level, flag='doubtfull', comparator='<=')
-
-    If you are unsure about the flagging level names of the flagger in use, you can use the reserved keywords BAD, UNFLAGGED
-    and GOOD to refer to the worst (BAD), best (GOOD) or unflagged (UNFLAGGED) flagging levels. For example:
-
-    >>> lambda level: isflagged(level, flag=UNFLAGGED, comparator='==')
-
-    Your expression is also allowed to include pandas and numpy functions:
-
-    >>> lambda level: np.sqrt(level) > 7
-    """
-    # NOTE:
-    # The naming of the func parameter is pretty confusing
-    # as it actually holds the result of a generic expression
-    mask = _execGeneric(flagger, data, func, field, nodata).squeeze()
-    if np.isscalar(mask):
-        raise TypeError(f"generic expression does not return an array")
-    if not np.issubdtype(mask.dtype, np.bool_):
-        raise TypeError(f"generic expression does not return a boolean array")
-
-    if field not in flagger.getFlags():
-        flagger = flagger.merge(flagger.initFlags(data=pd.Series(index=mask.index, name=field)))
-
-    # if flagger.getFlags(field).empty:
-    #     flagger = flagger.merge(
-    #         flagger.initFlags(
-    #             data=pd.Series(name=field, index=mask.index, dtype=np.float64)))
-    flagger = flagger.setFlags(field=field, loc=mask, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def flagRange(data, field, flagger, min=-np.inf, max=np.inf, **kwargs):
-    """
-    Function flags values not covered by the closed interval [`min`, `max`].
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    min : float
-        Lower bound for valid data.
-    max : float
-        Upper bound for valid data.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
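-
-    Examples
-    --------
-    A minimal, hypothetical usage sketch (the field name "temp" and the bounds are made up for
-    illustration; `data` and `flagger` are assumed to be set up as described above):
-
-    >>> data, flagger = flagRange(data, "temp", flagger, min=-40, max=60)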
-    """
-
-    # using .values is very much faster
-    datacol = data[field].values
-    mask = (datacol < min) | (datacol > max)
-    flagger = flagger.setFlags(field, mask, **kwargs)
-    return data, flagger
-
-
-
-@register(masking='field')
-def flagMissing(data, field, flagger, nodata=np.nan, **kwargs):
-    """
-    The function flags all values indicating missing data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    nodata : any, default np.nan
-        A value that defines missing data.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
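-
-    Examples
-    --------
-    A hypothetical usage sketch (the field name and the nodata value are made up for illustration;
-    `data` and `flagger` are assumed to be set up as described above):
-
-    >>> data, flagger = flagMissing(data, "temp", flagger, nodata=-9999)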
-    """
-
-    datacol = data[field]
-    if np.isnan(nodata):
-        mask = datacol.isna()
-    else:
-        mask = datacol == nodata
-
-    flagger = flagger.setFlags(field, loc=mask, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def flagSesonalRange(
-        data, field, flagger, min, max, startmonth=1, endmonth=12, startday=1, endday=31, **kwargs,
-):
-    """
-    Function applies a range check onto data chunks (seasons).
-
-    The data chunks to be tested are defined by annual seasons that range from a starting date to an ending date,
-    where the dates are given by month and day number.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    min : float
-        Lower bound for valid data.
-    max : float
-        Upper bound for valid data.
-    startmonth : int
-        Starting month of the season to flag.
-    endmonth : int
-        Ending month of the season to flag.
-    startday : int
-        Starting day of the season to flag.
-    endday : int
-        Ending day of the season to flag
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
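-
-    Examples
-    --------
-    A hypothetical sketch, checking a range only for the (northern hemisphere) summer months
-    (field name and bounds are made up for illustration):
-
-    >>> data, flagger = flagSesonalRange(data, "temp", flagger, min=0, max=45, startmonth=6, endmonth=8)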
-    """
-    if data[field].empty:
-        return data, flagger
-
-    newfield = f"{field}_masked"
-    start = f"{startmonth:02}-{startday:02}T00:00:00"
-    end = f"{endmonth:02}-{endday:02}T00:00:00"
-
-    data, flagger = proc_fork(data, field, flagger, suffix="_masked")
-    data, flagger = modelling_mask(data, newfield, flagger, mode='seasonal', season_start=start, season_end=end,
-                                   include_bounds=True)
-    data, flagger = flagRange(data, newfield, flagger, min=min, max=max, **kwargs)
-    data, flagger = proc_projectFlags(data, field, flagger, method='match', source=newfield)
-    data, flagger = proc_drop(data, newfield, flagger)
-    return data, flagger
-
-
-@register(masking='field')
-def clearFlags(data, field, flagger, **kwargs):
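-    # Remove/reset any flags previously set for `field` (thin wrapper around flagger.clearFlags).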
-    flagger = flagger.clearFlags(field, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def forceFlags(data, field, flagger, flag, **kwargs):
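-    # Clear all existing flags for `field`, then force-set every value to the given `flag`.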
-    flagger = flagger.clearFlags(field).setFlags(field, flag=flag, inplace=True, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def flagIsolated(
-        data, field, flagger, gap_window, group_window, **kwargs,
-):
-    """
-    The function flags arbitrarily large groups of values, if they are surrounded by sufficiently
-    large data gaps. A gap is defined as a group of missing and/or flagged values.
-
-    A series of values x_k,x_(k+1),...,x_(k+n), with associated timestamps t_k,t_(k+1),...,t_(k+n),
-    is considered to be isolated, if:
-
-    1. t_(k+n) - t_k < `group_window`
-    2. None of the x_j with 0 < t_k - t_j < `gap_window` is valid and unflagged (preceding gap).
-    3. None of the x_j with 0 < t_j - t_(k+n) < `gap_window` is valid and unflagged (succeeding gap).
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    gap_window : str
-        The minimum size (as an offset string) of the gap before and after a group of valid values, for that group
-        to be considered an isolated group. See conditions (2) and (3).
-    group_window : str
-        The maximum temporal extension (as an offset string) allowed for a group that is isolated by gaps of size
-        `gap_window`, to be actually flagged as an isolated group. See condition (1).
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
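-
-    Examples
-    --------
-    A hypothetical sketch (the field name and window sizes are made up; both windows are offset strings):
-
-    >>> data, flagger = flagIsolated(data, "temp", flagger, gap_window="2D", group_window="12H")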
-    """
-
-    gap_window = pd.tseries.frequencies.to_offset(gap_window)
-    group_window = pd.tseries.frequencies.to_offset(group_window)
-
-    col = data[field].mask(flagger.isFlagged(field))
-    mask = col.isnull()
-
-    flags = pd.Series(data=0, index=col.index, dtype=bool)
-    for srs in groupConsecutives(mask):
-        if np.all(~srs):
-            start = srs.index[0]
-            stop = srs.index[-1]
-            if stop - start <= group_window:
-                left = mask[start - gap_window: start].iloc[:-1]
-                if left.all():
-                    right = mask[stop: stop + gap_window].iloc[1:]
-                    if right.all():
-                        flags[start:stop] = True
-
-    flagger = flagger.setFlags(field, flags, **kwargs)
-
-    return data, flagger
-
-
-@register(masking='field')
-def flagDummy(data, field, flagger, **kwargs):
-    """
-    Function does nothing but return the data and flagger unchanged.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-    """
-    return data, flagger
-
-
-@register(masking='field')
-def flagForceFail(data, field, flagger, **kwargs):
-    """
-    Function raises a runtime error.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-
-    """
-    raise RuntimeError("Works as expected :D")
-
-
-@register(masking='field')
-def flagUnflagged(data, field, flagger, **kwargs):
-    """
-    Function sets the flagger.GOOD flag to all values flagged better than flagger.GOOD.
-    If there is an entry 'flag' in the kwargs dictionary passed, the
-    function sets the kwargs['flag'] flag to all values flagged better than kwargs['flag'].
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    kwargs : Dict
-        If kwargs contains a 'flag' entry, kwargs['flag'] is set; if no 'flag' entry is present,
-        'flagger.GOOD' is set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-    """
-
-    flag = kwargs.pop('flag', flagger.GOOD)
-    flagger = flagger.setFlags(field, flag=flag, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def flagGood(data, field, flagger, **kwargs):
-    """
-    Function sets the flagger.GOOD flag to all values flagged better than flagger.GOOD.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-
-    """
-    kwargs.pop('flag', None)
-    return flagUnflagged(data, field, flagger, **kwargs)
-
-
-@register(masking='field')
-def flagManual(data, field, flagger, mdata, mflag: Any = 1, method="plain", **kwargs):
-    """
-    Flag data by given, "manually generated" data.
-
-    The data is flagged at locations where `mdata` is equal to a provided flag (`mflag`).
-    The format of mdata can be an indexed object, like pd.Series, pd.Dataframe or dios.DictOfSeries,
-    but also can be a plain list- or array-like.
-    How indexed mdata is aligned to data is specified via the `method` parameter.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    mdata : {pd.Series, pd.Dataframe, DictOfSeries, str}
-        The "manually generated" data
-    mflag : scalar
-        The flag that indicates data points in `mdata` whose projection in `data` should be flagged.
-    method : {'plain', 'ontime', 'left-open', 'right-open'}, default 'plain'
-        Defines how mdata is projected on data. Except for the 'plain' method, the methods assume mdata to have an
-        index.
-
-        * 'plain': mdata must have the same length as data and is projected one-to-one on data.
-        * 'ontime': works only with indexed mdata. mdata entries are matched with data entries that have the same index.
-        * 'right-open': mdata defines intervals that values are to be projected on.
-          The intervals are defined by any two consecutive timestamps t_1 and t_2 in mdata.
-          The value at t_1 gets projected onto all data timestamps t with t_1 <= t < t_2.
-        * 'left-open': like 'right-open', but the projected interval now covers all t with t_1 < t <= t_2.
-
-    Returns
-    -------
-    data, flagger: original data, modified flagger
-    
-    Examples
-    --------
-    An example for mdata
-    >>> mdata = pd.Series([1,0,1], index=pd.to_datetime(['2000-02', '2000-03', '2001-05']))
-    >>> mdata
-    2000-02-01    1
-    2000-03-01    0
-    2001-05-01    1
-    dtype: int64
-
-    On *daily* data, with the 'ontime' method, only the provided timestamps are used.
-    Bear in mind that only exact timestamps apply; any offset will result in the
-    timestamp being ignored.
-    >>> _, fl = flagManual(data, field, flagger, mdata, mflag=1, method='ontime')
-    >>> fl.isFlagged(field)
-    2000-01-31    False
-    2000-02-01    True
-    2000-02-02    False
-    2000-02-03    False
-    ..            ..
-    2000-02-29    False
-    2000-03-01    True
-    2000-03-02    False
-    Freq: D, dtype: bool
-
-    With the 'right-open' method, the mdata is forward filled:
-    >>> _, fl = flagManual(data, field, flagger, mdata, mflag=1, method='right-open')
-    >>> fl.isFlagged(field)
-    2000-01-31    False
-    2000-02-01    True
-    2000-02-02    True
-    ..            ..
-    2000-02-29    True
-    2000-03-01    False
-    2000-03-02    False
-    Freq: D, dtype: bool
-
-    With the 'left-open' method, backward filling is used:
-    >>> _, fl = flagManual(data, field, flagger, mdata, mflag=1, method='left-open')
-    >>> fl.isFlagged(field)
-    2000-01-31    False
-    2000-02-01    False
-    2000-02-02    True
-    ..            ..
-    2000-02-29    True
-    2000-03-01    True
-    2000-03-02    False
-    Freq: D, dtype: bool
-    """
-    dat = data[field]
-    if isinstance(mdata, str):
-        # todo import path type in mdata, use
-        #  s = pd.read_csv(mdata, index_col=N, usecol=[N,N,..]) <- use positional
-        #  use a list-arg in config to get the columns
-        #  at last, fall throug to next checks
-        raise NotImplementedError("giving a path is currently not supported")
-
-    if isinstance(mdata, (pd.DataFrame, DictOfSeries)):
-        mdata = mdata[field]
-
-    hasindex = isinstance(mdata, (pd.Series, pd.DataFrame, DictOfSeries))
-    if not hasindex and method != "plain":
-        raise ValueError("mdata has no index")
-
-    if method == "plain":
-        if hasindex:
-            mdata = mdata.to_numpy()
-        if len(mdata) != len(dat):
-            raise ValueError("mdata must have the same length as data")
-        mdata = pd.Series(mdata, index=dat.index)
-    elif method == "ontime":
-        pass  # reindex will do the job later
-    elif method in ["left-open", "right-open"]:
-        mdata = mdata.reindex(dat.index.union(mdata.index))
-
-        # -->)[t0-->)[t1--> (ffill)
-        if method == "right-open":
-            mdata = mdata.ffill()
-
-        # <--t0](<--t1](<-- (bfill)
-        if method == "left-open":
-            mdata = mdata.bfill()
-    else:
-        raise ValueError(method)
-
-    mask = mdata == mflag
-    mask = mask.reindex(dat.index).fillna(False)
-    flagger = flagger.setFlags(field=field, loc=mask, **kwargs)
-    return data, flagger
-
-
-@register(masking='all')
-def flagCrossScoring(data, field, flagger, fields, thresh, cross_stat='modZscore', **kwargs):
-    """
-    Function checks for outliers relative to the "horizontal" input data axis.
-
-    For `fields` :math:`=[f_1,f_2,...,f_N]` and timestamps :math:`[t_1,t_2,...,t_K]`, the following steps are taken
-    for outlier detection:
-
-    1. All timestamps :math:`t_i`, where there is one :math:`f_k`, with :math:`data[f_k]` having no entry at
-       :math:`t_i`, are excluded from the following process (inner join of the :math:`f_k` fields).
-    2. For every :math:`0 <= i <= K`, the value
-       :math:`m_i = median(\\{data[f_1][t_i], data[f_2][t_i], ..., data[f_N][t_i]\\})` is calculated.
-    3. For every :math:`0 <= i <= K`, the set
-       :math:`\\{data[f_1][t_i] - m_i, data[f_2][t_i] - m_i, ..., data[f_N][t_i] - m_i\\}` is tested for outliers with the
-       specified method (`cross_stat` parameter).
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        A dummy parameter.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    fields : str
-        List of fieldnames in data, determining which variables are to be included into the flagging process.
-    thresh : float
-        Threshold that the outlier score of a value must exceed in order to be flagged an outlier.
-    cross_stat : {'modZscore', 'Zscore'}, default 'modZscore'
-        Method used for calculating the outlier scores.
-
-        * ``'modZscore'``: Median-based "sigma"-ish approach. See References [1].
-        * ``'Zscore'``: Score values by how many standard deviations they differ from the mean.
-          See References [1].
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the input flagger.
-
-    References
-    ----------
-    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
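-
-    Examples
-    --------
-    A hypothetical sketch (`field` is only a dummy parameter here; the field names in `fields` are made up):
-
-    >>> data, flagger = flagCrossScoring(data, "dummy", flagger, fields=["s1", "s2", "s3"], thresh=3.5)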
-    """
-
-    df = data[fields].loc[data[fields].index_of('shared')].to_df()
-
-    if isinstance(cross_stat, str):
-        if cross_stat == 'modZscore':
-            MAD_series = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
-            diff_scores = ((0.6745 * (df.subtract(df.median(axis=1), axis=0))).divide(MAD_series, axis=0)).abs()
-        elif cross_stat == 'Zscore':
-            diff_scores = (df.subtract(df.mean(axis=1), axis=0)).divide(df.std(axis=1), axis=0).abs()
-        else:
-            raise ValueError(cross_stat)
-    else:
-        try:
-            stat = getattr(df, cross_stat.__name__)(axis=1)
-        except AttributeError:
-            stat = df.aggregate(cross_stat, axis=1)
-        diff_scores = df.subtract(stat, axis=0).abs()
-
-    mask = diff_scores > thresh
-    for var in fields:
-        flagger = flagger.setFlags(var, mask[var], **kwargs)
-
-    return data, flagger
-
-@register(masking='all')
-def flagDriftFromNorm(data, field, flagger, fields, segment_freq, norm_spread, norm_frac=0.5,
-                      metric=lambda x, y: scipy.spatial.distance.pdist(np.array([x, y]),
-                                                                       metric='cityblock') / len(x),
-                      linkage_method='single', **kwargs):
-    """
-    The function flags value courses that significantly deviate from a group of normal value courses.
-
-    "Normality" is determined in terms of a maximum spreading distance that members of a normal group must not exceed.
-    In addition, a group is only considered "normal" if it contains more than a fraction of `norm_frac` of the
-    variables in `fields`.
-
-    See the Notes section for a more detailed presentation of the algorithm
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        A dummy parameter.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    fields : str
-        List of fieldnames in data, determining which variables are to be included into the flagging process.
-    segment_freq : str
-        An offset string, determining the size of the separate data chunks that the algorithm is applied on
-        piecewise.
-    norm_spread : float
-        A parameter limiting the maximum "spread" of the timeseries, allowed in the "normal" group. See Notes section
-        for more details.
-    norm_frac : float, default 0.5
-        Has to be in [0,1]. Determines the minimum fraction of variables the "normal" group has to comprise in order
-        to actually be the normal group. The higher that value, the more stable the algorithm will be with respect to
-        false positives. Also, nobody knows what happens if this value is below 0.5.
-    metric : Callable[(numpy.array, numpy.array), float]
-        A distance function. It should be a function of two 1-dimensional arrays and return a float scalar value.
-        This value is interpreted as the distance of the two input arrays. The default is the averaged Manhattan metric.
-        See the Notes section to get an idea of why this could be a good choice.
-    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
-        The linkage method used for hierarchical (agglomerative) clustering of the timeseries.
-        See the Notes section for more details.
-        The keyword gets passed on to scipy.hierarchy.linkage. See its documentation to learn more about the different
-        keywords (References [1]).
-        See wikipedia for an introduction to hierarchical clustering (References [2]).
-    kwargs
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the input flagger.
-
-    Notes
-    -----
-    The following steps are performed for every data "segment" of length `segment_freq` in order to find the
-    "abnormal" data:
-
-    1. Calculate the distances :math:`d(x_i,x_j)` for all :math:`x_i` in parameter `fields` (with :math:`d`
-       denoting the distance function passed to the parameter `metric`).
-    2. Calculate a dendrogram with a hierarchical linkage algorithm, specified by the parameter `linkage_method`.
-    3. Flatten the dendrogram at the level where the agglomeration costs exceed the value given by the
-       parameter `norm_spread`.
-    4. Check if there is a cluster containing more than a fraction of `norm_frac` of the variables in `fields`.
-
-        1. if yes: flag all the variables that are not in that cluster (inside the segment)
-        2. if no: flag nothing
-
-    The main parameter giving control over the algorithm's behavior is the `norm_spread` parameter, which determines
-    the maximum spread of a normal group by limiting the costs a cluster agglomeration must not exceed in any
-    linkage step.
-    For singleton clusters, this cost simply equals half the distance the timeseries in the clusters have to
-    each other. Hence, no two timeseries can be clustered together that are more than
-    2*`norm_spread` distant from each other.
-    When timeseries get clustered together, the distance of this new cluster to all the other timeseries/clusters is
-    calculated according to the linkage method specified by `linkage_method`. By default, this is the minimum distance
-    the members of the clusters have to each other.
-    Having that in mind, it is advisable to choose a distance function that can be well interpreted in the unit
-    dimension of the measurement and whose interpretation is invariant with respect to the length of the timeseries.
-    That is why the "averaged Manhattan metric" is set as the metric default, since it corresponds to the
-    averaged value distance of two timeseries (as opposed to, e.g., the Euclidean distance).
-
-    References
-    ----------
-    Documentation of the underlying hierarchical clustering algorithm:
-        [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
-    Introduction to Hierarchical clustering:
-        [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
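-
-    Examples
-    --------
-    A hypothetical sketch (`field` is a dummy parameter; field names, segment size and spread are made up):
-
-    >>> data, flagger = flagDriftFromNorm(data, "dummy", flagger, fields=["s1", "s2", "s3"],
-    ...                                   segment_freq="1D", norm_spread=1.0)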
-    """
-
-    data_to_flag = data[fields].to_df()
-    data_to_flag.dropna(inplace=True)
-    segments = data_to_flag.groupby(pd.Grouper(freq=segment_freq))
-    for segment in segments:
-        if segment[1].shape[0] <= 1:
-            continue
-        drifters = detectDeviants(segment[1], metric, norm_spread, norm_frac, linkage_method, 'variables')
-
-        for var in drifters:
-            flagger = flagger.setFlags(fields[var], loc=segment[1].index, **kwargs)
-
-    return data, flagger
-
-@register(masking='all')
-def flagDriftFromReference(data, field, flagger, fields, segment_freq, thresh,
-                      metric=lambda x, y: scipy.spatial.distance.pdist(np.array([x, y]),
-                                                                    metric='cityblock')/len(x),
-                       **kwargs):
-    """
-    The function flags value courses that deviate from a reference course by a margin exceeding a certain threshold.
-
-    The deviation is measured by the distance function passed to parameter metric.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The reference variable, the deviation from which determines the flagging.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    fields : str
-        List of fieldnames in data, determining which variables are to be included into the flagging process.
-    segment_freq : str
-        An offset string, determining the size of the separate data chunks that the algorithm is applied on
-        piecewise.
-    thresh : float
-        The maximum distance by which normal variables may deviate from the reference variable.
-    metric : Callable[(numpy.array, numpy.array), float]
-        A distance function. It should be a function of two 1-dimensional arrays and return a float scalar value.
-        This value is interpreted as the distance of the two input arrays. The default is the averaged Manhattan metric.
-        See the Notes section to get an idea of why this could be a good choice.
-    kwargs
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the input flagger.
-
-    Notes
-    -----
-    It is advisable to choose a distance function that can be well interpreted in the unit
-    dimension of the measurement and whose interpretation is invariant with respect to the length of the timeseries.
-    That is why the "averaged Manhattan metric" is set as the metric default, since it corresponds to the
-    averaged value distance of two timeseries (as opposed to, e.g., the Euclidean distance).
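-
-    Examples
-    --------
-    A hypothetical sketch (`field` names the reference variable; all names and values are made up):
-
-    >>> data, flagger = flagDriftFromReference(data, "s_ref", flagger, fields=["s1", "s2"],
-    ...                                        segment_freq="1D", thresh=1.5)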
-    """
-
-    data_to_flag = data[fields].to_df()
-    data_to_flag.dropna(inplace=True)
-    if field not in fields:
-        fields.append(field)
-    var_num = len(fields)
-    segments = data_to_flag.groupby(pd.Grouper(freq=segment_freq))
-
-    for segment in segments:
-
-        if segment[1].shape[0] <= 1:
-            continue
-        for i in range(var_num):
-            dist = metric(segment[1].iloc[:, i].values, segment[1].loc[:, field].values)
-            if dist > thresh:
-                flagger = flagger.setFlags(fields[i], loc=segment[1].index, **kwargs)
-
-    return data, flagger
-
-
-@register(masking='all')
-def flagDriftScale(data, field, flagger, fields_scale1, fields_scale2, segment_freq, norm_spread, norm_frac=0.5,
-                      metric=lambda x, y: scipy.spatial.distance.pdist(np.array([x, y]),
-                                                                                    metric='cityblock')/len(x),
-                      linkage_method='single', **kwargs):
-
-
-    """
-    The function linearly rescales one set of variables to another set of variables with a different scale and then
-    flags value courses that significantly deviate from a group of normal value courses.
-
-    The two sets of variables can be linearly scaled one to another, and hence the scaling transformation is performed
-    via linear regression: a linear regression is performed on each pair of variables, giving a slope and an intercept.
-    The transformation is then calculated as the median of all the obtained slopes and intercepts.
-
-    Once the transformation is performed, the function flags those values that deviate from a group of normal values.
-    "Normality" is determined in terms of a maximum spreading distance that members of a normal group must not exceed.
-    In addition, a group is only considered "normal" if it contains more than a fraction of `norm_frac` of the
-    variables in `fields`.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        A dummy parameter.
-    flagger : saqc.flagger
-        A flagger object, holding flags and additional informations related to `data`.
-    fields_scale1 : str
-        List of fieldnames in data to be included into the flagging process which are scaled according to scaling
-        scheme 1.
-    fields_scale2 : str
-        List of fieldnames in data to be included into the flagging process which are scaled according to scaling
-        scheme 2.
-    segment_freq : str
-        An offset string, determining the size of the separate data chunks that the algorithm is applied on
-        piecewise.
-    norm_spread : float
-        A parameter limiting the maximum "spread" of the timeseries, allowed in the "normal" group. See Notes section
-        for more details.
-    norm_frac : float, default 0.5
-        Has to be in [0,1]. Determines the minimum fraction of variables the "normal" group has to comprise in order
-        to actually be the normal group. The higher that value, the more stable the algorithm will be with respect to
-        false positives. Also, nobody knows what happens if this value is below 0.5.
-    metric : Callable[(numpy.array, numpy.array), float]
-        A distance function. It should be a function of two 1-dimensional arrays and return a float scalar value.
-        This value is interpreted as the distance of the two input arrays. The default is the averaged Manhattan metric.
-        See the Notes section to get an idea of why this could be a good choice.
-    linkage_method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
-        The linkage method used for hierarchical (agglomerative) clustering of the timeseries.
-        See the Notes section for more details.
-        The keyword gets passed on to scipy.hierarchy.linkage. See its documentation to learn more about the different
-        keywords (References [1]).
-        See wikipedia for an introduction to hierarchical clustering (References [2]).
-    kwargs
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the input flagger.
-
-    References
-    ----------
-    Documentation of the underlying hierarchical clustering algorithm:
-        [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
-    Introduction to Hierarchical clustering:
-        [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
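-
-    Examples
-    --------
-    A hypothetical sketch (`field` is a dummy parameter; the two field groups and parameters are made up):
-
-    >>> data, flagger = flagDriftScale(data, "dummy", flagger, fields_scale1=["s1", "s2"],
-    ...                                fields_scale2=["t1", "t2"], segment_freq="1D", norm_spread=1.0)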
-    """
-
-    fields = fields_scale1 + fields_scale2
-    data_to_flag = data[fields].to_df()
-    data_to_flag.dropna(inplace=True)
-
-    convert_slope = []
-    convert_intercept = []
-
-    for field1 in fields_scale1:
-        for field2 in fields_scale2:
-            slope, intercept, r_value, p_value, std_err = stats.linregress(data_to_flag[field1], data_to_flag[field2])
-            convert_slope.append(slope)
-            convert_intercept.append(intercept)
-
-    factor_slope = np.median(convert_slope)
-    factor_intercept = np.median(convert_intercept)
-
-    dat = dios.DictOfSeries()
-    for field1 in fields_scale1:
-        dat[field1] = factor_intercept + factor_slope * data_to_flag[field1]
-    for field2 in fields_scale2:
-        dat[field2] = data_to_flag[field2]
-
-    dat_to_flag = dat[fields].to_df()
-
-    segments = dat_to_flag.groupby(pd.Grouper(freq=segment_freq))
-    for segment in segments:
-        if segment[1].shape[0] <= 1:
-            continue
-        drifters = detectDeviants(segment[1], metric, norm_spread, norm_frac, linkage_method, 'variables')
-        for var in drifters:
-            flagger = flagger.setFlags(fields[var], loc=segment[1].index, **kwargs)
-
-    return data, flagger
diff --git a/saqc/funcs/generic.py b/saqc/funcs/generic.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb75ed4aecfe48adfd41b9a01c71f655c3dd4742
--- /dev/null
+++ b/saqc/funcs/generic.py
@@ -0,0 +1,213 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from functools import partial
+from inspect import signature
+
+import dios
+import numpy as np
+import pandas as pd
+
+from saqc.core.register import register
+from saqc.core.visitor import ENVIRONMENT
+
+
+def _dslIsFlagged(flagger, var, flag=None, comparator=">="):
+    """
+    helper function for `flag`
+    """
+    return flagger.isFlagged(var.name, flag=flag, comparator=comparator)
+
+
+def _execGeneric(flagger, data, func, field, nodata):
+    # TODO:
+    # - check series.index compatibility
+    # - field is only needed to translate 'this' parameters
+    #    -> maybe we could do the translation on the tree instead
+
+    sig = signature(func)
+    args = []
+    for k, v in sig.parameters.items():
+        k = field if k == "this" else k
+        if k not in data:
+            raise NameError(f"variable '{k}' not found")
+        args.append(data[k])
+
+    globs = {
+        "isflagged": partial(_dslIsFlagged, flagger),
+        "ismissing": lambda var: ((var == nodata) | pd.isnull(var)),
+        "mask": lambda cond: data[cond.name].mask(cond),
+        "this": field,
+        "NODATA": nodata,
+        "GOOD": flagger.GOOD,
+        "BAD": flagger.BAD,
+        "UNFLAGGED": flagger.UNFLAGGED,
+        **ENVIRONMENT,
+    }
+    func.__globals__.update(globs)
+    return func(*args)
+
+
+@register(masking='all')
+def process(data, field, flagger, func, nodata=np.nan, **kwargs):
+    """
+    Generate/process data with generically defined functions.
+
+    The functions can depend on any of the fields present in data.
+
+    Formally, the function does the following:
+
+    1.  Let F be a Callable, depending on fields f_1, f_2,...f_K, (F = F(f_1, f_2,...f_K))
+        Then, for every timestamp t_i that occurs in at least one of the timeseries data[f_j] (outer join),
+        the value v_i is computed via:
+        v_i = F(data[f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]), if all data[f_j][t_i] exist,
+        v_i = `nodata`, if at least one of the data[f_j][t_i] is missing.
+    2.  The result is stored to data[field] (gets generated if not present)
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, where you want the result from the generic expressions processing to be written to.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional Informations related to `data`.
+    func : Callable
+        The data processing function with parameter names that will be
+        interpreted as data column entries.
+        See the examples section to learn more.
+    nodata : any, default np.nan
+        The value that indicates missing/invalid data
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        The shape of the data may have changed relatively to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional Informations related to `data`.
+        The flags shape may have changed relatively to the input flagger.
+
+    Examples
+    --------
+    Some examples on what to pass to the func parameter:
+    To compute the sum of the variables "temperature" and "uncertainty", you would pass the function:
+
+    >>> lambda temperature, uncertainty: temperature + uncertainty
+
+    You also can pass numpy and pandas functions:
+
+    >>> lambda temperature, uncertainty: np.round(temperature) * np.sqrt(uncertainty)
+
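+    A full call could then look like the following hypothetical sketch (the target field name
+    "temp_sum" is made up; the fields "temperature" and "uncertainty" are assumed to exist in `data`):
+
+    >>> data, flagger = process(data, "temp_sum", flagger,
+    ...                         func=lambda temperature, uncertainty: temperature + uncertainty)
+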
+    """
+    data[field] = _execGeneric(flagger, data, func, field, nodata).squeeze()
+    # NOTE:
+    # The flags to `field` will be (re-)set to UNFLAGGED
+    # That leads to the following problem:
+    # flagger.merge merges the given flaggers, if
+    # `field` did already exist before the call to `procGeneric`
+    # but with a differing index, we end up with:
+    # len(data[field]) != len(flagger.getFlags(field))
+    # see: test/funcs/test_generic_functions.py::test_procGenericMultiple
+
+    # TODO:
+    # We need a way to simply overwrite a given flagger column, maybe
+    # an optional keyword to merge ?
+    flagger = flagger.merge(flagger.initFlags(data[field]))
+    return data, flagger
+
+
+@register(masking='all')
+def flag(data, field, flagger, func, nodata=np.nan, **kwargs):
+    """
+    A function to flag a data column by evaluating a generic expression.
+
+    The expression can depend on any of the fields present in data.
+
+    Formally, what the function does, is the following:
+
+    Let X be an expression, depending on fields f_1, f_2,...f_K, (X = X(f_1, f_2,...f_K))
+    Then for every timestamp t_i in data[field]:
+    data[field][t_i] is flagged if X(data[f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]) is True.
+
+    Note that all value series included in the expression to evaluate must be labeled identically to `field`.
+
+    Note that the expression is passed in the form of a Callable whose parameter names are
+    interpreted as actual column names in the data. See the examples section to get an idea.
+
+    Note that all numpy functions are available within generic expressions.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, where you want the result from the generic expressions evaluation to be projected
+        to.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional Informations related to `data`.
+    func : Callable
+        The expression that is to be evaluated is passed in the form of a callable, with parameter names that will be
+        interpreted as data column entries. The Callable must return a boolean array-like.
+        See the examples section to learn more.
+    nodata : any, default np.nan
+        The value that indicates missing/invalid data
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional Informations related to `data`.
+        Flags values may have changed relatively to the flagger input.
+
+    Examples
+    --------
+    Some examples on what to pass to the func parameter:
+    To flag the variable `field`, if the sum of the variables
+    "temperature" and "uncertainty" is below zero, you would pass the function:
+
+    >>> lambda temperature, uncertainty: temperature + uncertainty < 0
+
+    There is the reserved name 'this', that always refers to `field`. So, to flag `field` if it is negative, you can
+    also pass:
+
+    >>> lambda this: this < 0
+
+    If you want to make the flagging depend on flags already present in the data, you can use the built-in
+    ``isflagged`` method. For example, to flag 'temperature' if 'level' is flagged, you would use:
+
+    >>> lambda level: isflagged(level)
+
+    You can furthermore specify a flagging level you want to compare the flags to. For example, to flag
+    'temperature' if 'level' is flagged at a level named 'doubtfull' or worse, use:
+
+    >>> lambda level: isflagged(level, flag='doubtfull', comparator='<=')
+
+    If you are unsure about the flagging level names of the flagger in use, you can use the reserved keywords BAD, UNFLAGGED
+    and GOOD to refer to the worst (BAD), best (GOOD) or unflagged (UNFLAGGED) flagging levels. For example:
+
+    >>> lambda level: isflagged(level, flag=UNFLAGGED, comparator='==')
+
+    Your expression is also allowed to include pandas and numpy functions:
+
+    >>> lambda level: np.sqrt(level) > 7
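+
+    A full call could then look like the following hypothetical sketch (the field name "temperature"
+    is made up for illustration):
+
+    >>> data, flagger = flag(data, "temperature", flagger, func=lambda this: this < 0)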
+    """
+    # NOTE:
+    # The naming of the func parameter is pretty confusing
+    # as it actually holds the result of a generic expression
+    mask = _execGeneric(flagger, data, func, field, nodata).squeeze()
+    if np.isscalar(mask):
+        raise TypeError(f"generic expression does not return an array")
+    if not np.issubdtype(mask.dtype, np.bool_):
+        raise TypeError(f"generic expression does not return a boolean array")
+
+    if field not in flagger.getFlags():
+        flagger = flagger.merge(flagger.initFlags(data=pd.Series(index=mask.index, name=field)))
+
+    # if flagger.getFlags(field).empty:
+    #     flagger = flagger.merge(
+    #         flagger.initFlags(
+    #             data=pd.Series(name=field, index=mask.index, dtype=np.float64)))
+    flagger = flagger.setFlags(field=field, loc=mask, **kwargs)
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/harm_functions.py b/saqc/funcs/harm_functions.py
deleted file mode 100644
index 49762412c2e0473d0604b59209739cd7641f0ce5..0000000000000000000000000000000000000000
--- a/saqc/funcs/harm_functions.py
+++ /dev/null
@@ -1,351 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-
-import numpy as np
-import logging
-from saqc.core.register import register
-from saqc.funcs.proc_functions import (
-    proc_interpolateGrid,
-    proc_shift,
-    proc_fork,
-    proc_resample,
-    proc_projectFlags,
-    proc_drop,
-    proc_rename,
-    ORIGINAL_SUFFIX,
-)
-
-logger = logging.getLogger("SaQC")
-
-
-@register(masking='none')
-def harm_shift2Grid(data, field, flagger, freq, method="nshift", to_drop=None, **kwargs):
-    """
-    A method to "regularize" data by shifting data points forward/backward to a regular timestamp.
-
-    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
-
-    Method keywords:
-
-    * ``'nshift'``:  every grid point gets assigned the nearest value in its range (*range = +/-(freq/2)*)
-    * ``'bshift'``:  every grid point gets assigned its first succeeding value - if there is one available in the
-      succeeding sampling interval.
-    * ``'fshift'``:  every grid point gets assigned its ultimately preceding value - if there is one available in
-      the preceding sampling interval.
-
-    Note: the flags associated with every datapoint will just get shifted with them.
-
-    Note: if there is no valid data (existing and not-na) available in a sampling interval assigned to a regular
-    timestamp by the selected method, nan gets assigned to this timestamp. The associated flag will be of value
-    ``flagger.UNFLAGGED``.
-
-    Note: by default, all NaN values in the data are excluded from shifting. If `to_drop` is None, all *BAD*-flagged
-    values get excluded as well.
-
-    Note: the method will likely and significantly alter values and shape of ``data[field]``. The original data is kept
-    in the data dios and assigned to the fieldname ``field + '_original'``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The field name of the column, holding the data-to-be-regularized.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional information related to `data`.
-    freq : str
-        The frequency of the grid you want to shift your data to.
-    method : {'nshift', 'bshift', 'fshift'}, default 'nshift'
-        Specifies if datapoints get propagated forwards, backwards or to the nearest grid timestamp.
-        See description above for details
-    to_drop : {List[str], str}, default None
-        Flag types you want to drop before shifting - effectively excluding values that are flagged
-        with a flag in to_drop from the shifting process. Default - results in flagger.BAD
-        values being dropped initially.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
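-
-    Examples
-    --------
-    A hypothetical sketch (the field name and grid frequency are made up for illustration):
-
-    >>> data, flagger = harm_shift2Grid(data, "temp", flagger, freq="15Min", method="nshift")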
-    """
-
-    data, flagger = proc_fork(data, field, flagger)
-    data, flagger = proc_shift(
-        data, field, flagger, freq, method, to_drop=to_drop, empty_intervals_flag=flagger.UNFLAGGED, **kwargs
-    )
-    return data, flagger
-
-
-@register(masking='none')
-def harm_aggregate2Grid(
-        data, field, flagger, freq, value_func, flag_func=np.nanmax, method="nagg", to_drop=None, **kwargs
-):
-    """
-    A method to "regularize" data by aggregating (resampling) data at a regular timestamp.
-
-    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
-
-    The data will therefore get aggregated with a function specified by the `value_func` parameter, and
-    the result gets projected onto the new timestamps with a method specified by `method`.
-
-    The following method (keywords) are available:
-
-    * ``'nagg'``: (aggregation to nearest) - all values in the range (+/- freq/2) of a grid point get aggregated with
-      `value_func` and assigned to it. Flags get aggregated by `flag_func` and assigned the same way.
-    * ``'bagg'``: (backwards aggregation) - all values in a sampling interval get aggregated with `value_func` and the
-      result gets assigned to the last regular timestamp. Flags get aggregated by `flag_func` and assigned the same way.
-    * ``'fagg'``: (forward aggregation) - all values in a sampling interval get aggregated with `value_func` and the result
-      gets assigned to the next regular timestamp. Flags get aggregated by `flag_func` and assigned the same way.
-
-    Note that, if there is no valid data (existing and not-na) available in a sampling interval assigned to a regular
-    timestamp by the selected method, nan gets assigned to this timestamp. The associated flag will be of value
-    ``flagger.UNFLAGGED``.
-
-    Note: the method will likely and significantly alter values and shape of ``data[field]``. The original data is kept
-    in the data dios and assigned to the fieldname ``field + '_original'``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-regularized.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional information related to `data`.
-    freq : str
-        The sampling frequency the data is to be aggregated (resampled) at.
-    value_func : Callable
-        The function you want to use for aggregation.
-    flag_func : Callable
-        The function you want to aggregate the flags with. It should be capable of operating on the flags dtype
-        (usually ordered categorical).
-    method : {'fagg', 'bagg', 'nagg'}, default 'nagg'
-        Specifies which intervals to be aggregated for a certain timestamp. (preceeding, succeeding or
-        "surrounding" interval). See description above for more details.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before aggregation - effectively excluding values that are flagged
-        with a flag in to_drop from the aggregation process. Default results in flagger.BAD
-        values being dropped initially.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
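-
-    Examples
-    --------
-    A hypothetical sketch, resampling to hourly means (the field name and frequency are made up):
-
-    >>> data, flagger = harm_aggregate2Grid(data, "temp", flagger, freq="1H",
-    ...                                     value_func=np.nanmean, method="nagg")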
-    """
-
-    data, flagger = proc_fork(data, field, flagger)
-    data, flagger = proc_resample(
-        data,
-        field,
-        flagger,
-        freq,
-        agg_func=value_func,
-        flag_agg_func=flag_func,
-        method=method,
-        empty_intervals_flag=flagger.UNFLAGGED,
-        to_drop=to_drop,
-        all_na_2_empty=True,
-        **kwargs,
-    )
-    return data, flagger
-
-
-@register(masking='none')
-def harm_linear2Grid(data, field, flagger, freq, to_drop=None, **kwargs):
-    """
-    A method to "regularize" data by interpolating linearly the data at regular timestamp.
-
-    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
-
-    Interpolated values will get assigned the worst flag within freq-range.
-
-    Note: the method will likely and significantly alter values and shape of ``data[field]``. The original data is kept
-    in the data dios and assigned to the fieldname ``field + '_original'``.
-
-    Note that the data only gets interpolated at those (regular) timestamps that have a valid (existing and
-    not-na) datapoint preceding them and one succeeding them within freq range.
-    Regular timestamps that do not satisfy this condition get NaN assigned, and the associated flag will be of value
-    ``flagger.UNFLAGGED``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-regularized.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional information related to `data`.
-    freq : str
-        An offset string. The frequency of the grid you want to interpolate your data at.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
-        with a flag in to_drop from the interpolation process. Default results in flagger.BAD
-        values being dropped initially.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
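-
-    Examples
-    --------
-    A hypothetical sketch (the field name and grid frequency are made up for illustration):
-
-    >>> data, flagger = harm_linear2Grid(data, "temp", flagger, freq="15Min")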
-    """
-
-    data, flagger = proc_fork(data, field, flagger)
-    data, flagger = proc_interpolateGrid(
-        data, field, flagger, freq, "time", to_drop=to_drop, empty_intervals_flag=flagger.UNFLAGGED, **kwargs
-    )
-    return data, flagger
-
-
-@register(masking='none')
-def harm_interpolate2Grid(data, field, flagger, freq, method, order=1, to_drop=None, **kwargs,):
-    """
-    A method to "regularize" data by interpolating the data at regular timestamp.
-
-    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
-
-    Interpolated values will get assigned the worst flag within freq-range.
-
-    All interpolation methods from the pandas.Series.interpolate method are available and are selected by
-    the very same keywords.
-
-    Note, that, to perform a timestamp aware, linear interpolation, you have to pass ``'time'`` as `method`,
-    and NOT ``'linear'``.
-
-    Note: the `method` will likely and significantly alter values and shape of ``data[field]``. The original data is
-    kept in the data dios and assigned to the fieldname ``field + '_original'``.
-
-    Note that the data only gets interpolated at those (regular) timestamps that have a valid (existing and
-    not-na) datapoint preceding them and one succeeding them within freq range.
-    Regular timestamps that do not satisfy this condition get NaN assigned, and the associated flag will be of value
-    ``flagger.UNFLAGGED``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-regularized.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional information related to `data`.
-    freq : str
-        An offset string. The frequency of the grid you want to interpolate your data at.
-    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
-        The interpolation method you want to apply.
-    order : int, default 1
-        If your selected interpolation method can be performed at different *orders* - here you pass the desired
-        order.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
-        with a flag in `to_drop` from the interpolation process. Default results in ``flagger.BAD``
-        values being dropped initially.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
-    """
-
-    data, flagger = proc_fork(data, field, flagger)
-    data, flagger = proc_interpolateGrid(
-        data,
-        field,
-        flagger,
-        freq,
-        method=method,
-        inter_order=order,
-        to_drop=to_drop,
-        empty_intervals_flag=flagger.UNFLAGGED,
-        **kwargs,
-    )
-    return data, flagger
-
-
-@register(masking='none')
-def harm_deharmonize(data, field, flagger, method, to_drop=None, **kwargs):
-    """
-    The function "undoes" regularization, by regaining the original data and projecting the
-    flags calculated for the regularized data onto the original ones.
-
-    Afterwards the regularized data is removed from the data dios and ``'field'`` will be associated
-    with the original data "again".
-
-    Wherever the flags in the original data are "better" then the regularized flags projected on them,
-    they get overridden with this regularized flags value.
-
-    Which regularized flags are to be projected on which original flags, is controlled by the "method" parameters.
-
-    Generally, if you regularized with the method "X", you should pass the method "inverse_X" to the deharmonization.
-    If you regularized with an interpolation, the method "inverse_interpolation" would be the appropriate choice.
-    Also you should pass the same drop flags keyword.
-
-    The deharm methods in detail:
-    ("original_flags" are associated with the original data that is to be regained,
-    "regularized_flags" are associated with the regularized data that is to be "deharmonized",
-    "freq" refers to the regularized datas sampling frequencie)
-
-    * ``'inverse_nagg'``: all original_flags within the range *+/- freq/2* of a regularized_flag, get assigned this
-      regularized flags value. (if regularized_flags > original_flag)
-    * ``'inverse_bagg'``: all original_flags succeeding a regularized_flag within the range of "freq", get assigned this
-      regularized flags value. (if regularized_flag > original_flag)
-    * ``'inverse_fagg'``: all original_flags preceeding a regularized_flag within the range of "freq", get assigned this
-      regularized flags value. (if regularized_flag > original_flag)
-
-    * ``'inverse_interpolation'``: all original_flags within the range *+/- freq* of a regularized_flag, get assigned this
-      regularized flags value (if regularized_flag > original_flag).
-
-    * ``'inverse_nshift'``: That original_flag within the range +/- *freq/2*, that is nearest to a regularized_flag,
-      gets the regularized flags value. (if regularized_flag > original_flag)
-    * ``'inverse_bshift'``: That original_flag succeeding a source flag within the range freq, that is nearest to a
-      regularized_flag, gets assigned this regularized flags value. (if regularized_flag > original_flag)
-    * ``'inverse_fshift'``: That original_flag preceeding a regularized flag within the range freq, that is nearest to a
-      regularized_flag, gets assigned this regularized flags value. (if regularized_flag > original_flag)
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-deharmonized.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional information related to `data`.
-    method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift',
-            'inverse_interpolation'}
-        The method used for projection of regularized flags onto original flags. See description above for more
-        details.
-    to_drop : {List[str], str}, default None
-        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
-        with a flag in to_drop from the interpolation process. Default results in flagger.BAD
-        values being dropped initially.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
-    """
-
-    newfield = str(field) + ORIGINAL_SUFFIX
-    data, flagger = proc_projectFlags(data, newfield, flagger, method, source=field, to_drop=to_drop, **kwargs)
-    data, flagger = proc_drop(data, field, flagger)
-    data, flagger = proc_rename(data, newfield, flagger, field)
-    return data, flagger
diff --git a/saqc/funcs/interpolation.py b/saqc/funcs/interpolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdb9b53a9e8a4d756f54c1c0391f7665b66d42d0
--- /dev/null
+++ b/saqc/funcs/interpolation.py
@@ -0,0 +1,362 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+import numpy as np
+import pandas as pd
+
+from saqc.core.register import register
+from saqc.lib.tools import toSequence, evalFreqStr, dropper
+from saqc.lib.ts_operators import interpolateNANs
+
+
+@register(masking='field')
+def interpolateByRolling(
+    data, field, flagger, winsz, func=np.median, center=True, min_periods=0, interpol_flag="UNFLAGGED", **kwargs
+):
+    """
+    Interpolates missing values (nan values present in the data) by assigning them the aggregation result of
+    a window surrounding them.
+
+    Note that in the current implementation, center=True can only be used with integer window sizes - furthermore
+    note that integer window sizes can yield skewed aggregation results for not-harmonized or irregular data.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-interpolated.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    winsz : int, str
+        The size of the window, the aggregation is computed from. Either counted in periods number (Integer passed),
+        or defined by a total temporal extension (offset String passed).
+    func : Callable
+        The function used for aggregation.
+    center : bool, default True
+        Whether or not the window the aggregation is computed from is centered around the value to be interpolated.
+    min_periods : int
+        Minimum number of valid (not np.nan) values that have to be available in a window for its aggregation to be
+        computed.
+    interpol_flag : {'GOOD', 'BAD', 'UNFLAGGED', str}, default 'UNFLAGGED'
+        Flag that is to be inserted for the interpolated values. You can either pass one of the three major flag-classes
+        or specify directly a certain flag from the passed flagger.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+
+    data = data.copy()
+    datcol = data[field]
+    roller = datcol.rolling(window=winsz, center=center, min_periods=min_periods)
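+    # try to dispatch to the pandas-native rolling aggregation of the same name (a leading
+    # "nan" is stripped, so e.g. np.nanmedian maps to roller.median()); otherwise fall back
+    # to the slower generic roller.apply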
+    try:
+        func_name = func.__name__
+        if func_name[:3] == "nan":
+            func_name = func_name[3:]
+        rolled = getattr(roller, func_name)()
+    except AttributeError:
+        rolled = roller.apply(func)
+
+    na_mask = datcol.isna()
+    interpolated = na_mask & ~rolled.isna()
+    datcol[na_mask] = rolled[na_mask]
+    data[field] = datcol
+
+    if interpol_flag:
+        if interpol_flag in ["BAD", "UNFLAGGED", "GOOD"]:
+            interpol_flag = getattr(flagger, interpol_flag)
+        flagger = flagger.setFlags(field, loc=interpolated, force=True, flag=interpol_flag, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='field')
+def interpolateInvalid(
+    data,
+    field,
+    flagger,
+    method,
+    inter_order=2,
+    inter_limit=2,
+    interpol_flag="UNFLAGGED",
+    downgrade_interpolation=False,
+    not_interpol_flags=None,
+    **kwargs
+):
+
+    """
+    Function to interpolate nan values in the data.
+
+    All the interpolation methods of ``pandas.Series.interpolate`` are available and are selected by
+    the very same keywords that you would pass to its ``method`` parameter.
+
+    Note that the `inter_limit` keyword restricts the interpolation to gaps not containing more than
+    `inter_limit` successive nan entries.
+
+    Note that the function differs from ``interpolateIndex`` in that it ONLY interpolates nan values that
+    were already present in the data passed.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-interpolated.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
+        The interpolation method you want to apply.
+    inter_order : int, default 2
+        If your selected interpolation method can be performed at different 'orders' - here you pass the desired
+        order.
+    inter_limit : int, default 2
+        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated.
+    interpol_flag : {'GOOD', 'BAD', 'UNFLAGGED', str}, default 'UNFLAGGED'
+        Flag that is to be inserted for the interpolated values. You can either pass one of the three major flag-classes
+        or specify directly a certain flag from the passed flagger.
+    downgrade_interpolation : bool, default False
+        If interpolation can not be performed at `inter_order` - (not enough values or not implemented at this order) -
+        automatically try to interpolate at order `inter_order` :math:`- 1`.
+    not_interpol_flags : {None, str, List[str]}, default None
+        A list of flags or a single flag, marking values you do NOT want to be interpolated.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+    """
+
+    data = data.copy()
+    inter_data = interpolateNANs(
+        data[field],
+        method,
+        order=inter_order,
+        inter_limit=inter_limit,
+        downgrade_interpolation=downgrade_interpolation,
+        return_chunk_bounds=False,
+    )
+    interpolated = data[field].isna() & inter_data.notna()
+
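+    # values flagged with one of the `not_interpol_flags` are excluded again from the
+    # interpolation result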
+    if not_interpol_flags:
+        for f in toSequence(not_interpol_flags):
+            if f in ["BAD", "UNFLAGGED", "GOOD"]:
+                f = getattr(flagger, f)
+            is_flagged = flagger.isFlagged(flag=f)[field]
+            cond = is_flagged & interpolated
+            inter_data.mask(cond, np.nan, inplace=True)
+        interpolated &= inter_data.notna()
+
+    if interpol_flag:
+        if interpol_flag in ["BAD", "UNFLAGGED", "GOOD"]:
+            interpol_flag = getattr(flagger, interpol_flag)
+        flagger = flagger.setFlags(field, loc=interpolated, force=True, flag=interpol_flag, **kwargs)
+
+    data[field] = inter_data
+    return data, flagger
+
+
+@register(masking='field')
+def interpolateIndex(
+        data,
+        field,
+        flagger,
+        freq,
+        method,
+        inter_order=2,
+        to_drop=None,
+        downgrade_interpolation=False,
+        empty_intervals_flag=None,
+        grid_field=None,
+        inter_limit=2,
+        freq_check=None,
+        **kwargs):
+
+    """
+    Function to interpolate the data at regular (equidistant) timestamps (or Grid points).
+
+    Note that the interpolation will only be calculated for grid timestamps that have a preceding AND a succeeding
+    valid data value within "freq" range.
+
+    Note that the function differs from ``interpolateInvalid`` by returning a whole new data set, only containing
+    samples at the interpolated, equidistant timestamps (of frequency "freq").
+
+    Note, it is possible to interpolate irregular "grids" (with no frequencies). In fact, any date index
+    can be the target of the interpolation. Just pass the field name of the variable holding the index
+    you want to interpolate to "grid_field". 'freq' is then used to determine the maximum gap size for
+    a grid point to be interpolated.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-interpolated.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    freq : str
+        An Offset String, interpreted as the frequency of
+        the grid you want to interpolate your data at.
+    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
+        The interpolation method you want to apply.
+    inter_order : integer, default 2
+        If your selected interpolation method can be performed at different 'orders' - here you pass the desired
+        order.
+    to_drop : {None, str, List[str]}, default None
+        Flags that refer to values you want to drop before interpolation - effectively excluding grid points from
+        interpolation that are only surrounded by values carrying a flag listed in `to_drop`. Default
+        results in the flagger's *BAD* flag being used as the drop flag.
+    downgrade_interpolation : bool, default False
+        If interpolation can not be performed at `inter_order` - (not enough values or not implemented at this order) -
+        automatically try to interpolate at order `inter_order` :math:`- 1`.
+    empty_intervals_flag : str, default None
+        A Flag, that you want to assign to those values in the resulting equidistant sample grid, that were not
+        surrounded by valid data in the original dataset, and thus were not interpolated. Default automatically assigns
+        ``flagger.BAD`` flag to those values.
+    grid_field : String, default None
+        Use the timestamp of another variable as (not necessarily regular) "grid" to be interpolated.
+    inter_limit : int, default 2
+        Maximum number of consecutive grid values allowed for interpolation. If set
+        to *n*, chunks of *n* and more consecutive grid values, with no data value in between, won't be
+        interpolated.
+    freq_check : {None, 'check', 'auto'}, default None
+
+        * ``None``: do not validate frequency-string passed to `freq`
+        * ``'check'``: estimate the frequency and log a warning if the estimate mismatches the frequency string passed
+          to 'freq', or if no uniform sampling rate could be estimated
+        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values and shape may have changed relative to the flagger input.
+    """
+
+    datcol = data[field]
+    datcol = datcol.copy()
+    flagscol = flagger.getFlags(field)
+    freq = evalFreqStr(freq, freq_check, datcol.index)
+    if empty_intervals_flag is None:
+        empty_intervals_flag = flagger.BAD
+
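+    # build a mask of values excluded from the interpolation: values flagged with a flag
+    # listed in `to_drop` (defaults to flagger.BAD), plus nan flags and nan data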
+    drop_mask = dropper(field, to_drop, flagger, flagger.BAD)
+    drop_mask |= flagscol.isna()
+    drop_mask |= datcol.isna()
+    datcol[drop_mask] = np.nan
+    datcol.dropna(inplace=True)
+    freq = evalFreqStr(freq, freq_check, datcol.index)
+    if datcol.empty:
+        data[field] = datcol
+        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
+        return data, flagger
+    # account for annoying case of subsequent frequency aligned values, differing exactly by the margin
+    # 2*freq:
+    spec_case_mask = datcol.index.to_series()
+    spec_case_mask = spec_case_mask - spec_case_mask.shift(1)
+    spec_case_mask = spec_case_mask == 2 * pd.Timedelta(freq)
+    spec_case_mask = spec_case_mask[spec_case_mask]
+    spec_case_mask = spec_case_mask.resample(freq).asfreq().dropna()
+
+    if not spec_case_mask.empty:
+        spec_case_mask = spec_case_mask.tshift(-1, freq)
+
+    # prepare grid interpolation:
+    if grid_field is None:
+        grid_index = pd.date_range(start=datcol.index[0].floor(freq), end=datcol.index[-1].ceil(freq), freq=freq,
+                                   name=datcol.index.name)
+    else:
+        grid_index = data[grid_field].index
+
+
+    aligned_start = datcol.index[0] == grid_index[0]
+    aligned_end = datcol.index[-1] == grid_index[-1]
+    datcol = datcol.reindex(datcol.index.join(grid_index, how="outer",))
+
+    # do the interpolation
+    inter_data, chunk_bounds = interpolateNANs(
+        datcol, method, order=inter_order, inter_limit=inter_limit, downgrade_interpolation=downgrade_interpolation,
+        return_chunk_bounds=True
+    )
+
+    if grid_field is None:
+        # override falsely interpolated values:
+        inter_data[spec_case_mask.index] = np.nan
+
+    # store interpolated grid
+    inter_data = inter_data[grid_index]
+    data[field] = inter_data
+
+    # flags reshaping (dropping data drops):
+    flagscol.drop(flagscol[drop_mask].index, inplace=True)
+
+    if grid_field is not None:
+        # only basic flag propagation supported for custom grids (take worst from preceeding/succeeding)
+        preceeding = flagscol.reindex(grid_index, method='ffill', tolerance=freq)
+        succeeding = flagscol.reindex(grid_index, method='bfill', tolerance=freq)
+        # check for too big gaps in the source data and drop the values interpolated in those too big gaps
+        na_mask = preceeding.isna() | succeeding.isna()
+        na_mask = na_mask[na_mask]
+        preceeding.drop(na_mask.index, inplace=True)
+        succeeding.drop(na_mask.index, inplace=True)
+        inter_data.drop(na_mask.index, inplace=True)
+        data[field] = inter_data
+        mask = succeeding > preceeding
+        preceeding.loc[mask] = succeeding.loc[mask]
+        flagscol = preceeding
+        flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(flagger_new)
+        return data, flagger
+
+    # for freq defined grids, max-aggregate flags of every grid points freq-ranged surrounding
+    # hack ahead! Resampling with overlapping intervals:
+    # 1. -> no rolling over categories allowed in pandas, so we translate manually:
+    cats = pd.CategoricalIndex(flagger.dtype.categories, ordered=True)
+    cats_dict = {cats[i]: i for i in range(0, len(cats))}
+    flagscol = flagscol.replace(cats_dict)
+    # 3. -> combine resample+rolling to resample with overlapping intervals:
+    flagscol = flagscol.resample(freq).max()
+    initial = flagscol[0]
+    flagscol = flagscol.rolling(2, center=True, closed="neither").max()
+    flagscol[0] = initial
+    cats_dict = {num: key for (key, num) in cats_dict.items()}
+    flagscol = flagscol.astype(int, errors="ignore").replace(cats_dict)
+    flagscol[flagscol.isna()] = empty_intervals_flag
+    # ...hack done
+
+    # we might miss the flag for interpolated data grids last entry (if we miss it - the datapoint is always nan
+    # - just settling a convention here(resulting GRID should start BEFORE first valid data entry and range to AFTER
+    # last valid data)):
+    if inter_data.shape[0] > flagscol.shape[0]:
+        flagscol = flagscol.append(pd.Series(empty_intervals_flag, index=[datcol.index[-1]]))
+
+    # Additional consistency operation: we have to block first/last interpolated datas flags - since they very
+    # likely represent chunk starts/ends (except data start and or end timestamp were grid-aligned before Grid
+    # interpolation already.)
+    if np.isnan(inter_data[0]) and not aligned_start:
+        chunk_bounds = chunk_bounds.insert(0, inter_data.index[0])
+    if np.isnan(inter_data[-1]) and not aligned_end:
+        chunk_bounds = chunk_bounds.append(pd.DatetimeIndex([inter_data.index[-1]]))
+    chunk_bounds = chunk_bounds.unique()
+    flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+
+    # block chunk ends of interpolation
+    flags_to_block = pd.Series(np.nan, index=chunk_bounds).astype(flagger_new.dtype)
+    flagger_new = flagger_new.setFlags(field, loc=chunk_bounds, flag=flags_to_block, force=True, inplace=True)
+
+    flagger = flagger.slice(drop=field).merge(flagger_new, subset=[field], inplace=True)
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/modelling.py b/saqc/funcs/modelling.py
deleted file mode 100644
index 59f169c521583b41b83c5781741ae1efa5836f05..0000000000000000000000000000000000000000
--- a/saqc/funcs/modelling.py
+++ /dev/null
@@ -1,576 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import numpy as np
-import numba
-from saqc.core.register import register
-from saqc.lib.ts_operators import (
-    polyRoller,
-    polyRollerNoMissing,
-    polyRollerNumba,
-    polyRollerNoMissingNumba,
-    polyRollerIrregular,
-    count
-)
-from saqc.lib.tools import seasonalMask, customRoller
-import logging
-
-logger = logging.getLogger("SaQC")
-
-
-@register(masking='field')
-def modelling_polyFit(data, field, flagger, winsz, polydeg, numba="auto", eval_flags=True, min_periods=0, **kwargs):
-    """
-    Function fits a polynomial model to the data and returns the residues.
-
-    The residue for value x is calculated by fitting a polynomial of degree "polydeg" to a data slice
-    of size "winsz", wich has x at its center.
-
-    Note, that the residues will be stored to the `field` field of the input data, so that the original data, the
-    polynomial is fitted to, gets overridden.
-
-    Note, that, if data[field] is not alligned to an equidistant frequency grid, the window size passed,
-    has to be an offset string. Also numba boost options don`t apply for irregularly sampled
-    timeseries.
-
-    Note, that calculating the residues tends to be quite costy, because a function fitting is perfomed for every
-    sample. To improve performance, consider the following possibillities:
-
-    In case your data is sampled at an equidistant frequency grid:
-
-    (1) If you know your data to have no significant number of missing values, or if you do not want to
-        calculate residues for windows containing missing values any way, performance can be increased by setting
-        min_periods=winsz.
-
-    (2) If your data consists of more then around 200000 samples, setting numba=True, will boost the
-        calculations up to a factor of 5 (for samplesize > 300000) - however for lower sample sizes,
-        numba will slow down the calculations, also, up to a factor of 5, for sample_size < 50000.
-        By default (numba='auto'), numba is set to true, if the data sample size exceeds 200000.
-
-    in case your data is not sampled at an equidistant frequency grid:
-
-    (1) Harmonization/resampling of your data will have a noticable impact on polyfittings performance - since
-        numba_boost doesnt apply for irregularly sampled data in the current implementation.
-
-    Note, that in the current implementation, the initial and final winsz/2 values do not get fitted.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-modelled.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    winsz : {str, int}
-        The size of the window you want to use for fitting. If an integer is passed, the size
-        refers to the number of periods for every fitting window. If an offset string is passed,
-        the size refers to the total temporal extension. The window will be centered around the vaule-to-be-fitted.
-        For regularly sampled timeseries the period number will be casted down to an odd number if
-        even.
-    polydeg : int
-        The degree of the polynomial used for fitting
-    numba : {True, False, "auto"}, default "auto"
-        Wheather or not to apply numbas just-in-time compilation onto the poly fit function. This will noticably
-        increase the speed of calculation, if the sample size is sufficiently high.
-        If "auto" is selected, numba compatible fit functions get applied for data consisiting of > 200000 samples.
-    eval_flags : bool, default True
-        Wheather or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
-        flag present in the interval, the data for its calculation was obtained from.
-    min_periods : {int, np.nan}, default 0
-        The minimum number of periods, that has to be available in every values fitting surrounding for the polynomial
-        fit to be performed. If there are not enough values, np.nan gets assigned. Default (0) results in fitting
-        regardless of the number of values present (results in overfitting for too sparse intervals). To automatically
-        set the minimum number of periods to the number of values in an offset defined window size, pass np.nan.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-
-    """
-    if data[field].empty:
-        return data, flagger
-    data = data.copy()
-    to_fit = data[field]
-    flags = flagger.getFlags(field)
-    if not to_fit.index.freqstr:
-        if isinstance(winsz, int):
-            raise NotImplementedError("Integer based window size is not supported for not-harmonized" "sample series.")
-        # get interval centers
-        centers = np.floor((to_fit.rolling(pd.Timedelta(winsz) / 2, closed="both", min_periods=min_periods).count()))
-        centers = centers.drop(centers[centers.isna()].index)
-        centers = centers.astype(int)
-        residues = to_fit.rolling(pd.Timedelta(winsz), closed="both", min_periods=min_periods).apply(
-            polyRollerIrregular, args=(centers, polydeg)
-        )
-
-        def center_func(x, y=centers):
-            pos = x.index[int(len(x) - y[x.index[-1]])]
-            return y.index.get_loc(pos)
-
-        centers_iloc = centers.rolling(winsz, closed="both").apply(center_func, raw=False).astype(int)
-        temp = residues.copy()
-        for k in centers_iloc.iteritems():
-            residues.iloc[k[1]] = temp[k[0]]
-        residues[residues.index[0] : residues.index[centers_iloc[0]]] = np.nan
-        residues[residues.index[centers_iloc[-1]] : residues.index[-1]] = np.nan
-    else:
-        if isinstance(winsz, str):
-            winsz = int(np.floor(pd.Timedelta(winsz) / pd.Timedelta(to_fit.index.freqstr)))
-        if winsz % 2 == 0:
-            winsz = int(winsz - 1)
-        if numba == "auto":
-            if to_fit.shape[0] < 200000:
-                numba = False
-            else:
-                numba = True
-
-        val_range = np.arange(0, winsz)
-        center_index = int(np.floor(winsz / 2))
-        if min_periods < winsz:
-            if min_periods > 0:
-                to_fit = to_fit.rolling(winsz, min_periods=min_periods, center=True).apply(
-                    lambda x, y: x[y], raw=True, args=(center_index,)
-                )
-
-            # we need a missing value marker that is not nan, because nan values dont get passed by pandas rolling
-            # method
-            miss_marker = to_fit.min()
-            miss_marker = np.floor(miss_marker - 1)
-            na_mask = to_fit.isna()
-            to_fit[na_mask] = miss_marker
-            if numba:
-                residues = to_fit.rolling(winsz).apply(
-                    polyRollerNumba,
-                    args=(miss_marker, val_range, center_index, polydeg),
-                    raw=True,
-                    engine="numba",
-                    engine_kwargs={"no_python": True},
-                )
-                # due to a tiny bug - rolling with center=True doesnt work when using numba engine.
-                residues = residues.shift(-int(center_index))
-            else:
-                residues = to_fit.rolling(winsz, center=True).apply(
-                    polyRoller, args=(miss_marker, val_range, center_index, polydeg), raw=True
-                )
-            residues[na_mask] = np.nan
-        else:
-            # we only fit fully populated intervals:
-            if numba:
-                residues = to_fit.rolling(winsz).apply(
-                    polyRollerNoMissingNumba,
-                    args=(val_range, center_index, polydeg),
-                    engine="numba",
-                    engine_kwargs={"no_python": True},
-                    raw=True,
-                )
-                # due to a tiny bug - rolling with center=True doesnt work when using numba engine.
-                residues = residues.shift(-int(center_index))
-            else:
-                residues = to_fit.rolling(winsz, center=True).apply(
-                    polyRollerNoMissing, args=(val_range, center_index, polydeg), raw=True
-                )
-
-    residues = residues - to_fit
-    data[field] = residues
-    if eval_flags:
-        num_cats, codes = flags.factorize()
-        num_cats = pd.Series(num_cats, index=flags.index).rolling(winsz, center=True, min_periods=min_periods).max()
-        nan_samples = num_cats[num_cats.isna()]
-        num_cats.drop(nan_samples.index, inplace=True)
-        to_flag = pd.Series(codes[num_cats.astype(int)], index=num_cats.index)
-        to_flag = to_flag.align(nan_samples)[0]
-        to_flag[nan_samples.index] = flags[nan_samples.index]
-        flagger = flagger.setFlags(field, to_flag.values, **kwargs)
-
-    return data, flagger
-
-
-@register(masking='field')
-def modelling_rollingMean(data, field, flagger, winsz, eval_flags=True, min_periods=0, center=True, **kwargs):
-    """
-    Models the data with the rolling mean and returns the residues.
-
-    Note, that the residues will be stored to the `field` field of the input data, so that the data that is modelled
-    gets overridden.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-modelled.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    winsz : {int, str}
-        The size of the window you want to roll with. If an integer is passed, the size
-        refers to the number of periods for every fitting window. If an offset string is passed,
-        the size refers to the total temporal extension.
-        For regularly sampled timeseries, the period number will be casted down to an odd number if
-        center = True.
-    eval_flags : bool, default True
-        Wheather or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
-        flag present in the interval, the data for its calculation was obtained from.
-        Currently not implemented in combination with not-harmonized timeseries.
-    min_periods : int, default 0
-        The minimum number of periods, that has to be available in every values fitting surrounding for the mean
-        fitting to be performed. If there are not enough values, np.nan gets assigned. Default (0) results in fitting
-        regardless of the number of values present.
-    center : bool, default True
-        Wheather or not to center the window the mean is calculated of around the reference value. If False,
-        the reference value is placed to the right of the window (classic rolling mean with lag.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-    """
-
-    data = data.copy()
-    to_fit = data[field]
-    flags = flagger.getFlags(field)
-    if to_fit.empty:
-        return data, flagger
-
-    # starting with the annoying case: finding the rolling interval centers of not-harmonized input time series:
-    if (to_fit.index.freqstr is None) and center:
-        if isinstance(winsz, int):
-            raise NotImplementedError(
-                "Integer based window size is not supported for not-harmonized"
-                'sample series when rolling with "center=True".'
-            )
-        # get interval centers
-        centers = np.floor((to_fit.rolling(pd.Timedelta(winsz) / 2, closed="both", min_periods=min_periods).count()))
-        centers = centers.drop(centers[centers.isna()].index)
-        centers = centers.astype(int)
-        means = to_fit.rolling(pd.Timedelta(winsz), closed="both", min_periods=min_periods).mean()
-
-        def center_func(x, y=centers):
-            pos = x.index[int(len(x) - y[x.index[-1]])]
-            return y.index.get_loc(pos)
-
-        centers_iloc = centers.rolling(winsz, closed="both").apply(center_func, raw=False).astype(int)
-        temp = means.copy()
-        for k in centers_iloc.iteritems():
-            means.iloc[k[1]] = temp[k[0]]
-        # last values are false, due to structural reasons:
-        means[means.index[centers_iloc[-1]] : means.index[-1]] = np.nan
-
-    # everything is more easy if data[field] is harmonized:
-    else:
-        if isinstance(winsz, str):
-            winsz = int(np.floor(pd.Timedelta(winsz) / pd.Timedelta(to_fit.index.freqstr)))
-        if (winsz % 2 == 0) & center:
-            winsz = int(winsz - 1)
-
-        means = to_fit.rolling(window=winsz, center=center, closed="both").mean()
-
-    residues = means - to_fit
-    data[field] = residues
-    if eval_flags:
-        num_cats, codes = flags.factorize()
-        num_cats = pd.Series(num_cats, index=flags.index).rolling(winsz, center=True, min_periods=min_periods).max()
-        nan_samples = num_cats[num_cats.isna()]
-        num_cats.drop(nan_samples.index, inplace=True)
-        to_flag = pd.Series(codes[num_cats.astype(int)], index=num_cats.index)
-        to_flag = to_flag.align(nan_samples)[0]
-        to_flag[nan_samples.index] = flags[nan_samples.index]
-        flagger = flagger.setFlags(field, to_flag.values, **kwargs)
-
-    return data, flagger
-
-
-def modelling_mask(data, field, flagger, mode, mask_var=None, season_start=None, season_end=None,
-                   include_bounds=True):
-    """
-    This function realizes masking within saqc.
-
-    Due to some inner saqc mechanics, it is not straight forwardly possible to exclude
-    values or datachunks from flagging routines. This function replaces flags with np.nan
-    value, wherever values are to get masked. Furthermore, the masked values get replaced by
-    np.nan, so that they dont effect calculations.
-
-    Here comes a recipe on how to apply a flagging function only on a masked chunk of the variable field:
-
-    1. dublicate "field" in the input data (proc_fork)
-    2. mask the dublicated data (modelling_mask)
-    3. apply the tests you only want to be applied onto the masked data chunks (saqc_tests)
-    4. project the flags, calculated on the dublicated and masked data onto the original field data
-        (proc_projectFlags or flagGeneric)
-    5. drop the dublicated data (proc_drop)
-
-    To see an implemented example, checkout flagSeasonalRange in the saqc.functions module
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-masked.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    mode : {"seasonal", "mask_var"}
-        The masking mode.
-        - "seasonal": parameters "season_start", "season_end" are evaluated to generate a seasonal (periodical) mask
-        - "mask_var": data[mask_var] is expected to be a boolean valued timeseries and is used as mask.
-    mask_var : {None, str}, default None
-        Only effective if mode == "mask_var"
-        Fieldname of the column, holding the data that is to be used as mask. (must be moolean series)
-        Neither the series` length nor its labels have to match data[field]`s index and length. An inner join of the
-        indices will be calculated and values get masked where the values of the inner join are "True".
-    season_start : {None, str}, default None
-        Only effective if mode == "seasonal"
-        String denoting starting point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
-        Has to be of same length as `season_end` parameter.
-        See examples section below for some examples.
-    season_end : {None, str}, default None
-        Only effective if mode == "seasonal"
-        String denoting starting point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
-        Has to be of same length as `season_end` parameter.
-        See examples section below for some examples.
-    include_bounds : boolean
-        Wheather or not to include the mask defining bounds to the mask.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-
-
-    Examples
-    --------
-    The `season_start` and `season_end` parameters provide a conveniant way to generate seasonal / date-periodic masks.
-    They have to be strings of the forms: "mm-ddTHH:MM:SS", "ddTHH:MM:SS" , "HH:MM:SS", "MM:SS" or "SS"
-    (mm=month, dd=day, HH=hour, MM=minute, SS=second)
-    Single digit specifications have to be given with leading zeros.
-    `season_start` and `season_end` strings have to be of same length (refer to the same periodicity)
-    The highest date unit gives the period.
-    For example:
-
-    >>> season_start = "01T15:00:00"
-    >>> season_end = "13T17:30:00"
-
-    Will result in all values sampled between 15:00 at the first and  17:30 at the 13th of every month get masked
-
-    >>> season_start = "01:00"
-    >>> season_end = "04:00"
-
-    All the values between the first and 4th minute of every hour get masked.
-
-    >>> season_start = "01-01T00:00:00"
-    >>> season_end = "01-03T00:00:00"
-
-    Mask January and February of every year. Masking is always inclusive, so in this case the mask will
-    include 00:00:00 at the first of march. To exclude this one, pass:
-
-    >>> season_start = "01-01T00:00:00"
-    >>> season_end = "02-28T23:59:59"
-
-    To mask intervals that lap over a seasons frame, like nights, or winter, exchange sequence of season start and
-    season end. For example, to mask night hours between 22:00:00 in the evening and 06:00:00 in the morning, pass:
-
-    >>> season_start = "22:00:00"
-    >>> season_end = "06:00:00"
-
-    When inclusive_selection="season", all above examples work the same way, only that you now
-    determine which values NOT TO mask (=which values are to constitute the "seasons").
-    """
-    data = data.copy()
-    datcol_idx = data[field].index
-
-    if mode == 'seasonal':
-        to_mask = seasonalMask(datcol_idx, season_start, season_end, include_bounds)
-    elif mode == 'mask_var':
-        idx = data[mask_var].index.intersection(datcol_idx)
-        to_mask = data.loc[idx, mask_var]
-    else:
-        raise ValueError("Keyword passed as masking mode is unknown ({})!".format(mode))
-
-    data.aloc[to_mask, field] = np.nan
-    flagger = flagger.setFlags(field, loc=to_mask, flag=np.nan, force=True)
-
-    return data, flagger
-
-
-@numba.jit(parallel=True, nopython=True)
-def _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, num_val):
-    stat_arr = np.zeros(num_val)
-    thresh_arr = np.zeros(num_val)
-    for win_i in numba.prange(0, num_val-1):
-        x = data_arr[bwd_start[win_i]:split[win_i]]
-        y = data_arr[split[win_i]:fwd_end[win_i]]
-        stat_arr[win_i] = stat_func(x, y)
-        thresh_arr[win_i] = thresh_func(x, y)
-    return stat_arr, thresh_arr
-
-
-def _slidingWindowSearch(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func, num_val):
-    stat_arr = np.zeros(num_val)
-    thresh_arr = np.zeros(num_val)
-    for win_i in range(0, num_val-1):
-        x = data_arr[bwd_start[win_i]:split[win_i]]
-        y = data_arr[split[win_i]:fwd_end[win_i]]
-        stat_arr[win_i] = stat_func(x, y)
-        thresh_arr[win_i] = thresh_func(x, y)
-    return stat_arr, thresh_arr
-
-
-def _reduceCPCluster(stat_arr, thresh_arr, start, end, obj_func, num_val):
-    out_arr = np.zeros(shape=num_val, dtype=bool)
-    for win_i in numba.prange(0, num_val):
-        s, e = start[win_i], end[win_i]
-        x = stat_arr[s:e]
-        y = thresh_arr[s:e]
-        pos = s + obj_func(x, y) + 1
-        out_arr[s:e] = False
-        out_arr[pos] = True
-    return out_arr
-
-
-@register(masking='field')
-def modelling_changePointCluster(data, field, flagger, stat_func, thresh_func, bwd_window, min_periods_bwd,
-                                 fwd_window=None, min_periods_fwd=None, closed='both', try_to_jit=True,
-                                 reduce_window=None, reduce_func=lambda x, y: x.argmax(), flag_changepoints=False,
-                                 model_by_resids=False, **kwargs):
-    """
-    Assigns label to the data, aiming to reflect continous regimes of the processes the data is assumed to be
-    generated by.
-    The regime change points detection is based on a sliding window search.
-
-    Note, that the cluster labels will be stored to the `field` field of the input data, so that the data that is
-    clustered gets overridden.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The reference variable, the deviation from wich determines the flagging.
-    flagger : saqc.flagger
-        A flagger object, holding flags and additional informations related to `data`.
-    stat_func : Callable[numpy.array, numpy.array]
-        A function that assigns a value to every twin window. Left window content will be passed to first variable,
-        right window content will be passed to the second.
-    thresh_func : Callable[numpy.array, numpy.array]
-        A function that determines the value level, exceeding wich qualifies a timestamps stat func value as denoting a
-        changepoint.
-    bwd_window : str
-        The left (backwards facing) windows temporal extension (freq-string).
-    min_periods_bwd : {str, int}
-        Minimum number of periods that have to be present in a backwards facing window, for a changepoint test to be
-        performed.
-    fwd_window : {None, str}, default None
-        The right (forward facing) windows temporal extension (freq-string).
-    min_periods_fwd : {None, str, int}, default None
-        Minimum number of periods that have to be present in a forward facing window, for a changepoint test to be
-        performed.
-    closed : {'right', 'left', 'both', 'neither'}, default 'both'
-        Determines the closure of the sliding windows.
-    reduce_window : {None, False, str}, default None
-        The sliding window search method is not an exact CP search method and usually there wont be
-        detected a single changepoint, but a "region" of change around a changepoint.
-        If `agg_range` is not False, for every window of size `agg_range`, there
-        will be selected the value with index `reduce_func(x, y)` and the others will be dropped.
-        If `reduce_window` is None, the reduction window size equals the
-        twin window size the changepoints have been detected with.
-    reduce_func : Callable[numpy.array, numpy.array], default lambda x, y: x.argmax()
-        A function that must return an index value upon input of two arrays x and y.
-        First input parameter will hold the result from the stat_func evaluation for every
-        reduction window. Second input parameter holds the result from the thresh_func evaluation.
-        The default reduction function just selects the value that maximizes the stat_func.
-    flag_changepoints : bool, default False
-        If true, the points, where there is a change in data modelling regime detected get flagged bad.
-    model_by_resids : bool, default False
-        If True, the data is replaced by the stat_funcs results instead of regime labels.
-
-    Returns
-    -------
-
-    """
-    data = data.copy()
-    data_ser = data[field].dropna()
-    center = False
-    var_len = data_ser.shape[0]
-    if fwd_window is None:
-        fwd_window = bwd_window
-    if min_periods_fwd is None:
-        min_periods_fwd = min_periods_bwd
-    if reduce_window is None:
-        reduce_window = f"{int(pd.Timedelta(bwd_window).total_seconds() + pd.Timedelta(fwd_window).total_seconds())}s"
-
-    roller = customRoller(data_ser, window=bwd_window)
-    bwd_start, bwd_end = roller.window.get_window_bounds(len(data_ser), min_periods=min_periods_bwd, closed=closed)
-
-    roller = customRoller(data_ser, window=fwd_window, forward=True)
-    fwd_start, fwd_end = roller.window.get_window_bounds(len(data_ser), min_periods=min_periods_fwd, closed=closed)
-
-    min_mask = ~((fwd_end - fwd_start <= min_periods_fwd) | (bwd_end - bwd_start <= min_periods_bwd))
-    fwd_end = fwd_end[min_mask]
-    split = bwd_end[min_mask]
-    bwd_start = bwd_start[min_mask]
-    masked_index = data_ser.index[min_mask]
-    check_len = len(fwd_end)
-    data_arr = data_ser.values
-
-    if try_to_jit:
-        jit_sf = numba.jit(stat_func, nopython=True)
-        jit_tf = numba.jit(thresh_func, nopython=True)
-        try:
-            jit_sf(data_arr[bwd_start[0]:bwd_end[0]], data_arr[fwd_start[0]:fwd_end[0]])
-            jit_tf(data_arr[bwd_start[0]:bwd_end[0]], data_arr[fwd_start[0]:fwd_end[0]])
-            stat_func = jit_sf
-            thresh_func = jit_tf
-            try_to_jit = True
-        except numba.core.errors.TypingError:
-            try_to_jit = False
-            logging.warning('Could not jit passed statistic - omitting jitting!')
-
-    if try_to_jit:
-        stat_arr, thresh_arr = _slidingWindowSearchNumba(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func,
-                                                    check_len)
-    else:
-        stat_arr, thresh_arr = _slidingWindowSearch(data_arr, bwd_start, fwd_end, split, stat_func, thresh_func,
-                                                    check_len)
-    result_arr = stat_arr > thresh_arr
-
-    if model_by_resids:
-        residues = pd.Series(np.nan, index=data[field].index)
-        residues[masked_index] = stat_arr
-        data[field] = residues
-        flagger = flagger.setFlags(field, flag=flagger.UNFLAGGED, force=True, **kwargs)
-        return data, flagger
-
-    det_index = masked_index[result_arr]
-    detected = pd.Series(True, index=det_index)
-    if reduce_window is not False:
-        l = detected.shape[0]
-        roller = customRoller(detected, window=reduce_window)
-        start, end = roller.window.get_window_bounds(num_values=l, min_periods=1, closed='both', center=True)
-
-        detected = _reduceCPCluster(stat_arr[result_arr], thresh_arr[result_arr], start, end, reduce_func, l)
-        det_index = det_index[detected]
-
-    cluster = pd.Series(False, index=data[field].index)
-    cluster[det_index] = True
-    cluster = cluster.cumsum()
-    # (better to start cluster labels with number one)
-    cluster += 1
-    data[field] = cluster
-    flagger = flagger.setFlags(field, flag=flagger.UNFLAGGED, force=True, **kwargs)
-    if flag_changepoints:
-        flagger = flagger.setFlags(field, loc=det_index)
-    return data, flagger
diff --git a/saqc/funcs/spikes_detection.py b/saqc/funcs/outliers.py
similarity index 65%
rename from saqc/funcs/spikes_detection.py
rename to saqc/funcs/outliers.py
index 39e34bf1fa6a44e70c0bb395a9922090cae104c5..6a4cc1ebbd55f78a0ab80849a9c14344c1835b44 100644
--- a/saqc/funcs/spikes_detection.py
+++ b/saqc/funcs/outliers.py
@@ -1,114 +1,98 @@
 #! /usr/bin/env python
 # -*- coding: utf-8 -*-
 
-
+import saqc.lib.ts_operators as ts_ops
 import numpy as np
 import pandas as pd
-from scipy.signal import savgol_filter
-from scipy.stats import zscore
+
 from scipy.optimize import curve_fit
 from saqc.core.register import register
 import numpy.polynomial.polynomial as poly
 import numba
-import saqc.lib.ts_operators as ts_ops
 from saqc.lib.tools import (
-    retrieveTrustworthyOriginal,
-    offset2seconds,
-    slidingWindowIndices,
-    findIndex,
-    toSequence,
-    customRoller
+    customRoller,
+    findIndex
 )
+from saqc.funcs.scores import assignKNNScore
 from outliers import smirnov_grubbs
 
-def _stray(
-    val_frame,
+
+@register(masking='field')
+def flagByStray(
+    data,
+    field,
+    flagger,
     partition_freq=None,
     partition_min=11,
-    scoring_method="kNNMaxGap",
-    n_neighbors=10,
     iter_start=0.5,
     alpha=0.05,
-    trafo=lambda x: x
-
+    **kwargs
 ):
     """
-    Find outliers in multi dimensional observations.
+    Flag outliers in 1-dimensional (score) data with the STRAY Algorithm.
 
-    The general idea is to assigning scores to every observation based on the observations neighborhood in the space
-    of observations. Then, the gaps between the (greatest) scores are tested for beeing drawn from the same
-    distribution, as the majority of the scores.
-
-    See the References section for a link to a detailed description of the algorithm.
-
-    Note, that the flagging result depends on the size of the partition under test and the distribution of the outliers
-    in it. For "normalish" and/or slightly "erratic" datasets, 5000 - 10000, periods turned out to be a good guess.
-
-    Note, that no normalizations/transformations are applied to the different components (data columns)
-    - those are expected to be applied previously, if necessary.
+    Find more information on the algorithm in References [1].
 
     Parameters
     ----------
-    val_frame : (N,M) ndarray
-        Input NxM array of observations, where N is the number of observations and M the number of components per
-        observation.
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
     partition_freq : {None, str, int}, default None
-        Determines the size of the data partitions, the data is decomposed into. Each partition is checked seperately
-        for outliers. If a String is passed, it has to be an offset string and it results in partitioning the data into
-        parts of according temporal length. If an integer is passed, the data is simply split up into continous chunks
-        of `partition_freq` periods. if ``None`` is passed (default), all the data will be tested in one run.
+        Determines the segmentation of the data into partitions that are checked
+        individually for outliers.
+
+        * ``None``: Apply scoring on the whole data set at once
+        * ``x`` > 0 : Apply scoring on successive data chunks of periods length ``x``
+        * Offset String : Apply scoring on successive partitions of temporal extension matching the passed offset
+          string
+
     partition_min : int, default 11
         Minimum number of periods per partition that have to be present for a valid outlier detection to be made in
         this partition. (Only effective if `partition_freq` is an integer.) Partition min value must always be
         greater than the number of neighbors used in the scoring.
-    scoring_method : {'kNNSum', 'kNNMaxGap'}, default 'kNNMaxGap'
-        Scoring method applied.
-        `'kNNSum'`: Assign to every point the sum of the distances to its 'n_neighbors' nearest neighbors.
-        `'kNNMaxGap'`: Assign to every point the distance to the neighbor with the "maximum gap" to its predecessor
-        in the hierarchy of the `n_neighbors` nearest neighbors. (see reference section for further descriptions)
-    n_neighbors : int, default 10
-        Number of neighbors included in the scoring process for every datapoint.
     iter_start : float, default 0.5
         Float in [0,1] that determines which percentage of data is considered "normal". 0.5 results in the stray
         algorithm to search only the upper 50 % of the scores for the cut off point. (See reference section for more
         information)
     alpha : float, default 0.05
-        Niveau of significance by which it is tested, if a score might be drawn from another distribution, than the
+        Level of significance by which it is tested whether a score might be drawn from another distribution than the
         majority of the data.
 
     References
     ----------
-    Detailed description of the Stray algorithm is covered here:
-
     [1] Talagala, P. D., Hyndman, R. J., & Smith-Miles, K. (2019). Anomaly detection in high dimensional data.
         arXiv preprint arXiv:1908.04000.
     """
 
-    kNNfunc = getattr(ts_ops, scoring_method)
-    # partitioning
+    scores = data[field].dropna()
+    if scores.empty:
+        return data, flagger
+
     if not partition_freq:
-        partition_freq = val_frame.shape[0]
+        partition_freq = scores.shape[0]
 
     if isinstance(partition_freq, str):
-        partitions = val_frame.groupby(pd.Grouper(freq=partition_freq))
+        partitions = scores.groupby(pd.Grouper(freq=partition_freq))
     else:
-        grouper_series = pd.Series(data=np.arange(0, val_frame.shape[0]), index=val_frame.index)
+        grouper_series = pd.Series(data=np.arange(0, scores.shape[0]), index=scores.index)
         grouper_series = grouper_series.transform(lambda x: int(np.floor(x / partition_freq)))
-        partitions = val_frame.groupby(grouper_series)
+        partitions = scores.groupby(grouper_series)
 
     # calculate flags for every partition
     to_flag = []
     for _, partition in partitions:
         if partition.empty | (partition.shape[0] < partition_min):
             continue
-        partition = partition.apply(trafo)
         sample_size = partition.shape[0]
-        nn_neighbors = min(n_neighbors, max(sample_size, 2))
-        resids = kNNfunc(partition.values, n_neighbors=nn_neighbors - 1, algorithm="ball_tree")
-        sorted_i = resids.argsort()
-        resids = resids[sorted_i]
+        sorted_i = partition.values.argsort()
+        resids = partition.values[sorted_i]
         gaps = np.append(0, np.diff(resids))
-
         tail_size = int(max(min(50, np.floor(sample_size / 4)), 2))
         tail_indices = np.arange(2, tail_size + 1)
         i_start = int(max(np.floor(sample_size * iter_start), 1) + 1)
@@ -117,13 +101,110 @@ def _stray(
             ghat[i] = sum((tail_indices / (tail_size - 1)) * gaps[i - tail_indices + 1])
 
         log_alpha = np.log(1 / alpha)
+        trigger_flagging = False
         for iter_index in range(i_start - 1, sample_size):
             if gaps[iter_index] > log_alpha * ghat[iter_index]:
+                trigger_flagging = True
                 break
 
-        to_flag = np.append(to_flag, list(partition.index[sorted_i[iter_index:]]))
+        if trigger_flagging:
+            flagger = flagger.setFlags(field, loc=partition.index[sorted_i[iter_index:]])
 
-    return to_flag
+    return data, flagger
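
The gap test implemented by the loop above can be exercised in isolation. The following standalone sketch (a hypothetical helper, not part of the module) mirrors the same arithmetic: sort the scores, form the weighted mean ``ghat`` of the preceding gaps in the upper score range, and cut off at the first gap exceeding ``log(1/alpha) * ghat``.

```python
import numpy as np

def stray_cutoff(scores, iter_start=0.5, alpha=0.05):
    # returns the rank index from which on observations would be flagged,
    # or None if no sufficiently large gap is found (mirrors the loop above)
    scores = np.sort(np.asarray(scores, dtype=float))
    n = scores.size
    gaps = np.append(0, np.diff(scores))
    tail_size = int(max(min(50, np.floor(n / 4)), 2))
    tail_indices = np.arange(2, tail_size + 1)
    i_start = int(max(np.floor(n * iter_start), 1) + 1)
    log_alpha = np.log(1 / alpha)
    for i in range(i_start - 1, n):
        ghat_i = sum((tail_indices / (tail_size - 1)) * gaps[i - tail_indices + 1])
        if gaps[i] > log_alpha * ghat_i:
            return i
    return None
```

For a toy input such as ``[0.1, 0.2, 0.25, 0.3, 5.0]`` the cut-off lands on the clearly separated last score.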
+
+
+def _evalStrayLabels(
+    data, field, flagger, fields, reduction_range, reduction_drop_flagged=False, reduction_thresh=3.5,
+    reduction_min_periods=1, at_least_one=True
+):
+    """
+    The function "reduces" an observations flag to components of it, by applying MAD (See references)
+    test onto every components temporal surrounding.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the labels to be evaluated.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    fields : list[str]
+        A list of strings, holding the column names of the variables, the stray labels shall be
+        projected onto.
+    reduction_range : {None, str}
+        An offset string, denoting the range of the temporal surrounding to include into the MAD testing.
+        If ``None`` is passed, no testing will be performed and all fields will have the stray flag projected.
+    reduction_drop_flagged : bool, default False
+        Whether or not to drop flagged values other than the value under test from the temporal surrounding
+        before checking the value with MAD.
+    reduction_thresh : float, default 3.5
+        The `critical` value, controlling whether the MAD score is considered referring to an outlier or not.
+        Higher values result in less rigid flagging. The default value is widely used in the literature. See references
+        section for more details ([1]).
+    at_least_one : bool, default True
+        If none of the variables the outlier label shall be reduced to is itself an outlier with regard
+        to the test, all (``True``) or none (``False``) of the variables get flagged.
+
+    References
+    ----------
+    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
+    """
+
+    val_frame = data[fields].to_df()
+    stray_detects = flagger.isFlagged()[field]
+    stray_detects = stray_detects[stray_detects]
+    to_flag_frame = pd.DataFrame(False, columns=fields, index=stray_detects.index)
+    to_flag_index = to_flag_frame.index
+    if reduction_range is None:
+        for field in to_flag_frame.columns:
+            flagger = flagger.setFlags(field, loc=to_flag_index)
+        return data, flagger
+
+    for var in fields:
+        for index in to_flag_index:
+            index_slice = slice(index - pd.Timedelta(reduction_range), index + pd.Timedelta(reduction_range))
+
+            test_slice = val_frame[var][index_slice].dropna()
+            # check whether the value under test is sufficiently centered within the test slice:
+            first_valid = test_slice.first_valid_index()
+            last_valid = test_slice.last_valid_index()
+            min_range = pd.Timedelta(reduction_range) / 4
+            polydeg = 2
+            if ((pd.Timedelta(index - first_valid) < min_range) |
+                    (pd.Timedelta(last_valid - index) < min_range)):
+                polydeg = 0
+            if reduction_drop_flagged:
+                test_slice = test_slice.drop(to_flag_index, errors='ignore')
+            if test_slice.shape[0] >= reduction_min_periods:
+                x = test_slice.index.values.astype(float)
+                x_0 = x[0]
+                x = (x - x_0) / 10 ** 12
+                polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=polydeg)
+                testval = poly.polyval((float(index.to_numpy()) - x_0) / 10 ** 12, polyfitted)
+                testval = val_frame[var][index] - testval
+                resids = test_slice.values - poly.polyval(x, polyfitted)
+                med_resids = np.median(resids)
+                MAD = np.median(np.abs(resids - med_resids))
+                crit_val = 0.6745 * (abs(med_resids - testval)) / MAD
+                if crit_val > reduction_thresh:
+                    to_flag_frame.loc[index, var] = True
+            else:
+                to_flag_frame.loc[index, var] = True
+
+    if at_least_one:
+        to_flag_frame[~to_flag_frame.any(axis=1)] = True
+
+    for field in to_flag_frame.columns:
+        flagger = flagger.setFlags(field, loc=to_flag_frame[field][to_flag_frame[field]].index)
+
+    return data, flagger
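
The per-component check above boils down to a modified Z-score of the value under test against the residuals of a local polynomial fit. A minimal sketch of just that scoring step (hypothetical helper, not part of the module):

```python
import numpy as np
import numpy.polynomial.polynomial as poly

def mad_score(x, y, x_test, y_test, deg=2):
    # fit a polynomial to the surrounding (x, y), take the residuals and
    # return the modified Z-score of the test value against them
    coeffs = poly.polyfit(x, y, deg)
    resids = y - poly.polyval(x, coeffs)
    testval = y_test - poly.polyval(x_test, coeffs)
    med = np.median(resids)
    mad = np.median(np.abs(resids - med))
    return 0.6745 * abs(med - testval) / mad
```

A component keeps its flag if this score exceeds ``reduction_thresh`` (3.5 by default).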
 
 
 def _expFit(val_frame, scoring_method="kNNMaxGap", n_neighbors=10, iter_start=0.5, alpha=0.05, bin_frac=10):
@@ -237,94 +318,20 @@ def _expFit(val_frame, scoring_method="kNNMaxGap", n_neighbors=10, iter_start=0.
     return val_frame.index[sorted_i[iter_index:]]
 
 
-def _reduceMVflags(
-    val_frame, fields, flagger, to_flag_frame, reduction_range, reduction_drop_flagged=False, reduction_thresh=3.5,
-        reduction_min_periods=1
-):
-    """
-    Function called by "spikes_flagMultivarScores" to reduce the number of false positives that result from
-    the algorithms confinement to only flag complete observations (all of its variables/components).
-
-    The function "reduces" an observations flag to components of it, by applying MAD (See references)
-    test onto every components temporal surrounding.
-
-    Parameters
-    ----------
-    val_frame : (N,M) pd.DataFrame
-        Input NxM DataFrame of observations, where N is the number of observations and M the number of components per
-        observation.
-    fields : str
-        Fieldnames of the components in `val_frame` that are to be tested for outlierishnes.
-    to_flag_frame : (K,M) pd.DataFrame
-        Input dataframe of observations to be tested, where N is the number of observations and M the number
-        of components per observation.
-    reduction_range : str
-        An offset string, denoting the range of the temporal surrounding to include into the MAD testing.
-    reduction_drop_flagged : bool, default False
-        Wheather or not to drop flagged values other than the value under test, from the temporal surrounding
-        before checking the value with MAD.
-    reduction_thresh : float, default 3.5
-        The `critical` value, controlling wheather the MAD score is considered referring to an outlier or not.
-        Higher values result in less rigid flagging. The default value is widely used in the literature. See references
-        section for more details ([1]).
-
-    References
-    ----------
-    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
-    """
-
-    to_flag_frame[:] = False
-    to_flag_index = to_flag_frame.index
-    for var in fields:
-        for index in enumerate(to_flag_index):
-            index_slice = slice(index[1] - pd.Timedelta(reduction_range), index[1] + pd.Timedelta(reduction_range))
-
-            test_slice = val_frame[var][index_slice].dropna()
-            # check, wheather value under test is sufficiently centered:
-            first_valid = test_slice.first_valid_index()
-            last_valid = test_slice.last_valid_index()
-            min_range = pd.Timedelta(reduction_range)/4
-            polydeg = 2
-            if ((pd.Timedelta(index[1] - first_valid) < min_range) |
-                (pd.Timedelta(last_valid - index[1]) < min_range)):
-                polydeg = 0
-            if reduction_drop_flagged:
-                test_slice = test_slice.drop(to_flag_index, errors='ignore')
-            if test_slice.shape[0] >= reduction_min_periods:
-                x = (test_slice.index.values.astype(float))
-                x_0 = x[0]
-                x = (x - x_0)/10**12
-                polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=polydeg)
-                testval = poly.polyval((float(index[1].to_numpy()) - x_0)/10**12, polyfitted)
-                testval = val_frame[var][index[1]] - testval
-                resids = test_slice.values - poly.polyval(x, polyfitted)
-                med_resids = np.median(resids)
-                MAD = np.median(np.abs(resids - med_resids))
-                crit_val = 0.6745 * (abs(med_resids - testval)) / MAD
-                if crit_val > reduction_thresh:
-                    to_flag_frame.loc[index[1], var] = True
-            else:
-                to_flag_frame.loc[index[1], var] = True
-
-    return to_flag_frame
-
-
 @register(masking='all')
-def spikes_flagMultivarScores(
+def flagMVScores(
     data,
     field,
     flagger,
     fields,
-    trafo=np.log,
+    trafo=lambda x: x,
     alpha=0.05,
     n_neighbors=10,
-    scoring_method="kNNMaxGap",
+    scoring_func=np.sum,
     iter_start=0.5,
-    threshing="stray",
-    expfit_binning="auto",
-    stray_partition=None,
+    stray_partition=np.inf,
     stray_partition_min=11,
-    post_reduction=False,
+    trafo_on_partition=True,
     reduction_range=None,
     reduction_drop_flagged=False,
     reduction_thresh=3.5,
@@ -349,32 +356,20 @@ def spikes_flagMultivarScores(
         A flagger object, holding flags and additional Informations related to `data`.
     fields : List[str]
         List of fieldnames, corresponding to the variables that are to be included into the flagging process.
-    trafo : callable, default np.log
+    trafo : callable, default lambda x:x
         Transformation to be applied onto every column before scoring. Will likely get deprecated soon. Its better
-        to transform the data in a processing step, preceeeding the call to ``flagMultivarScores``.
+        to transform the data in a processing step, preceding the call to ``flagMVScores``.
     alpha : float, default 0.05
         Level of significance by which it is tested, if an observations score might be drawn from another distribution
         than the majority of the observation.
     n_neighbors : int, default 10
         Number of neighbors included in the scoring process for every datapoint.
-    scoring_method : {'kNNSum', 'kNNMaxGap'}, default 'kNNMaxGap'
-        Scoring method applied.
-        ``'kNNSum'``: Assign to every point the sum of the distances to its 'n_neighbors' nearest neighbors.
-        ``'kNNMaxGap'``: Assign to every point the distance to the neighbor with the "maximum gap" to its predecessor
-        in the hierarchy of the `n_neighbors` nearest neighbors. (see reference section for further descriptions)
+    scoring_func : Callable[numpy.array, float], default np.sum
+        The function that maps the set of every point's k-nearest neighbor distances onto a certain score.
     iter_start : float, default 0.5
         Float in [0,1] that determines which percentage of data is considered "normal". 0.5 results in the threshing
         algorithm to search only the upper 50 % of the scores for the cut off point. (See reference section for more
         information)
-    threshing : {'stray', 'expfit'}, default 'stray'
-        A string, denoting the threshing algorithm to be applied on the observations scores.
-        See the documentations of the algorithms (``_stray``, ``_expfit``) and/or the references sections paragraph [2]
-        for more informations on the algorithms.
-    expfit_binning : {int, str}, default 'auto'
-        Controls the binning for the histogram in the ``expfit`` algorithms fitting step.
-        If an integer is passed, the residues will equidistantly be covered by `bin_frac` bins, ranging from the
-        minimum to the maximum of the residues. If a string is passed, it will be passed on to the
-        ``numpy.histogram_bin_edges`` method.
     stray_partition : {None, str, int}, default None
         Only effective when `threshing` = 'stray'.
         Determines the size of the data partitions, the data is decomposed into. Each partition is checked seperately
@@ -385,23 +380,25 @@ def spikes_flagMultivarScores(
         Only effective when `threshing` = 'stray'.
         Minimum number of periods per partition that have to be present for a valid outlier detection to be made in
         this partition. (Only of effect, if `stray_partition` is an integer.)
-    post_reduction : bool, default False
-        Wheather or not it should be tried to reduce the flag of an observation to one or more of its components. See
-        documentation of `_reduceMVflags` for more details.
+    trafo_on_partition : bool, default True
+        Whether or not to apply the passed transformation to every partition separately.
     reduction_range : {None, str}, default None
-        Only effective when `post_reduction` = True
+        If not ``None``, an attempt is made to reduce the stray result to single outlier components of the input fields.
         An offset string, denoting the range of the temporal surrounding to include into the MAD testing while trying
         to reduce flags.
     reduction_drop_flagged : bool, default False
-        Only effective when `post_reduction` = True
-        Wheather or not to drop flagged values other than the value under test from the temporal surrounding
+        Only effective when `reduction_range` is not ``None``.
+        Whether or not to drop flagged values other than the value under test from the temporal surrounding
         before checking the value with MAD.
     reduction_thresh : float, default 3.5
-        Only effective when `post_reduction` = True
+        Only effective when `reduction_range` is not ``None``.
         The `critical` value, controlling wheather the MAD score is considered referring to an outlier or not.
         Higher values result in less rigid flagging. The default value is widely considered apropriate in the
         literature.
-
+    reduction_min_periods : int, default 1
+        Only effective when `reduction_range` is not ``None``.
+        Minimum number of measurements that have to be present in a reduction interval for the reduction to actually
+        be performed.
 
     Returns
     -------
@@ -442,74 +439,28 @@ def spikes_flagMultivarScores(
     the observation belonging to this gap, and all the observations belonging to gaps larger then this gap, get flagged
     outliers. See description of the `threshing` parameter for more details. Although [2] gives a fully detailed
     overview over the `stray` algorithm.
-
-    References
-    ----------
-    Odd Water Algorithm:
-
-    [1] Talagala, P.D. et al (2019): A Feature-Based Procedure for Detecting Technical Outliers in Water-Quality Data
-        From In Situ Sensors. Water Ressources Research, 55(11), 8547-8568.
-
-    A detailed description of the stray algorithm:
-
-    [2] Talagala, P. D., Hyndman, R. J., & Smith-Miles, K. (2019). Anomaly detection in high dimensional data.
-        arXiv preprint arXiv:1908.04000.
-
-    A detailed description of the MAD outlier scoring:
-
-    [3] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
     """
-
-    # data fransformation/extraction
-    data = data.copy()
-    fields = toSequence(fields)
-    val_frame = data[fields]
-    val_frame = val_frame.loc[val_frame.index_of("shared")].to_df()
-    val_frame.dropna(inplace=True)
-    val_frame = val_frame.apply(trafo)
-
-    if val_frame.empty:
-        return data, flagger
-
-    if threshing == "stray":
-        to_flag_index = _stray(
-            val_frame,
-            partition_freq=stray_partition,
-            partition_min=stray_partition_min,
-            scoring_method=scoring_method,
-            n_neighbors=n_neighbors,
-            iter_start=iter_start,
-            alpha=alpha
-        )
-
-    else:
-        val_frame = val_frame.apply(trafo)
-        to_flag_index = _expFit(val_frame,
-                                scoring_method=scoring_method,
-                                n_neighbors=n_neighbors,
+    data, flagger = assignKNNScore(data, 'dummy', flagger, fields, n_neighbors=n_neighbors, trafo=trafo,
+                                   trafo_on_partition=trafo_on_partition, scoring_func=scoring_func,
+                                   target_field='kNN_scores', partition_freq=stray_partition,
+                                   kNN_algorithm='ball_tree', partition_min=stray_partition_min, **kwargs)
+
+    data, flagger = flagByStray(data, 'kNN_scores', flagger,
+                                partition_freq=stray_partition,
+                                partition_min=stray_partition_min,
                                 iter_start=iter_start,
-                                alpha=alpha,
-                                bin_frac=expfit_binning)
-
-    to_flag_frame = pd.DataFrame({var_name: True for var_name in fields}, index=to_flag_index)
-    if post_reduction:
-        val_frame = data[toSequence(fields)].to_df()
-        to_flag_frame = _reduceMVflags(val_frame, fields, flagger, to_flag_frame, reduction_range,
-                                       reduction_drop_flagged=reduction_drop_flagged,
-                                       reduction_thresh=reduction_thresh,
-                                       reduction_min_periods=reduction_min_periods)
+                                alpha=alpha, **kwargs)
 
-
-    for var in fields:
-        to_flag_ind = to_flag_frame.loc[:, var]
-        to_flag_ind = to_flag_ind[to_flag_ind].index
-        flagger = flagger.setFlags(var, to_flag_ind, **kwargs)
+    data, flagger = _evalStrayLabels(
+        data, 'kNN_scores', flagger, fields, reduction_range=reduction_range,
+        reduction_drop_flagged=reduction_drop_flagged, reduction_thresh=reduction_thresh,
+        reduction_min_periods=reduction_min_periods, **kwargs)
 
     return data, flagger
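
After the rewrite, ``flagMVScores`` is a thin pipeline: kNN scoring into a temporary ``'kNN_scores'`` column, stray thresholding of those scores, and reduction of the resulting labels back onto the input fields. A hypothetical invocation could look like the sketch below (field names and offsets are illustrative only):

```python
# illustrative call only; 'var1'/'var2' and the offsets are made-up values
data, flagger = flagMVScores(
    data, 'dummy', flagger,
    fields=['var1', 'var2'],
    n_neighbors=10,
    stray_partition='30D',     # threshold monthly chunks separately
    reduction_range='6H',      # try to pin flags to single components
)
```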
 
 
 @register(masking='field')
-def spikes_flagRaise(
+def flagRaise(
     data,
     field,
     flagger,
@@ -675,154 +626,7 @@ def spikes_flagRaise(
 
 
 @register(masking='field')
-def spikes_flagSlidingZscore(
-    data, field, flagger, window, offset, count=1, polydeg=1, z=3.5, method="modZ", **kwargs,
-):
-    """
-    An outlier detection in a sliding window. The method for detection can be a simple Z-score or the more robust
-    modified Z-score, as introduced here [1].
-
-    The steps are:
-    1.  a window of size `window` is cut from the data
-    2.  the data is fit by a polynomial of the given degree `polydeg`
-    3.  the outlier `method` detect potential outlier
-    4.  the window is continued by `offset` to the next data-slot.
-    5.  processing continue at 1. until end of data.
-    6.  all potential outlier, that are detected `count`-many times, are promoted to real outlier and flagged by the `flagger`
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    window: {int, str}
-        Integer or offset string (see [2]). The size of the window the outlier detection is run in.
-    offset: {int, str}
-        Integer or offset string (see [2]). Stepsize the window is set further. default: 1h
-    count: int, default 1
-        Number of times a value has to be classified an outlier in different windows, to be finally flagged an outlier.
-    polydeg : int, default 1
-        The degree for the polynomial that is fitted to the data in order to calculate the residues.
-    z : float, default 3.5
-        The value the (mod.) Z-score is tested against. Defaulting to 3.5 (Recommendation of [1])
-    method: {'modZ', zscore}, default  'modZ'
-        See section `Z-Scores and Modified Z-Scores` in [1].
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    References
-    ----------
-    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
-    [2] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
-
-    """
-
-    use_offset = False
-    dx_s = offset
-    winsz_s = window
-    # check param consistency
-    if isinstance(window, str) or isinstance(offset, str):
-        if isinstance(window, str) and isinstance(offset, str):
-            use_offset = True
-            dx_s = offset2seconds(offset)
-            winsz_s = offset2seconds(window)
-        else:
-            raise TypeError(
-                f"`window` and `offset` must both be an offset or both be numeric, {window} and {offset} was passed"
-            )
-
-    # check params
-    if polydeg < 0:
-        raise ValueError("polydeg must be positive")
-    if z < 0:
-        raise ValueError("z must be positive")
-    if count <= 0:
-        raise ValueError("count must be positive and not zero")
-
-    if dx_s >= winsz_s and count == 1:
-        pass
-    elif dx_s >= winsz_s and count > 1:
-        ValueError("If stepsize `offset` is bigger that the window-size, every value is seen just once, so use count=1")
-    elif count > winsz_s // dx_s:
-        raise ValueError(
-            f"Adjust `offset`, `stepsize` or `window`. A single data point is "
-            f"seen `floor(window / offset) = {winsz_s // dx_s}` times, but count is set to {count}"
-        )
-
-    # prepare the method
-    method = method.lower()
-    if method == "modz":
-
-        def _calc(residual):
-            diff = np.abs(residual - np.median(residual))
-            mad = np.median(diff)
-            return (mad > 0) & (0.6745 * diff > z * mad)
-
-    elif method == "zscore":
-
-        def _calc(residual):
-            score = zscore(residual, ddof=1)
-            return np.abs(score) > z
-
-    else:
-        raise NotImplementedError
-    method = _calc
-
-    # prepare data, work on numpy arrays for the fulfilling pleasure of performance
-    d = data[field].dropna()
-    if d.empty:
-        return data, flagger
-    all_indices = np.arange(len(d.index))
-    x = (d.index - d.index[0]).total_seconds().values
-    y = d.values
-    counters = np.full(len(d.index), count)
-
-    if use_offset:
-        _loopfun = slidingWindowIndices
-    else:
-
-        def _loopfun(arr, wsz, step):
-            for i in range(0, len(arr) - wsz + 1, step):
-                yield i, i + wsz
-
-    for start, end in _loopfun(d.index, window, offset):
-        # mask points that have been already discarded
-        mask = counters[start:end] > 0
-        indices = all_indices[all_indices[start:end][mask]]
-        xchunk = x[indices]
-        ychunk = y[indices]
-
-        if xchunk.size == 0:
-            continue
-
-        # get residual
-        coef = poly.polyfit(xchunk, ychunk, polydeg)
-        model = poly.polyval(xchunk, coef)
-        residual = ychunk - model
-
-        score = method(residual)
-
-        # count`em in
-        goneMad = score.nonzero()[0]
-        counters[indices[goneMad]] -= 1
-
-    outlier = np.where(counters <= 0)[0]
-    loc = d[outlier].index
-    flagger = flagger.setFlags(field, loc=loc, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def spikes_flagMad(data, field, flagger, window, z=3.5, **kwargs):
+def flagMAD(data, field, flagger, window, z=3.5, **kwargs):
 
     """
 
@@ -881,7 +685,7 @@ def spikes_flagMad(data, field, flagger, window, z=3.5, **kwargs):
 
 
 @register(masking='field')
-def spikes_flagBasic(data, field, flagger, thresh, tolerance, window, numba_kickin=200000, **kwargs):
+def flagOffset(data, field, flagger, thresh, tolerance, window, numba_kickin=200000, **kwargs):
     """
     A basic outlier test that is designed to work for harmonized and not harmonized data.
 
@@ -986,165 +790,7 @@ def spikes_flagBasic(data, field, flagger, thresh, tolerance, window, numba_kick
 
 
 @register(masking='field')
-def spikes_flagSpektrumBased(
-    data,
-    field,
-    flagger,
-    raise_factor=0.15,
-    deriv_factor=0.2,
-    noise_func="CoVar",
-    noise_window="12h",
-    noise_thresh=1,
-    smooth_window=None,
-    smooth_poly_deg=2,
-    **kwargs,
-):
-    """
-
-    Function detects and flags spikes in input data series by evaluating its derivatives and applying some
-    conditions to it. A datapoint is considered a spike, if:
-
-    (1) the quotient to its preceeding datapoint exceeds a certain bound
-    (controlled by param `raise_factor`)
-    (2) the quotient of the datas second derivate at the preceeding and subsequent timestamps is close enough to 1.
-    (controlled by param `deriv_factor`)
-    (3) the surrounding data is not too noisy. (Coefficient of Variation[+/- noise_window] < 1)
-    (controlled by param `noise_thresh`)
-
-    Note, that the data-to-be-flagged is supposed to be sampled at an equidistant frequency grid
-
-    Note, that the derivative is calculated after applying a Savitsky-Golay filter to the data.
-
-    Parameters
-    ----------
-
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    raise_factor : float, default 0.15
-        Minimum relative value difference between two values to consider the latter as a spike candidate.
-        See condition (1) (or reference [2]).
-    deriv_factor : float, default 0.2
-        See condition (2) (or reference [2]).
-    noise_func : {'CoVar', 'rVar'}, default 'CoVar'
-        Function to calculate noisiness of the data surrounding potential spikes.
-
-        * ``'CoVar'``: Coefficient of Variation
-        * ``'rVar'``: Relative Variance
-
-    noise_window : str, default '12h'
-        An offset string that determines the range of the time window of the "surrounding" data of a potential spike.
-        See condition (3) (or reference [2]).
-    noise_thresh : float, default 1
-        Upper threshold for noisiness of data surrounding potential spikes. See condition (3) (or reference [2]).
-    smooth_window : {None, str}, default None
-        Size of the smoothing window of the Savitsky-Golay filter.
-        The default value ``None`` results in a window of two times the sampling rate (i.e. containing three values).
-    smooth_poly_deg : int, default 2
-        Degree of the polynomial used for fitting with the Savitsky-Golay filter.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-
-    References
-    ----------
-    This Function is a generalization of the Spectrum based Spike flagging mechanism as presented in:
-
-    [1] Dorigo, W. et al: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-
-    Notes
-    -----
-    A value is flagged a spike, if:
-
-    * The quotient to its preceding data point exceeds a certain bound:
-
-      * :math:`|\\frac{x_k}{x_{k-1}}| > 1 +` ``raise_factor``, or
-      * :math:`|\\frac{x_k}{x_{k-1}}| < 1 -` ``raise_factor``
-
-    * The quotient of the second derivative :math:`x''`, at the preceding
-      and subsequent timestamps is close enough to 1:
-
-      * :math:`|\\frac{x''_{k-1}}{x''_{k+1}} | > 1 -` ``deriv_factor``, and
-      * :math:`|\\frac{x''_{k-1}}{x''_{k+1}} | < 1 +` ``deriv_factor``
-
-    * The dataset :math:`X = x_i, ..., x_{k-1}, x_{k+1}, ..., x_j`, with
-      :math:`|t_{k-1} - t_i| = |t_j - t_{k+1}| =` ``noise_window`` fulfills the
-      following condition:
-
-      * ``noise_func``:math:`(X) <` ``noise_thresh``
-
-    """
-
-    dataseries, data_rate = retrieveTrustworthyOriginal(data, field, flagger)
-    noise_func_map = {"covar": pd.Series.var, "rvar": pd.Series.std}
-    noise_func = noise_func_map[noise_func.lower()]
-
-    if smooth_window is None:
-        smooth_window = 3 * pd.Timedelta(data_rate)
-    else:
-        smooth_window = pd.Timedelta(smooth_window)
-
-    quotient_series = dataseries / dataseries.shift(+1)
-    spikes = (quotient_series > (1 + raise_factor)) | (quotient_series < (1 - raise_factor))
-    spikes = spikes[spikes == True]
-
-    # loop through spikes: (loop may sound ugly - but since the number of spikes is supposed to not exceed the
-    # thousands for year data - a loop going through all the spikes instances is much faster than
-    # a rolling window, rolling all through a stacked year dataframe )
-
-    # calculate some values, repeatedly needed in the course of the loop:
-
-    filter_window_seconds = smooth_window.seconds
-    smoothing_periods = int(np.ceil((filter_window_seconds / data_rate.n)))
-    lower_dev_bound = 1 - deriv_factor
-    upper_dev_bound = 1 + deriv_factor
-
-    if smoothing_periods % 2 == 0:
-        smoothing_periods += 1
-
-    for spike in spikes.index:
-        start_slice = spike - smooth_window
-        end_slice = spike + smooth_window
-
-        scnd_derivate = savgol_filter(
-            dataseries[start_slice:end_slice], window_length=smoothing_periods, polyorder=smooth_poly_deg, deriv=2,
-        )
-
-        length = scnd_derivate.size
-        test_ratio_1 = np.abs(scnd_derivate[int(((length + 1) / 2) - 2)] / scnd_derivate[int(((length + 1) / 2))])
-
-        if lower_dev_bound < test_ratio_1 < upper_dev_bound:
-            # apply noise condition:
-            start_slice = spike - pd.Timedelta(noise_window)
-            end_slice = spike + pd.Timedelta(noise_window)
-            test_slice = dataseries[start_slice:end_slice].drop(spike)
-            test_ratio_2 = np.abs(noise_func(test_slice) / test_slice.mean())
-            # not a spike, we want to flag, if condition not satisfied:
-            if test_ratio_2 > noise_thresh:
-                spikes[spike] = False
-
-        # not a spike, we want to flag, if condition not satisfied
-        else:
-            spikes[spike] = False
-
-    spikes = spikes[spikes == True]
-
-    flagger = flagger.setFlags(field, spikes.index, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def spikes_flagGrubbs(data, field, flagger, winsz, alpha=0.05, min_periods=8, check_lagged=False, **kwargs):
+def flagByGrubbs(data, field, flagger, winsz, alpha=0.05, min_periods=8, check_lagged=False, **kwargs):
     """
     The function flags values that are regarded outliers due to the grubbs test.
 
@@ -1229,3 +875,109 @@ def spikes_flagGrubbs(data, field, flagger, winsz, alpha=0.05, min_periods=8, ch
 
     flagger = flagger.setFlags(field, loc=to_flag, **kwargs)
     return data, flagger
+
+
+@register(masking='field')
+def flagRange(data, field, flagger, min=-np.inf, max=np.inf, **kwargs):
+    """
+    Function flags values not covered by the closed interval [`min`, `max`].
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-flagged.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    min : float
+        Lower bound for valid data.
+    max : float
+        Upper bound for valid data.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the flagger input.
+    """
+
+    # using .values is very much faster
+    datacol = data[field].values
+    mask = (datacol < min) | (datacol > max)
+    flagger = flagger.setFlags(field, mask, **kwargs)
+    return data, flagger
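
A usage sketch for the bound check above (field name and bounds are hypothetical):

```python
# flag every value of 'soil_moisture' outside the closed interval [0, 100]
data, flagger = flagRange(data, 'soil_moisture', flagger, min=0, max=100)
```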
+
+
+@register(masking='all')
+def flagCrossStatistic(data, field, flagger, fields, thresh, cross_stat='modZscore', **kwargs):
+    """
+    Function checks for outliers relative to the "horizontal" input data axis.
+
+    For `fields` :math:`=[f_1,f_2,...,f_N]` and timestamps :math:`[t_1,t_2,...,t_K]`, the following steps are taken
+    for outlier detection:
+
+    1. All timestamps :math:`t_i`, for which there is at least one :math:`f_k` with :math:`data[f_k]` having no entry at
+       :math:`t_i`, are excluded from the following process (inner join of the :math:`f_i` fields).
+    2. for every :math:`1 <= i <= K`, the value
+       :math:`m_i = median(\\{data[f_1][t_i], data[f_2][t_i], ..., data[f_N][t_i]\\})` is calculated
+    3. for every :math:`1 <= i <= K`, the set
+       :math:`\\{data[f_1][t_i] - m_i, data[f_2][t_i] - m_i, ..., data[f_N][t_i] - m_i\\}` is tested for outliers with the
+       specified method (`cross_stat` parameter).
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        A dummy parameter.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    fields : List[str]
+        List of fieldnames in data, determining which variables are to be included into the flagging process.
+    thresh : float
+        Threshold which the outlier score of a value must exceed in order to be flagged an outlier.
+    cross_stat : {'modZscore', 'Zscore'}, default 'modZscore'
+        Method used for calculating the outlier scores.
+
+        * ``'modZscore'``: Median based "sigma"-ish approach. See References [1].
+        * ``'Zscore'``: Score values by the number of standard deviations they differ from the mean.
+          See References [1].
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the input flagger.
+
+    References
+    ----------
+    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
+    """
+
+    df = data[fields].loc[data[fields].index_of('shared')].to_df()
+
+    if isinstance(cross_stat, str):
+        if cross_stat == 'modZscore':
+            MAD_series = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
+            diff_scores = ((0.6745 * (df.subtract(df.median(axis=1), axis=0))).divide(MAD_series, axis=0)).abs()
+        elif cross_stat == 'Zscore':
+            diff_scores = (df.subtract(df.mean(axis=1), axis=0)).divide(df.std(axis=1), axis=0).abs()
+        else:
+            raise ValueError(cross_stat)
+    else:
+        try:
+            stat = getattr(df, cross_stat.__name__)(axis=1)
+        except AttributeError:
+            stat = df.aggregate(cross_stat, axis=1)
+        diff_scores = df.subtract(stat, axis=0).abs()
+
+    mask = diff_scores > thresh
+    for var in fields:
+        flagger = flagger.setFlags(var, mask[var], **kwargs)
+
+    return data, flagger
\ No newline at end of file
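
To make the ``'modZscore'`` branch of ``flagCrossStatistic`` tangible, here is a toy illustration on a plain DataFrame (values are made up; the arithmetic follows the function body above):

```python
import pandas as pd

# three variables observed at three shared timestamps; 'a' jumps away at the last one
df = pd.DataFrame({'a': [1.0, 1.1, 5.0], 'b': [1.05, 1.2, 1.1], 'c': [0.9, 1.0, 1.0]})

# row-wise median absolute deviation and modified Z-scores, as in the function body
mad = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
scores = (0.6745 * df.subtract(df.median(axis=1), axis=0)).divide(mad, axis=0).abs()

mask = scores > 3.5  # only 'a' at the last timestamp exceeds the threshold
```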
diff --git a/saqc/funcs/pattern_rec.py b/saqc/funcs/pattern.py
similarity index 96%
rename from saqc/funcs/pattern_rec.py
rename to saqc/funcs/pattern.py
index 83f392df76a9c0dfc5a1a1af2e7fce108c92caf9..d1ebeee42a1a99226f29948715daf7761efa4615 100644
--- a/saqc/funcs/pattern_rec.py
+++ b/saqc/funcs/pattern.py
@@ -12,7 +12,7 @@ from saqc.lib.tools import customRoller
 
 
 @register(masking='field')
-def flagPattern_wavelet(data, field, flagger, ref_field, widths=(1, 2, 4, 8), waveform='mexh', **kwargs):
+def flagPatternByWavelet(data, field, flagger, ref_field, widths=(1, 2, 4, 8), waveform='mexh', **kwargs):
     """
     Pattern recognition via wavelets.
 
@@ -89,7 +89,7 @@ def flagPattern_wavelet(data, field, flagger, ref_field, widths=(1, 2, 4, 8), wa
 
 
 @register(masking='field')
-def flagPattern_dtw(data, field, flagger, ref_field, max_distance=0.03, normalize=True, **kwargs):
+def flagPatternByDTW(data, field, flagger, ref_field, max_distance=0.03, normalize=True, **kwargs):
     """ Pattern Recognition via Dynamic Time Warping.
 
     The steps are:
diff --git a/saqc/funcs/proc_functions.py b/saqc/funcs/proc_functions.py
deleted file mode 100644
index aa6974c781e90c49520c63300bfae6c15af426ae..0000000000000000000000000000000000000000
--- a/saqc/funcs/proc_functions.py
+++ /dev/null
@@ -1,1265 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import pandas as pd
-import numpy as np
-from saqc.core.register import register
-from saqc.lib.ts_operators import interpolateNANs, aggregate2Freq, shift2Freq, expModelFunc
-from saqc.funcs.breaks_detection import breaks_flagRegimeAnomaly
-from saqc.funcs.modelling import modelling_changePointCluster
-from saqc.lib.tools import toSequence, mergeDios, dropper, mutateIndex, detectDeviants, evalFreqStr
-import dios
-import functools
-from scipy.optimize import curve_fit
-from sklearn.linear_model import LinearRegression
-from sklearn.utils import resample
-
-
-ORIGINAL_SUFFIX = "_original"
-
-METHOD2ARGS = {
-    "inverse_fshift": ("backward", pd.Timedelta),
-    "inverse_bshift": ("forward", pd.Timedelta),
-    "inverse_nshift": ("nearest", lambda x: pd.Timedelta(x) / 2),
-    "inverse_fagg": ("bfill", pd.Timedelta),
-    "inverse_bagg": ("ffill", pd.Timedelta),
-    "inverse_nagg": ("nearest", lambda x: pd.Timedelta(x) / 2),
-    "match": (None, lambda x: "0min"),
-}
-
-
-@register(masking='field')
-def proc_rollingInterpolateMissing(
-    data, field, flagger, winsz, func=np.median, center=True, min_periods=0, interpol_flag="UNFLAGGED", **kwargs
-):
-    """
-    Interpolates missing values (nan values present in the data) by assigning them the aggregation result of
-    a window surrounding them.
-
-    Note, that in the current implementation, center=True can only be used with integer window sizes - furthermore
-    note, that integer window sizes can yield screwed aggregation results for not-harmonized or irregular data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-interpolated.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    winsz : int, str
-        The size of the window, the aggregation is computed from. Either counted in periods number (Integer passed),
-        or defined by a total temporal extension (offset String passed).
-    func : Callable
-        The function used for aggregation.
-    center : bool, default True
-        Wheather or not the window, the aggregation is computed of, is centered around the value to be interpolated.
-    min_periods : int
-        Minimum number of valid (not np.nan) values that have to be available in a window for its aggregation to be
-        computed.
-    interpol_flag : {'GOOD', 'BAD', 'UNFLAGGED', str}, default 'UNFLAGGED'
-        Flag that is to be inserted for the interpolated values. You can either pass one of the three major flag-classes
-        or specify directly a certain flag from the passed flagger.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-    """
-
-    data = data.copy()
-    datcol = data[field]
-    roller = datcol.rolling(window=winsz, center=center, min_periods=min_periods)
-    try:
-        func_name = func.__name__
-        if func_name[:3] == "nan":
-            func_name = func_name[3:]
-        rolled = getattr(roller, func_name)()
-    except AttributeError:
-        rolled = roller.apply(func)
-
-    na_mask = datcol.isna()
-    interpolated = na_mask & ~rolled.isna()
-    datcol[na_mask] = rolled[na_mask]
-    data[field] = datcol
-
-    if interpol_flag:
-        if interpol_flag in ["BAD", "UNFLAGGED", "GOOD"]:
-            interpol_flag = getattr(flagger, interpol_flag)
-        flagger = flagger.setFlags(field, loc=interpolated, force=True, flag=interpol_flag, **kwargs)
-
-    return data, flagger
-
-
-@register(masking='field')
-def proc_interpolateMissing(
-    data,
-    field,
-    flagger,
-    method,
-    inter_order=2,
-    inter_limit=2,
-    interpol_flag="UNFLAGGED",
-    downgrade_interpolation=False,
-    not_interpol_flags=None,
-    **kwargs
-):
-
-    """
-    Function to interpolate nan values in the data.
-
-    There are available all the interpolation methods from the pandas.interpolate method and they are applicable by
-    the very same key words, that you would pass to the ``pd.Series.interpolate``'s method parameter.
-
-    Note, that the `inter_limit` keyword really restricts the interpolation to chunks, not containing more than
-    `inter_limit` successive nan entries.
-
-    Note, that the function differs from ``proc_interpolateGrid``, in its behaviour to ONLY interpolate nan values that
-    were already present in the data passed.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-interpolated.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
-        The interpolation method you want to apply.
-    inter_order : int, default 2
-        If there your selected interpolation method can be performed at different 'orders' - here you pass the desired
-        order.
-    inter_limit : int, default 2
-        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated.
-    interpol_flag : {'GOOD', 'BAD', 'UNFLAGGED', str}, default 'UNFLAGGED'
-        Flag that is to be inserted for the interpolated values. You can either pass one of the three major flag-classes
-        or specify directly a certain flag from the passed flagger.
-    downgrade_interpolation : bool, default False
-        If interpolation can not be performed at `inter_order` - (not enough values or not implemented at this order) -
-        automaticalyy try to interpolate at order `inter_order` :math:`- 1`.
-    not_interpol_flags : {None, str, List[str]}, default None
-        A list of flags or a single Flag, marking values, you want NOT to be interpolated.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-    """
-
-    data = data.copy()
-    inter_data = interpolateNANs(
-        data[field],
-        method,
-        order=inter_order,
-        inter_limit=inter_limit,
-        downgrade_interpolation=downgrade_interpolation,
-        return_chunk_bounds=False,
-    )
-    interpolated = data[field].isna() & inter_data.notna()
-
-    if not_interpol_flags:
-        for f in toSequence(not_interpol_flags):
-            if f in ["BAD", "UNFLAGGED", "GOOD"]:
-                f = getattr(flagger, interpol_flag)
-            is_flagged = flagger.isFlagged(flag=f)[field]
-            cond = is_flagged & interpolated
-            inter_data.mask(cond, np.nan, inplace=True)
-        interpolated &= inter_data.notna()
-
-    if interpol_flag:
-        if interpol_flag in ["BAD", "UNFLAGGED", "GOOD"]:
-            interpol_flag = getattr(flagger, interpol_flag)
-        flagger = flagger.setFlags(field, loc=interpolated, force=True, flag=interpol_flag, **kwargs)
-
-    data[field] = inter_data
-    return data, flagger
-
-
-@register(masking='field')
-def proc_interpolateGrid(
-        data,
-        field,
-        flagger,
-        freq,
-        method,
-        inter_order=2,
-        to_drop=None,
-        downgrade_interpolation=False,
-        empty_intervals_flag=None,
-        grid_field=None,
-        inter_limit=2,
-        freq_check=None,
-        **kwargs):
-
-    """
-    Function to interpolate the data at regular (equidistant) timestamps (or Grid points).
-
-    Note, that the interpolation will only be calculated, for grid timestamps that have a preceding AND a succeeding
-    valid data value within "freq" range.
-
-    Note, that the function differs from proc_interpolateMissing, by returning a whole new data set, only containing
-    samples at the interpolated, equidistant timestamps (of frequency "freq").
-
-    Note, it is possible to interpolate unregular "grids" (with no frequencies). In fact, any date index
-    can be target of the interpolation. Just pass the field name of the variable, holding the index
-    you want to interpolate, to "grid_field". 'freq' is then use to determine the maximum gap size for
-    a grid point to be interpolated.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-interpolated.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    freq : str
-        An Offset String, interpreted as the frequency of
-        the grid you want to interpolate your data at.
-    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
-        The interpolation method you want to apply.
-    inter_order : integer, default 2
-        If there your selected interpolation method can be performed at different 'orders' - here you pass the desired
-        order.
-    to_drop : {None, str, List[str]}, default None
-        Flags that refer to values you want to drop before interpolation - effectively excluding grid points from
-        interpolation, that are only surrounded by values having a flag in them, that is listed in drop flags. Default
-        results in the flaggers *BAD* flag to be the drop_flag.
-    downgrade_interpolation : bool, default False
-        If interpolation can not be performed at `inter_order` - (not enough values or not implemented at this order) -
-        automatically try to interpolate at order `inter_order` :math:`- 1`.
-    empty_intervals_flag : str, default None
-        A Flag, that you want to assign to those values in the resulting equidistant sample grid, that were not
-        surrounded by valid data in the original dataset, and thus were not interpolated. Default automatically assigns
-        ``flagger.BAD`` flag to those values.
-    grid_field : String, default None
-        Use the timestamp of another variable as (not necessarily regular) "grid" to be interpolated.
-    inter_limit : Integer, default 2
-        Maximum number of consecutive Grid values allowed for interpolation. If set
-        to *n*, chunks of *n* and more consecutive grid values, where there is no value in between, wont be
-        interpolated.
-    freq_check : {None, 'check', 'auto'}, default None
-
-        * ``None``: do not validate frequency-string passed to `freq`
-        * ``'check'``: estimate frequency and log a warning if estimate miss matchs frequency string passed to 'freq', or
-          if no uniform sampling rate could be estimated
-        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
-    """
-
-    datcol = data[field]
-    datcol = datcol.copy()
-    flagscol = flagger.getFlags(field)
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-    if empty_intervals_flag is None:
-        empty_intervals_flag = flagger.BAD
-
-    drop_mask = dropper(field, to_drop, flagger, flagger.BAD)
-    drop_mask |= flagscol.isna()
-    drop_mask |= datcol.isna()
-    datcol[drop_mask] = np.nan
-    datcol.dropna(inplace=True)
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-    if datcol.empty:
-        data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
-        return data, flagger
-    # account for annoying case of subsequent frequency aligned values, differing exactly by the margin
-    # 2*freq:
-    spec_case_mask = datcol.index.to_series()
-    spec_case_mask = spec_case_mask - spec_case_mask.shift(1)
-    spec_case_mask = spec_case_mask == 2 * pd.Timedelta(freq)
-    spec_case_mask = spec_case_mask[spec_case_mask]
-    spec_case_mask = spec_case_mask.resample(freq).asfreq().dropna()
-
-    if not spec_case_mask.empty:
-        spec_case_mask = spec_case_mask.tshift(-1, freq)
-
-    # prepare grid interpolation:
-    if grid_field is None:
-        grid_index = pd.date_range(start=datcol.index[0].floor(freq), end=datcol.index[-1].ceil(freq), freq=freq,
-                                   name=datcol.index.name)
-    else:
-        grid_index = data[grid_field].index
-
-
-    aligned_start = datcol.index[0] == grid_index[0]
-    aligned_end = datcol.index[-1] == grid_index[-1]
-    datcol = datcol.reindex(datcol.index.join(grid_index, how="outer",))
-
-    # do the interpolation
-    inter_data, chunk_bounds = interpolateNANs(
-        datcol, method, order=inter_order, inter_limit=inter_limit, downgrade_interpolation=downgrade_interpolation,
-        return_chunk_bounds=True
-    )
-
-    if grid_field is None:
-        # override falsely interpolated values:
-        inter_data[spec_case_mask.index] = np.nan
-
-    # store interpolated grid
-    inter_data = inter_data[grid_index]
-    data[field] = inter_data
-
-    # flags reshaping (dropping data drops):
-    flagscol.drop(flagscol[drop_mask].index, inplace=True)
-
-    if grid_field is not None:
-        # only basic flag propagation supported for custom grids (take worst from preceeding/succeeding)
-        preceeding = flagscol.reindex(grid_index, method='ffill', tolerance=freq)
-        succeeding = flagscol.reindex(grid_index, method='bfill', tolerance=freq)
-        # check for too big gaps in the source data and drop the values interpolated in those too big gaps
-        na_mask = preceeding.isna() | succeeding.isna()
-        na_mask = na_mask[na_mask]
-        preceeding.drop(na_mask.index, inplace=True)
-        succeeding.drop(na_mask.index, inplace=True)
-        inter_data.drop(na_mask.index, inplace=True)
-        data[field] = inter_data
-        mask = succeeding > preceeding
-        preceeding.loc[mask] = succeeding.loc[mask]
-        flagscol = preceeding
-        flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(flagger_new)
-        return data, flagger
-
-    # for freq defined grids, max-aggregate flags of every grid points freq-ranged surrounding
-    # hack ahead! Resampling with overlapping intervals:
-    # 1. -> no rolling over categories allowed in pandas, so we translate manually:
-    cats = pd.CategoricalIndex(flagger.dtype.categories, ordered=True)
-    cats_dict = {cats[i]: i for i in range(0, len(cats))}
-    flagscol = flagscol.replace(cats_dict)
-    # 3. -> combine resample+rolling to resample with overlapping intervals:
-    flagscol = flagscol.resample(freq).max()
-    initial = flagscol[0]
-    flagscol = flagscol.rolling(2, center=True, closed="neither").max()
-    flagscol[0] = initial
-    cats_dict = {num: key for (key, num) in cats_dict.items()}
-    flagscol = flagscol.astype(int, errors="ignore").replace(cats_dict)
-    flagscol[flagscol.isna()] = empty_intervals_flag
-    # ...hack done
-
-    # we might miss the flag for interpolated data grids last entry (if we miss it - the datapoint is always nan
-    # - just settling a convention here(resulting GRID should start BEFORE first valid data entry and range to AFTER
-    # last valid data)):
-    if inter_data.shape[0] > flagscol.shape[0]:
-        flagscol = flagscol.append(pd.Series(empty_intervals_flag, index=[datcol.index[-1]]))
-
-    # Additional consistency operation: we have to block first/last interpolated datas flags - since they very
-    # likely represent chunk starts/ends (except data start and or end timestamp were grid-aligned before Grid
-    # interpolation already.)
-    if np.isnan(inter_data[0]) and not aligned_start:
-        chunk_bounds = chunk_bounds.insert(0, inter_data.index[0])
-    if np.isnan(inter_data[-1]) and not aligned_end:
-        chunk_bounds = chunk_bounds.append(pd.DatetimeIndex([inter_data.index[-1]]))
-    chunk_bounds = chunk_bounds.unique()
-    flagger_new = flagger.initFlags(inter_data).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-
-    # block chunk ends of interpolation
-    flags_to_block = pd.Series(np.nan, index=chunk_bounds).astype(flagger_new.dtype)
-    flagger_new = flagger_new.setFlags(field, loc=chunk_bounds, flag=flags_to_block, force=True, inplace=True)
-
-    flagger = flagger.slice(drop=field).merge(flagger_new, subset=[field], inplace=True)
-    return data, flagger
-
-
-@register(masking='field')
-def proc_resample(
-    data,
-    field,
-    flagger,
-    freq,
-    agg_func=np.mean,
-    method="bagg",
-    max_invalid_total_d=np.inf,
-    max_invalid_consec_d=np.inf,
-    max_invalid_consec_f=np.inf,
-    max_invalid_total_f=np.inf,
-    flag_agg_func=max,
-    empty_intervals_flag=None,
-    to_drop=None,
-    all_na_2_empty=False,
-    freq_check=None,
-    **kwargs
-):
-    """
-    Function to resample the data. Afterwards the data will be sampled at regular (equidistant) timestamps
-    (or Grid points). Sampling intervals therefor get aggregated with a function, specifyed by 'agg_func' parameter and
-    the result gets projected onto the new timestamps with a method, specified by "method". The following method
-    (keywords) are available:
-
-    * ``'nagg'``: all values in the range (+/- `freq`/2) of a grid point get aggregated with agg_func and assigned to it.
-    * ``'bagg'``: all values in a sampling interval get aggregated with agg_func and the result gets assigned to the last
-      grid point.
-    * ``'fagg'``: all values in a sampling interval get aggregated with agg_func and the result gets assigned to the next
-      grid point.
-
-
-    Note, that. if possible, functions passed to agg_func will get projected internally onto pandas.resample methods,
-    wich results in some reasonable performance boost - however, for this to work, you should pass functions that have
-    the __name__ attribute initialised and the according methods name assigned to it.
-    Furthermore, you shouldnt pass numpys nan-functions
-    (``nansum``, ``nanmean``,...) because those for example, have ``__name__ == 'nansum'`` and they will thus not
-    trigger ``resample.func()``, but the slower ``resample.apply(nanfunc)``. Also, internally, no nans get passed to
-    the functions anyway, so that there is no point in passing the nan functions.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-resampled.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    freq : str
-        An Offset String, that will be interpreted as the frequency you want to resample your data with.
-    agg_func : Callable
-        The function you want to use for aggregation.
-    method: {'fagg', 'bagg', 'nagg'}, default 'bagg'
-        Specifies which intervals to be aggregated for a certain timestamp. (preceding, succeeding or
-        "surrounding" interval). See description above for more details.
-    max_invalid_total_d : {np.inf, int}, np.inf
-        Maximum number of invalid (nan) datapoints, allowed per resampling interval. If max_invalid_total_d is
-        exceeded, the interval gets resampled to nan. By default (``np.inf``), there is no bound to the number of nan
-        values in an interval and only intervals containing ONLY nan values or those, containing no values at all,
-        get projected onto nan
-    max_invalid_consec_d : {np.inf, int}, default np.inf
-        Maximum number of consecutive invalid (nan) data points, allowed per resampling interval.
-        If max_invalid_consec_d is exceeded, the interval gets resampled to nan. By default (np.inf),
-        there is no bound to the number of consecutive nan values in an interval and only intervals
-        containing ONLY nan values, or those containing no values at all, get projected onto nan.
-    max_invalid_total_f : {np.inf, int}, default np.inf
-        Same as `max_invalid_total_d`, only applying for the flags. The flag regarded as "invalid" value,
-        is the one passed to empty_intervals_flag (default=``flagger.BAD``).
-        Also this is the flag assigned to invalid/empty intervals.
-    max_invalid_total_f : {np.inf, int}, default np.inf
-        Same as `max_invalid_total_f`, only applying onto flags. The flag regarded as "invalid" value, is the one passed
-        to empty_intervals_flag (default=flagger.BAD). Also this is the flag assigned to invalid/empty intervals.
-    flag_agg_func : Callable, default: max
-        The function you want to aggregate the flags with. It should be capable of operating on the flags dtype
-        (usually ordered categorical).
-    empty_intervals_flag : {None, str}, default None
-        A Flag, that you want to assign to invalid intervals. Invalid are those intervals, that contain nan values only,
-        or no values at all. Furthermore the empty_intervals_flag is the flag, serving as "invalid" identifyer when
-        checking for `max_total_invalid_f` and `max_consec_invalid_f patterns`. Default triggers ``flagger.BAD`` to be
-        assigned.
-    to_drop : {None, str, List[str]}, default None
-        Flags that refer to values you want to drop before resampling - effectively excluding values that are flagged
-        with a flag in to_drop from the resampling process - this means that they also will not be counted in the
-        the `max_consec`/`max_total evaluation`. `to_drop` = ``None`` results in NO flags being dropped initially.
-    freq_check : {None, 'check', 'auto'}, default None
-
-        * ``None``: do not validate frequency-string passed to `freq`
-        * ``'check'``: estimate frequency and log a warning if estimate miss matchs frequency string passed to 'freq', or
-          if no uniform sampling rate could be estimated
-        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
-    """
-
-    data = data.copy()
-    datcol = data[field]
-    flagscol = flagger.getFlags(field)
-    if empty_intervals_flag is None:
-        empty_intervals_flag = flagger.BAD
-
-    drop_mask = dropper(field, to_drop, flagger, [])
-    datcol.drop(datcol[drop_mask].index, inplace=True)
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-    flagscol.drop(flagscol[drop_mask].index, inplace=True)
-    if all_na_2_empty:
-        if datcol.dropna().empty:
-            datcol = pd.Series([], index=pd.DatetimeIndex([]), name=field)
-
-    if datcol.empty:
-        # for consistency reasons - return empty data/flags column when there is no valid data left
-        # after filtering.
-        data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
-        return data, flagger
-
-    datcol = aggregate2Freq(
-        datcol,
-        method,
-        freq,
-        agg_func,
-        fill_value=np.nan,
-        max_invalid_total=max_invalid_total_d,
-        max_invalid_consec=max_invalid_consec_d,
-    )
-    flagscol = aggregate2Freq(
-        flagscol,
-        method,
-        freq,
-        flag_agg_func,
-        fill_value=empty_intervals_flag,
-        max_invalid_total=max_invalid_total_f,
-        max_invalid_consec=max_invalid_consec_f,
-    )
-
-    # data/flags reshaping:
-    data[field] = datcol
-    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-    flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
-    return data, flagger
-
-
-@register(masking='field')
-def proc_shift(data, field, flagger, freq, method, to_drop=None, empty_intervals_flag=None, freq_check=None, **kwargs):
-    """
-    Function to shift data points to regular (equidistant) timestamps.
-    Values get shifted according to the keyword passed to the `method` parameter.
-
-    * ``'nshift'``: every grid point gets assigned the nearest value in its range. (range = +/- 0.5 * `freq`)
-    * ``'bshift'``:  every grid point gets assigned its first succeeding value - if there is one available in the
-      succeeding sampling interval.
-    * ``'fshift'``:  every grid point gets assigned its ultimately preceeding value - if there is one available in
-      the preceeding sampling interval.
-
-    Note: all data nans get excluded defaultly from shifting. If `to_drop` is ``None``, - all *BAD* flagged values get
-    excluded as well.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-shifted.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    freq : str
-        An frequency Offset String that will be interpreted as the sampling rate you want the data to be shifted to.
-    method: {'fagg', 'bagg', 'nagg'}, default 'nshift'
-        Specifies if datapoints get propagated forwards, backwards or to the nearest grid timestamp. See function
-        description for more details.
-    empty_intervals_flag : {None, str}, default None
-        A Flag, that you want to assign to grid points, where no values are avaible to be shifted to.
-        Default triggers flagger.BAD to be assigned.
-    to_drop : {None, str, List[str]}, default None
-        Flags that refer to values you want to drop before shifting - effectively, excluding values that are flagged
-        with a flag in to_drop from the shifting process. Default - to_drop = None  - results in flagger.BAD
-        values being dropped initially.
-    freq_check : {None, 'check', 'auto'}, default None
-
-        * ``None``: do not validate frequency-string passed to `freq`
-        * ``'check'``: estimate frequency and log a warning if estimate miss matches frequency string passed to `freq`,
-          or if no uniform sampling rate could be estimated
-        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
-    """
-    data = data.copy()
-    datcol = data[field]
-    flagscol = flagger.getFlags(field)
-
-    if empty_intervals_flag is None:
-        empty_intervals_flag = flagger.BAD
-
-    drop_mask = dropper(field, to_drop, flagger, flagger.BAD)
-    drop_mask |= datcol.isna()
-    datcol[drop_mask] = np.nan
-    datcol.dropna(inplace=True)
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-    if datcol.empty:
-        data[field] = datcol
-        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
-        return data, flagger
-
-    flagscol.drop(drop_mask[drop_mask].index, inplace=True)
-
-    datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
-    flagscol = shift2Freq(flagscol, method, freq, fill_value=empty_intervals_flag)
-    data[field] = datcol
-    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
-    flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
-    return data, flagger
-
-
-@register(masking='field')
-def proc_transform(data, field, flagger, func, **kwargs):
-    """
-    Function to transform data columns with a transformation that maps series onto series of the same length.
-
-    Note, that flags get preserved.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-transformed.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    func : Callable
-        Function to transform data[field] with.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-    """
-    data = data.copy()
-    # NOTE: avoiding pd.Series.transform() in the line below, because transform does process columns element wise
-    # (so interpolati   ons wouldn't work)
-    new_col = pd.Series(func(data[field]), index=data[field].index)
-    data[field] = new_col
-    return data, flagger
-
-
-@register(masking='field')
-def proc_projectFlags(data, field, flagger, method, source, freq=None, to_drop=None, freq_check=None, **kwargs):
-
-    """
-    The Function projects flags of "source" onto flags of "field". Wherever the "field" flags are "better" then the
-    source flags projected on them, they get overridden with this associated source flag value.
-
-    Which "field"-flags are to be projected on which source flags, is controlled by the "method" and "freq"
-    parameters.
-
-    method: (field_flag in associated with "field", source_flags associated with "source")
-
-    'inverse_nagg' - all field_flags within the range +/- freq/2 of a source_flag, get assigned this source flags value.
-        (if source_flag > field_flag)
-    'inverse_bagg' - all field_flags succeeding a source_flag within the range of "freq", get assigned this source flags
-        value. (if source_flag > field_flag)
-    'inverse_fagg' - all field_flags preceeding a source_flag within the range of "freq", get assigned this source flags
-        value. (if source_flag > field_flag)
-
-    'inverse_interpolation' - all field_flags within the range +/- freq of a source_flag, get assigned this source flags value.
-        (if source_flag > field_flag)
-
-    'inverse_nshift' - That field_flag within the range +/- freq/2, that is nearest to a source_flag, gets the source
-        flags value. (if source_flag > field_flag)
-    'inverse_bshift' - That field_flag succeeding a source flag within the range freq, that is nearest to a
-        source_flag, gets assigned this source flags value. (if source_flag > field_flag)
-    'inverse_nshift' - That field_flag preceeding a source flag within the range freq, that is nearest to a
-        source_flag, gets assigned this source flags value. (if source_flag > field_flag)
-
-    'match' - any field_flag with a timestamp matching a source_flags timestamp gets this source_flags value
-    (if source_flag > field_flag)
-
-    Note, to undo or backtrack a resampling/shifting/interpolation that has been performed with a certain method,
-    you can just pass the associated "inverse" method. Also you should pass the same drop flags keyword.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to project the source-flags onto.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift'}
-        The method used for projection of source flags onto field flags. See description above for more details.
-    source : str
-        The source source of flags projection.
-    freq : {None, str},default None
-        The freq determines the projection range for the projection method. See above description for more details.
-        Defaultly (None), the sampling frequency of source is used.
-    to_drop : {None, str, List[str]}, default None
-        Flags referring to values that are to drop before flags projection. Relevant only when projecting with an
-        inverted shift method. Defaultly flagger.BAD is listed.
-    freq_check : {None, 'check', 'auto'}, default None
-        - None: do not validate frequency-string passed to `freq`
-        - 'check': estimate frequency and log a warning if estimate miss matchs frequency string passed to 'freq', or
-            if no uniform sampling rate could be estimated
-        - 'auto': estimate frequency and use estimate. (Ignores `freq` parameter.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values and shape may have changed relatively to the flagger input.
-    """
-    flagscol, metacols = flagger.getFlags(source, full=True)
-    if flagscol.empty:
-        return data, flagger
-    target_datcol = data[field]
-    target_flagscol, target_metacols = flagger.getFlags(field, full=True)
-
-    if (freq is None) and (method != "match"):
-        freq_check = 'auto'
-
-    freq = evalFreqStr(freq, freq_check, flagscol.index)
-
-    if method[-13:] == "interpolation":
-        backprojected = flagscol.reindex(target_flagscol.index, method="bfill", tolerance=freq)
-        fwrdprojected = flagscol.reindex(target_flagscol.index, method="ffill", tolerance=freq)
-        b_replacement_mask = (backprojected > target_flagscol) & (backprojected >= fwrdprojected)
-        f_replacement_mask = (fwrdprojected > target_flagscol) & (fwrdprojected > backprojected)
-        target_flagscol.loc[b_replacement_mask] = backprojected.loc[b_replacement_mask]
-        target_flagscol.loc[f_replacement_mask] = fwrdprojected.loc[f_replacement_mask]
-
-        backprojected_meta = {}
-        fwrdprojected_meta = {}
-        for meta_key in target_metacols.keys():
-            backprojected_meta[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method='bfill',
-                                                                      tolerance=freq)
-            fwrdprojected_meta[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method='ffill',
-                                                                      tolerance=freq)
-            target_metacols[meta_key].loc[b_replacement_mask] = backprojected_meta[meta_key].loc[b_replacement_mask]
-            target_metacols[meta_key].loc[f_replacement_mask] = fwrdprojected_meta[meta_key].loc[f_replacement_mask]
-
-    if method[-3:] == "agg" or method == "match":
-        # Aggregation - Inversion
-        projection_method = METHOD2ARGS[method][0]
-        tolerance = METHOD2ARGS[method][1](freq)
-        flagscol = flagscol.reindex(target_flagscol.index, method=projection_method, tolerance=tolerance)
-        replacement_mask = flagscol > target_flagscol
-        target_flagscol.loc[replacement_mask] = flagscol.loc[replacement_mask]
-        for meta_key in target_metacols.keys():
-            metacols[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method=projection_method,
-                                                            tolerance=tolerance)
-            target_metacols[meta_key].loc[replacement_mask] = metacols[meta_key].loc[replacement_mask]
-
-    if method[-5:] == "shift":
-        # NOTE: although inverting a simple shift seems to be a less complex operation, it has quite some
-        # code assigned to it and appears to be more verbose than inverting aggregation -
-        # that owes itself to the problem of BAD/invalid values blocking a proper
-        # shift inversion and having to be outsorted before shift inversion and re-inserted afterwards.
-        #
-        # starting with the dropping and its memorization:
-
-        drop_mask = dropper(field, to_drop, flagger, flagger.BAD)
-        drop_mask |= target_datcol.isna()
-        target_flagscol_drops = target_flagscol[drop_mask]
-        target_flagscol.drop(drop_mask[drop_mask].index, inplace=True)
-
-        # shift inversion
-        projection_method = METHOD2ARGS[method][0]
-        tolerance = METHOD2ARGS[method][1](freq)
-        flags_merged = pd.merge_asof(
-            flagscol,
-            pd.Series(target_flagscol.index.values, index=target_flagscol.index, name="pre_index"),
-            left_index=True,
-            right_index=True,
-            tolerance=tolerance,
-            direction=projection_method,
-        )
-        flags_merged.dropna(subset=["pre_index"], inplace=True)
-        flags_merged = flags_merged.set_index(["pre_index"]).squeeze()
-
-        # write flags to target
-        replacement_mask = flags_merged > target_flagscol.loc[flags_merged.index]
-        target_flagscol.loc[replacement_mask[replacement_mask].index] = flags_merged.loc[replacement_mask]
-
-        # reinsert drops
-        target_flagscol = target_flagscol.reindex(target_flagscol.index.join(target_flagscol_drops.index, how="outer"))
-        target_flagscol.loc[target_flagscol_drops.index] = target_flagscol_drops.values
-
-        for meta_key in target_metacols.keys():
-            target_metadrops = target_metacols[meta_key][drop_mask]
-            target_metacols[meta_key].drop(drop_mask[drop_mask].index, inplace=True)
-            meta_merged = pd.merge_asof(
-                metacols[meta_key],
-                pd.Series(target_metacols[meta_key].index.values, index=target_metacols[meta_key].index,
-                          name="pre_index"),
-                left_index=True,
-                right_index=True,
-                tolerance=tolerance,
-                direction=projection_method,
-            )
-            meta_merged.dropna(subset=["pre_index"], inplace=True)
-            meta_merged = meta_merged.set_index(["pre_index"]).squeeze()
-            # reinsert drops
-            target_metacols[meta_key][replacement_mask[replacement_mask].index] = meta_merged[replacement_mask]
-            target_metacols[meta_key] = target_metacols[meta_key].reindex(
-                target_metacols[meta_key].index.join(target_metadrops.index, how="outer"))
-            target_metacols[meta_key].loc[target_metadrops.index] = target_metadrops.values
-
-    flagger = flagger.setFlags(field, flag=target_flagscol, with_extra=True, **target_metacols)
-    return data, flagger
-
-
-@register(masking='none')
-def proc_fork(data, field, flagger, suffix=ORIGINAL_SUFFIX, **kwargs):
-    """
-    The function generates a copy of the data "field" and inserts it under the name field + suffix into the existing
-    data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to fork (copy).
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    suffix: str
-        Substring to append to the forked data variables name.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        data shape may have changed relatively to the flagger input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags shape may have changed relatively to the flagger input.
-    """
-    return proc_copy(data, field, flagger, newfield=str(field) + suffix, **kwargs)
-
-
-@register(masking='none')
-def proc_copy(data, field, flagger, newfield, **kwargs):
-    """
-    The function generates a copy of the data "field" and inserts it under the name field + suffix into the existing
-    data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to fork (copy).
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    suffix: str
-        Substring to append to the forked data variables name.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        data shape may have changed relatively to the flagger input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags shape may have changed relatively to the flagger input.
-    """
-
-    if newfield in flagger.flags.columns.union(data.columns):
-        raise ValueError(f"{field}: field already exist")
-
-    flags, extras = flagger.getFlags(field, full=True)
-    newflagger = flagger.replaceField(newfield, flags=flags, **extras)
-    newdata = data.copy()
-    newdata[newfield] = data[field].copy()
-    return newdata, newflagger
-
-
-@register(masking='none')
-def proc_drop(data, field, flagger, **kwargs):
-    """
-    The function drops field from the data dios and the flagger.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to drop.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        data shape may have changed relatively to the flagger input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags shape may have changed relatively to the flagger input.
-    """
-
-    data = data.copy()
-    del data[field]
-    flagger = flagger.replaceField(field, flags=None)
-    return data, flagger
-
-
-@register(masking='none')
-def proc_rename(data, field, flagger, new_name, **kwargs):
-    """
-    The function renames field to new name (in both, the flagger and the data).
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to rename.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    new_name : str
-        String, field is to be replaced with.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-    """
-    # store
-    s = data[field]
-    f, e = flagger.getFlags(field, full=True)
-
-    # delete
-    data = data.copy()
-    del data[field]
-    flagger = flagger.replaceField(field, flags=None)
-
-    # insert
-    data[new_name] = s
-    flagger = flagger.replaceField(new_name, inplace=True, flags=f, **e)
-
-    return data, flagger
-
-
-def _drift_fit(x, shift_target, cal_mean):
-    x_index = x.index - x.index[0]
-    x_data = x_index.total_seconds().values
-    x_data = x_data / x_data[-1]
-    y_data = x.values
-    origin_mean = np.mean(y_data[:cal_mean])
-    target_mean = np.mean(y_data[-cal_mean:])
-
-    def modelWrapper(x, c, a=origin_mean, target_mean=target_mean):
-        # final fitted curves val = target mean
-        b = (target_mean - a) / (np.exp(c) - 1)
-        return expModelFunc(x, a, b, c)
-
-    dataFitFunc = functools.partial(modelWrapper, a=origin_mean, target_mean=target_mean)
-
-    try:
-        fitParas, _ = curve_fit(dataFitFunc, x_data, y_data, bounds=([0], [np.inf]))
-        dataFit = dataFitFunc(x_data, fitParas[0])
-        b_val = (shift_target - origin_mean) / (np.exp(fitParas[0]) - 1)
-        dataShiftFunc = functools.partial(expModelFunc, a=origin_mean, b=b_val, c=fitParas[0])
-        dataShift = dataShiftFunc(x_data)
-    except RuntimeError:
-        dataFit = np.array([0] * len(x_data))
-        dataShift = np.array([0] * len(x_data))
-
-    return dataFit, dataShift
-
-
-@register(masking='all')
-def proc_seefoExpDriftCorrecture(data, field, flagger, maint_data_field, cal_mean=5, flag_maint_period=False,
-                                 check_maint='1h', **kwargs):
-    """
-    The function fits an exponential model to chunks of data[field].
-    It is assumed, that between maintenance events, there is a drift effect shifting the meassurements in a way, that
-    can be described by the model M:
-
-    M(t, a, b, c) = a + b(exp(c*t))
-
-    Where as the values y_0 and y_1, describing the mean value directly after the last maintenance event (y_0) and
-    directly before the next maintenance event (y_1), impose the following additional conditions on the drift model:.
-
-    M(0, a, b, c) = y0
-    M(1, a, b, c) = y1
-
-    Solving the equation, one obtains the one-parameter Model:
-
-    M_drift(t, c) = y0 + [(y1 - y0)/(exp(c) - )] * (exp(c*t) - 1)
-
-    For every datachunk in between maintenance events.
-
-    After having found the optimal parameter c*, the correction is performed by bending the fitted curve M_drift(t, c*),
-    in a way that it matches y2 at t=1 (,with y2 being the mean value observed directly after the end of the next
-    maintenance event).
-    This bended curve is given by:
-
-    M_shift(t, c*) = M(t, y0, [(y1 - y0)/(exp(c*) - )], c*)
-
-    And the new values at t are computed via:
-
-    new_vals(t) = old_vals(t) + M_shift(t) - M_drift(t)
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to correct.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    maint_data_field : str
-        The fieldname of the datacolumn holding the maintenance information.
-        The maint data is to expected to have following form:
-        The series' timestamp itself represents the beginning of a
-        maintenance event, wheras the values represent the endings of the maintenance intervals.
-    cal_mean : int, default 5
-        The number of values the mean is computed over, for obtaining the value level directly after and
-        directly before maintenance event. This values are needed for shift calibration. (see above description)
-    flag_maint_period : bool, default False
-        Wheather or not to flag BAD the values directly obtained while maintenance.
-    check_maint : bool, default True
-        Wheather or not to check, if the reported maintenance intervals match are plausible
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-    """
-
-
-    # 1: extract fit intervals:
-    if data[maint_data_field].empty:
-        return data, flagger
-    data = data.copy()
-    to_correct = data[field]
-    maint_data = data[maint_data_field]
-    drift_frame = pd.DataFrame({"drift_group": np.nan, to_correct.name: to_correct.values}, index=to_correct.index)
-
-    # group the drift frame
-    for k in range(0, maint_data.shape[0] - 1):
-        # assign group numbers for the timespans in between one maintenance ending and the beginning of the next
-        # maintenance time itself remains np.nan assigned
-        drift_frame.loc[maint_data.values[k] : pd.Timestamp(maint_data.index[k + 1]), "drift_group"] = k
-    drift_grouper = drift_frame.groupby("drift_group")
-    # define target values for correction
-    shift_targets = drift_grouper.aggregate(lambda x: x[:cal_mean].mean()).shift(-1)
-
-    for k, group in drift_grouper:
-        dataSeries = group[to_correct.name]
-        dataFit, dataShiftTarget = _drift_fit(dataSeries, shift_targets.loc[k, :][0], cal_mean)
-        dataFit = pd.Series(dataFit, index=group.index)
-        dataShiftTarget = pd.Series(dataShiftTarget, index=group.index)
-        dataShiftVektor = dataShiftTarget - dataFit
-        shiftedData = dataSeries + dataShiftVektor
-        to_correct[shiftedData.index] = shiftedData
-
-    if flag_maint_period:
-        to_flag = drift_frame["drift_group"]
-        to_flag = to_flag.drop(to_flag[: maint_data.index[0]].index)
-        to_flag = to_flag[to_flag.isna()]
-        flagger = flagger.setFlags(field, loc=to_flag, **kwargs)
-
-    data[field] = to_correct
-
-    return data, flagger
-
-
-@register
-def proc_seefoLinearDriftCorrecture(data, field, flagger, x_field, y_field, **kwargs):
-    """
-    Train a linear model that predicts data[y_field] by x_1*(data[x_field]) + x_0. (Least squares fit)
-
-    Then correct the data[field] via:
-
-    data[field] = data[field]*x_1 + x_0
-
-    Note, that data[x_field] and data[y_field] must be of equal length.
-    (Also, you might want them to be sampled at same timestamps.)
-
-    Parameters
-    ----------
-    x_field : String
-        Field name of x - data.
-    y_field : String
-        Field name of y - data.
-
-    """
-    data = data.copy()
-    datcol = data[field]
-    reg = LinearRegression()
-    reg.fit(data[x_field].values.reshape(-1,1), data[y_field].values)
-    datcol = (datcol * reg.coef_[0]) + reg.intercept_
-    data[field] = datcol
-    return data, flagger
-
-
-@register(masking='all')
-def proc_correctRegimeAnomaly(data, field, flagger, cluster_field, model, regime_transmission=None, x_date=False):
-    """
-    Function fits the passed model to the different regimes in data[field] and tries to correct
-    those values, that have assigned a negative label by data[cluster_field].
-
-    Currently, the only correction mode supported is the "parameter propagation."
-
-    This means, any regime :math:`z`, labeled negatively and being modeled by the parameters p, gets corrected via:
-
-    :math:`z_{correct} = z + (m(p^*) - m(p))`,
-
-    where :math:`p^*` denotes the parameter set belonging to the fit of the nearest not-negatively labeled cluster.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to correct.
-    flagger : saqc.flagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    clusterfield : str
-        A string denoting the field in data, holding the cluster label for the data you want to correct.
-    model : Callable
-        The model function to be fitted to the regimes.
-        It must be a function of the form :math:`f(x, *p)`, where :math:`x` is the ``numpy.array`` holding the
-        independent variables and :math:`p` are the model parameters that are to be obtained by fitting.
-        Depending on the `x_date` parameter, independent variable x will either be the timestamps
-        of every regime transformed to seconds from epoch, or it will be just seconds, counting the regimes length.
-    regime_transmission : {None, str}, default None:
-        If an offset string is passed, a data chunk of length `regime_transimission` right at the
-        start and right at the end is ignored when fitting the model. This is to account for the
-        unreliability of data near the changepoints of regimes.
-    x_date : bool, default False
-        If True, use "seconds from epoch" as x input to the model func, instead of "seconds from regime start".
-
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger
-        The flagger object, holding flags and additional Informations related to `data`.
-    """
-
-    cluster_ser = data[cluster_field]
-    unique_successive = pd.unique(cluster_ser.values)
-    data_ser = data[field]
-    regimes = data_ser.groupby(cluster_ser)
-    para_dict = {}
-    x_dict = {}
-    x_mask = {}
-    if regime_transmission is not None:
-        # get seconds
-        regime_transmission = pd.Timedelta(regime_transmission).total_seconds()
-    for label, regime in regimes:
-        if x_date is False:
-            # get seconds data:
-            xdata = (regime.index - regime.index[0]).to_numpy(dtype=float)*10**(-9)
-        else:
-            # get seconds from epoch data
-            xdata = regime.index.to_numpy(dtype=float)*10**(-9)
-        ydata = regime.values
-        valid_mask = ~np.isnan(ydata)
-        if regime_transmission is not None:
-            valid_mask &= (xdata > xdata[0] + regime_transmission)
-            valid_mask &= (xdata < xdata[-1] - regime_transmission)
-        try:
-            p, pcov = curve_fit(model, xdata[valid_mask], ydata[valid_mask])
-        except (RuntimeError, ValueError):
-            p = np.array([np.nan])
-        para_dict[label] = p
-        x_dict[label] = xdata
-        x_mask[label] = valid_mask
-
-    first_normal = unique_successive > 0
-    first_valid = np.array([~pd.isna(para_dict[unique_successive[i]]).any() for i in range(0, unique_successive.shape[0])])
-    first_valid = np.where(first_normal & first_valid)[0][0]
-    last_valid = 1
-
-    for k in range(0, unique_successive.shape[0]):
-        if unique_successive[k] < 0 & (not pd.isna(para_dict[unique_successive[k]]).any()):
-            ydata = data_ser[regimes.groups[unique_successive[k]]].values
-            xdata = x_dict[unique_successive[k]]
-            ypara = para_dict[unique_successive[k]]
-            if k > 0:
-                target_para = para_dict[unique_successive[k-last_valid]]
-            else:
-                # first regime has no "last valid" to its left, so we use first valid to the right:
-                target_para = para_dict[unique_successive[k + first_valid]]
-            y_shifted = ydata + (model(xdata, *target_para) - model(xdata, *ypara))
-            data_ser[regimes.groups[unique_successive[k]]] = y_shifted
-            if k > 0:
-                last_valid += 1
-        elif pd.isna(para_dict[unique_successive[k]]).any() & (k > 0):
-            last_valid += 1
-        else:
-            last_valid = 1
-
-    data[field] = data_ser
-    return data, flagger
-
-
-@register(masking='all')
-def proc_offsetCorrecture(data, field, flagger, max_mean_jump, normal_spread, search_winsz, min_periods,
-                          regime_transmission=None):
-    """
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to correct.
-    flagger : saqc.flagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    max_mean_jump : float
-        when searching for changepoints in mean - this is the threshold a mean difference in the
-        sliding window search must exceed to trigger changepoint detection.
-    normal_spread : float
-        threshold denoting the maximum, regimes are allowed to abolutely differ in their means
-        to form the "normal group" of values.
-    search_winsz : str
-        Size of the adjacent windows that are used to search for the mean changepoints.
-    min_periods : int
-        Minimum number of periods a search window has to contain, for the result of the changepoint
-        detection to be considered valid.
-    regime_transmission : {None, str}, default None:
-        If an offset string is passed, a data chunk of length `regime_transimission` right from the
-        start and right before the end of any regime is ignored when calculating a regimes mean for data correcture.
-        This is to account for the unrelyability of data near the changepoints of regimes.
-
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flagger : saqc.flagger
-        The flagger object, holding flags and additional Informations related to `data`.
-
-    """
-
-    data, flagger = proc_fork(data, field, flagger, '_CPcluster')
-    data, flagger = modelling_changePointCluster(data, field + '_CPcluster', flagger,
-                                                 lambda x, y: np.abs(np.mean(x) - np.mean(y)),
-                                                 lambda x, y: max_mean_jump,
-                                                 bwd_window=search_winsz,
-                                                 min_periods_bwd=min_periods)
-    data, flagger = breaks_flagRegimeAnomaly(data, field, flagger, field + '_CPcluster', normal_spread, set_flags=False)
-    data, flagger = proc_correctRegimeAnomaly(data, field, flagger, field + '_CPcluster',
-                                              lambda x, p1: np.array([p1] * x.shape[0]),
-                                              regime_transmission=regime_transmission)
-    data, flagger = proc_drop(data, field + '_CPcluster', flagger)
-
-    return data, flagger
diff --git a/saqc/funcs/resampling.py b/saqc/funcs/resampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..952ad0556cc3c17022a76ba59af97866b8793937
--- /dev/null
+++ b/saqc/funcs/resampling.py
@@ -0,0 +1,705 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+import dios
+
+import numpy as np
+import logging
+
+import pandas as pd
+
+from saqc.core.register import register
+from saqc.funcs.tools import copy, drop, rename
+from saqc.funcs.interpolation import interpolateIndex
+from saqc.lib.tools import dropper, evalFreqStr
+from saqc.lib.ts_operators import shift2Freq, aggregate2Freq
+
+logger = logging.getLogger("SaQC")
+
+
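+# Translation table (presumably consumed by the flag re-projection logic later in this module):
+# the first tuple entry is the fill/search direction used when reindexing or merge_asof-ing flags
+# back onto the original timestamps, the second one builds the matching tolerance from the grid
+# frequency, e.g. METHOD2ARGS["inverse_nagg"][1]("30Min") -> Timedelta("15Min").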
+METHOD2ARGS = {
+    "inverse_fshift": ("backward", pd.Timedelta),
+    "inverse_bshift": ("forward", pd.Timedelta),
+    "inverse_nshift": ("nearest", lambda x: pd.Timedelta(x) / 2),
+    "inverse_fagg": ("bfill", pd.Timedelta),
+    "inverse_bagg": ("ffill", pd.Timedelta),
+    "inverse_nagg": ("nearest", lambda x: pd.Timedelta(x) / 2),
+    "match": (None, lambda x: "0min"),
+}
+
+
+@register(masking='none')
+def aggregate(
+        data, field, flagger, freq, value_func, flag_func=np.nanmax, method="nagg", to_drop=None, **kwargs
+):
+    """
+    A method to "regularize" data by aggregating (resampling) data at a regular timestamp.
+
+    A series of data is considered "regular", if it is sampled regularly (i.e. with a uniform sampling rate).
+
+    The data will therefore get aggregated with a function, specified by the `value_func` parameter, and
+    the result gets projected onto the new timestamps with a method, specified by `method`.
+
+    The following method keywords are available:
+
+    * ``'nagg'``: (aggregation to nearest) - all values in the range (+/- freq/2) of a grid point get aggregated with
+      `value_func` and assigned to it. Flags get aggregated by `flag_func` and assigned the same way.
+    * ``'bagg'``: (backwards aggregation) - all values in a sampling interval get aggregated with `value_func` and the
+      result gets assigned to the last regular timestamp. Flags get aggregated by `flag_func` and assigned the same way.
+    * ``'fagg'``: (forward aggregation) - all values in a sampling interval get aggregated with `value_func` and the result
+      gets assigned to the next regular timestamp. Flags get aggregated by `flag_func` and assigned the same way.
+
+    Note that, if there is no valid (existing and not-na) data available in a sampling interval assigned to a regular
+    timestamp by the selected method, nan gets assigned to this timestamp. The associated flag will be of value
+    ``flagger.UNFLAGGED``.
+
+    Note: the method will likely and significantly alter values and shape of ``data[field]``. The original data is kept
+    in the data dios and assigned to the fieldname ``field + '_original'``.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-regularized.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    freq : str
+        The sampling frequency the data is to be aggregated (resampled) at.
+    value_func : Callable
+        The function you want to use for aggregation.
+    flag_func : Callable
+        The function you want to aggregate the flags with. It should be capable of operating on the flags dtype
+        (usually ordered categorical).
+    method : {'fagg', 'bagg', 'nagg'}, default 'nagg'
+        Specifies which intervals are to be aggregated for a certain timestamp (preceding, succeeding or
+        "surrounding" interval). See the description above for more details.
+    to_drop : {List[str], str}, default None
+        Flagtypes you want to drop before aggregation - effectively excluding values that are flagged
+        with a flag in to_drop from the aggregation process. Default results in flagger.BAD
+        values being dropped initially.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
+    """
+
+    data, flagger = copy(data, field, flagger, field + '_original')
+    data, flagger = resample(
+        data,
+        field,
+        flagger,
+        freq,
+        agg_func=value_func,
+        flag_agg_func=flag_func,
+        method=method,
+        empty_intervals_flag=flagger.UNFLAGGED,
+        to_drop=to_drop,
+        all_na_2_empty=True,
+        **kwargs,
+    )
+    return data, flagger
+
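+# A minimal usage sketch (the field name "SM2" and the 30 minute grid are made-up examples, not
+# part of this module): resample the series "SM2" to a regular half-hourly grid, averaging all
+# values that fall into the +/- 15 minute window around each grid point.
+#
+#   data, flagger = aggregate(data, "SM2", flagger, freq="30Min",
+#                             value_func=np.mean, method="nagg")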
+
+@register(masking='none')
+def linear(data, field, flagger, freq, to_drop=None, **kwargs):
+    """
+    A method to "regularize" data by interpolating linearly the data at regular timestamp.
+
+    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
+
+    Interpolated values will get assigned the worst flag within freq-range.
+
+    Note: the method will likely and significantly alter values and shape of ``data[field]``. The original data is kept
+    in the data dios and assigned to the fieldname ``field + '_original'``.
+
+    Note that the data only gets interpolated at those (regular) timestamps that have a valid (existing and
+    not-na) datapoint preceding them and one succeeding them within freq range.
+    Regular timestamps that do not meet this condition get nan assigned, and the associated flag will be of value
+    ``flagger.UNFLAGGED``.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-regularized.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    freq : str
+        An offset string. The frequency of the grid you want to interpolate your data at.
+    to_drop : {List[str], str}, default None
+        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
+        with a flag in to_drop from the interpolation process. Default results in flagger.BAD
+        values being dropped initially.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
+    """
+
+    data, flagger = copy(data, field, flagger, field + '_original')
+    data, flagger = interpolateIndex(
+        data, field, flagger, freq, "time", to_drop=to_drop, empty_intervals_flag=flagger.UNFLAGGED, **kwargs
+    )
+    return data, flagger
+
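+# A minimal usage sketch (hypothetical field name and frequency): interpolate "SM2" linearly
+# onto a regular 15 minute grid; the original series is kept as "SM2_original".
+#
+#   data, flagger = linear(data, "SM2", flagger, freq="15Min")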
+
+@register(masking='none')
+def interpolate(data, field, flagger, freq, method, order=1, to_drop=None, **kwargs):
+    """
+    A method to "regularize" data by interpolating the data at regular timestamp.
+
+    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
+
+    Interpolated values will get assigned the worst flag within freq-range.
+
+    All the interpolation methods from ``pandas.Series.interpolate`` are available and are selected by
+    the very same keywords.
+
+    Note that, to perform a timestamp-aware linear interpolation, you have to pass ``'time'`` as `method`,
+    and NOT ``'linear'``.
+
+    Note: the `method` will likely and significantly alter values and shape of ``data[field]``. The original data is
+    kept in the data dios and assigned to the fieldname ``field + '_original'``.
+
+    Note that the data only gets interpolated at those (regular) timestamps that have a valid (existing and
+    not-na) datapoint preceding them and one succeeding them within freq range.
+    Regular timestamps that do not meet this condition get nan assigned, and the associated flag will be of value
+    ``flagger.UNFLAGGED``.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-regularized.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    freq : str
+        An offset string. The frequency of the grid you want to interpolate your data at.
+    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
+        The interpolation method you want to apply.
+    order : int, default 1
+        If your selected interpolation method can be performed at different *orders* - here you pass the desired
+        order.
+    to_drop : {List[str], str}, default None
+        Flagtypes you want to drop before interpolation - effectively excluding values that are flagged
+        with a flag in `to_drop` from the interpolation process. Default results in ``flagger.BAD``
+        values being dropped initially.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
+    """
+
+    data, flagger = copy(data, field, flagger, field + '_original')
+    data, flagger = interpolateIndex(
+        data,
+        field,
+        flagger,
+        freq,
+        method=method,
+        inter_order=order,
+        to_drop=to_drop,
+        empty_intervals_flag=flagger.UNFLAGGED,
+        **kwargs,
+    )
+    return data, flagger
+
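+# A minimal usage sketch (hypothetical field name, frequency and method choice): regularize
+# "SM2" onto a 10 minute grid with a second order polynomial interpolation. A timestamp-aware
+# linear interpolation would use method="time" instead.
+#
+#   data, flagger = interpolate(data, "SM2", flagger, freq="10Min",
+#                               method="polynomial", order=2)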
+
+@register(masking='none')
+def mapToOriginal(data, field, flagger, method, to_drop=None, **kwargs):
+    """
+    The function "undoes" regularization, by regaining the original data and projecting the
+    flags calculated for the regularized data onto the original ones.
+
+    Afterwards the regularized data is removed from the data dios and ``'field'`` will be associated
+    with the original data "again".
+
+    Wherever the flags in the original data are "better" than the regularized flags projected on them,
+    they get overridden with the projected regularized flag value.
+
+    Which regularized flags are to be projected on which original flags, is controlled by the `method` parameter.
+
+    Generally, if you regularized with the method "X", you should pass the method "inverse_X" to the deharmonization.
+    If you regularized with an interpolation, the method "inverse_interpolation" would be the appropriate choice.
+    You should also pass the same drop flags keyword.
+
+    The deharmonization methods in detail:
+    ("original_flags" are associated with the original data that is to be regained,
+    "regularized_flags" are associated with the regularized data that is to be "deharmonized",
+    "freq" refers to the regularized data's sampling frequency)
+
+    * ``'inverse_nagg'``: all original_flags within the range *+/- freq/2* of a regularized_flag, get assigned this
+      regularized flag's value. (if regularized_flag > original_flag)
+    * ``'inverse_bagg'``: all original_flags succeeding a regularized_flag within the range of "freq", get assigned this
+      regularized flag's value. (if regularized_flag > original_flag)
+    * ``'inverse_fagg'``: all original_flags preceding a regularized_flag within the range of "freq", get assigned this
+      regularized flag's value. (if regularized_flag > original_flag)
+
+    * ``'inverse_interpolation'``: all original_flags within the range *+/- freq* of a regularized_flag, get assigned this
+      regularized flag's value (if regularized_flag > original_flag).
+
+    * ``'inverse_nshift'``: that original_flag within the range +/- *freq/2*, that is nearest to a regularized_flag,
+      gets the regularized flag's value. (if regularized_flag > original_flag)
+    * ``'inverse_bshift'``: that original_flag succeeding a regularized flag within the range freq, that is nearest to a
+      regularized_flag, gets assigned this regularized flag's value. (if regularized_flag > original_flag)
+    * ``'inverse_fshift'``: that original_flag preceding a regularized flag within the range freq, that is nearest to a
+      regularized_flag, gets assigned this regularized flag's value. (if regularized_flag > original_flag)
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-deharmonized.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift',
+            'inverse_interpolation'}
+        The method used for projection of regularized flags onto original flags. See description above for more
+        details.
+    to_drop : {List[str], str}, default None
+        Flagtypes you want to drop before flag projection - effectively excluding values that are flagged
+        with a flag in to_drop from the projection process. Default results in flagger.BAD
+        values being dropped initially.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
+    """
+
+    newfield = str(field) + '_original'
+    data, flagger = reindexFlags(data, newfield, flagger, method, source=field, to_drop=to_drop, **kwargs)
+    data, flagger = drop(data, field, flagger)
+    data, flagger = rename(data, newfield, flagger, field)
+    return data, flagger
+
+
+@register(masking='none')
+def shift(data, field, flagger, freq, method='nshift', to_drop=None, empty_intervals_flag=None, freq_check=None,
+          **kwargs):
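+    """
+    Wrapper around ``_shift``: before shifting, a copy of the original `field` series is kept under
+    ``field + '_original'``, so that the unshifted data stays available. See ``_shift`` for the parameter
+    descriptions.
+    """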
+
+    data, flagger = copy(data, field, flagger, field + '_original')
+    data, flagger = _shift(data, field, flagger, freq, method=method, to_drop=to_drop,
+                          empty_intervals_flag=empty_intervals_flag, freq_check=freq_check, **kwargs)
+    return data, flagger
+
+
+@register(masking='none')
+def _shift(data, field, flagger, freq, method='nshift', to_drop=None, empty_intervals_flag=None, freq_check=None,
+          **kwargs):
+    """
+    Function to shift data points to regular (equidistant) timestamps.
+    Values get shifted according to the keyword passed to the `method` parameter.
+
+    * ``'nshift'``: every grid point gets assigned the nearest value in its range. (range = +/- 0.5 * `freq`)
+    * ``'bshift'``: every grid point gets assigned its first succeeding value - if there is one available in the
+      succeeding sampling interval.
+    * ``'fshift'``: every grid point gets assigned its last preceding value - if there is one available in
+      the preceding sampling interval.
+
+    Note: all data NaNs get excluded from shifting by default. If `to_drop` is ``None``, all *BAD* flagged values get
+    excluded as well.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-shifted.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    freq : str
+        A frequency offset string that will be interpreted as the sampling rate you want the data to be shifted to.
+    method : {'fshift', 'bshift', 'nshift'}, default 'nshift'
+        Specifies if datapoints get propagated forwards, backwards or to the nearest grid timestamp. See function
+        description for more details.
+    empty_intervals_flag : {None, str}, default None
+        A flag you want to assign to grid points where no values are available to be shifted to.
+        The default triggers ``flagger.UNFLAGGED`` to be assigned.
+    to_drop : {None, str, List[str]}, default None
+        Flags that refer to values you want to drop before shifting - effectively excluding values that are flagged
+        with a flag in `to_drop` from the shifting process. The default (``None``) results in ``flagger.BAD``
+        values being dropped initially.
+    freq_check : {None, 'check', 'auto'}, default None
+
+        * ``None``: do not validate frequency-string passed to `freq`
+        * ``'check'``: estimate the frequency and log a warning if the estimate mismatches the frequency string passed to `freq`,
+          or if no uniform sampling rate could be estimated
+        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
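+
+    Examples
+    --------
+    A minimal usage sketch (``data`` and ``flagger`` are assumed to be an existing dios.DictOfSeries and an
+    initialised flagger holding an irregularly sampled column ``"SM2"``; the public ``shift`` wrapper keeps a
+    copy of the unshifted series under ``"SM2_original"``):
+
+    >>> data, flagger = shift(data, "SM2", flagger, freq="15Min", method="nshift")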
+    """
+    data = data.copy()
+    datcol = data[field]
+    flagscol = flagger.getFlags(field)
+
+    if empty_intervals_flag is None:
+        empty_intervals_flag = flagger.UNFLAGGED
+
+    drop_mask = dropper(field, to_drop, flagger, flagger.BAD)
+    drop_mask |= datcol.isna()
+    datcol[drop_mask] = np.nan
+    datcol.dropna(inplace=True)
+    freq = evalFreqStr(freq, freq_check, datcol.index)
+    if datcol.empty:
+        data[field] = datcol
+        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
+        return data, flagger
+
+    flagscol.drop(drop_mask[drop_mask].index, inplace=True)
+
+    datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
+    flagscol = shift2Freq(flagscol, method, freq, fill_value=empty_intervals_flag)
+    data[field] = datcol
+    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+    flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
+    return data, flagger
+
+
+@register(masking='field')
+def resample(
+    data,
+    field,
+    flagger,
+    freq,
+    agg_func=np.mean,
+    method="bagg",
+    max_invalid_total_d=np.inf,
+    max_invalid_consec_d=np.inf,
+    max_invalid_consec_f=np.inf,
+    max_invalid_total_f=np.inf,
+    flag_agg_func=max,
+    empty_intervals_flag=None,
+    to_drop=None,
+    all_na_2_empty=False,
+    freq_check=None,
+    **kwargs
+):
+    """
+    Function to resample the data. Afterwards the data will be sampled at regular (equidistant) timestamps
+    (or grid points). The values in each sampling interval get aggregated with a function, specified by the
+    'agg_func' parameter, and the result gets projected onto the new timestamps with a method, specified by
+    "method". The following methods (keywords) are available:
+
+    * ``'nagg'``: all values in the range (+/- `freq`/2) of a grid point get aggregated with agg_func and assigned to it.
+    * ``'bagg'``: all values in a sampling interval get aggregated with agg_func and the result gets assigned to the last
+      grid point.
+    * ``'fagg'``: all values in a sampling interval get aggregated with agg_func and the result gets assigned to the next
+      grid point.
+
+
+    Note that, if possible, functions passed to agg_func will get mapped internally onto pandas.resample methods,
+    which results in a reasonable performance boost - however, for this to work, you should pass functions that have
+    the ``__name__`` attribute initialised with the name of the corresponding method.
+    Furthermore, you should not pass numpy's nan-functions
+    (``nansum``, ``nanmean``, ...) because those, for example, have ``__name__ == 'nansum'`` and will thus not
+    trigger ``resample.func()``, but the slower ``resample.apply(nanfunc)``. Also, internally, no NaNs get passed to
+    the functions anyway, so there is no point in passing the nan-functions.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-resampled.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    freq : str
+        An Offset String, that will be interpreted as the frequency you want to resample your data with.
+    agg_func : Callable
+        The function you want to use for aggregation.
+    method: {'fagg', 'bagg', 'nagg'}, default 'bagg'
+        Specifies which intervals are to be aggregated for a certain timestamp (preceding, succeeding or
+        "surrounding" interval). See description above for more details.
+    max_invalid_total_d : {np.inf, int}, default np.inf
+        Maximum number of invalid (NaN) datapoints allowed per resampling interval. If max_invalid_total_d is
+        exceeded, the interval gets resampled to NaN. By default (``np.inf``), there is no bound on the number of NaN
+        values in an interval, and only intervals containing ONLY NaN values, or no values at all,
+        get projected onto NaN.
+    max_invalid_consec_d : {np.inf, int}, default np.inf
+        Maximum number of consecutive invalid (NaN) data points allowed per resampling interval.
+        If max_invalid_consec_d is exceeded, the interval gets resampled to NaN. By default (``np.inf``),
+        there is no bound on the number of consecutive NaN values in an interval, and only intervals
+        containing ONLY NaN values, or no values at all, get projected onto NaN.
+    max_invalid_total_f : {np.inf, int}, default np.inf
+        Same as `max_invalid_total_d`, only applying to the flags. The flag regarded as "invalid" value
+        is the one passed to empty_intervals_flag (default=``flagger.BAD``).
+        This is also the flag assigned to invalid/empty intervals.
+    max_invalid_consec_f : {np.inf, int}, default np.inf
+        Same as `max_invalid_consec_d`, only applying to the flags. The flag regarded as "invalid" value is the one
+        passed to empty_intervals_flag (default=``flagger.BAD``). This is also the flag assigned to invalid/empty
+        intervals.
+    flag_agg_func : Callable, default: max
+        The function you want to aggregate the flags with. It should be capable of operating on the flags dtype
+        (usually ordered categorical).
+    empty_intervals_flag : {None, str}, default None
+        A flag you want to assign to invalid intervals. Invalid are those intervals that contain NaN values only,
+        or no values at all. Furthermore, the empty_intervals_flag is the flag serving as "invalid" identifier when
+        checking the `max_invalid_total_f` and `max_invalid_consec_f` patterns. The default triggers ``flagger.BAD``
+        to be assigned.
+    to_drop : {None, str, List[str]}, default None
+        Flags that refer to values you want to drop before resampling - effectively excluding values that are flagged
+        with a flag in `to_drop` from the resampling process - this means that they also will not be counted in the
+        `max_consec`/`max_total` evaluation. ``to_drop = None`` results in NO flags being dropped initially.
+    freq_check : {None, 'check', 'auto'}, default None
+
+        * ``None``: do not validate frequency-string passed to `freq`
+        * ``'check'``: estimate the frequency and log a warning if the estimate mismatches the frequency string passed to `freq`, or
+          if no uniform sampling rate could be estimated
+        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values and shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
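+
+    Examples
+    --------
+    A minimal usage sketch (``data`` and ``flagger`` are assumed to be an existing dios.DictOfSeries and an
+    initialised flagger holding a column ``"SM2"``). Passing ``np.sum`` (and not ``np.nansum``) lets the
+    aggregation get mapped onto the fast ``resample.sum`` method:
+
+    >>> import numpy as np
+    >>> data, flagger = resample(data, "SM2", flagger, freq="1H", agg_func=np.sum, method="bagg")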
+    """
+
+    data = data.copy()
+    datcol = data[field]
+    flagscol = flagger.getFlags(field)
+    if empty_intervals_flag is None:
+        empty_intervals_flag = flagger.BAD
+
+    drop_mask = dropper(field, to_drop, flagger, [])
+    datcol.drop(datcol[drop_mask].index, inplace=True)
+    freq = evalFreqStr(freq, freq_check, datcol.index)
+    flagscol.drop(flagscol[drop_mask].index, inplace=True)
+    if all_na_2_empty:
+        if datcol.dropna().empty:
+            datcol = pd.Series([], index=pd.DatetimeIndex([]), name=field)
+
+    if datcol.empty:
+        # for consistency reasons - return empty data/flags column when there is no valid data left
+        # after filtering.
+        data[field] = datcol
+        reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+        flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
+        return data, flagger
+
+    datcol = aggregate2Freq(
+        datcol,
+        method,
+        freq,
+        agg_func,
+        fill_value=np.nan,
+        max_invalid_total=max_invalid_total_d,
+        max_invalid_consec=max_invalid_consec_d,
+    )
+    flagscol = aggregate2Freq(
+        flagscol,
+        method,
+        freq,
+        flag_agg_func,
+        fill_value=empty_intervals_flag,
+        max_invalid_total=max_invalid_total_f,
+        max_invalid_consec=max_invalid_consec_f,
+    )
+
+    # data/flags reshaping:
+    data[field] = datcol
+    reshaped_flagger = flagger.initFlags(datcol).setFlags(field, flag=flagscol, force=True, inplace=True, **kwargs)
+    flagger = flagger.slice(drop=field).merge(reshaped_flagger, subset=[field], inplace=True)
+    return data, flagger
+
+
+@register(masking='field')
+def reindexFlags(data, field, flagger, method, source, freq=None, to_drop=None, freq_check=None, **kwargs):
+
+    """
+    The function projects flags of "source" onto flags of "field". Wherever the "field" flags are "better" than the
+    source flags projected onto them, they get overridden with the associated source flag value.
+
+    Which "field" flags are projected onto which source flags is controlled by the "method" and "freq"
+    parameters.
+
+    method: (field_flags are associated with "field", source_flags are associated with "source")
+
+    * ``'inverse_nagg'``: all field_flags within the range +/- freq/2 of a source_flag get assigned this source
+      flag's value. (if source_flag > field_flag)
+    * ``'inverse_bagg'``: all field_flags succeeding a source_flag within the range of "freq" get assigned this
+      source flag's value. (if source_flag > field_flag)
+    * ``'inverse_fagg'``: all field_flags preceding a source_flag within the range of "freq" get assigned this
+      source flag's value. (if source_flag > field_flag)
+
+    * ``'inverse_interpolation'``: all field_flags within the range +/- freq of a source_flag get assigned this
+      source flag's value. (if source_flag > field_flag)
+
+    * ``'inverse_nshift'``: the field_flag within the range +/- freq/2 that is nearest to a source_flag gets the
+      source flag's value. (if source_flag > field_flag)
+    * ``'inverse_bshift'``: the field_flag succeeding a source_flag within the range freq that is nearest to it
+      gets assigned this source flag's value. (if source_flag > field_flag)
+    * ``'inverse_fshift'``: the field_flag preceding a source_flag within the range freq that is nearest to it
+      gets assigned this source flag's value. (if source_flag > field_flag)
+
+    * ``'match'``: any field_flag with a timestamp matching a source_flag's timestamp gets this source_flag's value.
+      (if source_flag > field_flag)
+
+    Note: to undo or backtrack a resampling/shifting/interpolation that has been performed with a certain method,
+    you can just pass the associated "inverse" method. You should also pass the same `to_drop` keyword.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column, you want to project the source-flags onto.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift'}
+        The method used for projection of source flags onto field flags. See description above for more details.
+    source : str
+        The fieldname of the source column of the flag projection.
+    freq : {None, str}, default None
+        The freq determines the projection range for the projection method. See the above description for more details.
+        By default (``None``), the sampling frequency of `source` is used.
+    to_drop : {None, str, List[str]}, default None
+        Flags referring to values that are to be dropped before the flag projection. Relevant only when projecting
+        with an inverted shift method. By default, ``flagger.BAD`` is listed.
+    freq_check : {None, 'check', 'auto'}, default None
+        * ``None``: do not validate the frequency string passed to `freq`
+        * ``'check'``: estimate the frequency and log a warning if the estimate mismatches the frequency string
+          passed to `freq`, or if no uniform sampling rate could be estimated
+        * ``'auto'``: estimate the frequency and use the estimate. (Ignores the `freq` parameter.)
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values and shape may have changed relative to the flagger input.
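+
+    Examples
+    --------
+    A minimal usage sketch, projecting flags of a column ``"SM2"`` - previously regularized with a nearest
+    shift - back onto an (assumed) original-resolution column named ``"SM2_raw"``:
+
+    >>> data, flagger = reindexFlags(data, "SM2_raw", flagger, method="inverse_nshift", source="SM2")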
+    """
+    flagscol, metacols = flagger.getFlags(source, full=True)
+    if flagscol.empty:
+        return data, flagger
+    target_datcol = data[field]
+    target_flagscol, target_metacols = flagger.getFlags(field, full=True)
+
+    if (freq is None) and (method != "match"):
+        freq_check = 'auto'
+
+    freq = evalFreqStr(freq, freq_check, flagscol.index)
+
+    if method[-13:] == "interpolation":
+        backprojected = flagscol.reindex(target_flagscol.index, method="bfill", tolerance=freq)
+        fwrdprojected = flagscol.reindex(target_flagscol.index, method="ffill", tolerance=freq)
+        b_replacement_mask = (backprojected > target_flagscol) & (backprojected >= fwrdprojected)
+        f_replacement_mask = (fwrdprojected > target_flagscol) & (fwrdprojected > backprojected)
+        target_flagscol.loc[b_replacement_mask] = backprojected.loc[b_replacement_mask]
+        target_flagscol.loc[f_replacement_mask] = fwrdprojected.loc[f_replacement_mask]
+
+        backprojected_meta = {}
+        fwrdprojected_meta = {}
+        for meta_key in target_metacols.keys():
+            backprojected_meta[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method='bfill',
+                                                                      tolerance=freq)
+            fwrdprojected_meta[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method='ffill',
+                                                                      tolerance=freq)
+            target_metacols[meta_key].loc[b_replacement_mask] = backprojected_meta[meta_key].loc[b_replacement_mask]
+            target_metacols[meta_key].loc[f_replacement_mask] = fwrdprojected_meta[meta_key].loc[f_replacement_mask]
+
+    if method[-3:] == "agg" or method == "match":
+        # Aggregation - Inversion
+        projection_method = METHOD2ARGS[method][0]
+        tolerance = METHOD2ARGS[method][1](freq)
+        flagscol = flagscol.reindex(target_flagscol.index, method=projection_method, tolerance=tolerance)
+        replacement_mask = flagscol > target_flagscol
+        target_flagscol.loc[replacement_mask] = flagscol.loc[replacement_mask]
+        for meta_key in target_metacols.keys():
+            metacols[meta_key] = metacols[meta_key].reindex(target_metacols[meta_key].index, method=projection_method,
+                                                            tolerance=tolerance)
+            target_metacols[meta_key].loc[replacement_mask] = metacols[meta_key].loc[replacement_mask]
+
+    if method[-5:] == "shift":
+        # NOTE: although inverting a simple shift seems to be a less complex operation, it has quite some
+        # code assigned to it and appears to be more verbose than inverting aggregation -
+        # that owes itself to the problem of BAD/invalid values blocking a proper
+        # shift inversion and having to be sorted out before the shift inversion and re-inserted afterwards.
+        #
+        # starting with the dropping and its memorization:
+
+        drop_mask = dropper(field, to_drop, flagger, flagger.BAD)
+        drop_mask |= target_datcol.isna()
+        target_flagscol_drops = target_flagscol[drop_mask]
+        target_flagscol.drop(drop_mask[drop_mask].index, inplace=True)
+
+        # shift inversion
+        projection_method = METHOD2ARGS[method][0]
+        tolerance = METHOD2ARGS[method][1](freq)
+        flags_merged = pd.merge_asof(
+            flagscol,
+            pd.Series(target_flagscol.index.values, index=target_flagscol.index, name="pre_index"),
+            left_index=True,
+            right_index=True,
+            tolerance=tolerance,
+            direction=projection_method,
+        )
+        flags_merged.dropna(subset=["pre_index"], inplace=True)
+        flags_merged = flags_merged.set_index(["pre_index"]).squeeze()
+
+        # write flags to target
+        replacement_mask = flags_merged > target_flagscol.loc[flags_merged.index]
+        target_flagscol.loc[replacement_mask[replacement_mask].index] = flags_merged.loc[replacement_mask]
+
+        # reinsert drops
+        target_flagscol = target_flagscol.reindex(target_flagscol.index.join(target_flagscol_drops.index, how="outer"))
+        target_flagscol.loc[target_flagscol_drops.index] = target_flagscol_drops.values
+
+        for meta_key in target_metacols.keys():
+            target_metadrops = target_metacols[meta_key][drop_mask]
+            target_metacols[meta_key].drop(drop_mask[drop_mask].index, inplace=True)
+            meta_merged = pd.merge_asof(
+                metacols[meta_key],
+                pd.Series(target_metacols[meta_key].index.values, index=target_metacols[meta_key].index,
+                          name="pre_index"),
+                left_index=True,
+                right_index=True,
+                tolerance=tolerance,
+                direction=projection_method,
+            )
+            meta_merged.dropna(subset=["pre_index"], inplace=True)
+            meta_merged = meta_merged.set_index(["pre_index"]).squeeze()
+            # write meta to target
+            target_metacols[meta_key][replacement_mask[replacement_mask].index] = meta_merged[replacement_mask]
+            # reinsert drops
+            target_metacols[meta_key] = target_metacols[meta_key].reindex(
+                target_metacols[meta_key].index.join(target_metadrops.index, how="outer"))
+            target_metacols[meta_key].loc[target_metadrops.index] = target_metadrops.values
+
+    flagger = flagger.setFlags(field, flag=target_flagscol, with_extra=True, **target_metacols)
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/residues.py b/saqc/funcs/residues.py
new file mode 100644
index 0000000000000000000000000000000000000000..864c5c9554b6122f938a54e58305bd766f6f7c00
--- /dev/null
+++ b/saqc/funcs/residues.py
@@ -0,0 +1,99 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+import numpy as np
+
+from saqc.core.register import register
+from saqc.funcs.rolling import roll
+from saqc.funcs.curvefit import fitPolynomial
+
+
+@register(masking='field')
+def calculatePolynomialResidues(data, field, flagger, winsz, polydeg, numba="auto", eval_flags=True, min_periods=0,
+                                **kwargs):
+    """
+    Function fits a polynomial model to the data and returns the residues.
+
+    The residue for value x is calculated by fitting a polynomial of degree "polydeg" to a data slice
+    of size "winsz", which has x at its center.
+
+    Note that the residues will be stored to the `field` column of the input data, so that the original data the
+    polynomial is fitted to gets overridden.
+
+    Note that, if data[field] is not aligned to an equidistant frequency grid, the window size passed
+    has to be an offset string. Also, the numba boost options don't apply for irregularly sampled
+    timeseries.
+
+    Note that calculating the residues tends to be quite costly, because a function fit is performed for every
+    sample. To improve performance, consider the following possibilities:
+
+    In case your data is sampled at an equidistant frequency grid:
+
+    (1) If you know your data to have no significant number of missing values, or if you do not want to
+        calculate residues for windows containing missing values anyway, performance can be increased by setting
+        min_periods=winsz.
+
+    (2) If your data consists of more than around 200000 samples, setting numba=True will boost the
+        calculations up to a factor of 5 (for sample sizes > 300000) - however, for lower sample sizes,
+        numba will slow down the calculations, also up to a factor of 5, for sample sizes < 50000.
+        By default (numba='auto'), numba is set to True if the data sample size exceeds 200000.
+
+    In case your data is not sampled at an equidistant frequency grid:
+
+    (1) Harmonization/resampling of your data will have a noticeable impact on the polynomial fitting's performance -
+        since the numba boost doesn't apply for irregularly sampled data in the current implementation.
+
+    Note that in the current implementation, the initial and final winsz/2 values do not get fitted.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-modelled.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    winsz : {str, int}
+        The size of the window you want to use for fitting. If an integer is passed, the size
+        refers to the number of periods for every fitting window. If an offset string is passed,
+        the size refers to the total temporal extension. The window will be centered around the value-to-be-fitted.
+        For regularly sampled timeseries, the period number will be cast down to an odd number if
+        even.
+    polydeg : int
+        The degree of the polynomial used for fitting
+    numba : {True, False, "auto"}, default "auto"
+        Whether or not to apply numba's just-in-time compilation onto the poly fit function. This will noticeably
+        increase the speed of calculation if the sample size is sufficiently high.
+        If "auto" is selected, numba-compatible fit functions get applied for data consisting of > 200000 samples.
+    eval_flags : bool, default True
+        Whether or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
+        flag present in the interval the data for its calculation was obtained from.
+    min_periods : {int, np.nan}, default 0
+        The minimum number of periods that has to be available in every value's fitting window for the polynomial
+        fit to be performed. If there are not enough values, np.nan gets assigned. Default (0) results in fitting
+        regardless of the number of values present (results in overfitting for too sparse intervals). To automatically
+        set the minimum number of periods to the number of values in an offset defined window size, pass np.nan.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flags values may have changed relative to the flagger input.
+
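+    Examples
+    --------
+    A minimal usage sketch (``data`` and ``flagger`` are assumed to be an existing dios.DictOfSeries and an
+    initialised flagger holding a column ``"SM2"``), fitting a second-degree polynomial in a centered
+    3-hour window around every value:
+
+    >>> data, flagger = calculatePolynomialResidues(data, "SM2", flagger, winsz="3h", polydeg=2)
+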
+    """
+    data, flagger = fitPolynomial(data, field, flagger, winsz, polydeg, numba=numba, eval_flags=eval_flags,
+                                  min_periods=min_periods, _return_residues=True, **kwargs)
+
+    return data, flagger
+
+
+@register(masking='field')
+def calculateRollingResidues(data, field, flagger, winsz, func=np.mean, eval_flags=True, min_periods=0, center=True,
+                             **kwargs):
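+    """
+    Models the data with a rolling aggregation (see ``roll``) and stores the residues - i.e. the difference
+    between the rolling aggregate and the original values - to `field`, overriding the original data.
+    See the documentation of ``roll`` for the parameter descriptions.
+    """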
+
+    data, flagger = roll(data, field, flagger, winsz, func=func, eval_flags=eval_flags,
+                         min_periods=min_periods, center=center, _return_residues=True, **kwargs)
+
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/rolling.py b/saqc/funcs/rolling.py
new file mode 100644
index 0000000000000000000000000000000000000000..47966bd33141876c6c677f4ec346a6d5e07241ca
--- /dev/null
+++ b/saqc/funcs/rolling.py
@@ -0,0 +1,117 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+import numpy as np
+import pandas as pd
+
+from saqc.core.register import register
+
+
+@register(masking='field')
+def roll(data, field, flagger, winsz, func=np.mean, eval_flags=True, min_periods=0, center=True,
+         _return_residues=False, **kwargs):
+    """
+        Models the data with a rolling aggregation (by default, the rolling mean) and overrides the `field` column
+        with the result.
+
+        Note that the modelling result will be stored to the `field` column of the input data, so that the data
+        that is modelled gets overridden.
+
+        Parameters
+        ----------
+        data : dios.DictOfSeries
+            A dictionary of pandas.Series, holding all the data.
+        field : str
+            The fieldname of the column, holding the data-to-be-modelled.
+        flagger : saqc.flagger.BaseFlagger
+            A flagger object, holding flags and additional information related to `data`.
+        winsz : {int, str}
+            The size of the window you want to roll with. If an integer is passed, the size
+            refers to the number of periods for every fitting window. If an offset string is passed,
+            the size refers to the total temporal extension.
+            For regularly sampled timeseries, the period number will be cast down to an odd number if
+            center = True.
+        func : Callable[np.array, float], default np.mean
+            Function to apply on the rolling window and obtain the curve fit value.
+        eval_flags : bool, default True
+            Whether or not to assign new flags to the calculated residuals. If True, a residual gets assigned the worst
+            flag present in the interval the data for its calculation was obtained from.
+            Currently not implemented in combination with not-harmonized timeseries.
+        min_periods : int, default 0
+            The minimum number of periods that has to be available in every value's window for the aggregation
+            to be performed. If there are not enough values, np.nan gets assigned. The default (0) results in fitting
+            regardless of the number of values present.
+        center : bool, default True
+            Whether or not to center the window, that the mean is calculated over, around the reference value. If False,
+            the reference value is placed at the right end of the window (classic rolling mean with lag).
+
+        Returns
+        -------
+        data : dios.DictOfSeries
+            A dictionary of pandas.Series, holding all the data.
+            Data values may have changed relative to the data input.
+        flagger : saqc.flagger.BaseFlagger
+            The flagger object, holding flags and additional information related to `data`.
+            Flags values may have changed relative to the flagger input.
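+
+        Examples
+        --------
+        A minimal usage sketch (``data`` and ``flagger`` are assumed to be an existing dios.DictOfSeries and an
+        initialised flagger holding a regularly sampled column ``"SM2"``), replacing the series by a centered
+        rolling median over a 6 hour window:
+
+        >>> import numpy as np
+        >>> data, flagger = roll(data, "SM2", flagger, winsz="6h", func=np.median, center=True)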
+        """
+
+    data = data.copy()
+    to_fit = data[field]
+    flags = flagger.getFlags(field)
+    if to_fit.empty:
+        return data, flagger
+
+    # starting with the annoying case: finding the rolling interval centers of not-harmonized input time series:
+    if (to_fit.index.freqstr is None) and center:
+        if isinstance(winsz, int):
+            raise NotImplementedError(
+                "Integer based window size is not supported for not-harmonized"
+                'sample series when rolling with "center=True".'
+            )
+        # get interval centers
+        centers = np.floor((to_fit.rolling(pd.Timedelta(winsz) / 2, closed="both", min_periods=min_periods).count()))
+        centers = centers.drop(centers[centers.isna()].index)
+        centers = centers.astype(int)
+        roller = to_fit.rolling(pd.Timedelta(winsz), closed="both", min_periods=min_periods)
+        try:
+            means = getattr(roller, func.__name__)()
+        except AttributeError:
+            means = to_fit.rolling(pd.Timedelta(winsz), closed="both", min_periods=min_periods).apply(func)
+
+        def center_func(x, y=centers):
+            pos = x.index[int(len(x) - y[x.index[-1]])]
+            return y.index.get_loc(pos)
+
+        centers_iloc = centers.rolling(winsz, closed="both").apply(center_func, raw=False).astype(int)
+        temp = means.copy()
+        for k in centers_iloc.iteritems():
+            means.iloc[k[1]] = temp[k[0]]
+        # the last values are invalid, due to structural reasons:
+        means[means.index[centers_iloc[-1]]: means.index[-1]] = np.nan
+
+    # everything is easier if data[field] is harmonized:
+    else:
+        if isinstance(winsz, str):
+            winsz = int(np.floor(pd.Timedelta(winsz) / pd.Timedelta(to_fit.index.freqstr)))
+        if (winsz % 2 == 0) & center:
+            winsz = int(winsz - 1)
+
+        roller = to_fit.rolling(window=winsz, center=center, closed="both")
+        try:
+            means = getattr(roller, func.__name__)()
+        except AttributeError:
+            means = to_fit.rolling(window=winsz, center=center, closed="both").apply(func)
+
+    # by default, write the rolling aggregates back to `field`; when called for residues calculation,
+    # write the difference between aggregate and original value instead
+    if _return_residues:
+        means = means - to_fit
+
+    data[field] = means
+    if eval_flags:
+        num_cats, codes = flags.factorize()
+        num_cats = pd.Series(num_cats, index=flags.index).rolling(winsz, center=True, min_periods=min_periods).max()
+        nan_samples = num_cats[num_cats.isna()]
+        num_cats.drop(nan_samples.index, inplace=True)
+        to_flag = pd.Series(codes[num_cats.astype(int)], index=num_cats.index)
+        to_flag = to_flag.align(nan_samples)[0]
+        to_flag[nan_samples.index] = flags[nan_samples.index]
+        flagger = flagger.setFlags(field, to_flag.values, **kwargs)
+
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/scores.py b/saqc/funcs/scores.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d776ba69de8f3bf548bc9af44022b61f7ea9eb4
--- /dev/null
+++ b/saqc/funcs/scores.py
@@ -0,0 +1,145 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+import numpy as np
+import pandas as pd
+
+from saqc.lib import ts_operators as ts_ops
+from saqc.lib.tools import toSequence
+from saqc.core.register import register
+
+
+@register(masking='all')
+def assignKNNScore(data, field, flagger, fields, n_neighbors=10, trafo=lambda x: x, trafo_on_partition=True,
+                   scoring_func=np.sum, target_field='kNN_scores', partition_freq=np.inf, partition_min=2,
+                   kNN_algorithm='ball_tree', metric='minkowski', p=2, radius=None):
+    """
+    Score datapoints by an aggregation of the distances to their k nearest neighbors.
+
+    The function is a wrapper around the NearestNeighbors method from python's sklearn library (see reference [1]).
+
+    The steps taken to calculate the scores are as follows:
+
+    1. All the timeseries named in `fields` are combined to one feature space by an *inner* join on their datetime
+       indexes. Thus, only samples that share timestamps across all fields will be included in the feature space.
+    2. Any datapoint/sample where one or more of the features is invalid (=np.nan) will get excluded.
+    3. For every data point, the distance to its `n_neighbors` nearest neighbors is calculated by applying the
+       metric `metric` at grade `p` onto the feature space. The defaults result in the euclidean metric being applied.
+       If `radius` is not None, it sets the upper bound of distance for a neighbor to be considered one of the
+       `n_neighbors` nearest neighbors. Furthermore, the `partition_freq` argument determines which samples can be
+       included into a datapoint's nearest neighbors list, by segmenting the data into chunks of specified temporal
+       extension and feeding those chunks to the kNN algorithm separately.
+    4. For every datapoint, the calculated nearest neighbors distances get aggregated to a score by the function
+       passed to `scoring_func`. The default, ``sum``, just sums up the distances.
+    5. The resulting timeseries of scores gets assigned to the field target_field.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The reference variable, the deviation from which determines the flagging.
+    flagger : saqc.flagger
+        A flagger object, holding flags and additional information related to `data`.
+    fields : list of str
+        The fieldnames of the columns that get combined to the feature space the kNN scoring is calculated on.
+    n_neighbors : int, default 10
+        The number of nearest neighbors whose distances are comprised in every datapoint's scoring calculation.
+    trafo : Callable[np.array, np.array], default lambda x: x
+        Transformation to apply on the variables before kNN scoring
+    trafo_on_partition : bool, default True
+        Whether or not to apply the transformation `trafo` onto the whole variable or onto each partition separately.
+    scoring_func : Callable[numpy.array, float], default np.sum
+        A function that assigns a score to every one dimensional array, containing the distances
+        to every datapoints `n_neighbors` nearest neighbors.
+    target_field : str, default 'kNN_scores'
+        Name of the field, where the resulting scores should be written to.
+    partition_freq : {np.inf, float, str}, default np.inf
+        Determines the segmentation of the data into partitions, the kNN algorithm is
+        applied onto individually.
+
+        * ``np.inf``: Apply Scoring on whole data set at once
+        * ``x`` > 0 : Apply scoring on successive data chunks of periods length ``x``
+        * Offset String : Apply scoring on successive partitions of temporal extension matching the passed offset
+          string
+
+    partition_min : int, default 2
+        The minimum number of periods that have to be present in a partition for the kNN scoring
+        to be applied. If the number of periods present is below `partition_min`, the score for the
+        datapoints in that partition will be np.nan.
+    kNN_algorithm : {'ball_tree', 'kd_tree', 'brute', 'auto'}, default 'ball_tree'
+        The search algorithm to find each datapoint's k nearest neighbors.
+        The keyword just gets passed on to the underlying sklearn method.
+        See reference [1] for more information on the algorithm.
+    metric : str, default 'minkowski'
+        The metric the distances to any datapoint's neighbors are computed with. The default of `metric`
+        together with the default of `p` results in the euclidean metric being applied.
+        The keyword just gets passed on to the underlying sklearn method.
+        See reference [1] for more information on the algorithm.
+    p : int, default 2
+        The grade of the metric specified by the parameter `metric`.
+        The keyword just gets passed on to the underlying sklearn method.
+        See reference [1] for more information on the algorithm.
+    radius : {None, float}, default None
+        If the radius is not None, only the distances to neighbors that lie within the range specified by `radius`
+        are comprised in the scoring aggregation.
+        The scoring method passed must be capable of handling np.nan values - since, for every neighbor missing
+        within the `radius` range to complete the list of the distances to the `n_neighbors` nearest neighbors,
+        one np.nan value gets appended to the list passed to the scoring method.
+        The keyword just gets passed on to the underlying sklearn method.
+        See reference [1] for more information on the algorithm.
+
+    References
+    ----------
+
+    [1] https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html
+
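+    Examples
+    --------
+    A minimal usage sketch (``data`` and ``flagger`` are assumed to be an existing dios.DictOfSeries and an
+    initialised flagger holding the columns ``"SM1"`` and ``"SM2"``), scoring the two variables jointly and
+    writing the scores to the default column ``"kNN_scores"``:
+
+    >>> data, flagger = assignKNNScore(data, "SM1", flagger, fields=["SM1", "SM2"], n_neighbors=5,
+    ...                                partition_freq="30D")
+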
+    """
+    data = data.copy()
+    fields = toSequence(fields)
+    val_frame = data[fields]
+    score_index = val_frame.index_of("shared")
+    score_ser = pd.Series(np.nan, index=score_index, name=target_field)
+    val_frame = val_frame.loc[val_frame.index_of("shared")].to_df()
+    val_frame.dropna(inplace=True)
+    if not trafo_on_partition:
+        val_frame = val_frame.transform(trafo)
+
+    if val_frame.empty:
+        return data, flagger
+
+    # partitioning
+    if not partition_freq:
+        partition_freq = val_frame.shape[0]
+
+    if isinstance(partition_freq, str):
+        grouper = pd.Grouper(freq=partition_freq)
+    else:
+        grouper = pd.Series(data=np.arange(0, val_frame.shape[0]), index=val_frame.index)
+        grouper = grouper.transform(lambda x: int(np.floor(x / partition_freq)))
+
+    partitions = val_frame.groupby(grouper)
+
+    for _, partition in partitions:
+        if partition.empty or (partition.shape[0] < partition_min):
+            continue
+        if trafo_on_partition:
+            partition = partition.transform(trafo)
+            partition.dropna(inplace=True)
+
+        sample_size = partition.shape[0]
+        nn_neighbors = min(n_neighbors - 1, max(sample_size, 2))
+        dist, *_ = ts_ops.kNN(partition.values, nn_neighbors, algorithm=kNN_algorithm, metric=metric, p=p,
+                              radius=radius)
+        try:
+            resids = getattr(dist, scoring_func.__name__)(axis=1)
+        except AttributeError:
+            resids = np.apply_along_axis(scoring_func, 1, dist)
+
+        score_ser[partition.index] = resids
+
+    score_flagger = flagger.initFlags(score_ser)
+
+    if target_field in flagger._flags.columns:
+        flagger = flagger.slice(drop=target_field)
+
+    flagger = flagger.merge(score_flagger)
+    data[target_field] = score_ser
+    return data, flagger
\ No newline at end of file
diff --git a/saqc/funcs/soil_moisture_tests.py b/saqc/funcs/soil_moisture_tests.py
deleted file mode 100644
index ecbe911a6a76f47e4ab6bf41b30dd95718d4c45f..0000000000000000000000000000000000000000
--- a/saqc/funcs/soil_moisture_tests.py
+++ /dev/null
@@ -1,620 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import pandas as pd
-import joblib
-import dios
-from scipy.signal import savgol_filter
-
-from saqc.funcs.breaks_detection import breaks_flagSpektrumBased
-from saqc.funcs.spikes_detection import spikes_flagSpektrumBased
-from saqc.funcs.constants_detection import constants_flagVarianceBased
-from saqc.core.register import register
-from saqc.lib.tools import retrieveTrustworthyOriginal
-
-
-@register(masking='field')
-def sm_flagSpikes(
-    data,
-    field,
-    flagger,
-    raise_factor=0.15,
-    deriv_factor=0.2,
-    noise_func="CoVar",
-    noise_window="12h",
-    noise_thresh=1,
-    smooth_window="3h",
-    smooth_poly_deg=2,
-    **kwargs,
-):
-
-    """
-    The Function provides just a call to ``flagSpikes_spektrumBased``, with parameter defaults,
-    that refer to References [1].
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    raise_factor : float, default 0.15
-        Minimum relative value difference between two values to consider the latter as a spike candidate.
-        See condition (1) (or reference [2]).
-    deriv_factor : float, default 0.2
-        See condition (2) (or reference [2]).
-    noise_func : {'CoVar', 'rVar'}, default 'CoVar'
-        Function to calculate noisiness of the data surrounding potential spikes.
-        ``'CoVar'``: Coefficient of Variation
-        ``'rVar'``: Relative Variance
-    noise_window : str, default '12h'
-        An offset string that determines the range of the time window of the "surrounding" data of a potential spike.
-        See condition (3) (or reference [2]).
-    noise_thresh : float, default 1
-        Upper threshold for noisiness of data surrounding potential spikes. See condition (3) (or reference [2]).
-    smooth_window : {None, str}, default None
-        Size of the smoothing window of the Savitsky-Golay filter.
-        The default value ``None`` results in a window of two times the sampling rate (i.e. containing three values).
-    smooth_poly_deg : int, default 2
-        Degree of the polynomial used for fitting with the Savitsky-Golay filter.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional Informations related to `data`.
-        Flags values may have changed relatively to the flagger input.
-
-    References
-    ----------
-    This Function is a generalization of the Spectrum based Spike flagging mechanism as presented in:
-
-    [1] Dorigo, W. et al: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-
-    [2] https://git.ufz.de/rdm-software/saqc/-/blob/testfuncDocs/docs/funcs/FormalDescriptions.md#spikes_flagspektrumbased
-
-    """
-
-    return spikes_flagSpektrumBased(
-        data,
-        field,
-        flagger,
-        raise_factor=raise_factor,
-        deriv_factor=deriv_factor,
-        noise_func=noise_func,
-        noise_window=noise_window,
-        noise_thresh=noise_thresh,
-        smooth_window=smooth_window,
-        smooth_poly_deg=smooth_poly_deg,
-        **kwargs,
-    )
-
-
-@register(masking='field')
-def sm_flagBreaks(
-    data,
-    field,
-    flagger,
-    thresh_rel=0.1,
-    thresh_abs=0.01,
-    first_der_factor=10,
-    first_der_window="12h",
-    scnd_der_ratio_range=0.05,
-    scnd_der_ratio_thresh=10,
-    smooth=False,
-    smooth_window="3h",
-    smooth_poly_deg=2,
-    **kwargs,
-):
-
-    """
-    The Function provides just a call to flagBreaks_spektrumBased, with parameter defaults that refer to references [1].
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    thresh_rel : float, default 0.1
-        Float in [0,1]. See (1) of function description above to learn more
-    thresh_abs : float, default 0.01
-        Float > 0. See (2) of function descritpion above to learn more.
-    first_der_factor : float, default 10
-        Float > 0. See (3) of function descritpion above to learn more.
-    first_der_window_range : str, default '12h'
-        Offset string. See (3) of function description to learn more.
-    scnd_der_ratio_margin_1 : float, default 0.05
-        Float in [0,1]. See (4) of function descritpion above to learn more.
-    scnd_der_ratio_margin_2 : float, default 10
-        Float in [0,1]. See (5) of function descritpion above to learn more.
-    smooth : bool, default True
-        Method for obtaining dataseries' derivatives.
-        * False: Just take series step differences (default)
-        * True: Smooth data with a Savitzky Golay Filter before differentiating.
-    smooth_window : {None, str}, default 2
-        Effective only if `smooth` = True
-        Offset string. Size of the filter window, used to calculate the derivatives.
-    smooth_poly_deg : int, default 2
-        Effective only, if `smooth` = True
-        Polynomial order, used for smoothing with savitzk golay filter.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    References
-    ----------
-    [1] Dorigo,W. et al.: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-
-    Find a brief mathematical description of the function here:
-
-    [2] https://git.ufz.de/rdm-software/saqc/-/blob/testfuncDocs/docs/funcs
-        /FormalDescriptions.md#breaks_flagspektrumbased
-
-    """
-    return breaks_flagSpektrumBased(
-        data,
-        field,
-        flagger,
-        thresh_rel=thresh_rel,
-        thresh_abs=thresh_abs,
-        first_der_factor=first_der_factor,
-        first_der_window=first_der_window,
-        scnd_der_ratio_range=scnd_der_ratio_range,
-        scnd_der_ratio_thresh=scnd_der_ratio_thresh,
-        smooth=smooth,
-        smooth_window=smooth_window,
-        smooth_poly_deg=smooth_poly_deg,
-        **kwargs,
-    )
-
-
-@register(masking='all')
-def sm_flagFrost(data, field, flagger, soil_temp_variable, window="1h", frost_thresh=0, **kwargs):
-
-    """
-    This Function is an implementation of the soil temperature based Soil Moisture flagging, as presented in
-    references [1]:
-
-    All parameters default to the values, suggested in this publication.
-
-    Function flags Soil moisture measurements by evaluating the soil-frost-level in the moment of measurement.
-    Soil temperatures below "frost_level" are regarded as denoting frozen soil state.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    soil_temp_variable : str,
-        An offset string, denoting the fields name in data, that holds the data series of soil temperature values,
-        the to-be-flagged values shall be checked against.
-    window : str
-        An offset string denoting the maximal temporal deviation, the soil frost states timestamp is allowed to have,
-        relative to the data point to-be-flagged.
-    frost_thresh : float
-        Value level, the flagger shall check against, when evaluating soil frost level.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    References
-    ----------
-    [1] Dorigo,W. et al.: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-    """
-
-    # retrieve reference series
-    refseries = data[soil_temp_variable].copy()
-    ref_use = flagger.isFlagged(soil_temp_variable, flag=flagger.GOOD, comparator="==") | flagger.isFlagged(
-        soil_temp_variable, flag=flagger.UNFLAGGED, comparator="=="
-    )
-    # drop flagged values:
-    refseries = refseries[ref_use.values]
-    # drop nan values from reference series, since those are values you dont want to refer to.
-    refseries = refseries.dropna()
-    # skip further processing if reference series is empty:
-    if refseries.empty:
-        return data, flagger
-
-    refseries = refseries.reindex(data[field].dropna().index, method="nearest", tolerance=window)
-    refseries = refseries[refseries < frost_thresh].index
-
-    flagger = flagger.setFlags(field, refseries, **kwargs)
-    return data, flagger
-
-
-@register(masking='all')
-def sm_flagPrecipitation(
-    data,
-    field,
-    flagger,
-    prec_variable,
-    raise_window=None,
-    sensor_depth=0,
-    sensor_accuracy=0,
-    soil_porosity=0,
-    std_factor=2,
-    std_window="24h",
-    ignore_missing=False,
-    **kwargs,
-):
-
-    """
-    This Function is an implementation of the precipitation based Soil Moisture flagging, as presented in
-    references [1].
-
-    All parameters default to the values, suggested in this publication. (excluding porosity,sensor accuracy and
-    sensor depth)
-
-
-    Function flags Soil moisture measurements by flagging moisture rises that do not follow up a sufficient
-    precipitation event. If measurement depth, sensor accuracy of the soil moisture sensor and the porosity of the
-    surrounding soil is passed to the function, an inferior level of precipitation, that has to preceed a significant
-    moisture raise within 24 hours, can be estimated. If those values are not delivered, this inferior bound is set
-    to zero. In that case, any non zero precipitation count will justify any soil moisture raise.
-
-    A data point y_t is flagged an invalid soil moisture raise, if:
-
-    (1) y_t > y_(t-`raise_window`)
-    (2) y_t - y_(t-`std_factor_range`) > `std_factor` * std(y_(t-`std_factor_range`),...,y_t)
-    (3) sum(prec(t-24h),...,prec(t)) > `sensor_depth` * `sensor_accuracy` * `soil_porosity`
-
-    NOTE1: np.nan entries in the input precipitation series will be regarded as susipicious and the test will be
-    omited for every 24h interval including a np.nan entrie in the original precipitation sampling rate.
-    Only entry "0" will be regarded as denoting "No Rainfall".
-
-    NOTE2: The function wont test any values that are flagged suspicious anyway - this may change in a future version.
-
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional informations related to `data`.
-    prec_variable : str
-        Fieldname of the precipitation meassurements column in data.
-    raise_window: {None, str}, default None
-        Denotes the distance to the datapoint, relatively to witch
-        it is decided if the current datapoint is a raise or not. Equation [1].
-        It defaults to None. When None is passed, raise_window is just the sample
-        rate of the data. Any raise reference must be a multiple of the (intended)
-        sample rate and below std_factor_range.
-    sensor_depth : float, default 0
-        Measurement depth of the soil moisture sensor, [m].
-    sensor_accuracy : float, default 0
-        Accuracy of the soil moisture sensor, [-].
-    soil_porosity : float, default 0
-        Porosity of moisture sensors surrounding soil, [-].
-    std_factor : int, default 2
-        The value determines by which rule it is decided, weather a raise in soil
-        moisture is significant enough to trigger the flag test or not:
-        Significance is assumed, if the raise is  greater then "std_factor" multiplied
-        with the last 24 hours standart deviation.
-    std_window: str, default '24h'
-        An offset string that denotes the range over witch the standart deviation is obtained,
-        to test condition [2]. (Should be a multiple of the sampling rate)
-    raise_window: str
-        Denotes the distance to the datapoint, relatively to witch
-        it is decided if the current datapoint is a raise or not. Equation [1].
-        It defaults to None. When None is passed, raise_window is just the sample
-        rate of the data. Any raise reference must be a multiple of the (intended)
-        sample rate and below std_factor_range.
-    ignore_missing: bool, default False
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    References
-    ----------
-    [1] Dorigo,W. et al.: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-    """
-
-    dataseries, moist_rate = retrieveTrustworthyOriginal(data, field, flagger)
-
-    # data not hamronized:
-    refseries = data[prec_variable].dropna()
-    # abort processing if any of the measurement series has no valid entries!
-    if moist_rate is np.nan:
-        return data, flagger
-    if refseries.empty:
-        return data, flagger
-
-    refseries = refseries.reindex(refseries.index.join(dataseries.index, how="outer"))
-    # get 24 h prec. monitor
-    prec_count = refseries.rolling(window="1D").sum()
-    # exclude data not signifying a raise::
-    if raise_window is None:
-        raise_window = 1
-    else:
-        raise_window = int(np.ceil(pd.Timedelta(raise_window) / moist_rate))
-
-    # first raise condition:
-    raise_mask = dataseries > dataseries.shift(raise_window)
-
-    # second raise condition:
-    std_window = int(np.ceil(pd.Timedelta(std_window) / moist_rate))
-    if ignore_missing:
-        std_mask = dataseries.dropna().rolling(std_window).std() < (
-            (dataseries - dataseries.shift(std_window)) / std_factor
-        )
-    else:
-        std_mask = dataseries.rolling(std_window).std() < ((dataseries - dataseries.shift(std_window)) / std_factor)
-
-    dataseries = dataseries[raise_mask & std_mask]
-    invalid_indices = prec_count[dataseries.index] <= sensor_depth * sensor_accuracy * soil_porosity
-
-    flagger = flagger.setFlags(field, loc=invalid_indices, **kwargs)
-    return data, flagger
-
-
-@register(masking='field')
-def sm_flagConstants(
-    data,
-    field,
-    flagger,
-    window="12h",
-    thresh=0.0005,
-    precipitation_window="12h",
-    tolerance=0.95,
-    deriv_max=0.0025,
-    deriv_min=0,
-    max_missing=None,
-    max_consec_missing=None,
-    smooth_window=None,
-    smooth_poly_deg=2,
-    **kwargs,
-):
-
-    """
-    This function flags plateaus/series of constant values in soil moisture data.
-
-    Mentions of "conditions" in the following explanations refer to reference [2].
-
-    The function represents a stricter version of
-    constants_flagVarianceBased.
-
-    The additional constraints (3)-(5) are designed to match the special cases of constant
-    values in soil moisture measurements, namely preceding precipitation events
-    (conditions (3) and (4)) and a certain plateau level (condition (5)).
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    window : str, default '12h'
-        Minimum duration during which values need to be identical to become plateau candidates. See condition (1).
-    thresh : float, default 0.0005
-        Maximum variance of a group of values to still consider them constant. See condition (2)
-    precipitation_window : str, default '12h'
-        See condition (3) and (4)
-    tolerance : float, default 0.95
-        Tolerance factor, see condition (5)
-    deriv_max : float, default 0.0025
-        See condition (4)
-    deriv_min : float, default 0
-        See condition (3)
-    max_missing : {None, int}, default None
-        Maximum number of missing values allowed in window, by default this condition is ignored
-    max_consec_missing : {None, int}, default None
-        Maximum number of consecutive missing values allowed in window, by default this condition is ignored
-    smooth_window : {None, str}, default None
-        Size of the smoothing window of the Savitzky-Golay filter. The default value None results in a window of two
-        times the sampling rate (i.e. three values)
-    smooth_poly_deg : int, default 2
-        Degree of the polynomial used for smoothing with the Savitzky-Golay filter
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-
-    References
-    ----------
-    [1] Dorigo,W. et al.: Global Automated Quality Control of In Situ Soil Moisture
-        Data from the international Soil Moisture Network. 2013. Vadoze Zone J.
-        doi:10.2136/vzj2012.0097.
-
-    [2] https://git.ufz.de/rdm-software/saqc/-/edit/testfuncDocs/docs/funcs/FormalDescriptions.md#sm_flagconstants
-    """
-
-    # get plateaus:
-    _, comp_flagger = constants_flagVarianceBased(
-        data,
-        field,
-        flagger,
-        window=window,
-        thresh=thresh,
-        max_missing=max_missing,
-        max_consec_missing=max_consec_missing,
-    )
-
-    new_plateaus = (comp_flagger.getFlags(field)).eq(flagger.getFlags(field))
-    # get dataseries at its sampling freq:
-    dataseries, moist_rate = retrieveTrustworthyOriginal(data, field, flagger)
-    # get values referring to dataseries:
-    new_plateaus.resample(pd.Timedelta(moist_rate)).asfreq()
-    # cut out test_slices for min/max derivatives condition check:
-    # offset 2 periods:
-    precipitation_window = int(np.ceil(pd.Timedelta(precipitation_window) / moist_rate))
-    window = int(np.ceil(pd.Timedelta(window) / moist_rate))
-    period_diff = precipitation_window - window
-    # we cast the plateau series to int - because replace has problems with replacing bools by "method".
-    new_plateaus = new_plateaus.astype(int)
-    # get plateau groups:
-    group_counter = new_plateaus.cumsum()
-    group_counter = group_counter[group_counter.diff() == 0]
-    group_counter.name = "group_counter"
-    plateau_groups = pd.merge(group_counter, dataseries, left_index=True, right_index=True, how="inner")
-    # test mean-condition on plateau groups:
-    test_barrier = tolerance * dataseries.max()
-    plateau_group_drops = plateau_groups.groupby("group_counter").filter(lambda x: x[field].mean() <= test_barrier)
-    # discard values that didn't pass the test from the plateau candidate series:
-    new_plateaus[plateau_group_drops.index] = 1
-
-    # we extend the plateaus to cover condition testing sets
-    # 1: extend backwards (with a technical "one" added):
-    cond1_sets = new_plateaus.replace(1, method="bfill", limit=(precipitation_window + window))
-    # 2. extend forwards:
-    if period_diff > 0:
-        cond1_sets = cond1_sets.replace(1, method="ffill", limit=period_diff)
-
-    # get first derivative
-    if smooth_window is None:
-        smooth_window = 3 * pd.Timedelta(moist_rate)
-    else:
-        smooth_window = pd.Timedelta(smooth_window)
-    filter_window_seconds = smooth_window.seconds
-    smoothing_periods = int(np.ceil((filter_window_seconds / moist_rate.n)))
-    first_derivate = savgol_filter(dataseries, window_length=smoothing_periods, polyorder=smooth_poly_deg, deriv=1,)
-    first_derivate = pd.Series(data=first_derivate, index=dataseries.index, name=dataseries.name)
-    # cumsumming to separate continuous plateau groups from each other:
-    group_counter = cond1_sets.cumsum()
-    group_counter = group_counter[group_counter.diff() == 0]
-    group_counter.name = "group_counter"
-    group_frame = pd.merge(group_counter, first_derivate, left_index=True, right_index=True, how="inner")
-    group_frame = group_frame.groupby("group_counter")
-    condition_passed = group_frame.filter(lambda x: (x[field].max() >= deriv_max) & (x[field].min() <= deriv_min))
-
-    flagger = flagger.setFlags(field, loc=condition_passed.index, **kwargs)
-
-    return data, flagger
-
-
-@register(masking='all')
-def sm_flagRandomForest(data, field, flagger, references, window_values: int, window_flags: int, path: str, **kwargs):
-    """
-    This function uses pre-trained machine-learning model objects to flag a specific variable. The model is
-    supposed to be trained using the script provided in "ressources/machine_learning/train_machine_learning.py". For
-    flagging, the inputs to the model are the time series of the respective target at one specific sensor, the
-    automatic flags that were assigned by SaQC, as well as multiple reference series. Internally, context information
-    for each point is gathered in the form of moving windows to improve the flagging algorithm according to user input
-    during model training. For the model to work, the parameters 'references', 'window_values' and 'window_flags' have
-    to be set to the same values as during training.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flagger : saqc.flagger.BaseFlagger
-        A flagger object, holding flags and additional Informations related to `data`.
-    references : {str, List[str]}
-        String or list of strings, denoting the fieldnames of the data series that should be used as reference variables
-    window_values : int
-        An integer, denoting the window size that is used to derive the gradients of both the field- and
-        reference-series inside the moving window
-    window_flags : int
-        An integer, denoting the window size that is used to count the surrounding automatic flags that have been set
-        before
-    path : str
-        A string giving the path to the respective model object, i.e. its name and
-        the respective value of the grouping variable. e.g. "models/model_0.2.pkl"
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flagger : saqc.flagger.BaseFlagger
-        The flagger object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flagger input.
-    """
-
-    def _refCalc(reference, window_values):
-        """ Helper function for calculation of moving window values """
-        outdata = dios.DictOfSeries()
-        name = reference.name
-        # derive gradients from reference series
-        outdata[name + "_Dt_1"] = reference - reference.shift(1)  # gradient t vs. t-1
-        outdata[name + "_Dt1"] = reference - reference.shift(-1)  # gradient t vs. t+1
-        # moving mean of gradients var1 and var2 before/after
-        outdata[name + "_Dt_" + str(window_values)] = (
-            outdata[name + "_Dt_1"].rolling(window_values, center=False).mean()
-        )  # mean gradient t to t-window
-        outdata[name + "_Dt" + str(window_values)] = (
-            outdata[name + "_Dt_1"].iloc[::-1].rolling(window_values, center=False).mean()[::-1]
-        )  # mean gradient t to t+window
-        return outdata
-
-    # Function for moving window calculations
-    # Create custom df for easier processing
-    df = data.loc[:, [field] + references]
-    # Create binary column of BAD-Flags
-    df["flag_bin"] = flagger.isFlagged(field, flag=flagger.BAD, comparator="==").astype("int")
-
-    # Add context information of flags
-    # Flag at t +/-1
-    df["flag_bin_t_1"] = df["flag_bin"] - df["flag_bin"].shift(1)
-    df["flag_bin_t1"] = df["flag_bin"] - df["flag_bin"].shift(-1)
-    # n Flags in interval t to t-window_flags
-    df[f"flag_bin_t_{window_flags}"] = df["flag_bin"].rolling(window_flags + 1, center=False).sum()
-    # n Flags in interval t to t+window_flags
-    # forward-orientation not possible, so use right-orientation on reversed data and reverse the result
-    df[f"flag_bin_t{window_flags}"] = df["flag_bin"].iloc[::-1].rolling(window_flags + 1, center=False).sum()[::-1]
-
-    # TODO: dios.merge() / dios.join() ...
-    # replace the following version with its DictOfSeries -> DataFrame
-    # conversions as soon as merging/joining is available in dios
-
-    # Add context information for field+references
-    df = df.to_df()  # df is a dios
-    for i in [field] + references:
-        ref = _refCalc(reference=df[i], window_values=window_values).to_df()
-        df = pd.concat([df, ref], axis=1)
-    # all further actions work on pd.DataFrame. that's ok,
-    # because only the df.index is used to set the actual
-    # flags in the underlying dios.
-
-    # remove NAN-rows from predictor calculation
-    df = df.dropna(axis=0, how="any")
-    # drop column of automatic flags at time t
-    df = df.drop(columns="flag_bin")
-    # Load model and predict on df:
-    model = joblib.load(path)
-    preds = model.predict(df)
-
-    flag_indices = df[preds.astype("bool")].index
-    flagger = flagger.setFlags(field, loc=flag_indices, **kwargs)
-    return data, flagger
diff --git a/saqc/funcs/tools.py b/saqc/funcs/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..36472165a2c19d3c948599f80c518e49a448e58d
--- /dev/null
+++ b/saqc/funcs/tools.py
@@ -0,0 +1,231 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import dios
+import numpy as np
+
+from saqc.core.register import register
+
+from saqc.lib.tools import periodicMask
+
+
+@register(masking='none')
+def copy(data, field, flagger, newfield, **kwargs):
+    """
+    The function generates a copy of the data "field" and inserts it under the name `newfield` into the existing
+    data.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column you want to fork (copy).
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    newfield : str
+        Fieldname of the new, copied data column.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        The data shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        The flags shape may have changed relative to the flagger input.
+    """
+
+    if newfield in flagger.flags.columns.union(data.columns):
+        raise ValueError(f"{newfield}: field already exists")
+
+    flags, extras = flagger.getFlags(field, full=True)
+    newflagger = flagger.replaceField(newfield, flags=flags, **extras)
+    newdata = data.copy()
+    newdata[newfield] = data[field].copy()
+    return newdata, newflagger
+
+
+@register(masking='none')
+def drop(data, field, flagger, **kwargs):
+    """
+    The function drops field from the data dios and the flagger.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column you want to drop.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        The data shape may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        The flags shape may have changed relative to the flagger input.
+    """
+
+    data = data.copy()
+    del data[field]
+    flagger = flagger.replaceField(field, flags=None)
+    return data, flagger
+
+
+@register(masking='none')
+def rename(data, field, flagger, new_name, **kwargs):
+    """
+    The function renames `field` to `new_name` (in both the flagger and the data).
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the data column you want to rename.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    new_name : str
+        The new name `field` is to be renamed to.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+    """
+    # store
+    s = data[field]
+    f, e = flagger.getFlags(field, full=True)
+
+    # delete
+    data = data.copy()
+    del data[field]
+    flagger = flagger.replaceField(field, flags=None)
+
+    # insert
+    data[new_name] = s
+    flagger = flagger.replaceField(new_name, inplace=True, flags=f, **e)
+
+    return data, flagger
+
+
+def mask(data, field, flagger, mode, mask_var=None, period_start=None, period_end=None,
+         include_bounds=True):
+    """
+    This function realizes masking within saqc.
+
+    Due to some inner saqc mechanics, it is not straightforwardly possible to exclude
+    values or data chunks from flagging routines. This function replaces the flags with np.nan
+    wherever values are to be masked. Furthermore, the masked values themselves are replaced by
+    np.nan, so that they don't affect calculations.
+
+    A recipe for applying a flagging function only to a masked chunk of the variable "field":
+
+    1. duplicate "field" in the input data (copy)
+    2. mask the duplicated data (mask)
+    3. apply the tests that should only run on the masked data chunks
+    4. project the flags, calculated on the duplicated and masked data, onto the original field data
+        (reindexFlags or flagGeneric)
+    5. drop the duplicated data (drop)
+
+    For an implemented example, see the seasonal-range test in test/funcs/test_functions.py, or the sketch below.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-masked.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    mode : {"periodic", "mask_var"}
+        The masking mode.
+        - "periodic": the parameters "period_start" and "period_end" are evaluated to generate a periodic mask
+        - "mask_var": data[mask_var] is expected to be a boolean-valued time series and is used as the mask.
+    mask_var : {None, str}, default None
+        Only effective if mode == "mask_var".
+        Fieldname of the column holding the data that is to be used as the mask (must be a boolean series).
+        Neither the series' length nor its labels have to match data[field]'s index and length. An inner join of the
+        indices will be calculated, and values get masked where the values of the inner join are True.
+    period_start : {None, str}, default None
+        Only effective if mode == "periodic".
+        String denoting the starting point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
+        Has to be of the same length as the `period_end` parameter.
+        See examples section below for some examples.
+    period_end : {None, str}, default None
+        Only effective if mode == "periodic".
+        String denoting the end point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
+        Has to be of the same length as the `period_start` parameter.
+        See examples section below for some examples.
+    include_bounds : boolean
+        Whether or not to include the mask-defining bounds in the mask.
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+        Flag values may have changed relative to the flagger input.
+
+
+    Examples
+    --------
+    The `period_start` and `period_end` parameters provide a convenient way to generate seasonal / date-periodic masks.
+    They have to be strings of the forms: "mm-ddTHH:MM:SS", "ddTHH:MM:SS" , "HH:MM:SS", "MM:SS" or "SS"
+    (mm=month, dd=day, HH=hour, MM=minute, SS=second)
+    Single digit specifications have to be given with leading zeros.
+    `period_start` and `period_end` strings have to be of the same length (i.e. refer to the same periodicity).
+    The highest date unit gives the period.
+    For example:
+
+    >>> period_start = "01T15:00:00"
+    >>> period_end = "13T17:30:00"
+
+    Will result in all values sampled between 15:00 on the 1st and 17:30 on the 13th of every month getting masked.
+
+    >>> period_start = "01:00"
+    >>> period_end = "04:00"
+
+    All the values between the first and 4th minute of every hour get masked.
+
+    >>> period_start = "01-01T00:00:00"
+    >>> period_end = "01-03T00:00:00"
+
+    Mask January and February of every year. Masking is always inclusive, so in this case the mask will
+    include 00:00:00 on the first of March. To exclude this one, pass:
+
+    >>> period_start = "01-01T00:00:00"
+    >>> period_end = "02-28T23:59:59"
+
+    To mask intervals that wrap around the end of a period, like nights or winter, swap the order of period start and
+    period end. For example, to mask the night hours between 22:00:00 in the evening and 06:00:00 in the morning, pass:
+
+    >>> period_start = "22:00:00"
+    >>> period_end = "06:00:00"
+
+    When inclusive_selection="season" is used, all of the above examples work the same way, except that you now
+    determine which values NOT to mask (i.e., which values constitute the "seasons").
+    """
+    data = data.copy()
+    datcol_idx = data[field].index
+
+    if mode == 'periodic':
+        to_mask = periodicMask(datcol_idx, period_start, period_end, include_bounds)
+    elif mode == 'mask_var':
+        idx = data[mask_var].index.intersection(datcol_idx)
+        to_mask = data.loc[idx, mask_var]
+    else:
+        raise ValueError("Keyword passed as masking mode is unknown ({})!".format(mode))
+
+    data.aloc[to_mask, field] = np.nan
+    flagger = flagger.setFlags(field, loc=to_mask, flag=np.nan, force=True)
+
+    return data, flagger
\ No newline at end of file
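The masking recipe described in the `mask` docstring above can be composed from the tools in this module together with `flagRange` and `reindexFlags`. The following is only an illustrative sketch mirroring the seasonal-range test in test/funcs/test_functions.py: `data` is assumed to be a dios.DictOfSeries, `flagger` an already initialized flagger, and the variable name "SM2" as well as the period bounds and thresholds are placeholders.

```python
from saqc.funcs.tools import copy, mask, drop
from saqc.funcs.outliers import flagRange
from saqc.funcs.resampling import reindexFlags

field, masked = "SM2", "SM2_masked"

# 1. duplicate the variable
data, flagger = copy(data, field, flagger, masked)
# 2. mask the duplicate over a date-periodic window (placeholder bounds)
data, flagger = mask(data, masked, flagger, mode="periodic",
                     period_start="03-01T00:00:00", period_end="10-01T00:00:00",
                     include_bounds=True)
# 3. run the test on the masked duplicate only
data, flagger = flagRange(data, masked, flagger, min=10, max=60)
# 4. project the resulting flags back onto the original variable
data, flagger = reindexFlags(data, field, flagger, method="match", source=masked)
# 5. drop the auxiliary variable again
data, flagger = drop(data, masked, flagger)
```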
diff --git a/saqc/funcs/transformation.py b/saqc/funcs/transformation.py
new file mode 100644
index 0000000000000000000000000000000000000000..411d9ff41384ab67544f7c31ebac771d173ffd52
--- /dev/null
+++ b/saqc/funcs/transformation.py
@@ -0,0 +1,67 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import pandas as pd
+
+from saqc.core.register import register
+import numpy as np
+import dios
+
+
+@register(masking='field')
+def transform(data, field, flagger, func, partition_freq=None, **kwargs):
+    """
+    Function to transform data columns with a transformation that maps series onto series of the same length.
+
+    Note that flags are preserved.
+
+    Parameters
+    ----------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+    field : str
+        The fieldname of the column, holding the data-to-be-transformed.
+    flagger : saqc.flagger.BaseFlagger
+        A flagger object, holding flags and additional information related to `data`.
+    func : Callable[{pd.Series, np.array}, np.array]
+        Function to transform data[field] with.
+    partition_freq : {None, int, str}, default None
+        Determines the segmentation of the data into partitions that the transformation is applied to individually.
+
+        * ``None``: Apply the transformation to the whole data set at once
+        * ``x`` > 0 : Apply the transformation to successive data chunks of ``x`` periods length
+        * Offset string : Apply the transformation to successive partitions of temporal extension matching the passed
+          offset string
+
+    Returns
+    -------
+    data : dios.DictOfSeries
+        A dictionary of pandas.Series, holding all the data.
+        Data values may have changed relative to the data input.
+    flagger : saqc.flagger.BaseFlagger
+        The flagger object, holding flags and additional information related to `data`.
+    """
+
+    data = data.copy()
+    val_ser = data[field]
+    # partitioning
+    if not partition_freq:
+        partition_freq = val_ser.shape[0]
+
+    if isinstance(partition_freq, str):
+        grouper = pd.Grouper(freq=partition_freq)
+    else:
+        grouper = pd.Series(data=np.arange(0, val_ser.shape[0]), index=val_ser.index)
+        grouper = grouper.transform(lambda x: int(np.floor(x / partition_freq)))
+
+    partitions = val_ser.groupby(grouper)
+
+    for _, partition in partitions:
+        if partition.empty:
+            continue
+        val_ser[partition.index] = func(partition)
+
+    data[field] = val_ser
+    return data, flagger
+
+
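As a usage sketch for `transform` (illustrative only; `data` is assumed to be a dios.DictOfSeries, `flagger` an initialized flagger, and the field name and standardization function are placeholders):

```python
import numpy as np

from saqc.funcs.transformation import transform

# z-score the values of "SM2" within every calendar month separately
data, flagger = transform(
    data, "SM2", flagger,
    func=lambda x: (x - np.mean(x)) / np.std(x),
    partition_freq="1M",
)
```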
diff --git a/saqc/lib/tools.py b/saqc/lib/tools.py
index 3cbe5ab766a7bdb0f58324307d36a9981d7f98a6..2ee355c7b0bccefe39c730f1235f591fb7e6880d 100644
--- a/saqc/lib/tools.py
+++ b/saqc/lib/tools.py
@@ -176,7 +176,7 @@ def offset2seconds(offset):
     return pd.Timedelta.total_seconds(pd.Timedelta(offset))
 
 
-def seasonalMask(dtindex, season_start, season_end, include_bounds):
+def periodicMask(dtindex, season_start, season_end, include_bounds):
     """
     This function generates date-periodic/seasonal masks from an index passed.
 
diff --git a/saqc/lib/ts_operators.py b/saqc/lib/ts_operators.py
index 30ce15899d4d5c21999f1e686f2ec2bf83598bbb..e7f0402c8b6ff5d98a8f09ee1006b9a3008bf442 100644
--- a/saqc/lib/ts_operators.py
+++ b/saqc/lib/ts_operators.py
@@ -16,7 +16,6 @@ from sklearn.neighbors import NearestNeighbors
 from scipy.stats import iqr
 import numpy.polynomial.polynomial as poly
 
-
 logger = logging.getLogger("SaQC")
 
 
@@ -108,11 +107,22 @@ def standardizeByMedian(ts):
     return (ts - np.median(ts)) / iqr(ts, nan_policy="omit")
 
 
-def kNN(in_arr, n_neighbors, algorithm="ball_tree"):
+def kNN(in_arr, n_neighbors, algorithm="ball_tree", metric='minkowski', p=2, radius=None):
     # k-nearest-neighbor search
-    nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm).fit(in_arr.reshape(in_arr.shape[0], -1))
-    return nbrs.kneighbors()
 
+    nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, metric=metric, p=p)\
+        .fit(in_arr.reshape(in_arr.shape[0], -1))
+    if radius is None:
+        return nbrs.kneighbors()
+
+    # radius-limited search: collect per-sample neighbor distances and pad
+    # rows with NaN where fewer than `n_neighbors` neighbors lie within `radius`
+    rad_nbrs = nbrs.radius_neighbors(radius=radius)
+    dist = np.full((in_arr.shape[0], n_neighbors), np.nan)
+    for i, k in enumerate(rad_nbrs[0]):
+        # every row holds the distances of one sample's in-radius neighbors
+        dist[i, :len(k)] = k
+    return dist, np.array([])
 
 def kNNMaxGap(in_arr, n_neighbors=10, algorithm="ball_tree"):
     # searches for the "n_neighbors" nearest neighbors of every value in "in_arr"
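The new `radius` parameter of `kNN` switches to a radius-limited neighbor search; a short illustrative sketch (the sample values are made up):

```python
import numpy as np

from saqc.lib.ts_operators import kNN

arr = np.array([0.0, 0.1, 0.2, 5.0, 5.1])

# classic mode: distances and indices of the 2 nearest neighbors of every sample
dist, idx = kNN(arr, n_neighbors=2)

# radius-limited mode: rows are padded with NaN wherever a sample has fewer
# than `n_neighbors` neighbors within the given radius (indices are not returned)
dist_rad, _ = kNN(arr, n_neighbors=2, radius=0.5)
```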
diff --git a/test/core/test_core.py b/test/core/test_core.py
index 55a75a227bf880d30084ad7a0e6956d1611d9d93..a50952545e08ce121e24c6e3044e4e7733b17a61 100644
--- a/test/core/test_core.py
+++ b/test/core/test_core.py
@@ -7,6 +7,7 @@ import pytest
 import numpy as np
 import pandas as pd
 
+
 from saqc import SaQC, register
 from saqc.funcs import flagRange
 from saqc.lib import plotting as splot
@@ -57,7 +58,7 @@ def test_duplicatedVariable(flagger):
     data = initData(1)
     var1 = data.columns[0]
 
-    pdata, pflags = SaQC(flagger, data).flagDummy(var1).flagDummy(var1).getResult()
+    pdata, pflags = SaQC(flagger, data).flagDummy(var1).getResult()
 
     if isinstance(pflags.columns, pd.MultiIndex):
         cols = pflags.columns.get_level_values(0).drop_duplicates()
diff --git a/test/core/test_masking.py b/test/core/test_masking.py
index b41eebbcb2bc6ba6b3d9a1594fed5282e23ac5fc..ce2a50a698a65313383fbae7233aa29fa131f527 100644
--- a/test/core/test_masking.py
+++ b/test/core/test_masking.py
@@ -34,7 +34,7 @@ def test_masking(data, flagger):
     # if masking works, `data > max` will be masked,
     # so the following will deliver True for in range (data < max),
     # otherwise False, like an inverse range-test
-    qc = qc.procGeneric("dummy", func=lambda var1: var1 >= mn)
+    qc = qc.process("dummy", func=lambda var1: var1 >= mn)
 
     pdata, pflagger = qc.getResult(raw=True)
     out_of_range = pflagger.isFlagged(var1)
diff --git a/test/funcs/test_breaks_detection.py b/test/funcs/test_breaks_detection.py
deleted file mode 100644
index f07e949b2278a3d273b101fdbff9e2e6ba5b0a96..0000000000000000000000000000000000000000
--- a/test/funcs/test_breaks_detection.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import pytest
-
-from saqc.funcs.breaks_detection import breaks_flagSpektrumBased
-from test.common import TESTFLAGGER, initData
-
-
-@pytest.fixture
-def data():
-    return initData(cols=1, start_date="2011-01-01 00:00:00", end_date="2011-01-02 03:00:00", freq="5min")
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_breaks_flagSpektrumBased(data, flagger):
-    field, *_ = data.columns
-    data.iloc[5:15] += 100
-    break_positions = [5, 15]
-    flagger = flagger.initFlags(data)
-    data, flagger_result = breaks_flagSpektrumBased(data, field, flagger)
-    flag_result = flagger_result.getFlags(field)
-    test_sum = (flag_result[break_positions] == flagger.BAD).sum()
-    assert test_sum == len(break_positions)
diff --git a/test/funcs/test_constants_detection.py b/test/funcs/test_constants_detection.py
index 52e2f6d9e50fab7d0ecda01adea82d60ff3614ea..75dab02aeb1c1994b689eacf6a464f2b42212720 100644
--- a/test/funcs/test_constants_detection.py
+++ b/test/funcs/test_constants_detection.py
@@ -4,7 +4,7 @@
 import pytest
 import numpy as np
 
-from saqc.funcs.constants_detection import constants_flagBasic, constants_flagVarianceBased
+from saqc.funcs.constants import flagConstants, flagByVariance
 
 from test.common import TESTFLAGGER, initData
 
@@ -21,7 +21,7 @@ def test_constants_flagBasic(data, flagger):
     expected = np.arange(5, 22)
     field, *_ = data.columns
     flagger = flagger.initFlags(data)
-    data, flagger_result = constants_flagBasic(data, field, flagger, window="15Min", thresh=0.1,)
+    data, flagger_result = flagConstants(data, field, flagger, window="15Min", thresh=0.1, )
     flags = flagger_result.getFlags(field)
     assert np.all(flags[expected] == flagger.BAD)
 
@@ -31,7 +31,7 @@ def test_constants_flagVarianceBased(data, flagger):
     expected = np.arange(5, 25)
     field, *_ = data.columns
     flagger = flagger.initFlags(data)
-    data, flagger_result1 = constants_flagVarianceBased(data, field, flagger, window="1h")
+    data, flagger_result1 = flagByVariance(data, field, flagger, window="1h")
 
     flag_result1 = flagger_result1.getFlags(field)
     test_sum = (flag_result1[expected] == flagger.BAD).sum()
diff --git a/test/funcs/test_functions.py b/test/funcs/test_functions.py
index 8670e09a2e675c7fa5b9338916e4c62104948090..a47331cd0c5c0113b437c8809d1bdac63e80c0e3 100644
--- a/test/funcs/test_functions.py
+++ b/test/funcs/test_functions.py
@@ -2,11 +2,16 @@
 # -*- coding: utf-8 -*-
 
 import pytest
-import numpy as np
 import pandas as pd
+import numpy as np
 import dios
 
-from saqc.funcs.functions import *
+from saqc.funcs.drift import flagDriftFromNorm, flagDriftFromReference, flagDriftFromScaledNorm
+from saqc.funcs.outliers import flagCrossStatistic, flagRange
+from saqc.funcs.flagtools import flagManual, forceFlags, clearFlags
+from saqc.funcs.tools import drop, copy, mask
+from saqc.funcs.resampling import reindexFlags
+from saqc.funcs.breaks import flagIsolated
 from test.common import initData, TESTFLAGGER
 
 
@@ -46,7 +51,16 @@ def test_flagSesonalRange(data, field, flagger):
 
     for test, expected in tests:
         flagger = flagger.initFlags(data)
-        data, flagger = flagSesonalRange(data, field, flagger, **test)
+        newfield = f"{field}_masked"
+        start = f"{test['startmonth']:02}-{test['startday']:02}T00:00:00"
+        end = f"{test['endmonth']:02}-{test['endday']:02}T00:00:00"
+
+        data, flagger = copy(data, field, flagger, field + "_masked")
+        data, flagger = mask(data, newfield, flagger, mode='periodic', period_start=start, period_end=end,
+                             include_bounds=True)
+        data, flagger = flagRange(data, newfield, flagger, min=test['min'], max=test['max'])
+        data, flagger = reindexFlags(data, field, flagger, method='match', source=newfield)
+        data, flagger = drop(data, newfield, flagger)
         flagged = flagger.isFlagged(field)
         assert flagged.sum() == expected
 
@@ -105,7 +119,7 @@ def test_flagCrossScoring(dat, flagger):
     s2 = pd.Series(data=s2.values, index=s1.index)
     data = dios.DictOfSeries([s1, s2], columns=["data1", "data2"])
     flagger = flagger.initFlags(data)
-    _, flagger_result = flagCrossScoring(data, field, flagger, fields=fields, thresh=3, cross_stat=np.mean)
+    _, flagger_result = flagCrossStatistic(data, field, flagger, fields=fields, thresh=3, cross_stat=np.mean)
     for field in fields:
         isflagged = flagger_result.isFlagged(field)
         assert isflagged[characteristics["raise"]].all()
@@ -208,8 +222,8 @@ def test_flagDriftFromNormal(dat, flagger):
     data_ref, flagger_ref = flagDriftFromReference(data, 'd1', flagger, ['d1', 'd2', 'd3'], segment_freq="3D",
                                       thresh=20)
 
-    data_scale, flagger_scale = flagDriftScale(data, 'dummy', flagger, ['d1', 'd3'], ['d4', 'd5'], segment_freq="3D",
-                                                   thresh=20,  norm_spread=5)
+    data_scale, flagger_scale = flagDriftFromScaledNorm(data, 'dummy', flagger, ['d1', 'd3'], ['d4', 'd5'], segment_freq="3D",
+                                                        thresh=20, norm_spread=5)
     assert flagger_norm.isFlagged()['d3'].all()
     assert flagger_ref.isFlagged()['d3'].all()
     assert flagger_scale.isFlagged()['d3'].all()
diff --git a/test/funcs/test_generic_api_functions.py b/test/funcs/test_generic_api_functions.py
index c800178cae77684a29c1e11f2b0ddf8f7e32001b..7bb136f702b76775895cfc3b4516ab57083b0e69 100644
--- a/test/funcs/test_generic_api_functions.py
+++ b/test/funcs/test_generic_api_functions.py
@@ -13,7 +13,8 @@ from saqc.core.visitor import ConfigFunctionParser
 from saqc.core.config import Fields as F
 from saqc.core.register import register
 from saqc import SaQC, SimpleFlagger
-from saqc.funcs.functions import _execGeneric
+from saqc.funcs.generic import _execGeneric
+from saqc.funcs.tools import mask
 
 
 register(masking='field')(flagAll)
@@ -28,7 +29,7 @@ def data():
 def test_addFieldFlagGeneric(data, flagger):
     saqc = SaQC(data=data, flagger=flagger)
 
-    data, flags = saqc.flagGeneric(
+    data, flags = saqc.flag(
         "tmp1",
         func=lambda var1: pd.Series(False, index=data[var1.name].index)
     ).getResult()
@@ -39,10 +40,10 @@ def test_addFieldFlagGeneric(data, flagger):
 def test_addFieldProcGeneric(data, flagger):
     saqc = SaQC(data=data, flagger=flagger)
 
-    data, flagger = saqc.procGeneric("tmp1", func=lambda: pd.Series([])).getResult(raw=True)
+    data, flagger = saqc.process("tmp1", func=lambda: pd.Series([])).getResult(raw=True)
     assert "tmp1" in data.columns and data["tmp1"].empty
 
-    data, flagger = saqc.procGeneric("tmp2", func=lambda var1, var2: var1 + var2).getResult()
+    data, flagger = saqc.process("tmp2", func=lambda var1, var2: var1 + var2).getResult()
     assert "tmp2" in data.columns and (data["tmp2"] == data["var1"] + data["var2"]).all(axis=None)
 
 
@@ -53,9 +54,9 @@ def test_mask(data, flagger):
     data_org = data.copy(deep=True)
     mean = data["var1"] / 2
 
-    data, _ = saqc.procGeneric("var1", lambda var1: mask(var1 < mean)).getResult()
+    data, _ = saqc.process("var1", lambda var1: mask(var1 < mean)).getResult()
     assert ((data["var1"].isna()) == (data_org["var1"] < 10) & data_org["var1"].isna()).all(axis=None)
 
-    data, flags = saqc.procGeneric("tmp", lambda var1: mask(var1 < mean)).getResult()
+    data, flags = saqc.process("tmp", lambda var1: mask(var1 < mean)).getResult()
     assert ("tmp" in data.columns) and ("tmp" in flags.columns)
     assert ((data["tmp"].isna()) == (data_org["var1"] < 10) & data_org["var1"].isna()).all(axis=None)
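With the renamed generic functions, the Python API reads as follows (a sketch only; `data` is assumed to contain the variables `var1` and `var2` as in the tests above):

```python
from saqc import SaQC, SimpleFlagger

saqc = SaQC(data=data, flagger=SimpleFlagger())

# derive a new variable, then flag based on it, using the renamed generics
saqc = saqc.process("tmp", func=lambda var1, var2: var1 + var2)
saqc = saqc.flag("tmp_above_zero", func=lambda tmp: tmp > 0)

data, flagger = saqc.getResult()
```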
diff --git a/test/funcs/test_generic_config_functions.py b/test/funcs/test_generic_config_functions.py
index febb1f672fbed82a57f5a92fab3c5346cafe5aef..0993931c2f319a7dff9a8669169d7274ec355167 100644
--- a/test/funcs/test_generic_config_functions.py
+++ b/test/funcs/test_generic_config_functions.py
@@ -14,7 +14,7 @@ from saqc.core.visitor import ConfigFunctionParser
 from saqc.core.config import Fields as F
 from saqc.core.register import register
 from saqc import SaQC, SimpleFlagger
-from saqc.funcs.functions import _execGeneric
+from saqc.funcs.generic import _execGeneric
 
 
 @pytest.fixture
@@ -49,7 +49,7 @@ def test_missingIdentifier(data, flagger):
     ]
 
     for test in tests:
-        func = _compileGeneric(f"flagGeneric(func={test})", flagger)
+        func = _compileGeneric(f"flag(func={test})", flagger)
         with pytest.raises(NameError):
             _execGeneric(flagger, data, func, field="", nodata=np.nan)
 
@@ -65,7 +65,7 @@ def test_syntaxError(flagger):
 
     for test in tests:
         with pytest.raises(SyntaxError):
-            _compileGeneric(f"flagGeneric(func={test})", flagger)
+            _compileGeneric(f"flag(func={test})", flagger)
 
 
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
@@ -81,7 +81,7 @@ def test_typeError(flagger):
 
     for test in tests:
         with pytest.raises(TypeError):
-            _compileGeneric(f"flagGeneric(func={test})", flagger)
+            _compileGeneric(f"flag(func={test})", flagger)
 
 
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
@@ -100,7 +100,7 @@ def test_comparisonOperators(data, flagger):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"flagGeneric(func={test})", flagger)
+        func = _compileGeneric(f"flag(func={test})", flagger)
         result = _execGeneric(flagger, data, func, field=var1, nodata=np.nan)
         assert np.all(result == expected)
 
@@ -121,7 +121,7 @@ def test_arithmeticOperators(data, flagger):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"procGeneric(func={test})", flagger)
+        func = _compileGeneric(f"process(func={test})", flagger)
         result = _execGeneric(flagger, data, func, field=var1, nodata=np.nan)
         assert np.all(result == expected)
 
@@ -141,7 +141,7 @@ def test_nonReduncingBuiltins(data, flagger):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"procGeneric(func={test})", flagger)
+        func = _compileGeneric(f"process(func={test})", flagger)
         result = _execGeneric(flagger, data, func, field=this, nodata=np.nan)
         assert (result == expected).all()
 
@@ -165,7 +165,7 @@ def test_reduncingBuiltins(data, flagger, nodata):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"procGeneric(func={test})", flagger)
+        func = _compileGeneric(f"process(func={test})", flagger)
         result = _execGeneric(flagger, data, func, field=this.name, nodata=nodata)
         assert result == expected
 
@@ -184,7 +184,7 @@ def test_ismissing(data, flagger, nodata):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"flagGeneric(func={test})", flagger)
+        func = _compileGeneric(f"flag(func={test})", flagger)
         result = _execGeneric(flagger, data, func, this.name, nodata)
         assert np.all(result == expected)
 
@@ -204,7 +204,7 @@ def test_bitOps(data, flagger, nodata):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"flagGeneric(func={test})", flagger)
+        func = _compileGeneric(f"flag(func={test})", flagger)
         result = _execGeneric(flagger, data, func, this, nodata)
         assert np.all(result == expected)
 
@@ -225,7 +225,7 @@ def test_isflagged(data, flagger):
     ]
 
     for test, expected in tests:
-        func = _compileGeneric(f"flagGeneric(func={test}, flag=BAD)", flagger)
+        func = _compileGeneric(f"flag(func={test}, flag=BAD)", flagger)
         result = _execGeneric(flagger, data, func, field=None, nodata=np.nan)
         assert np.all(result == expected)
 
@@ -236,8 +236,8 @@ def test_variableAssignments(data, flagger):
 
     config = f"""
     {F.VARNAME}  ; {F.TEST}
-    dummy1       ; procGeneric(func=var1 + var2)
-    dummy2       ; flagGeneric(func=var1 + var2 > 0)
+    dummy1       ; process(func=var1 + var2)
+    dummy2       ; flag(func=var1 + var2 > 0)
     """
 
     fobj = writeIO(config)
@@ -252,13 +252,13 @@ def test_variableAssignments(data, flagger):
 
 @pytest.mark.xfail(strict=True)
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_procGenericMultiple(data_diff, flagger):
+def test_processMultiple(data_diff, flagger):
     var1, var2, *_ = data_diff.columns
 
     config = f"""
     {F.VARNAME} ; {F.TEST}
-    dummy       ; procGeneric(func=var1 + 1)
-    dummy       ; procGeneric(func=var2 - 1)
+    dummy       ; process(func=var1 + 1)
+    dummy       ; process(func=var2 - 1)
     """
 
     fobj = writeIO(config)
diff --git a/test/funcs/test_harm_funcs.py b/test/funcs/test_harm_funcs.py
index d8825f9689c4c7108b53e9dc22772d203449ab04..2fca105a6125b08991e8326a93f8101956f8d0c6 100644
--- a/test/funcs/test_harm_funcs.py
+++ b/test/funcs/test_harm_funcs.py
@@ -11,12 +11,12 @@ import dios
 
 from test.common import TESTFLAGGER
 
-from saqc.funcs.harm_functions import (
-    harm_linear2Grid,
-    harm_interpolate2Grid,
-    harm_shift2Grid,
-    harm_aggregate2Grid,
-    harm_deharmonize,
+from saqc.funcs.resampling import (
+    linear,
+    interpolate,
+    shift,
+    aggregate,
+    mapToOriginal,
 )
 
 RESHAPERS = ["nshift", "fshift", "bshift", "nagg", "bagg", "fagg", "interpolation"]
@@ -50,10 +50,10 @@ def test_harmSingleVarIntermediateFlagging(data, flagger, reshaper):
     freq = "15min"
     assert len(data.columns) == 1
     field = data.columns[0]
-    data, flagger = harm_linear2Grid(data, "data", flagger, freq)
+    data, flagger = linear(data, "data", flagger, freq)
     # flag something bad
     flagger = flagger.setFlags("data", loc=data[field].index[3:4])
-    data, flagger = harm_deharmonize(data, "data", flagger, method="inverse_" + reshaper)
+    data, flagger = mapToOriginal(data, "data", flagger, method="inverse_" + reshaper)
     d = data[field]
     if reshaper == "nagg":
         assert flagger.isFlagged(loc=d.index[3:7]).squeeze().all()
@@ -122,11 +122,11 @@ def test_harmSingleVarInterpolations(data, flagger):
     ]
 
     for interpolation, freq, expected in tests:
-        data_harm, flagger_harm = harm_aggregate2Grid(
+        data_harm, flagger_harm = aggregate(
             data, field, flagger, freq, value_func=np.sum, method=interpolation
         )
         assert data_harm[field].equals(expected)
-        data_deharm, flagger_deharm = harm_deharmonize(
+        data_deharm, flagger_deharm = mapToOriginal(
             data_harm, "data", flagger_harm, method="inverse_" + interpolation
         )
         assert data_deharm[field].equals(pre_data)
@@ -184,9 +184,9 @@ def test_harmSingleVarInterpolations(data, flagger):
     ]
 
     for interpolation, freq, expected in tests:
-        data_harm, flagger_harm = harm_shift2Grid(data, field, flagger, freq, method=interpolation)
+        data_harm, flagger_harm = shift(data, field, flagger, freq, method=interpolation)
         assert data_harm[field].equals(expected)
-        data_deharm, flagger_deharm = harm_deharmonize(
+        data_deharm, flagger_deharm = mapToOriginal(
             data_harm, "data", flagger_harm, method="inverse_" + interpolation
         )
         assert data_deharm[field].equals(pre_data)
@@ -204,10 +204,10 @@ def test_gridInterpolation(data, method):
 
     # we are just testing if the interpolation gets passed to the series without causing an error:
 
-    harm_interpolate2Grid(data, field, flagger, freq, method=method, downcast_interpolation=True)
+    interpolate(data, field, flagger, freq, method=method, downcast_interpolation=True)
     if method == "polynomial":
-        harm_interpolate2Grid(data, field, flagger, freq, order=2, method=method, downcast_interpolation=True)
-        harm_interpolate2Grid(data, field, flagger, freq, order=10, method=method, downcast_interpolation=True)
+        interpolate(data, field, flagger, freq, order=2, method=method, downcast_interpolation=True)
+        interpolate(data, field, flagger, freq, order=10, method=method, downcast_interpolation=True)
 
 
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
@@ -217,7 +217,7 @@ def test_wrapper(data, flagger):
     freq = "15min"
     flagger = flagger.initFlags(data)
 
-    harm_linear2Grid(data, field, flagger, freq, to_drop=None)
-    harm_aggregate2Grid(data, field, flagger, freq, value_func=np.nansum, method="nagg", to_drop=None)
-    harm_shift2Grid(data, field, flagger, freq, method="nshift", to_drop=None)
-    harm_interpolate2Grid(data, field, flagger, freq, method="spline")
+    linear(data, field, flagger, freq, to_drop=None)
+    aggregate(data, field, flagger, freq, value_func=np.nansum, method="nagg", to_drop=None)
+    shift(data, field, flagger, freq, method="nshift", to_drop=None)
+    interpolate(data, field, flagger, freq, method="spline")
diff --git a/test/funcs/test_modelling.py b/test/funcs/test_modelling.py
index f221944f1c6c2fcfd1c23acba4dd13f552b9063f..00ba8a8810b5d25f6b0ce36e29558116e845bab6 100644
--- a/test/funcs/test_modelling.py
+++ b/test/funcs/test_modelling.py
@@ -12,7 +12,9 @@ import dios
 
 from test.common import TESTFLAGGER
 
-from saqc.funcs.modelling import modelling_polyFit, modelling_rollingMean, modelling_mask
+
+from saqc.funcs.tools import mask
+from saqc.funcs.residues import calculatePolynomialResidues, calculateRollingResidues
 
 TF = TESTFLAGGER[:1]
 
@@ -25,15 +27,15 @@ def test_modelling_polyFit_forRegular(dat, flagger):
     data = data + 10 * np.sin(np.arange(0, len(data.indexes[0])))
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    result1, _ = modelling_polyFit(data, "data", flagger, 11, 2, numba=False)
-    result2, _ = modelling_polyFit(data, "data", flagger, 11, 2, numba=True)
+    result1, _ = calculatePolynomialResidues(data, "data", flagger, 11, 2, numba=False)
+    result2, _ = calculatePolynomialResidues(data, "data", flagger, 11, 2, numba=True)
     assert (result1["data"] - result2["data"]).abs().max() < 10 ** -10
-    result3, _ = modelling_polyFit(data, "data", flagger, "110min", 2, numba=False)
+    result3, _ = calculatePolynomialResidues(data, "data", flagger, "110min", 2, numba=False)
     assert result3["data"].equals(result1["data"])
-    result4, _ = modelling_polyFit(data, "data", flagger, 11, 2, numba=True, min_periods=11)
+    result4, _ = calculatePolynomialResidues(data, "data", flagger, 11, 2, numba=True, min_periods=11)
     assert (result4["data"] - result2["data"]).abs().max() < 10 ** -10
     data.iloc[13:16] = np.nan
-    result5, _ = modelling_polyFit(data, "data", flagger, 11, 2, numba=True, min_periods=9)
+    result5, _ = calculatePolynomialResidues(data, "data", flagger, 11, 2, numba=True, min_periods=9)
     assert result5["data"].iloc[10:19].isna().all()
 
 
@@ -43,8 +45,9 @@ def test_modelling_rollingMean_forRegular(dat, flagger):
     data, _ = dat(freq="10min", periods=30, initial_level=0, final_level=100, out_val=-100)
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    modelling_rollingMean(data, "data", flagger, 5, eval_flags=True, min_periods=0, center=True)
-    modelling_rollingMean(data, "data", flagger, 5, eval_flags=True, min_periods=0, center=False)
+    calculateRollingResidues(data, "data", flagger, 5,  func=np.mean, eval_flags=True, min_periods=0, center=True)
+    calculateRollingResidues(data, "data", flagger, 5,  func=np.mean, eval_flags=True, min_periods=0, center=False)
+
 
 @pytest.mark.parametrize("flagger", TF)
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_1")])
@@ -52,16 +55,16 @@ def test_modelling_mask(dat, flagger):
     data, _ = dat()
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    data_seasonal, flagger_seasonal = modelling_mask(data, "data", flagger, mode='seasonal', season_start="20:00",
-                                                     season_end="40:00", include_bounds=False)
+    data_seasonal, flagger_seasonal = mask(data, "data", flagger, mode='periodic', period_start="20:00",
+                                           period_end="40:00", include_bounds=False)
     flaggs = flagger_seasonal._flags["data"]
     assert flaggs[np.logical_and(20 <= flaggs.index.minute, 40 >= flaggs.index.minute)].isna().all()
-    data_seasonal, flagger_seasonal = modelling_mask(data, "data", flagger, mode='seasonal', season_start="15:00:00",
-                                                     season_end="02:00:00")
+    data_seasonal, flagger_seasonal = mask(data, "data", flagger, mode='periodic', period_start="15:00:00",
+                                           period_end="02:00:00")
     flaggs = flagger_seasonal._flags["data"]
     assert flaggs[np.logical_and(15 <= flaggs.index.hour, 2 >= flaggs.index.hour)].isna().all()
-    data_seasonal, flagger_seasonal = modelling_mask(data, "data", flagger, mode='seasonal', season_start="03T00:00:00",
-                                                     season_end="10T00:00:00")
+    data_seasonal, flagger_seasonal = mask(data, "data", flagger, mode='periodic', period_start="03T00:00:00",
+                                           period_end="10T00:00:00")
     flaggs = flagger_seasonal._flags["data"]
     assert flaggs[np.logical_and(3 <= flaggs.index.hour, 10 >= flaggs.index.hour)].isna().all()
 
@@ -69,6 +72,6 @@ def test_modelling_mask(dat, flagger):
     mask_ser[::5] = True
     data["mask_ser"] = mask_ser
     flagger = flagger.initFlags(data)
-    data_masked, flagger_masked = modelling_mask(data, "data", flagger, mode='mask_var', mask_var="mask_ser")
+    data_masked, flagger_masked = mask(data, "data", flagger, mode='mask_var', mask_var="mask_ser")
     flaggs = flagger_masked._flags["data"]
     assert flaggs[data_masked['mask_ser']].isna().all()
\ No newline at end of file
diff --git a/test/funcs/test_pattern_rec.py b/test/funcs/test_pattern_rec.py
index 66ebcbfd1fdf13f5cb30cb5bd34a5a457a31dc3d..0331c2fa8fa6a8e1cbb2fbefb8ecdebada4a8c67 100644
--- a/test/funcs/test_pattern_rec.py
+++ b/test/funcs/test_pattern_rec.py
@@ -4,7 +4,7 @@
 import pytest
 from dios import dios
 
-from saqc.funcs.pattern_rec import *
+from saqc.funcs.pattern import *
 from test.common import initData, TESTFLAGGER
 
 
@@ -28,7 +28,7 @@ def test_flagPattern_wavelet(flagger):
     data = dios.DictOfSeries(dict(data=data, pattern_data=pattern))
 
     flagger = flagger.initFlags(data)
-    data, flagger = flagPattern_wavelet(data, "data", flagger, ref_field="pattern_data")
+    data, flagger = flagPatternByDTW(data, "data", flagger, ref_field="pattern_data")
     assert (flagger.isFlagged("data")[1:6]).all()
     assert (flagger.isFlagged("data")[:1]).any()
     assert (flagger.isFlagged("data")[7:]).any()
@@ -44,7 +44,7 @@ def test_flagPattern_dtw(flagger):
     data = dios.DictOfSeries(dict(data=data, pattern_data=pattern))
 
     flagger = flagger.initFlags(data)
-    data, flagger = flagPattern_dtw(data, "data", flagger, ref_field="pattern_data")
+    data, flagger = flagPatternByWavelet(data, "data", flagger, ref_field="pattern_data")
     assert (flagger.isFlagged("data")[1:6]).all()
     assert (flagger.isFlagged("data")[:1]).any()
     assert (flagger.isFlagged("data")[7:]).any()
diff --git a/test/funcs/test_proc_functions.py b/test/funcs/test_proc_functions.py
index 457c56f06b0da92dbe372a71ed9b570aa351dbd1..3aa5c2c1b80acc503a59e3e7a28a65853d7e76a2 100644
--- a/test/funcs/test_proc_functions.py
+++ b/test/funcs/test_proc_functions.py
@@ -9,14 +9,12 @@ import numpy as np
 import pandas as pd
 import dios
 
-from saqc.funcs.proc_functions import (
-    proc_interpolateMissing,
-    proc_resample,
-    proc_transform,
-    proc_rollingInterpolateMissing,
-    proc_interpolateGrid,
-    proc_offsetCorrecture
+from saqc.funcs.transformation import (
+    transform
 )
+from saqc.funcs.drift import correctOffset
+from saqc.funcs.interpolation import interpolateByRolling, interpolateInvalid, interpolateIndex
+from saqc.funcs.resampling import resample
 from saqc.lib.ts_operators import linearInterpolation, polynomialInterpolation
 
 from test.common import TESTFLAGGER
@@ -28,13 +26,13 @@ def test_rollingInterpolateMissing(course_5, flagger):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    dataInt, *_ = proc_rollingInterpolateMissing(
+    dataInt, *_ = interpolateByRolling(
         data, field, flagger, 3, func=np.median, center=True, min_periods=0, interpol_flag="UNFLAGGED"
     )
     # import pdb
     # pdb.set_trace()
     assert dataInt[field][characteristics["missing"]].notna().all()
-    dataInt, *_ = proc_rollingInterpolateMissing(
+    dataInt, *_ = interpolateByRolling(
         data, field, flagger, 3, func=np.nanmean, center=False, min_periods=3, interpol_flag="UNFLAGGED"
     )
     assert dataInt[field][characteristics["missing"]].isna().all()
@@ -46,14 +44,14 @@ def test_interpolateMissing(course_5, flagger):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    dataLin, *_ = proc_interpolateMissing(data, field, flagger, method="linear")
-    dataPoly, *_ = proc_interpolateMissing(data, field, flagger, method="polynomial")
+    dataLin, *_ = interpolateInvalid(data, field, flagger, method="linear")
+    dataPoly, *_ = interpolateInvalid(data, field, flagger, method="polynomial")
     assert dataLin[field][characteristics["missing"]].notna().all()
     assert dataPoly[field][characteristics["missing"]].notna().all()
     data, characteristics = course_5(periods=10, nan_slice=[5, 6, 7])
-    dataLin1, *_ = proc_interpolateMissing(data, field, flagger, method="linear", inter_limit=2)
-    dataLin2, *_ = proc_interpolateMissing(data, field, flagger, method="linear", inter_limit=3)
-    dataLin3, *_ = proc_interpolateMissing(data, field, flagger, method="linear", inter_limit=4)
+    dataLin1, *_ = interpolateInvalid(data, field, flagger, method="linear", inter_limit=2)
+    dataLin2, *_ = interpolateInvalid(data, field, flagger, method="linear", inter_limit=3)
+    dataLin3, *_ = interpolateInvalid(data, field, flagger, method="linear", inter_limit=4)
     assert dataLin1[field][characteristics["missing"]].isna().all()
     assert dataLin2[field][characteristics["missing"]].isna().all()
     assert dataLin3[field][characteristics["missing"]].notna().all()
@@ -65,11 +63,11 @@ def test_transform(course_5, flagger):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    data1, *_ = proc_transform(data, field, flagger, func=linearInterpolation)
+    data1, *_ = transform(data, field, flagger, func=linearInterpolation)
     assert data1[field][characteristics["missing"]].isna().all()
-    data1, *_ = proc_transform(data, field, flagger, func=lambda x: linearInterpolation(x, inter_limit=3))
+    data1, *_ = transform(data, field, flagger, func=lambda x: linearInterpolation(x, inter_limit=3))
     assert data1[field][characteristics["missing"]].notna().all()
-    data1, *_ = proc_transform(
+    data1, *_ = transform(
         data, field, flagger, func=lambda x: polynomialInterpolation(x, inter_limit=3, inter_order=3)
     )
     assert data1[field][characteristics["missing"]].notna().all()
@@ -81,7 +79,7 @@ def test_resample(course_5, flagger):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    data1, *_ = proc_resample(data, field, flagger, "10min", np.mean, max_invalid_total_d=2, max_invalid_consec_d=1)
+    data1, *_ = resample(data, field, flagger, "10min", np.mean, max_invalid_total_d=2, max_invalid_consec_d=1)
     assert ~np.isnan(data1[field].iloc[0])
     assert np.isnan(data1[field].iloc[1])
     assert np.isnan(data1[field].iloc[2])
@@ -94,7 +92,7 @@ def test_interpolateGrid(course_5, course_3, flagger):
     data['grid'] = data_grid.to_df()
     # data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    dataInt, *_ = proc_interpolateGrid(data, 'data', flagger, '1h', 'time', grid_field='grid', inter_limit=10)
+    dataInt, *_ = interpolateIndex(data, 'data', flagger, '1h', 'time', grid_field='grid', inter_limit=10)
 
 
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
@@ -104,6 +102,6 @@ def test_offsetCorrecture(flagger):
     data.iloc[70:80] = 100
     data = dios.DictOfSeries(data)
     flagger = flagger.initFlags(data)
-    data, flagger = proc_offsetCorrecture(data, 'dat', flagger, 40, 20, '3d', 1)
+    data, flagger = correctOffset(data, 'dat', flagger, 40, 20, '3d', 1)
     assert (data == 0).all()[0]
 
diff --git a/test/funcs/test_soil_moisture_tests.py b/test/funcs/test_soil_moisture_tests.py
deleted file mode 100644
index d4eb78f788c94a2999a6093c20528954e7e20394..0000000000000000000000000000000000000000
--- a/test/funcs/test_soil_moisture_tests.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import pytest
-import numpy as np
-import pandas as pd
-import dios
-
-from saqc.funcs.soil_moisture_tests import sm_flagFrost, sm_flagPrecipitation, sm_flagConstants, sm_flagRandomForest
-
-from test.common import TESTFLAGGER, initData
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_sm_flagFrost(flagger):
-    index = pd.date_range(start="2011-01-01 00:00:00", end="2011-01-01 03:00:00", freq="5min")
-
-    sm = pd.Series(data=np.linspace(0, +1, index.size), index=index)
-    st = pd.Series(data=np.linspace(1, -1, index.size), index=index)
-    data = dios.DictOfSeries([sm, st], columns=["soil_moisture", "soil_temperature"])
-
-    flagger = flagger.initFlags(data)
-    data, flagger_result = sm_flagFrost(data, "soil_moisture", flagger, "soil_temperature")
-    flag_assertion = np.arange(19, 37)
-    flag_result = flagger_result.getFlags("soil_moisture")
-    assert (flag_result[flag_assertion] == flagger.BAD).all()
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_flagSoilMoisturePrecipitationEvents(flagger):
-    index = pd.date_range(start="2011-01-01 00:00:00", end="2011-01-04 00:00:00", freq="15min")
-
-    sm = pd.Series(data=np.linspace(0, 1, index.size), index=index)
-    pr = pd.Series(data=np.linspace(1, 1, index.size), index=index)
-    data = dios.DictOfSeries([sm, pr], columns=["soil_moisture", "precipitation"])
-
-    data.loc["2011-01-03", "precipitation"] = 0
-    data.loc["2011-01-04", "precipitation"] = 0
-
-    flagger = flagger.initFlags(data)
-    data, flag_result = sm_flagPrecipitation(data, "soil_moisture", flagger, "precipitation")
-
-    flag_assertion = [288, 287]
-    flag_result = flag_result.getFlags("soil_moisture")
-    test_sum = (flag_result[flag_assertion] == flagger.BAD).sum()
-    assert test_sum == len(flag_assertion)
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_sm_flagConstantss(flagger):
-    data = initData(1, start_date="2011-01-01 00:00:00", end_date="2011-01-02 00:00:00", freq="5min")
-    data.iloc[5:25] = 0
-    data.iloc[100:120] = data.apply(max)[0]
-    field = data.columns[0]
-    flagger = flagger.initFlags(data)
-    data, flagger = sm_flagConstants(data, field, flagger, window="1h", precipitation_window="1h")
-
-    assert ~(flagger.isFlagged()[5:25]).all()[0]
-    assert (flagger.isFlagged()[100:120]).all()[0]
-
-
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_sm_flagRandomForest(flagger):
-    ### CREATE MWE DATA
-    data = pd.read_feather("ressources/machine_learning/data/soil_moisture_mwe.feather")
-    data = data.set_index(pd.DatetimeIndex(data.Time))
-    flags_raw = data[["SM1_Flag", "SM2_Flag", "SM3_Flag"]]
-    flags_raw.columns = ["SM1", "SM2", "SM3"]
-
-    # masks for flag preparation
-    mask_bad = flags_raw.isin(["Auto:BattV", "Auto:Range", "Auto:Spike"])
-    mask_unflagged = flags_raw.isin(["Manual"])
-    mask_good = flags_raw.isin(["OK"])
-
-    field = "SM2"
-
-    # prepare flagsframe
-    data = dios.to_dios(data)
-    flagger = flagger.initFlags(data)
-    flagger = flagger.setFlags(field, loc=mask_bad[field])
-    flagger = flagger.setFlags(field, loc=mask_unflagged[field], flag=flagger.UNFLAGGED)
-    flagger = flagger.setFlags(field, loc=mask_good[field], flag=flagger.GOOD)
-
-    references = ["Temp2", "BattV"]
-    window_values = 20
-    window_flags = 20
-    groupvar = 0.2
-    modelname = "testmodel"
-    path = f"ressources/machine_learning/models/{modelname}_{groupvar}.pkl"
-
-    outdat, outflagger = sm_flagRandomForest(data, field, flagger, references, window_values, window_flags, path)
-
-    # compare
-    # assert resulting no of bad flags
-    badflags = outflagger.isFlagged(field)
-    assert badflags.sum() == 10447
-
-    # Have the right values been flagged?
-    checkdates = pd.DatetimeIndex(
-        [
-            "2014-08-05 23:03:59",
-            "2014-08-06 01:35:44",
-            "2014-08-06 01:50:54",
-            "2014-08-06 02:06:05",
-            "2014-08-06 02:21:15",
-            "2014-08-06 04:22:38",
-            "2014-08-06 04:37:49",
-            "2014-08-06 04:52:59",
-        ]
-    )
-    assert badflags[checkdates].all()
diff --git a/test/funcs/test_spikes_detection.py b/test/funcs/test_spikes_detection.py
index cfdeb79b0a6a5f612f3b2c5a88cdd1e8fdaa61c6..da86834798448509b9305913a8d13fcff89753fe 100644
--- a/test/funcs/test_spikes_detection.py
+++ b/test/funcs/test_spikes_detection.py
@@ -7,14 +7,12 @@ import numpy as np
 import pandas as pd
 import dios
 
-from saqc.funcs.spikes_detection import (
-    spikes_flagSpektrumBased,
-    spikes_flagMad,
-    spikes_flagSlidingZscore,
-    spikes_flagBasic,
-    spikes_flagRaise,
-    spikes_flagMultivarScores,
-    spikes_flagGrubbs,
+from saqc.funcs.outliers import (
+    flagMAD,
+    flagOffset,
+    flagRaise,
+    flagMVScores,
+    flagByGrubbs,
 )
 
 from test.common import TESTFLAGGER
@@ -30,53 +28,23 @@ def spiky_data():
     return dios.DictOfSeries(s), flag_assertion
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_flagSpikesSpektrumBased(spiky_data, flagger):
-    data = spiky_data[0]
-    field, *_ = data.columns
-    flagger = flagger.initFlags(data)
-    data, flagger_result = spikes_flagSpektrumBased(data, field, flagger)
-    flag_result = flagger_result.getFlags(field)
-    test_sum = (flag_result[spiky_data[1]] == flagger.BAD).sum()
-    assert test_sum == len(spiky_data[1])
-
-
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_flagMad(spiky_data, flagger):
     data = spiky_data[0]
     field, *_ = data.columns
     flagger = flagger.initFlags(data)
-    data, flagger_result = spikes_flagMad(data, field, flagger, "1H")
+    data, flagger_result = flagMAD(data, field, flagger, "1H")
     flag_result = flagger_result.getFlags(field)
     test_sum = (flag_result[spiky_data[1]] == flagger.BAD).sum()
     assert test_sum == len(spiky_data[1])
 
 
-@pytest.mark.parametrize("flagger", TESTFLAGGER)
-@pytest.mark.parametrize("method", ["modZ", "zscore"])
-def test_slidingOutlier(spiky_data, flagger, method):
-    # test for numeric input
-    data = spiky_data[0]
-    field, *_ = data.columns
-    flagger = flagger.initFlags(data)
-
-    tests = [
-        spikes_flagSlidingZscore(data, field, flagger, window=300, offset=50, method=method),
-        spikes_flagSlidingZscore(data, field, flagger, window="1500min", offset="250min", method=method),
-    ]
-
-    for _, flagger_result in tests:
-        flag_result = flagger_result.getFlags(field)
-        test_sum = (flag_result.iloc[spiky_data[1]] == flagger.BAD).sum()
-        assert int(test_sum) == len(spiky_data[1])
-
-
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_flagSpikesBasic(spiky_data, flagger):
     data = spiky_data[0]
     field, *_ = data.columns
     flagger = flagger.initFlags(data)
-    data, flagger_result = spikes_flagBasic(data, field, flagger, thresh=60, tolerance=10, window="20min")
+    data, flagger_result = flagOffset(data, field, flagger, thresh=60, tolerance=10, window="20min")
     flag_result = flagger_result.getFlags(field)
     test_sum = (flag_result[spiky_data[1]] == flagger.BAD).sum()
     assert test_sum == len(spiky_data[1])
@@ -97,7 +65,7 @@ def test_flagSpikesLimitRaise(dat, flagger):
     data, characteristics = dat()
     field, *_ = data.columns
     flagger = flagger.initFlags(data)
-    _, flagger_result = spikes_flagRaise(
+    _, flagger_result = flagRaise(
         data, field, flagger, thresh=2, intended_freq="10min", raise_window="20min", numba_boost=False
     )
     assert flagger_result.isFlagged(field)[characteristics["raise"]].all()
@@ -118,8 +86,8 @@ def test_flagMultivarScores(dat, flagger):
     s2 = pd.Series(data=s2.values, index=s1.index)
     data = dios.DictOfSeries([s1, s2], columns=["data1", "data2"])
     flagger = flagger.initFlags(data)
-    _, flagger_result = spikes_flagMultivarScores(
-        data, field, flagger, fields=fields, binning=50, trafo=np.log, iter_start=0.95, n_neighbors=10
+    _, flagger_result = flagMVScores(
+        data, field, flagger, fields=fields, trafo=np.log, iter_start=0.95, n_neighbors=10
     )
     for field in fields:
         isflagged = flagger_result.isFlagged(field)
@@ -135,6 +103,6 @@ def test_grubbs(dat, flagger):
         freq="10min", periods=45, initial_level=0, final_level=0, crowd_size=1, crowd_spacing=3, out_val=-10
     )
     flagger = flagger.initFlags(data)
-    data, result_flagger = spikes_flagGrubbs(data, "data", flagger, winsz=20, min_periods=15)
+    data, result_flagger = flagByGrubbs(data, "data", flagger, winsz=20, min_periods=15)
     assert result_flagger.isFlagged("data")[char_dict["drop"]].all()