diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..01bc9ebc57390a5b46ccd2c3f9dd1780e5cb6160
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,71 @@
+# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This is a basic workflow to help you get started with Actions
+
+name: CI
+
+# Controls when the workflow will run
+on:
+  push:
+    branches:
+      - master
+      - develop
+    tags: 
+      - v**
+      
+  pull_request:
+
+  # Allows running this workflow manually from the Actions tab
+  workflow_dispatch:
+
+
+jobs:
+  build:
+    name: build (py${{ matrix.python-version }}, ${{ matrix.os }})
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: ["windows-latest", "ubuntu-latest", "macos-latest"]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]
+    defaults:
+      run:
+        # a login shell is required for conda activation; bash is also available on the Windows runners
+        shell: bash -l {0}
+    
+    steps:
+      
+      # checkout the repository under $GITHUB_WORKSPACE
+      - uses: actions/checkout@v3
+      - uses: conda-incubator/setup-miniconda@v2
+        with:
+          auto-update-conda: true
+          python-version: ${{ matrix.python-version }}
+          activate-environment: venv
+      
+      - name: show conda info
+        run: conda info
+      
+      - name: install requirements
+        run: | 
+          pip install -r requirements.txt
+          pip install -r tests/requirements.txt
+      
+      - name: show installed packages 
+        run: conda list
+      
+      - name: run SaQC test suite
+        run: |
+          pytest tests dios/test -Werror 
+          python -m saqc --config docs/resources/data/config.csv --data docs/resources/data/data.csv --outfile /tmp/test.csv
+      
+      - name: run doc tests    
+        run: |
+          cd docs
+          pip install -r requirements.txt
+          make doc
+          make test
+          
+          
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1996835fa938594ba43ef996b612dbe4b13f4e17..dfeafeac8f451d2c1bd484f98e363ac67406b1c7 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -70,7 +70,9 @@ coverage:
   artifacts:
     when: always
     reports:
-      cobertura: coverage.xml
+      coverage_report:
+        coverage_format: cobertura
+        path: coverage.xml
 
 
 # test saqc with python 3.7
diff --git a/.gitlab/issue_templates/bug_report.md b/.gitlab/issue_templates/bug_report.md
new file mode 100644
index 0000000000000000000000000000000000000000..e0170fc1346167420446534488e2ff24c98fc3a7
--- /dev/null
+++ b/.gitlab/issue_templates/bug_report.md
@@ -0,0 +1,43 @@
+<!--
+SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+## Summary
+
+(Summarize the bug encountered concisely)
+
+## Reproducible Example 
+
+```python
+import numpy as np
+import pandas as pd
+import saqc
+...
+
+```
+
+## What is the current bug behavior?
+
+(What actually happens)
+
+## What is the expected correct behavior?
+
+(What you should see instead)
+
+## Stacktrace 
+
+<details><summary>Click to expand</summary>
+
+```
+Paste any relevant stacktrace inside the ```
+```
+
+</details>
+
+## Possible fixes
+
+(If you can, link to the line of code that might be responsible for the problem)
+
+/label ~BUG ~needs-investigation
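For orientation, a filled-in "Reproducible Example" section of this template might look like the following minimal sketch; the data, the `flagRange` call and the flag inspection at the end are illustrative stand-ins, not taken from a real report.

```python
import numpy as np
import pandas as pd
import saqc

# tiny, regularly sampled dummy series (illustrative data only)
data = pd.DataFrame(
    {"x": np.arange(5, dtype=float)},
    index=pd.date_range("2021-01-01", periods=5, freq="10min"),
)

qc = saqc.SaQC(data)
qc = qc.flagRange("x", max=3)  # example check: flag values above 3
print(qc.flags["x"])           # show the resulting flags column
```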
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f22ec980c10ee3c75e49f5ceb7ba3c3ab0af0784..9600623f84f60954d030e05444d45c543c3f993b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,19 +6,38 @@ SPDX-License-Identifier: GPL-3.0-or-later
 
 # Changelog
 
-This changelog starts with version 2.0.0. Basically all parts of the system, including the format of this changelog, have been reworked between the releases 1.4 and 2.0. Preceding the major breaking release 2.0, the maintenance of this file was rather sloppy, so we won't provide a detailed change history for early versions.
-
-
 ## Unreleased
-[List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.1.0...develop)
+[List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.2.0...develop)
 ### Added
 ### Changed
 ### Removed
 ### Fixed
 
+## [2.2.0](https://git.ufz.de/rdm-software/saqc/-/tags/v2.2.0) - 2022-10-28
+[List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.1.0...v2.2.0)
+### Added
+- translation of `dfilter`
+- new generic function `clip`
+- parameter `min_periods` to `SaQC.flagConstants`
+- function `fitButterworth`
+- tracking interpolation routines in `History`
+### Changed
+- test function interface changed to `func(saqc: SaQC, field: str | Sequence[str], *args, **kwargs)`
+- lib function `butterFilter` returns `NaN` for too-short series
+- `dfilter` default value precedence order
+### Removed
+- `closed` keyword in `flagJumps`
+### Fixed
+- fixed undesired behavior in `flagIsolated` for non-harmonized data
+- fixed failing translation of `dfilter`-defaults
+- fixed unbound recursion error when interpolating with order-independent methods in `interpolateIndex`
+- fixed non-working `min_periods` condition if `window=None` in `assignZScore`
+- fixed Exception occurring when fitting polynomials via `polyfit` to harmonized data containing all-NaN gaps wider than the polynomial fitting window size
+- fixed bug in function parameter checking
+- fixed one-off bug in `flagJumps`
+
 ## [2.1.0](https://git.ufz.de/rdm-software/saqc/-/tags/v2.0.1) - 2022-06-14
 [List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.0.1...v2.1.0)
-
 ### Added
 - documentation of global keywords
 - generic documentation module `docurator.py`
@@ -27,7 +46,6 @@ This changelog starts with version 2.0.0. Basically all parts of the system, inc
 - new function `progagateFlags`
 - include function typehints in parameter documentation
 - `label` parameter to the generic function `isflagged`
-
 ### Changed
 - `flagOffsets` parameters `thresh` and `thresh_relative` are optional
 - corrected false notion of the term *residual* (replace all occurences of *residue* by *residual*)
@@ -39,10 +57,8 @@ This changelog starts with version 2.0.0. Basically all parts of the system, inc
 - renamed `History.max` to `History.squeeze`
 - renamed parameter `freq` of function flagByStray to `window`
 - `DmpScheme`: set `DFILTER_DEFAULT` to 1 in order to not mask the flag 'OK'
-
 ### Removed
 - data accessors `SaQC.result`, `SaQC.data_raw`, `SaQC.flags_raw`
-
 ### Fixed
 - `flagOffset` failure on falsy `thresh`
 - `flagCrossStatistics` failure on unaligned input variables
diff --git a/dios/dios/base.py b/dios/dios/base.py
index 674c939a1d79a24790f1d9a00ef184b421ad9c46..29c0601bc044c5f3e4ff4a66670ff9973da9a994 100644
--- a/dios/dios/base.py
+++ b/dios/dios/base.py
@@ -11,7 +11,7 @@ import operator as op
 from abc import abstractmethod
 from copy import copy as shallowcopy
 from copy import deepcopy
-from typing import Any, Hashable, Mapping, Sequence, overload
+from typing import Any, Hashable, Mapping, Sequence, TypeVar, overload
 
 import pandas as pd
 
@@ -25,10 +25,13 @@ __email__ = "bert.palm@ufz.de"
 __copyright__ = "Copyright 2018, Helmholtz-Zentrum für Umweltforschung GmbH - UFZ"
 
 
+D = TypeVar("D", bound="_DiosBase")
+
+
 class _DiosBase:
     @property
     @abstractmethod
-    def _constructor(self) -> type[_DiosBase]:
+    def _constructor(self: D) -> type[D]:
         raise NotImplementedError
 
     def _finalize(self, other: _DiosBase):
@@ -187,19 +190,13 @@ class _DiosBase:
         self._data.at[col] = val.copy(deep=True)
 
     @overload
-    def __getitem__(self, key: str | int) -> pd.Series:
-        ...
-
-    @overload
-    def __getitem__(self, key: slice) -> pd.Series:
-        ...
-
-    @overload
-    def __getitem__(self, key: "_DiosBase" | pd.DataFrame) -> "_DiosBase":
+    def __getitem__(self, key: str | int | slice) -> pd.Series:
         ...
 
     @overload
-    def __getitem__(self, key: Sequence[str | int]) -> "_DiosBase":
+    def __getitem__(
+        self: D, key: "_DiosBase" | pd.DataFrame | Sequence[str | int]
+    ) -> D:
         ...
 
     def __getitem__(self, key):
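A note on the `TypeVar` introduced above: binding `D` to `_DiosBase` and annotating `self: D` lets the overloads return the concrete subclass type instead of the base class. A minimal, self-contained sketch of the same pattern (the class and the `empty_like` helper are illustrative, not part of `dios`):

```python
from __future__ import annotations

from typing import TypeVar

D = TypeVar("D", bound="Base")


class Base:
    @property
    def _constructor(self: D) -> type[D]:
        # `D` resolves to the class of the instance the property is read from
        return type(self)

    def empty_like(self: D) -> D:
        # hypothetical helper: creates a new instance of the *subclass*
        return self._constructor()


class Child(Base):
    pass


obj: Child = Child().empty_like()  # type checkers infer Child, not Base
```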
diff --git a/dios/requirements.txt b/dios/requirements.txt
index 198c6b1f4e1ec9a956bea2961503dbd3610605ee..981ad1b6463cf6f897cc0efe1c9c25d1e344c9ce 100644
--- a/dios/requirements.txt
+++ b/dios/requirements.txt
@@ -5,5 +5,5 @@
 numpy==1.21.2
 pandas==1.3.5
 python-dateutil==2.8.2
-pytz==2021.3
+pytz==2022.2.1
 six==1.16.0
diff --git a/dios/test/test__ops__.py b/dios/test/test__ops__.py
index 144ae93427ba9d2a9d83510396903ffff49ff5ea..dede30afba1dcfca3b5f39edebe28f12029f72ef 100644
--- a/dios/test/test__ops__.py
+++ b/dios/test/test__ops__.py
@@ -27,8 +27,12 @@ def test__eq__(left, right):
             assert res == exp
 
 
-@pytest.mark.filterwarnings("ignore: invalid value encountered in long_scalars")
-@pytest.mark.filterwarnings("ignore: divide by zero encountered in long_scalars")
+@pytest.mark.filterwarnings(
+    "ignore: invalid value encountered in .*_scalars", category=RuntimeWarning
+)
+@pytest.mark.filterwarnings(
+    "ignore: divide by zero encountered in .*_scalars", category=RuntimeWarning
+)
 @pytest.mark.parametrize("left", diosFromMatr(DATA_ALIGNED))
 @pytest.mark.parametrize("right", diosFromMatr(DATA_ALIGNED))
 @pytest.mark.parametrize("op", OP2)
@@ -47,8 +51,12 @@ def test__op2__aligningops(left, right, op):
             assert res == exp
 
 
-@pytest.mark.filterwarnings("ignore: invalid value encountered in long_scalars")
-@pytest.mark.filterwarnings("ignore: divide by zero encountered in long_scalars")
+@pytest.mark.filterwarnings(
+    "ignore: invalid value encountered in .*_scalars", category=RuntimeWarning
+)
+@pytest.mark.filterwarnings(
+    "ignore: divide by zero encountered in .*_scalars", category=RuntimeWarning
+)
 @pytest.mark.parametrize("left", diosFromMatr(DATA_UNALIGNED))
 @pytest.mark.parametrize("right", diosFromMatr(DATA_UNALIGNED))
 @pytest.mark.parametrize("op", OPNOCOMP)
diff --git a/docs/Makefile b/docs/Makefile
index 6b1029bbef2b64abf9a6c4e187e8d6cc549faf09..411507f8e898ef163b1eb0ea89cf4692bb8c3a77 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -35,7 +35,7 @@ doc:
 	# generate environment table from dictionary
 	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
 
-# make test, clean up
+# run tests
 test:
 	# generate parent fake module for the functions to be documented
 	@$(SPHINXBUILD) -M doctest "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/cookbooks/DataRegularisation.rst b/docs/cookbooks/DataRegularisation.rst
index 4802066febed95b2a7917564eaf3ca036265be91..ddd3c59d55913874cecaa2c2c8799f1af28c7b15 100644
--- a/docs/cookbooks/DataRegularisation.rst
+++ b/docs/cookbooks/DataRegularisation.rst
@@ -2,7 +2,7 @@
 ..
 .. SPDX-License-Identifier: GPL-3.0-or-later
 
-Data Regularisation
+Data Regularization
 ===================
 
 The tutorial aims to introduce the usage of ``SaQC`` methods, in order to obtain regularly sampled data derivatives
@@ -12,12 +12,12 @@ spacing in between subsequent data points.
 In the following steps, the tutorial guides through the usage of the *SaQC* :doc:`resampling <../funcs/generic>`
 library.
 
-#. Initially, we introduce and motivate regularisation techniques and we do import the tutorial data.
+#. Initially, we introduce and motivate regularization techniques and import the tutorial data.
 
-   * :ref:`Why Regularisation <cookbooks/DataRegularisation:Why Regularisation>`
+   * :ref:`Why Regularization <cookbooks/DataRegularisation:Why Regularization?>`
    * :ref:`Tutorial Data <cookbooks/DataRegularisation:Tutorial Data>`
 
-#. We will get an overview over the main :ref:`Regularisation <cookbooks/DataRegularisation:regularisations>` methods, starting with the shift.
+#. We will get an overview of the main :ref:`Regularization <cookbooks/DataRegularisation:regularization>` methods, starting with the shift.
 
    * :ref:`Shift <cookbooks/DataRegularisation:shift>`
    * :ref:`Target Parameter <cookbooks/DataRegularisation:target parameter>`
@@ -27,7 +27,7 @@ library.
      * :ref:`Valid Data <cookbooks/DataRegularisation:Valid Data>`
 
 #. We introduce the notion of *valid* data and see how sparse intervals and those with multiple values interact with
-   regularisation.
+   regularization.
 
 
    * :ref:`Data Loss and Empty Intervals <cookbooks/DataRegularisation:data loss and empty intervals>`
@@ -50,12 +50,12 @@ library.
 
    * :ref:`Representing Data Sparsity <cookbooks/DataRegularisation:interpolation and data sparsity>`
 
-#. We see how regularisation interacts with Flags.
+#. We see how regularization interacts with Flags.
 
-   * :ref:`Flags and Regularisation <cookbooks/DataRegularisation:flags and regularisation>`
+   * :ref:`Flags and Regularization <cookbooks/DataRegularisation:flags and regularization>`
 
-Why Regularisation
-------------------
+Why Regularization?
+-------------------
 
 Often, measurement data does not come in regularly sampled time series. The reasons, why one usually would
 like to have time series data, that exhibits a constant temporal gap size
@@ -112,17 +112,17 @@ rate of *15* minutes.
 Finding out about the proper sampling a series should be regularized to, is a subject on its own and wont be covered
 here. Usually, the intended sampling rate of sensor data is known from the specification of the sensor.
 
-If that is not the case, and if there seem to be more than one candidates for a rate regularisation, a rough rule of
-thumb, aiming at minimisation of data loss and data manipulation, may be,
+If that is not the case, and there seems to be more than one candidate for a rate regularization, a rough rule of
+thumb, aiming at minimization of data loss and data manipulation, may be
 to go for the smallest rate seemingly present in the data.
 
-Regularisations
----------------
+Regularization
+--------------
 
 So lets transform the measurements timestamps to have a regular *10* minutes frequency. In order to do so,
 we have to decide what to do with each time stamps associated data, when we alter the timestamps value.
 
-Basically, there are three types of :doc:`regularisations <../funcs/resampling>` methods:
+Basically, there are three types of :doc:`regularization <../funcs/resampling>` methods:
 
 
 #. We could keep the values as they are, and thus,
@@ -145,7 +145,7 @@ Lets apply a simple shift via the :py:meth:`~saqc.SaQC.shift` method.
 Target parameter
 ^^^^^^^^^^^^^^^^
 
-We selected a new ``target`` field, to store the shifted data to a new field, so that our original data wouldnt be
+We selected a new ``target`` field to store the shifted data in, so that our original data wouldn't be
 overridden.
 
 Freq parameter
@@ -153,7 +153,7 @@ Freq parameter
 
 We passed the ``freq`` keyword of the intended sampling frequency in terms of a
 `date alias <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_ string. All of
-the :doc:`regularisations <../funcs/resampling>` methods have such a frequency keyword,
+the :doc:`regularization <../funcs/resampling>` methods have such a frequency keyword,
 and it just determines the sampling rate, the resulting regular timeseries will have.
 
 Shifting Method
@@ -164,7 +164,7 @@ which applies a *backwards* shift, so data points get shifted *backwards*\ , unt
 that is a multiple of *10* minutes. (See :py:meth:`~saqc.SaQC.shift` documentation for more
 details on the keywords.)
 
-Lets see, how the data is now sampled. Therefore, we use the ``data`` Atribute from the
+Let's see how the data is now sampled. To do so, we use the ``data`` attribute of the
 :py:class:`SaQC <saqc.core.core.SaQC>` object. This will prevent the methods output from
 being merged to a ``pandas.DataFrame`` object, and the changes from the resampling will be easier
 comprehensible from one look.
@@ -192,17 +192,17 @@ Shifted data
    <BLANKLINE>
 
 
-We see, the first and last *10* datapoints of both, the original data time series and the shifted one.
+We see the first and last *10* data points of both the original data time series and the shifted one.
 
-Obveously, the shifted data series now exhibits a regular sampling rate of *10* minutes, with the index
-ranging from the latest timestamp, that is a multiple of *10* minutes and preceeds the initial timestamp
-of the original data, up to the first *10* minutes multiple, that succeeds the last original datas timestamp.
-This is default behavior to all the :doc:`regularisations <../funcs/resampling>` provided by ``saqc``.
+Obviously, the shifted data series now exhibits a regular sampling rate of *10* minutes, with the index
+ranging from the latest timestamp that is a multiple of *10* minutes and precedes the initial timestamp
+of the original data, up to the first *10* minutes multiple that succeeds the last original data timestamp.
+This is the default behavior of all the :doc:`regularization <../funcs/resampling>` methods provided by ``saqc``.
 
 Data Loss and Empty Intervals
 -----------------------------
 
-The number of datapoints  (displayed at the bottom of the table columns) has changed through the
+The number of data points (displayed at the bottom of the table columns) has changed through the
 transformation as well. That change stems from 2 sources mainly:
 
 Empty Intervals
@@ -215,20 +215,20 @@ that in the interval that is represented by that date time index, there was data
 Valid Data
 ^^^^^^^^^^
 
-Data points are referred to, as *valid*\ , in context of a regularisation, if:
+Data points are referred to as *valid*, in the context of a regularization, if:
 
 
 #.
    the data points value is not ``NaN``
 
 #.
-   the *flag* of that datapoint has a value lower than the value passed to the methods
+   the *flag* of that data point has a value lower than the value passed to the method's
    ``to_mask`` keyword - since this keyword defaults to the highest flag level available,
-   defaultly, all data flagged :py:const:`~saqc.constants.BAD`, is considered invalid by that method.
+   by default, all data flagged :py:const:`~saqc.constants.BAD` is considered invalid by that method.
 
 Note, that, from point *2* above, it follows, that flagging data values
-before regularisation, will effectively exclude them from the regularistaion process. See chapter
-:ref:`flagging and resampling <cookbooks/DataRegularisation:flags and regularisation>` for an example of this effect and how it can help
+before regularization will effectively exclude them from the regularization process. See chapter
+:ref:`flagging and regularization <cookbooks/DataRegularisation:flags and regularization>` for an example of this effect and how it can help
 control :ref:`data reduction <cookbooks/DataRegularisation:data reduction>`.
 
 data reduction
@@ -238,7 +238,7 @@ If there are multiple values present within an interval with size according to t
 ``freq``\ , this values get reduced to one single value, that will get assigned to the timestamp associated with the
 interval.
 
-This reduction depends on the selected :doc:`regularisation <../funcs/resampling>` method.
+This reduction depends on the selected :doc:`regularization <../funcs/resampling>` method.
 
 For example, :ref:`above <cookbooks/DataRegularisation:shift>`\ , we applied a backwards :py:meth:`~saqc.SaQC.shift` with a *10* minutes frequency.
 As a result, the first value, encountered after any multiple of *10* minutes, gets shifted backwards to be aligned with
@@ -267,7 +267,7 @@ Notice, how, for example, the data point for ``2021-01-01 07:49:41`` gets shifte
 ``2021-01-01 07:40:00`` - although, shifting it forward to ``07:40:00`` would be less a manipulation, since this timestamp
 appears to be closer to the original one.
 
-To shift to any frequncy aligned timestamp the value that is closest to that timestamp, we
+To shift the value closest to any frequency-aligned timestamp onto that timestamp, we
 can perform a *nearest shift* instead of a simple *back shift*\ , by using the shift method ``"nshift"``\ :
 
    >>> qc = qc.shift('SoilMoisture', target='SoilMoisture_nshift', freq='10min', method='nshift')
@@ -284,7 +284,7 @@ can perform a *nearest shift* instead of a simple *back shift*\ , by using the s
 
 Now, any timestamp got assigned, the value that is nearest to it, *if* there is one valid data value available in the
 interval surrounding that timestamp with a range of half the frequency. In our example, this would mean, the regular
-timestamp would get assigned the nearest value of all the values, that preceed or succeed it by less than *5* minutes.
+timestamp would get assigned the nearest of all the values that precede or succeed it by less than *5* minutes.
 
 Maybe check out, what happens with the chunk of the final 2 hours of our shifted *Soil Moisture* dataset, to get an idea.
 
@@ -315,7 +315,7 @@ Aggregation
 If we want to comprise several values by aggregation and assign the result to the new regular timestamp, instead of
 selecting a single one, we can do this, with the :py:meth:`~saqc.SaQC.resample` method.
 Lets resample the *SoilMoisture* data to have a *20* minutes sample rate by aggregating every *20* minutes intervals
-content with the arithmetic mean (which is implemented by numpies ``numpy.mean`` function for example).
+content with the arithmetic mean (as provided, for example, by the ``numpy.mean`` function).
 
    >>> import numpy as np
    >>> qc = qc.resample('SoilMoisture', target='SoilMoisture_mean', freq='20min', method='bagg', func=np.mean)
@@ -333,7 +333,7 @@ content with the arithmetic mean (which is implemented by numpies ``numpy.mean``
    2021-01-01 01:17:41    23.343100 | 2021-01-01 02:20:00         23.343100 |
    2021-01-01 01:27:29    23.298800 | 2021-01-01 02:40:00         23.343100 |
    2021-01-01 01:37:17    23.343100 | 2021-01-01 03:00:00         23.343100 |
-                             ... | ...                               ... |
+                                ... | ...                               ... |
    2021-03-20 05:07:02   137.271500 | 2021-03-20 05:40:00        154.116806 |
    2021-03-20 05:21:35   138.194107 | 2021-03-20 06:00:00        150.567505 |
    2021-03-20 05:41:59   154.116806 | 2021-03-20 06:20:00               NaN |
@@ -350,32 +350,32 @@ Aggregation functions
 ^^^^^^^^^^^^^^^^^^^^^
 
 You can pass arbitrary function objects to the ``func`` parameter, to be applied to calculate every intervals result,
-as long as this function returns a scalar *float* value upon an array-like input. (So ``np.median`` would be propper
+as long as this function returns a scalar *float* value for an array-like input. (So ``np.median`` would be proper
 for calculating the median, ``sum``\ , for assigning the value sum, and so on.)
 
 Aggregation method
 ^^^^^^^^^^^^^^^^^^
 
-As it is with the :ref:`shift <cookbooks/DataRegularisation:Shift>` functionality, a ``method`` keyword controlls, weather the
+As it is with the :ref:`shift <cookbooks/DataRegularisation:Shift>` functionality, a ``method`` keyword controls whether the
 aggregation result for the interval in between 2 regular timestamps gets assigned to the left (=\ ``bagg``\ ) or to the
 right (\ ``fagg``\ ) boundary timestamp.
 
 
-* Also, analogous to to the shift functionality, intervals of size ``freq``\ , that do
-  not contain any :ref:`valid <cookbooks/DataRegularisation:valid data>` data, that could be aggregated, get ``ǹp.nan`` assigned.
+* Also, analogous to the shift functionality, intervals of size ``freq`` that do
+  not contain any :ref:`valid <cookbooks/DataRegularisation:valid data>` data that could be aggregated get ``np.nan`` assigned.
 
 Interpolation
 -------------
 
 Another common way of obtaining regular timestamps, is, the interpolation of data at regular timestamps.
 
-In the pool of py:mod:`regularisation <Functions.saqc.resampling>` methods, is available the
+The pool of :py:mod:`regularization <Functions.saqc.resampling>` methods includes the
 :py:meth:`~saqc.SaQC.interpolate` method.
 
 Lets apply a linear interpolation onto the dataset. To access
 linear interpolation, we pass the ``method`` parameter the string ``"time"``. This
 applies an interpolation, that is sensitive to the difference in temporal gaps
-(as opposed by ``"linear"``\ , wich expects all the gaps to be equal). Get an overview
+(as opposed to ``"linear"``\ , which expects all the gaps to be equal). Get an overview
 of the possible interpolation methods in the :py:meth:`~saqc.SaQC.interpolate>`
 documentation. Lets check the results:
 
@@ -394,7 +394,7 @@ documentation. Lets check the results:
    2021-01-01 01:10:00    23.377891 | 2021-01-01 01:17:41             23.343100 |
    2021-01-01 01:20:00    23.332627 | 2021-01-01 01:27:29             23.298800 |
    2021-01-01 01:30:00    23.310176 | 2021-01-01 01:37:17             23.343100 |
-                             ... | ...                                   ... |
+                                ... | ...                                   ... |
    2021-03-20 07:20:00   154.723105 | 2021-03-20 05:07:02            137.271500 |
    2021-03-20 07:30:00          NaN | 2021-03-20 05:21:35            138.194107 |
    2021-03-20 07:40:00          NaN | 2021-03-20 05:41:59            154.116806 |
@@ -410,15 +410,15 @@ documentation. Lets check the results:
 Interpolation and Data Sparsity
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The regularisation by interpolation is strict in the sense, that regular timestamps *only* get
-interpolated, if they have at least one :ref:`valid <cookbooks/DataRegularisation:valid data>` data value preceeding them *and* one
-succeeding them *within* the given frequency range (wich is controlled by the ``freq`` keyword.).
+The regularization by interpolation is strict in the sense, that regular timestamps *only* get
+interpolated, if they have at least one :ref:`valid <cookbooks/DataRegularisation:valid data>` data value preceding them *and* one
+succeeding them *within* the given frequency range (which is controlled by the ``freq`` keyword.).
 
-Thats, why, you have no interpolation value at ``2021-03-20 07:30:00`` - bacause it is preceeded
+That's why you have no interpolation value at ``2021-03-20 07:30:00`` - because it is preceded
 by a :ref:`valid <cookbooks/DataRegularisation:valid data>` value at ``2021-03-20 07:26:16``\ , but there is no :ref:`valid <cookbooks/DataRegularisation:valid data>` value
 available in between the succeeding *10* minutes interval from ``2021-03-20 07:30:00`` to ``2021-03-20 07:30:00``.
 
-On the other hand, there is an interpolated value assigned to ``2021-03-20 07:50:00``\ , it is preceeded by
+On the other hand, there is an interpolated value assigned to ``2021-03-20 07:50:00``, since it is preceded by
 a :ref:`valid <cookbooks/DataRegularisation:valid data>` value at ``2021-03-20 07:40:37`` and one succeeding at ``2021-03-20 07:54:59``.
 
 This behavior is intended to reflect the sparsity of the original data in the
@@ -430,12 +430,12 @@ Linear Interpolation
 
 Note, that there is a wrapper available for linear interpolation: :py:meth:`~saqc.SaQC.linear`.
 
-Flags and Regularisation
+Flags and Regularization
 ------------------------
 
 Since data, that is flagged by a level higher or equal to the passed ``to_mask`` value
 (default=:py:const:~saqc.constants.BAD), is not regarded :ref:`valid <cookbooks/DataRegularisation:valid data>` by the applied function,
-it can be of advantage, to flag data before regularisation in order to effectively exclude it
+it can be advantageous to flag data before regularization in order to effectively exclude it
 from the resulting regularly sampled data set. Lets see an example for the *SoilMoisture* data set.
 
 >>> qc = qc.linear('SoilMoisture', target='SoilMoisture_linear', freq='10min') # doctest: +SKIP
@@ -454,12 +454,12 @@ At ``2021-01-01 15:40:02`` the original data exhibits a measurement value
 of ``-120`` - which is obviously not a valid data point, regarding the fact, that *SoilMoisture* measurements
 should be percentage values in between *0* and *100*.
 
-Since we dont exclude the value from interpolation, it gets included in the interpolation
-process for the regular timstamp at ``2021-01-01 15:40:00`` - wich, as a result, also exhibits
-a non - sence value of *-119.512446*. We could now flag the resulting regular dataset and
-exclude this calculated non sence value from further processing and analysis.
+Since we don't exclude the value from interpolation, it gets included in the interpolation
+process for the regular timestamp at ``2021-01-01 15:40:00`` - which, as a result, also exhibits
+a nonsense value of *-119.512446*. We could now flag the resulting regular dataset and
+exclude this calculated nonsense value from further processing and analysis.
 
-But, this would mean, that we would have a small data gap at this point.
+But this would introduce a small data gap at this point.
 
 We can circumvent having that gap, by flagging that value before interpolation. This
 works, because there is actually another, now valid value, available in the interval
@@ -481,6 +481,7 @@ do the interpolation.
    2021-01-01 15:40:00    23.319971 | 2021-01-01 15:40:02             -120.0000 |
    2021-01-01 15:50:00    23.299553 | 2021-01-01 15:49:50               23.2988 |
 
+
 back projection of flags
 ------------------------
 
diff --git a/docs/cookbooks/MultivariateFlagging.rst b/docs/cookbooks/MultivariateFlagging.rst
index d89242d24a445afde0d777def978566940ce2d2f..c7297ade17f4497c58b3ae5766beb2433c8ad702 100644
--- a/docs/cookbooks/MultivariateFlagging.rst
+++ b/docs/cookbooks/MultivariateFlagging.rst
@@ -170,7 +170,7 @@ But checking out values around *2017-10-29*, we notice, that the sampling rate s
 
 Those instabilities do bias most statistical evaluations and it is common practice to apply some
 :doc:`resampling functions <../funcs/resampling>` onto the data, to obtain a regularly spaced timestamp.
-(See also the :ref:`harmonization tutorial <cookbooks/DataRegularisation:data regularisation>` for more informations
+(See also the :ref:`harmonization tutorial <cookbooks/DataRegularisation:data regularization>` for more information
 on that topic.)
 
 We will apply :py:meth:`linear harmonisation <saqc.SaQC.linear>` to all the sensor data variables,
diff --git a/docs/documentation/Customizations.rst b/docs/documentation/Customizations.rst
index 473070fa383077b45e4e2b74c17ef11797c3c787..3c76088f20a08079c12087d5f9a263124009e6a0 100644
--- a/docs/documentation/Customizations.rst
+++ b/docs/documentation/Customizations.rst
@@ -7,7 +7,7 @@ Customizations
 
 SaQC comes with a continuously growing number of pre-implemented
 quality checking and processing routines as well as flagging schemes. 
-For any sufficiently large use case however it is very likely that the 
+For any sufficiently large use case, however, it is very likely that the
 functions provided won't fulfill all your needs and requirements.
 
 Acknowledging the impossibility to address all imaginable use cases, we 
@@ -32,7 +32,7 @@ SaQC provides two ways to integrate custom routines into the system:
 Interface
 ^^^^^^^^^
 
-In order to make a function usable within the evaluation framework of SaQC it needs to
+In order to make a function usable within the evaluation framework of SaQC, it needs to
 implement the following function interface
 
 .. code-block:: python
@@ -42,12 +42,11 @@ implement the following function interface
    import saqc
 
    def yourTestFunction(
-      data: dios.DictOfSeries,
+      saqc: SaQC,
       field: str,
-      flags: saqc.Flags,
       *args,
       **kwargs
-      ) -> (dios.DictOfSeries, saqc.Flags)
+      ) -> SaQC
 
 Argument Descriptions
 ~~~~~~~~~~~~~~~~~~~~~
@@ -81,8 +80,8 @@ test functions into SaQC. Here is a complete dummy example:
    from saqc import register
 
    @flagging()
-   def yourTestFunction(data, field, flags, *args, **kwargs):
-       return data, flags
+   def yourTestFunction(saqc: SaQC, field: str, *args, **kwargs):
+       return saqc
 
 Example
 ^^^^^^^
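A usage note on the reworked interface above: once a routine like `yourTestFunction` is registered through the decorator, it is called via the `SaQC` object and is expected to hand the (possibly modified) object back, so calls can be chained. A hedged sketch, assuming the dummy from the snippet above has been registered and using made-up sample data:

```python
import pandas as pd
import saqc

# made-up sample data
data = pd.DataFrame(
    {"data": [1.0, 2.0, 3.0]},
    index=pd.date_range("2021-01-01", periods=3, freq="D"),
)

qc = saqc.SaQC(data)
# the registered routine receives the SaQC object plus the field name
# and returns a SaQC object
qc = qc.yourTestFunction("data")
```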
diff --git a/docs/documentation/GlobalKeywords.rst b/docs/documentation/GlobalKeywords.rst
index 4bea09db6298e180ee1027e7c729cf679a6c5dd1..781dfbc6f3db5f12bbb8e65e5ff75b898da3a169 100644
--- a/docs/documentation/GlobalKeywords.rst
+++ b/docs/documentation/GlobalKeywords.rst
@@ -225,7 +225,7 @@ We can make the value flagged by both the flagging functions by increasing the
 
    qc = saqc.SaQC(data)
    qc = qc.flagRange('data', max=15, label='value > 15')
-   qc = qc.flagRange('data', max=0, label='value > 0', dfilter=300)
+   qc = qc.flagRange('data', max=0, label='value > 0', dfilter=255)
    qc.plot('data')
 
 Unflagging Values
@@ -267,5 +267,5 @@ gets the already flagged values passed to test them.
    :context: close-figs
    :include-source: False
 
-   qc = qc.flagConstants('data', window='2D', thresh=0, dfilter=300, flag=-np.inf)
+   qc = qc.flagConstants('data', window='2D', thresh=0, dfilter=255, flag=-np.inf)
    qc.plot('data')
diff --git a/docs/documentation/WritingFunctions.rst b/docs/documentation/WritingFunctions.rst
index 6080a5e316d66b5ed68af94ddf5d183cdd7661a8..25ddb90bbbd04c3a53567a4e1901528251b58b05 100644
--- a/docs/documentation/WritingFunctions.rst
+++ b/docs/documentation/WritingFunctions.rst
@@ -10,7 +10,7 @@ Writing non-standard functions
 
 When implementing non-standard functions, i.e. all function not decorated with ``flagging``, some special care is
 needed to comply to the standard ``SaQC`` behaviour. The following passages guide you through the jungle of
-``register`` arguments and there semantics.
+``register`` arguments and their semantics.
 
 Masking
 """""""
diff --git a/docs/gettingstarted/InstallationGuide.rst b/docs/gettingstarted/InstallationGuide.rst
index ebe572bd82b500f0cead98407ee11ce167f039be..700175b506885cb94d3f95d927bd76599f68ea23 100644
--- a/docs/gettingstarted/InstallationGuide.rst
+++ b/docs/gettingstarted/InstallationGuide.rst
@@ -79,9 +79,9 @@ or
    pip install git+https://git.ufz.de/rdm-software/saqc@master
 
 
-If you feel more adventurous feel free to use the latest development version from our
-`GitLab-repository <https://git.ufz.de/rdm-software/saqc>`_. While we try to keep the
-develop branch in a workable state, we sill won't make any guarantees here.
+If you feel more adventurous, feel free to use the latest development version from our
+`GitLab-repository <https://git.ufz.de/rdm-software/saqc>`_. We try to keep the
+develop branch in a workable state, but still don't make any guarantees here.
 
 .. code-block:: sh
 
diff --git a/docs/gettingstarted/TutorialCLI.rst b/docs/gettingstarted/TutorialCLI.rst
index f635ebd9a9973289feda03be7210f2db288263cb..e173f3c66139b29af4b3181e64df3279a9daf252 100644
--- a/docs/gettingstarted/TutorialCLI.rst
+++ b/docs/gettingstarted/TutorialCLI.rst
@@ -292,4 +292,3 @@ You can learn more about the syntax of these custom functions
    :align: center
 
    qc.plot('SM2')
-
diff --git a/docs/index.rst b/docs/index.rst
index f2b74478ef9d0afeca51cd660676458ef7f9aa8a..81a667b377b0ffe704f91450a077089691bec1bc 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -92,10 +92,11 @@ Getting Started
    Writing Functions <documentation/WritingFunctions>
 
 
-Indices and tables
-==================
+..
+   Indices and tables
+   ==================
 
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+   * :ref:`genindex`
+   * :ref:`modindex`
+   * :ref:`search`
 
diff --git a/docs/misc/title.rst b/docs/misc/title.rst
index f92b0cdd09e6bd6b78548800d764d1873b7e538d..418249e5551fd6cbeb8bdf505d5c058ee3392718 100644
--- a/docs/misc/title.rst
+++ b/docs/misc/title.rst
@@ -69,7 +69,7 @@ Features
         * or use SaQC as a commandline application and configure your pipelines via plain text
     * - |sacRaw|
       - * easily load data from multiple sources, concatenating them in a SaQC object
-        * :ref:`preprocess your data, by aligning it to shared frequency grids <cookbooks/DataRegularisation:Data Regularisation>`
+        * :ref:`preprocess your data, by aligning it to shared frequency grids <cookbooks/DataRegularisation:Data Regularization>`
     * - |sacFlagged|
       - * apply basic plausibility checks, as well as
         * more complex, univariat flagging Functions
diff --git a/docs/requirements.txt b/docs/requirements.txt
index b22a8edcca0d70768082e5c1fd2e8055feb9f625..2ff705310e0b6b7a232101d5af5023f2c3c5fc1b 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -3,11 +3,11 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 recommonmark==0.7.1
-sphinx<5
+sphinx<6
 sphinx-automodapi==0.14.1
 sphinxcontrib-fulltoc==1.2.0
-sphinx-markdown-tables==0.0.15
+sphinx-markdown-tables==0.0.17
 m2r==0.2.1
 jupyter-sphinx==0.3.2
 sphinx_autodoc_typehints==1.18.2
-sphinx-tabs==3.3.1
+sphinx-tabs==3.4.1
diff --git a/docs/resources/images/flagJumpsPic.excalidraw b/docs/resources/images/flagJumpsPic.excalidraw
new file mode 100644
index 0000000000000000000000000000000000000000..f25d2014f2e65ca4fd5526ee16b85ff5288bce76
--- /dev/null
+++ b/docs/resources/images/flagJumpsPic.excalidraw
@@ -0,0 +1,7136 @@
+{
+  "type": "excalidraw",
+  "version": 2,
+  "source": "https://excalidraw.com",
+  "elements": [
+    {
+      "type": "ellipse",
+      "version": 284,
+      "versionNonce": 467968316,
+      "isDeleted": false,
+      "id": "XL5f1DxC2KzI5XW8wZlO3",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -29.333333333332575,
+      "y": 423.3333333333337,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 760309488,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943927,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 326,
+      "versionNonce": 2065571972,
+      "isDeleted": false,
+      "id": "Hv6f-Lnt1vrsLa6uMgtcP",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 68.3333333333344,
+      "y": 493.3333333333335,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 403750928,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943927,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 180,
+      "versionNonce": 1541187900,
+      "isDeleted": false,
+      "id": "YTyHQXvJy6OP5gnXI3lN6",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 150.33333333333394,
+      "y": 795.3333333333335,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 90,
+      "height": 48.333333333333485,
+      "seed": 1127683824,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943930,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -90,
+          48.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "rectangle",
+      "version": 802,
+      "versionNonce": 361194884,
+      "isDeleted": false,
+      "id": "aLDREFOyyyEerjGWCr0l6",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 115.66666666666742,
+      "y": 194.66666666666686,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 338.3333333333332,
+      "height": 145,
+      "seed": 1788482576,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943931,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 756,
+      "versionNonce": 1965991100,
+      "isDeleted": false,
+      "id": "22A7zki5z8yjgUmlTYslq",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 135.66666666666697,
+      "y": 216.0000000000001,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1419970288,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943932,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "text",
+      "version": 619,
+      "versionNonce": 1908855940,
+      "isDeleted": false,
+      "id": "fJUpHXq0KQdkzp24oPD9U",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 190.66666666666742,
+      "y": 208.5000000000001,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 216,
+      "height": 36,
+      "seed": 2111011344,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943932,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "unflagged value",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "unflagged value"
+    },
+    {
+      "type": "text",
+      "version": 610,
+      "versionNonce": 304195004,
+      "isDeleted": false,
+      "id": "wRIFRYte5B8UdwuFnnAFK",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 184.0000000000009,
+      "y": 270.16666666666674,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 219,
+      "height": 36,
+      "seed": 2059429904,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943932,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "flagged as jump",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "flagged as jump"
+    },
+    {
+      "type": "rectangle",
+      "version": 439,
+      "versionNonce": 1451336708,
+      "isDeleted": false,
+      "id": "TGTnsFkzVfxpbXKzlRY9Q",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -194.9999999999992,
+      "y": -925.9999999999998,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 1366.6666666666667,
+      "height": 1632.6666666666667,
+      "seed": 2386672,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943932,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 631,
+      "versionNonce": 1023960764,
+      "isDeleted": false,
+      "id": "1guS4l1vTy694Scq_sbaN",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 176.78571428571513,
+      "y": 500.8333333333335,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1707531324,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943933,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 665,
+      "versionNonce": 1126396676,
+      "isDeleted": false,
+      "id": "1KvfmFhdLHZdUP2T1W0II",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 115.92857142857201,
+      "y": 481.40476190476204,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1724124604,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943933,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 706,
+      "versionNonce": 155463484,
+      "isDeleted": false,
+      "id": "0V6ajk9y3uSExC2jCy3WF",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 16.785714285714675,
+      "y": 471.1190476190478,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1460285828,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943933,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 784,
+      "versionNonce": 1069970052,
+      "isDeleted": false,
+      "id": "uc5ULQIZh3xBNKCPo7kWk",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -83.21428571428521,
+      "y": 349.11904761904793,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1344014396,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943933,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 798,
+      "versionNonce": 1542645692,
+      "isDeleted": false,
+      "id": "UZIhf6AgDGl7xChb8lTjm",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -165.4999999999992,
+      "y": 296.2619047619048,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 843910716,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943934,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 793,
+      "versionNonce": 1054704132,
+      "isDeleted": false,
+      "id": "xpitB0vVrNHcpjLETvI3k",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 239.64285714285825,
+      "y": 502.54761904761926,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 321828740,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943934,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 831,
+      "versionNonce": 1228312636,
+      "isDeleted": false,
+      "id": "YlqL5QblitGk35zJTZ7aa",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 306.78571428571513,
+      "y": 519.4047619047622,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1756249532,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943934,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 858,
+      "versionNonce": 2003214724,
+      "isDeleted": false,
+      "id": "QivEpETMvx3V3HaJNlUXR",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 366.78571428571513,
+      "y": 542.261904761905,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1206268164,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943934,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 878,
+      "versionNonce": 284240060,
+      "isDeleted": false,
+      "id": "sbUEs3KTyUORpP9_Rpwow",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 419.6428571428578,
+      "y": 512.2619047619049,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1021851708,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943934,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 900,
+      "versionNonce": 278909188,
+      "isDeleted": false,
+      "id": "_VdpZd2TfpVgU7h_7BLLu",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 491.07142857142935,
+      "y": 545.1190476190477,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 4937148,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943934,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 925,
+      "versionNonce": 650424636,
+      "isDeleted": false,
+      "id": "2AMBG_Ob1TPgSWuI1567v",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 543.928571428572,
+      "y": 522.2619047619049,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1344543676,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943935,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 951,
+      "versionNonce": 436670596,
+      "isDeleted": false,
+      "id": "VhSrHb7g3SEZiyYmQh-l8",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 595.3571428571436,
+      "y": 505.11904761904793,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 659916676,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943935,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 967,
+      "versionNonce": 974334396,
+      "isDeleted": false,
+      "id": "jTqIjzayKfqt2kJqh-2il",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 649.6428571428578,
+      "y": 523.6904761904763,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 622022788,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943935,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 999,
+      "versionNonce": 56254468,
+      "isDeleted": false,
+      "id": "9QPLCd_pJ8PAK32_czMc0",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 702.5000000000009,
+      "y": 520.8333333333335,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1710971908,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943935,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1154,
+      "versionNonce": 345489284,
+      "isDeleted": false,
+      "id": "lAYAEGJqz-UBGjYpkxRPz",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 835.0714285714294,
+      "y": 302.8333333333336,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1736339516,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943935,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1206,
+      "versionNonce": 1902701244,
+      "isDeleted": false,
+      "id": "iEi0edkADPpl1WpNGbZeF",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 889.3571428571436,
+      "y": 247.11904761904748,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1819923132,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943936,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1217,
+      "versionNonce": 1727279932,
+      "isDeleted": false,
+      "id": "FtCQ6AwtZsXBQlNLeO7Cu",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 952.2142857142862,
+      "y": 297.1190476190478,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1169700484,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943938,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1255,
+      "versionNonce": 1476963972,
+      "isDeleted": false,
+      "id": "s6yM2UrMwHlauz8ZwX15L",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1016.5000000000005,
+      "y": 299.9761904761906,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 2134315140,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943938,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1282,
+      "versionNonce": 218262460,
+      "isDeleted": false,
+      "id": "cOC13E5Fh0WqIAnlt_M0U",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1079.3571428571431,
+      "y": 297.1190476190477,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 952706692,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943938,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1305,
+      "versionNonce": 192429572,
+      "isDeleted": false,
+      "id": "RF62rh7F-MgRiAb58Y0Sh",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1135.071428571429,
+      "y": 294.26190476190504,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1192426172,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943938,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 948,
+      "versionNonce": 901092412,
+      "isDeleted": false,
+      "id": "u2pSrV_Ii3SGBgbwX0H61",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 759.5952380952385,
+      "y": 271.64285714285745,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 42.85714285714264,
+      "height": 42.85714285714287,
+      "seed": 50523452,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943938,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "4Kqe4hnwxdvHo4hVE4VX6",
+      "type": "line",
+      "x": 405.3571428571436,
+      "y": 532.2619047619049,
+      "width": 442.8571428571431,
+      "height": 0,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 686926524,
+      "version": 333,
+      "versionNonce": 519023804,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943938,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          442.8571428571431,
+          0
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "type": "line",
+      "version": 628,
+      "versionNonce": 1253805316,
+      "isDeleted": false,
+      "id": "CjAascEVBLT9rF5O9Lc-D",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 652.357142857144,
+      "y": 282.6904761904765,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 458.57142857142844,
+      "height": 1.4285714285713311,
+      "seed": 2101484420,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943939,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          458.57142857142844,
+          -1.4285714285713311
+        ]
+      ]
+    },
+    {
+      "id": "LSyfbHtD5MHNZsi-mYlOj",
+      "type": "ellipse",
+      "x": 768.3571428571436,
+      "y": 282.83333333333303,
+      "width": 24.285714285714214,
+      "height": 20.000000000000114,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1346650684,
+      "version": 296,
+      "versionNonce": 290147460,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943939,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1181,
+      "versionNonce": 1967173052,
+      "isDeleted": false,
+      "id": "ebJHH3EYdZmPc6C6R3s77",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 126.50000000000136,
+      "y": 268.26190476190493,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 42.85714285714264,
+      "height": 42.85714285714287,
+      "seed": 825835196,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943939,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 529,
+      "versionNonce": 2125929476,
+      "isDeleted": false,
+      "id": "igz8HgwVHhpgLos_uldom",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 135.26190476190595,
+      "y": 279.4523809523804,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 24.285714285714214,
+      "height": 20.000000000000114,
+      "seed": 151788292,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943939,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "4QS2V7HC_CzjVXPqThdWO",
+      "type": "rectangle",
+      "x": 403.928571428572,
+      "y": 492.26190476190493,
+      "width": 322.85714285714283,
+      "height": 84.28571428571422,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1198610692,
+      "version": 254,
+      "versionNonce": 2096954244,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943939,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "rectangle",
+      "version": 479,
+      "versionNonce": 436563644,
+      "isDeleted": false,
+      "id": "v80BaYp96kOJ1KEC9en_L",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "angle": 0,
+      "x": 750.7857142857151,
+      "y": 243.54761904761892,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 358.5714285714278,
+      "height": 84.28571428571422,
+      "seed": 682053436,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [
+        {
+          "id": "hA3FIiaiS6HrtazZFIfkm",
+          "type": "arrow"
+        }
+      ],
+      "updated": 1666206943940,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "0z1fYVnq2MJ-MKXSQ7189",
+      "type": "line",
+      "x": 712.5000000000009,
+      "y": 365.9761904761905,
+      "width": 52.57142857142867,
+      "height": 0,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1526134148,
+      "version": 169,
+      "versionNonce": 1365260092,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943940,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          52.57142857142867,
+          0
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "Hcv-z24x7pa2W0GWHnUdv",
+      "type": "line",
+      "x": 739.6428571428578,
+      "y": 371.9761904761906,
+      "width": 1.4285714285713311,
+      "height": 161.71428571428578,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 934072964,
+      "version": 173,
+      "versionNonce": 1397616572,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943940,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1.4285714285713311,
+          161.71428571428578
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "type": "line",
+      "version": 180,
+      "versionNonce": 1150745092,
+      "isDeleted": false,
+      "id": "IW5Du9KxsHlQdBKQB7SYb",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 724.6428571428578,
+      "y": 533.6904761904763,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 38.57142857142867,
+      "height": 0,
+      "seed": 1880377860,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943940,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          38.57142857142867,
+          0
+        ]
+      ]
+    },
+    {
+      "id": "xpZwqGfy4N9piRCCKgdFa",
+      "type": "text",
+      "x": 718.5000000000005,
+      "y": 417.4761904761907,
+      "width": 88,
+      "height": 36,
+      "angle": 1.5736158681954446,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 575900804,
+      "version": 310,
+      "versionNonce": 804296764,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943940,
+      "link": null,
+      "locked": false,
+      "text": "thresh",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "thresh"
+    },
+    {
+      "id": "KmZ5Av6GUfConEDBHzo1b",
+      "type": "line",
+      "x": 753.0714285714294,
+      "y": 176.83333333333348,
+      "width": 0.5714285714284415,
+      "height": 48.285714285714334,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1823495612,
+      "version": 324,
+      "versionNonce": 328435844,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943941,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -0.5714285714284415,
+          48.285714285714334
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "type": "line",
+      "version": 267,
+      "versionNonce": 1202336188,
+      "isDeleted": false,
+      "id": "XUWMeSxlsaCUd85JhOCAU",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 721.7857142857151,
+      "y": 586.5476190476195,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 1.4285714285715585,
+      "height": 48.571428571428555,
+      "seed": 139454908,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943941,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1.4285714285715585,
+          48.571428571428555
+        ]
+      ]
+    },
+    {
+      "id": "7bpqiu-y9bCmyBhmOOBPh",
+      "type": "text",
+      "x": 512.5000000000009,
+      "y": 614.904761904762,
+      "width": 87,
+      "height": 36,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1101496380,
+      "version": 140,
+      "versionNonce": 77060100,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943941,
+      "link": null,
+      "locked": false,
+      "text": "window",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "id": "S9pY1k66L29UnrCrAmhXh",
+      "type": "arrow",
+      "x": 724.5000000000009,
+      "y": 611.6904761904764,
+      "width": 320,
+      "height": 0,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1664574468,
+      "version": 150,
+      "versionNonce": 852849596,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943942,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -320,
+          0
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow"
+    },
+    {
+      "type": "arrow",
+      "version": 361,
+      "versionNonce": 641316356,
+      "isDeleted": false,
+      "id": "McpjA2zV_Kpge6eW47bK5",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 758.5000000000009,
+      "y": 205.69047619047643,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 344.0000000000002,
+      "height": 6,
+      "seed": 1809593020,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943942,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow",
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          344.0000000000002,
+          -6
+        ]
+      ]
+    },
+    {
+      "id": "6ahMlwqKu6HCSGWvy3wA8",
+      "type": "line",
+      "x": 1108.500000000001,
+      "y": 171.69047619047637,
+      "width": 2,
+      "height": 56,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 755971844,
+      "version": 163,
+      "versionNonce": 1005034556,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943942,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2,
+          56
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "type": "line",
+      "version": 232,
+      "versionNonce": 219960708,
+      "isDeleted": false,
+      "id": "QyYGI2ArOgSrU-gjlIntv",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 393.50000000000045,
+      "y": 587.6904761904764,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 56,
+      "seed": 400071684,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943942,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2,
+          56
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 196,
+      "versionNonce": 564180156,
+      "isDeleted": false,
+      "id": "nNAacY4dN31aN4nCTE67f",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 863.0000000000009,
+      "y": 157.6904761904765,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 87,
+      "height": 36,
+      "seed": 1168237444,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943942,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "window",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "type": "ellipse",
+      "version": 361,
+      "versionNonce": 1398476548,
+      "isDeleted": false,
+      "id": "CZMH5uUyILP5xM0fb9rrN",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -19.61904761904634,
+      "y": -143.27380952380918,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1223696004,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943947,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 403,
+      "versionNonce": 1860457276,
+      "isDeleted": false,
+      "id": "8hMaLxPc43jsi_aMnLlKg",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 78.04761904762017,
+      "y": -73.2738095238094,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 344587196,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943947,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 712,
+      "versionNonce": 1643023292,
+      "isDeleted": false,
+      "id": "FvAM_bXMed5yAn7pdNrtx",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 186.50000000000136,
+      "y": -65.7738095238094,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 97199164,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943947,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 746,
+      "versionNonce": 1658723844,
+      "isDeleted": false,
+      "id": "S-SR1uCkcEWjgV_i09-lz",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 125.64285714285825,
+      "y": -85.20238095238085,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1796558212,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943947,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 787,
+      "versionNonce": 1478341692,
+      "isDeleted": false,
+      "id": "Su8dY86aLEJ38BfzYKn_6",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 26.50000000000091,
+      "y": -95.48809523809507,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1976777916,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943947,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 860,
+      "versionNonce": 150191492,
+      "isDeleted": false,
+      "id": "XuIAPBnWXT9tXSQz4qKJ4",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -79.49999999999898,
+      "y": -215.48809523809496,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1905851652,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 879,
+      "versionNonce": 2136198332,
+      "isDeleted": false,
+      "id": "DiM6y3sWyESPpuNJ1PWxt",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -155.7857142857132,
+      "y": -270.3452380952381,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1313097020,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 874,
+      "versionNonce": 707357956,
+      "isDeleted": false,
+      "id": "BARt6tas3SJid3I2yStl2",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 249.35714285714403,
+      "y": -64.05952380952363,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1526381700,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 912,
+      "versionNonce": 2033374524,
+      "isDeleted": false,
+      "id": "7DgeQbpuiidH9gL76U_km",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 316.5000000000009,
+      "y": -47.202380952380736,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1954561468,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 939,
+      "versionNonce": 134032516,
+      "isDeleted": false,
+      "id": "1zON7bqbBR-RTFGjMJoHC",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 376.5000000000009,
+      "y": -24.345238095237846,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1090030596,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 959,
+      "versionNonce": 1662832060,
+      "isDeleted": false,
+      "id": "jgi7QGqHcFMJcWup-RIaR",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 429.357142857144,
+      "y": -54.34523809523796,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1370039868,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 981,
+      "versionNonce": 70056964,
+      "isDeleted": false,
+      "id": "yT6E_HoAskV4tBRbPW5Hw",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 500.78571428571513,
+      "y": -21.488095238095184,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1435528068,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1006,
+      "versionNonce": 1580447292,
+      "isDeleted": false,
+      "id": "1SZJMfng6rKXamVYLjpah",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 553.6428571428578,
+      "y": -44.34523809523796,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1520500412,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1032,
+      "versionNonce": 414636932,
+      "isDeleted": false,
+      "id": "IlzTildUg_fftdcNRvYkf",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 605.0714285714294,
+      "y": -61.48809523809496,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1526749956,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1048,
+      "versionNonce": 391745212,
+      "isDeleted": false,
+      "id": "Wy3OTHyCLSqL2Jh2OJs6V",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 659.3571428571436,
+      "y": -42.91666666666663,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 403294012,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1080,
+      "versionNonce": 1339607812,
+      "isDeleted": false,
+      "id": "wsyc-6-hfRyScPCQTJpZs",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 712.2142857142867,
+      "y": -45.773809523809405,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 465984132,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1235,
+      "versionNonce": 1328568124,
+      "isDeleted": false,
+      "id": "b93WxUvy517OPtu806-kR",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 844.7857142857151,
+      "y": -263.7738095238093,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 776306620,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1287,
+      "versionNonce": 1007733380,
+      "isDeleted": false,
+      "id": "6a-9S3Uqw6O4Qzj-HQ-xy",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 899.0714285714294,
+      "y": -319.4880952380954,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1654680068,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1298,
+      "versionNonce": 1645875132,
+      "isDeleted": false,
+      "id": "R6wRg7olzGqzgM1lRAvWO",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 961.9285714285725,
+      "y": -269.48809523809507,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1702242364,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1336,
+      "versionNonce": 1880939012,
+      "isDeleted": false,
+      "id": "cTK_Z8o1x9U9GS3mwR4Gf",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1026.2142857142862,
+      "y": -266.6309523809523,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1921812868,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1363,
+      "versionNonce": 1345894460,
+      "isDeleted": false,
+      "id": "COkBRieGeDW2mG0KixcjS",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1089.0714285714294,
+      "y": -269.4880952380952,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 830324924,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1386,
+      "versionNonce": 1996159364,
+      "isDeleted": false,
+      "id": "uxaV27pxwuYPLfvPsDxXF",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1144.7857142857147,
+      "y": -272.34523809523785,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 2127083780,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 545,
+      "versionNonce": 1273571588,
+      "isDeleted": false,
+      "id": "70MsDvCSQg5GTiwyQGkl4",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 79.07142857142935,
+      "y": -52.34523809523799,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 342.8571428571431,
+      "height": 2,
+      "seed": 584251524,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          342.8571428571431,
+          2
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1010,
+      "versionNonce": 559733052,
+      "isDeleted": false,
+      "id": "P-jsHb0qnPNZDAdr4hcue",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 420.07142857143026,
+      "y": -25.916666666666345,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 354.57142857142844,
+      "height": 0.5714285714286689,
+      "seed": 1650049468,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          354.57142857142844,
+          0.5714285714286689
+        ]
+      ]
+    },
+    {
+      "type": "ellipse",
+      "version": 378,
+      "versionNonce": 1382377604,
+      "isDeleted": false,
+      "id": "WGwnOwMC-mEg-T20Czmac",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 778.0714285714294,
+      "y": -283.77380952380986,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 24.285714285714214,
+      "height": 20.000000000000114,
+      "seed": 1516849156,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943948,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "rectangle",
+      "version": 543,
+      "versionNonce": 2137412028,
+      "isDeleted": false,
+      "id": "S6LOBMPjPqQmd2CAV3q4s",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "angle": 0,
+      "x": 75.64285714285825,
+      "y": -102.34523809523796,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 342.85714285714283,
+      "height": 94.28571428571423,
+      "seed": 1370630716,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "rectangle",
+      "version": 641,
+      "versionNonce": 1311619076,
+      "isDeleted": false,
+      "id": "Fmr--WP7CIeips_xROd41",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "angle": 0,
+      "x": 418.50000000000136,
+      "y": -73.05952380952397,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 358.5714285714278,
+      "height": 84.28571428571422,
+      "seed": 2039064452,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [
+        {
+          "id": "hA3FIiaiS6HrtazZFIfkm",
+          "type": "arrow"
+        }
+      ],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 468,
+      "versionNonce": 1614306108,
+      "isDeleted": false,
+      "id": "RbxYITKA5dyjnWYHeBUEB",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 424.7857142857147,
+      "y": 38.226190476190595,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 0.5714285714284415,
+      "height": 48.285714285714334,
+      "seed": 1899824060,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -0.5714285714284415,
+          48.285714285714334
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 409,
+      "versionNonce": 578487940,
+      "isDeleted": false,
+      "id": "9ZXxmfDShJaycPd9NNx6n",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 413.5000000000009,
+      "y": 21.940476190476602,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 1.4285714285715585,
+      "height": 48.571428571428555,
+      "seed": 265496068,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1.4285714285715585,
+          48.571428571428555
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 282,
+      "versionNonce": 461476796,
+      "isDeleted": false,
+      "id": "7UaE7RVc_xv8PA9ma5O0G",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 204.2142857142867,
+      "y": 50.29761904761915,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 87,
+      "height": 36,
+      "seed": 1725770812,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "window",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "type": "arrow",
+      "version": 292,
+      "versionNonce": 98539012,
+      "isDeleted": false,
+      "id": "xC02IlJ3UVhWqhI-s02Hg",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 416.2142857142867,
+      "y": 47.083333333333485,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 320,
+      "height": 0,
+      "seed": 1644676484,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow",
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -320,
+          0
+        ]
+      ]
+    },
+    {
+      "type": "arrow",
+      "version": 505,
+      "versionNonce": 2047851580,
+      "isDeleted": false,
+      "id": "i65QGYhnLpEGlDpjVC9Pp",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 430.21428571428623,
+      "y": 67.0833333333336,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 344.0000000000002,
+      "height": 6,
+      "seed": 2003059900,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow",
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          344.0000000000002,
+          -6
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 307,
+      "versionNonce": 1023817092,
+      "isDeleted": false,
+      "id": "WnkxejOhkb-fJ-_Hm5laB",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 780.2142857142871,
+      "y": 33.083333333333485,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 56,
+      "seed": 1400665348,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2,
+          56
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 374,
+      "versionNonce": 633656508,
+      "isDeleted": false,
+      "id": "wDjxBEhAX3maDtQtGXOVK",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 85.21428571428669,
+      "y": 23.083333333333485,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 56,
+      "seed": 141839676,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2,
+          56
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 356,
+      "versionNonce": 623699204,
+      "isDeleted": false,
+      "id": "J8Fl25UZNMxeTatKktWrK",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 538.7142857142862,
+      "y": 85.0833333333336,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 87,
+      "height": 36,
+      "seed": 565130372,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "window",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "id": "jSAtcg-pH9hyUO2uC05lS",
+      "type": "line",
+      "x": -193.4999999999992,
+      "y": 123.69047619047637,
+      "width": 1366.0000000000002,
+      "height": 4,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 386783420,
+      "version": 151,
+      "versionNonce": 1011014972,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1366.0000000000002,
+          4
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "type": "ellipse",
+      "version": 435,
+      "versionNonce": 1137812612,
+      "isDeleted": false,
+      "id": "PCcJqN6HXyrpFQ6AhjH-g",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -23.61904761904634,
+      "y": -660.3809523809518,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 890013116,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 477,
+      "versionNonce": 1458296252,
+      "isDeleted": false,
+      "id": "hFphmIzqtpW6God_uKM7H",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 74.04761904761972,
+      "y": -590.3809523809521,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1412744196,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 786,
+      "versionNonce": 316784188,
+      "isDeleted": false,
+      "id": "c2cTiZVLlzwO9-lmb_fep",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 182.50000000000136,
+      "y": -582.8809523809521,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 612737924,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 820,
+      "versionNonce": 1405965188,
+      "isDeleted": false,
+      "id": "98x_JUMYYLiRQt3K0Vu2V",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 121.64285714285825,
+      "y": -602.3095238095235,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 180045500,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 861,
+      "versionNonce": 419862204,
+      "isDeleted": false,
+      "id": "jXj7H3uEKJDx_Ze6a-fz-",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 22.500000000000455,
+      "y": -612.5952380952377,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 110007044,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943949,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 939,
+      "versionNonce": 1365263108,
+      "isDeleted": false,
+      "id": "aXr5HjBwloirbBrIYetRc",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -79.49999999999898,
+      "y": -738.5952380952376,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1570443068,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 953,
+      "versionNonce": 637296444,
+      "isDeleted": false,
+      "id": "IQTNpoAAyXeeYYUYhAfdd",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -159.7857142857132,
+      "y": -787.4523809523807,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 684561028,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 948,
+      "versionNonce": 233390724,
+      "isDeleted": false,
+      "id": "w5OCc8JLm_OXpz_q4G_PI",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 245.35714285714357,
+      "y": -581.1666666666663,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1114490812,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 986,
+      "versionNonce": 304420796,
+      "isDeleted": false,
+      "id": "nGXjTDPAS2wMPbaYyKfh5",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 312.50000000000045,
+      "y": -564.3095238095234,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 973120004,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1013,
+      "versionNonce": 1613124100,
+      "isDeleted": false,
+      "id": "TtScgT7YDDE1jseGnfJtZ",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 372.50000000000045,
+      "y": -541.4523809523805,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1565880380,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1087,
+      "versionNonce": 1503838268,
+      "isDeleted": false,
+      "id": "XlzMe7eqa8UHb56teQQvW",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 425.3571428571436,
+      "y": -571.4523809523806,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1629864324,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1109,
+      "versionNonce": 387281284,
+      "isDeleted": false,
+      "id": "FYIktvkgRtVei4f1W17i-",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 496.78571428571513,
+      "y": -538.5952380952378,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 2117339324,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1134,
+      "versionNonce": 609704124,
+      "isDeleted": false,
+      "id": "3nxRQS0rpkdamylYi6GIM",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 549.6428571428573,
+      "y": -561.4523809523806,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 897249540,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1160,
+      "versionNonce": 1911857412,
+      "isDeleted": false,
+      "id": "VnGlX_1CZc3hN3wE6i1oQ",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 601.0714285714289,
+      "y": -578.5952380952376,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 682332476,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1176,
+      "versionNonce": 1306845500,
+      "isDeleted": false,
+      "id": "8UvLLkCSjQG0lVn_Kd6Wa",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 655.3571428571436,
+      "y": -560.0238095238093,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 914239620,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1208,
+      "versionNonce": 406970500,
+      "isDeleted": false,
+      "id": "W9eF05fNW75Z5CPCpwQGE",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 708.2142857142867,
+      "y": -562.8809523809521,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 471730620,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1309,
+      "versionNonce": 1840077244,
+      "isDeleted": false,
+      "id": "zIw0vUbQAih7E3U_-Uj9h",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 840.7857142857151,
+      "y": -780.880952380952,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1139638276,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1361,
+      "versionNonce": 297480196,
+      "isDeleted": false,
+      "id": "MSUwd-RDnHA8y-2nuc8O-",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 895.0714285714289,
+      "y": -836.5952380952381,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1538093628,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1372,
+      "versionNonce": 1590870588,
+      "isDeleted": false,
+      "id": "m69m5L6VbuEZcwELmVnwZ",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 957.9285714285725,
+      "y": -786.5952380952377,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 365354884,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1410,
+      "versionNonce": 1046384516,
+      "isDeleted": false,
+      "id": "Nqkly9HwjXMzaxh-6z1dY",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1022.2142857142862,
+      "y": -783.738095238095,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 909888188,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1437,
+      "versionNonce": 1211570876,
+      "isDeleted": false,
+      "id": "O6CnZh4sMeEyJ23OjeFSV",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1085.0714285714294,
+      "y": -786.5952380952378,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 877154052,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 1460,
+      "versionNonce": 1315569412,
+      "isDeleted": false,
+      "id": "cKEHvfxTgK8P1Zd-vrUEm",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1140.7857142857147,
+      "y": -789.4523809523805,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1315640124,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 774,
+      "versionNonce": 612131644,
+      "isDeleted": false,
+      "id": "JInP8SRnNpxgkxDYfRFVR",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -190.928571428571,
+      "y": -685.4523809523807,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 346.8571428571431,
+      "height": 2,
+      "seed": 1877986948,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          346.8571428571431,
+          -2
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1212,
+      "versionNonce": 979074692,
+      "isDeleted": false,
+      "id": "K1B_LHkfwq5bMhW-RdHU3",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 168.0714285714298,
+      "y": -565.023809523809,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "width": 354.57142857142844,
+      "height": 0.5714285714286689,
+      "seed": 580733884,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          354.57142857142844,
+          0.5714285714286689
+        ]
+      ]
+    },
+    {
+      "type": "ellipse",
+      "version": 452,
+      "versionNonce": 1241815996,
+      "isDeleted": false,
+      "id": "urIkrQ0GIT3GfApZ5ypMB",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 774.0714285714289,
+      "y": -800.8809523809525,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 24.285714285714214,
+      "height": 20.000000000000114,
+      "seed": 1501257220,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "rectangle",
+      "version": 984,
+      "versionNonce": 988876292,
+      "isDeleted": false,
+      "id": "p3hVNmsY2ksWMXNKQmCLV",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "angle": 0,
+      "x": -190.35714285714164,
+      "y": -789.4523809523806,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 346.8571428571429,
+      "height": 222.28571428571425,
+      "seed": 351564860,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "rectangle",
+      "version": 895,
+      "versionNonce": 1928447036,
+      "isDeleted": false,
+      "id": "Ovt0GkLX86-mwBHJvbvk4",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "angle": 0,
+      "x": 170.50000000000136,
+      "y": -616.1666666666666,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 348.5714285714277,
+      "height": 98.2857142857142,
+      "seed": 971570564,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [
+        {
+          "id": "hA3FIiaiS6HrtazZFIfkm",
+          "type": "arrow"
+        }
+      ],
+      "updated": 1666206943950,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 630,
+      "versionNonce": 1093184900,
+      "isDeleted": false,
+      "id": "wtqAszXs8ymku6UyRv9vU",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 172.78571428571468,
+      "y": -504.88095238095207,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 0.5714285714284415,
+      "height": 48.285714285714334,
+      "seed": 1934789820,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -0.5714285714284415,
+          48.285714285714334
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 627,
+      "versionNonce": 1160109244,
+      "isDeleted": false,
+      "id": "sIp6PKIyAIF5jET_utrqD",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 151.50000000000045,
+      "y": -551.1666666666661,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 0.5714285714284415,
+      "height": 56.571428571428555,
+      "seed": 175549700,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -0.5714285714284415,
+          56.571428571428555
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 441,
+      "versionNonce": 1849711876,
+      "isDeleted": false,
+      "id": "FxC3byXPLHG_3pWyYI-Sd",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -87.7857142857132,
+      "y": -526.8095238095236,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 87,
+      "height": 36,
+      "seed": 2136577340,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "window",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "type": "arrow",
+      "version": 615,
+      "versionNonce": 1514333500,
+      "isDeleted": false,
+      "id": "92f_7BYmevFx9w8QAj8Vm",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 142.2142857142867,
+      "y": -528.0238095238092,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 318,
+      "height": 6,
+      "seed": 1822583940,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow",
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -318,
+          -6
+        ]
+      ]
+    },
+    {
+      "type": "arrow",
+      "version": 737,
+      "versionNonce": 196484228,
+      "isDeleted": false,
+      "id": "D5GD4ZawbF8vZsD6e91Yl",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 182.21428571428623,
+      "y": -484.0238095238091,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 324.0000000000002,
+      "height": 0,
+      "seed": 1986760124,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow",
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          324.0000000000002,
+          0
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 488,
+      "versionNonce": 1202380220,
+      "isDeleted": false,
+      "id": "Dpl2ijlWolAwzLrCYi3p9",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 514.2142857142871,
+      "y": -504.0238095238092,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 56,
+      "seed": 573193220,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2,
+          56
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 541,
+      "versionNonce": 55045124,
+      "isDeleted": false,
+      "id": "fnRrIQSofbcslEUZbS9ul",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -188.7857142857132,
+      "y": -558.0238095238092,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 56,
+      "seed": 430601788,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2,
+          56
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 528,
+      "versionNonce": 645908028,
+      "isDeleted": false,
+      "id": "O602IzKV3tLSVEH3fDz3Y",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 284.71428571428623,
+      "y": -478.0238095238091,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 87,
+      "height": 36,
+      "seed": 84679556,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "window",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "type": "line",
+      "version": 221,
+      "versionNonce": 1458817924,
+      "isDeleted": false,
+      "id": "akquLCDpNfDhOb7oweGbm",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": -196.4999999999992,
+      "y": -388.30952380952374,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 1366.0000000000002,
+      "height": 4,
+      "seed": 582676484,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1366.0000000000002,
+          4
+        ]
+      ]
+    },
+    {
+      "id": "M_Z9ZowGIEBRi4jfDmA_i",
+      "type": "rectangle",
+      "x": 216.5000000000009,
+      "y": -752.3095238095236,
+      "width": 238,
+      "height": 60,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 10,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1013787012,
+      "version": 174,
+      "versionNonce": 1193287612,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "27eRdKmp_Bsyq-8Esa00D",
+      "type": "line",
+      "x": 456.5000000000009,
+      "y": -782.3095238095236,
+      "width": 68,
+      "height": 52,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1060332036,
+      "version": 133,
+      "versionNonce": 204497084,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          68,
+          52
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "sH1QaRzRG2CGG9bKJSy3B",
+      "type": "line",
+      "x": 460.5000000000009,
+      "y": -662.3095238095236,
+      "width": 58,
+      "height": 66,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1032349444,
+      "version": 116,
+      "versionNonce": 1700587836,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          58,
+          -66
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "SlKe9mkKk5qRTKN4b-1JR",
+      "type": "line",
+      "x": 458.5000000000009,
+      "y": -780.3095238095236,
+      "width": 2,
+      "height": 112,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1717321660,
+      "version": 117,
+      "versionNonce": 1512986756,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666206943951,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -2,
+          112
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "type": "rectangle",
+      "version": 250,
+      "versionNonce": 225944508,
+      "isDeleted": false,
+      "id": "FfaKVWhR3ke2waVi_Qd0e",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 10,
+      "angle": 0,
+      "x": 398.5000000000009,
+      "y": -228.30952380952363,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 238,
+      "height": 60,
+      "seed": 568622652,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 209,
+      "versionNonce": 1645381124,
+      "isDeleted": false,
+      "id": "fqrQpF7k8SV6o4JcRABFG",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "angle": 0,
+      "x": 638.5000000000009,
+      "y": -258.3095238095236,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 68,
+      "height": 52,
+      "seed": 995529604,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          68,
+          52
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 192,
+      "versionNonce": 1656998972,
+      "isDeleted": false,
+      "id": "NH5NpZKLjrH42ToI-sRGC",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "angle": 0,
+      "x": 642.5000000000009,
+      "y": -138.30952380952363,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 58,
+      "height": 66,
+      "seed": 967923388,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          58,
+          -66
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 193,
+      "versionNonce": 1968907652,
+      "isDeleted": false,
+      "id": "5S1BFl6XI3-6NS4p8vqbY",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "angle": 0,
+      "x": 640.5000000000009,
+      "y": -256.3095238095236,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 112,
+      "seed": 1975616260,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -2,
+          112
+        ]
+      ]
+    },
+    {
+      "type": "rectangle",
+      "version": 311,
+      "versionNonce": 2104943804,
+      "isDeleted": false,
+      "id": "nI1hQAjcy60_bfbwnzjcm",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 10,
+      "angle": 0,
+      "x": 802.5000000000009,
+      "y": 415.6904761904764,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 238,
+      "height": 60,
+      "seed": 847836804,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 270,
+      "versionNonce": 1701041412,
+      "isDeleted": false,
+      "id": "enj1PDxkYI6zHN6-9t6BM",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "angle": 0,
+      "x": 1042.500000000001,
+      "y": 385.6904761904764,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 68,
+      "height": 52,
+      "seed": 1334485948,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          68,
+          52
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 253,
+      "versionNonce": 1932717372,
+      "isDeleted": false,
+      "id": "CsAvgITfT_zNDbD8bQcjG",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "angle": 0,
+      "x": 1046.500000000001,
+      "y": 505.6904761904764,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 58,
+      "height": 66,
+      "seed": 1051690500,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          58,
+          -66
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 254,
+      "versionNonce": 1068143748,
+      "isDeleted": false,
+      "id": "x46WBtpj44E5if2EtCbza",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 2,
+      "opacity": 20,
+      "angle": 0,
+      "x": 1044.500000000001,
+      "y": 387.6904761904764,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2,
+      "height": 112,
+      "seed": 1786170428,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -2,
+          112
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 261,
+      "versionNonce": 1431954876,
+      "isDeleted": false,
+      "id": "3_eF85ioyteEZWhiBMSmY",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 134.4380059455143,
+      "y": -732.1666666666666,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 52.57142857142867,
+      "height": 0,
+      "seed": 1944484540,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          52.57142857142867,
+          0
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 265,
+      "versionNonce": 837641220,
+      "isDeleted": false,
+      "id": "ybYZmnVw1LtdzKpuZYThn",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 161.5808630883721,
+      "y": -726.1666666666664,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 1.4285714285713311,
+      "height": 161.71428571428578,
+      "seed": 2145427204,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1.4285714285713311,
+          161.71428571428578
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 272,
+      "versionNonce": 957206076,
+      "isDeleted": false,
+      "id": "ABZ6LJJTfta8oyJMnJXar",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 146.5808630883712,
+      "y": -564.4523809523807,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 38.57142857142867,
+      "height": 0,
+      "seed": 1665123132,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          38.57142857142867,
+          0
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 402,
+      "versionNonce": 36324228,
+      "isDeleted": false,
+      "id": "fJj3J-XZUvNkCwoMh7FQB",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 1.5736158681954446,
+      "x": 140.4380059455143,
+      "y": -680.6666666666662,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 88,
+      "height": 36,
+      "seed": 1140727428,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "thresh",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "thresh"
+    },
+    {
+      "type": "line",
+      "version": 368,
+      "versionNonce": 1060153020,
+      "isDeleted": false,
+      "id": "AnzOzn7Z1eTmCBIhHw0ld",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 386.43800594551476,
+      "y": -194.16666666666663,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 52.57142857142867,
+      "height": 0,
+      "seed": 1063686660,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          52.57142857142867,
+          0
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 372,
+      "versionNonce": 111096580,
+      "isDeleted": false,
+      "id": "4Ssiqa-xx1W_Fs1b5J6nO",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 413.58086308837255,
+      "y": -188.1666666666664,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 1.4285714285713311,
+      "height": 161.71428571428578,
+      "seed": 1032224828,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          1.4285714285713311,
+          161.71428571428578
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 379,
+      "versionNonce": 476344124,
+      "isDeleted": false,
+      "id": "wTLPPnQa4v5HH_Wyb4Pp5",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 398.58086308837164,
+      "y": -26.452380952380736,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 38.57142857142867,
+      "height": 0,
+      "seed": 609071492,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          38.57142857142867,
+          0
+        ]
+      ]
+    },
+    {
+      "type": "text",
+      "version": 509,
+      "versionNonce": 499790468,
+      "isDeleted": false,
+      "id": "_dtWRqjcmuVONOt-j3-PA",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 1.5736158681954446,
+      "x": 392.43800594551476,
+      "y": -142.66666666666617,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 88,
+      "height": 36,
+      "seed": 1438849212,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943952,
+      "link": null,
+      "locked": false,
+      "fontSize": 28,
+      "fontFamily": 1,
+      "text": "thresh",
+      "baseline": 25,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "containerId": null,
+      "originalText": "thresh"
+    },
+    {
+      "type": "line",
+      "version": 255,
+      "versionNonce": 2004509060,
+      "isDeleted": false,
+      "id": "S2M6G8PN2Z0Ureb8ZA2q0",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": -74.74999999999909,
+      "y": 358.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 82.00000000000023,
+      "height": 47.666666666666515,
+      "seed": 1074921604,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -82.00000000000023,
+          -47.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 236,
+      "versionNonce": 486107324,
+      "isDeleted": false,
+      "id": "x6ag69iBmWZIJHv7HVwRA",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": -22.749999999999545,
+      "y": 432.5833333333337,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 48.00000000000023,
+      "height": 71.66666666666629,
+      "seed": 1160369596,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -48.00000000000023,
+          -71.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 242,
+      "versionNonce": 1480298756,
+      "isDeleted": false,
+      "id": "MrCHxKpd3GmoFJv7BU8Pj",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 24.25000000000057,
+      "y": 484.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 57.66666666666629,
+      "seed": 1513549244,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          -57.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 294,
+      "versionNonce": 130347324,
+      "isDeleted": false,
+      "id": "ZCFIu84tckhRRxz7rGTAM",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 79.25000000000057,
+      "y": 509.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 48.00000000000023,
+      "height": 23.666666666666288,
+      "seed": 495615804,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -48.00000000000023,
+          -23.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 370,
+      "versionNonce": 2016071812,
+      "isDeleted": false,
+      "id": "OBDj_k0DS_qLD4Lu4TiNS",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 128.25000000000057,
+      "y": 496.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 46.00000000000023,
+      "height": 6.333333333333712,
+      "seed": 956200068,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -46.00000000000023,
+          6.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 417,
+      "versionNonce": 285960636,
+      "isDeleted": false,
+      "id": "PpvMGddKu7IoKQuSDCi0m",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 187.25000000000057,
+      "y": 511.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 58.00000000000023,
+      "height": 19.666666666666288,
+      "seed": 541565060,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -58.00000000000023,
+          -19.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 445,
+      "versionNonce": 1728454660,
+      "isDeleted": false,
+      "id": "mgZFXfa_VDnorPCGEVJMl",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 249.25000000000057,
+      "y": 516.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 68.00000000000023,
+      "height": 3.6666666666662877,
+      "seed": 1169952316,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.00000000000023,
+          -3.6666666666662877
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 496,
+      "versionNonce": 200200764,
+      "isDeleted": false,
+      "id": "pYCe_UCC0jKyZrGJrsdbS",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 316.25000000000057,
+      "y": 528.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 72.00000000000023,
+      "height": 11.666666666666288,
+      "seed": 1261476868,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -72.00000000000023,
+          -11.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 563,
+      "versionNonce": 214663044,
+      "isDeleted": false,
+      "id": "ccni85Hgt0jrhFzVodQW7",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 378.25000000000057,
+      "y": 550.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 66.00000000000023,
+      "height": 21.666666666666288,
+      "seed": 1268981380,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -66.00000000000023,
+          -21.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 679,
+      "versionNonce": 391195324,
+      "isDeleted": false,
+      "id": "8ieFTi2SbyxZJuRovzkhq",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 431.25000000000057,
+      "y": 521.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 56.00000000000023,
+      "height": 32.33333333333371,
+      "seed": 855829764,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -56.00000000000023,
+          32.33333333333371
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 817,
+      "versionNonce": 565767940,
+      "isDeleted": false,
+      "id": "LDHmam7L24JvZGIrBJspa",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 504.25000000000057,
+      "y": 556.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 72.00000000000023,
+      "height": 33.66666666666629,
+      "seed": 814936892,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -72.00000000000023,
+          -33.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 866,
+      "versionNonce": 634680124,
+      "isDeleted": false,
+      "id": "DKkhqzB9fiIaHSfbTs5Hf",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 558.2500000000006,
+      "y": 531.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 54.00000000000023,
+      "height": 26.333333333333712,
+      "seed": 2112752388,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -54.00000000000023,
+          26.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 900,
+      "versionNonce": 642543236,
+      "isDeleted": false,
+      "id": "In_-i7nQaVn4RT6R17NP_",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 607.2500000000006,
+      "y": 513.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 20.333333333333712,
+      "seed": 2029407748,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          20.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 965,
+      "versionNonce": 98216892,
+      "isDeleted": false,
+      "id": "_26qtU-907HmarW8FDw01",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 657.2500000000006,
+      "y": 536.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 23.666666666666288,
+      "seed": 1564517820,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          -23.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 992,
+      "versionNonce": 1531182596,
+      "isDeleted": false,
+      "id": "gT4OCrC_xG00jRyDNhasA",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 711.2500000000006,
+      "y": 528.5833333333336,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 4.333333333333712,
+      "seed": 1800500924,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          4.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1105,
+      "versionNonce": 1770378300,
+      "isDeleted": false,
+      "id": "-PSBwXz0fPEtaXetZOhfE",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 777.2500000000006,
+      "y": 292.5833333333338,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 62.00000000000023,
+      "height": 234.33333333333348,
+      "seed": 115779132,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -62.00000000000023,
+          234.33333333333348
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1209,
+      "versionNonce": 966630788,
+      "isDeleted": false,
+      "id": "CzhY7iYuoqicZiwsrRwbI",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 847.2500000000006,
+      "y": 313.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 68.00000000000023,
+      "height": 21.666666666666515,
+      "seed": 491552388,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.00000000000023,
+          -21.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1258,
+      "versionNonce": 1857547452,
+      "isDeleted": false,
+      "id": "_cU0g-Z4qtSCnKgzxCiUV",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 902.2500000000006,
+      "y": 255.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 52.00000000000023,
+      "height": 56.333333333333485,
+      "seed": 83374980,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -52.00000000000023,
+          56.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1306,
+      "versionNonce": 1788300548,
+      "isDeleted": false,
+      "id": "WVH0nyPvSvGuC8FOrSLXD",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 962.2500000000006,
+      "y": 306.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 62.00000000000023,
+      "height": 53.666666666666515,
+      "seed": 1470027396,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -62.00000000000023,
+          -53.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1339,
+      "versionNonce": 655553852,
+      "isDeleted": false,
+      "id": "UbL7-02Uti6SkTaelDUff",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1023.2500000000006,
+      "y": 311.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 60.00000000000023,
+      "height": 1.666666666666515,
+      "seed": 1490391228,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -60.00000000000023,
+          -1.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1390,
+      "versionNonce": 1068436612,
+      "isDeleted": false,
+      "id": "qEtub9h4YhD4ux7qMAEHb",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1090.2500000000007,
+      "y": 305.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 58.00000000000023,
+      "height": 6.333333333333485,
+      "seed": 1438117892,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -58.00000000000023,
+          6.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1432,
+      "versionNonce": 536407484,
+      "isDeleted": false,
+      "id": "o4VQv24DHYafkGB2wuTub",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1147.2500000000005,
+      "y": 303.58333333333394,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 56.00000000000023,
+      "height": 2.333333333333485,
+      "seed": 264172164,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206943953,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -56.00000000000023,
+          2.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 322,
+      "versionNonce": 950357052,
+      "isDeleted": false,
+      "id": "K2Ttzf4lcIgTTZxBbv8it",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": -63.74999999999943,
+      "y": -206.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 82.00000000000023,
+      "height": 47.666666666666515,
+      "seed": 979166852,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -82.00000000000023,
+          -47.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 303,
+      "versionNonce": 139568516,
+      "isDeleted": false,
+      "id": "gPtXrokmSCcmoRuGtC-dD",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": -11.749999999999886,
+      "y": -132.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 48.00000000000023,
+      "height": 71.66666666666629,
+      "seed": 1395504060,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -48.00000000000023,
+          -71.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 309,
+      "versionNonce": 455632060,
+      "isDeleted": false,
+      "id": "qSm-Tbhzl2ATFrnBM0pu3",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 35.25000000000023,
+      "y": -80.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 57.66666666666629,
+      "seed": 109409796,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          -57.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 361,
+      "versionNonce": 1435129092,
+      "isDeleted": false,
+      "id": "k9nO7PySXOoSPwFINb98A",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 90.25000000000023,
+      "y": -55.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 48.00000000000023,
+      "height": 23.666666666666288,
+      "seed": 657972284,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -48.00000000000023,
+          -23.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 437,
+      "versionNonce": 1588882748,
+      "isDeleted": false,
+      "id": "u7fsS56_uis6fHhDNfBvy",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 139.25000000000023,
+      "y": -68.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 46.00000000000023,
+      "height": 6.333333333333712,
+      "seed": 1844450692,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -46.00000000000023,
+          6.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 484,
+      "versionNonce": 1728124036,
+      "isDeleted": false,
+      "id": "7dysuUAY33pqlYrIW56Yo",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 198.25000000000023,
+      "y": -53.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 58.00000000000023,
+      "height": 19.666666666666288,
+      "seed": 954791100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -58.00000000000023,
+          -19.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 512,
+      "versionNonce": 1420518844,
+      "isDeleted": false,
+      "id": "QTzfcz8a_yDZL8yKYrIsp",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 260.2500000000002,
+      "y": -48.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 68.00000000000023,
+      "height": 3.6666666666662877,
+      "seed": 1574618372,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.00000000000023,
+          -3.6666666666662877
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 563,
+      "versionNonce": 712238084,
+      "isDeleted": false,
+      "id": "sDqdeK8VSQ87T23dbboS2",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 327.2500000000002,
+      "y": -36.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 72.00000000000023,
+      "height": 11.666666666666288,
+      "seed": 367197500,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -72.00000000000023,
+          -11.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 630,
+      "versionNonce": 1378831932,
+      "isDeleted": false,
+      "id": "0nmvr41pw_7V3mquds3Ex",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 389.2500000000002,
+      "y": -14.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 66.00000000000023,
+      "height": 21.666666666666288,
+      "seed": 1807975556,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -66.00000000000023,
+          -21.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 746,
+      "versionNonce": 635487108,
+      "isDeleted": false,
+      "id": "7GzAhLJLSncl7X69kNypn",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 442.2500000000002,
+      "y": -43.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 56.00000000000023,
+      "height": 32.33333333333371,
+      "seed": 1200092604,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -56.00000000000023,
+          32.33333333333371
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 884,
+      "versionNonce": 941762236,
+      "isDeleted": false,
+      "id": "bDmC_DZzmkK8tiBWxSM-E",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 515.2500000000002,
+      "y": -8.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 72.00000000000023,
+      "height": 33.66666666666629,
+      "seed": 1298682884,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -72.00000000000023,
+          -33.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 933,
+      "versionNonce": 1871033092,
+      "isDeleted": false,
+      "id": "QDy2M9HHYca5nUwInQRps",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 569.2500000000002,
+      "y": -33.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 54.00000000000023,
+      "height": 26.333333333333712,
+      "seed": 1526324796,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -54.00000000000023,
+          26.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 967,
+      "versionNonce": 2066083644,
+      "isDeleted": false,
+      "id": "z-gcWYAd1s8cDr_e--w7r",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 618.2500000000002,
+      "y": -51.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 20.333333333333712,
+      "seed": 1355232132,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          20.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1032,
+      "versionNonce": 635482756,
+      "isDeleted": false,
+      "id": "8Hki4QOjOZpcaIoZOYHmE",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 668.2500000000002,
+      "y": -28.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 23.666666666666288,
+      "seed": 1763358396,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          -23.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1059,
+      "versionNonce": 731686844,
+      "isDeleted": false,
+      "id": "gfdS3gWeFENltixJ4XQFK",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 722.2500000000002,
+      "y": -36.08333333333303,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 4.333333333333712,
+      "seed": 1411261188,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          4.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1172,
+      "versionNonce": 1739571716,
+      "isDeleted": false,
+      "id": "XimJqGX6fB3EoqZRM-jyk",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 788.2500000000002,
+      "y": -272.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 62.00000000000023,
+      "height": 234.33333333333348,
+      "seed": 512524092,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -62.00000000000023,
+          234.33333333333348
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1276,
+      "versionNonce": 2108182588,
+      "isDeleted": false,
+      "id": "cQASkarySEDymTPkzlTvs",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 858.2500000000002,
+      "y": -251.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 68.00000000000023,
+      "height": 21.666666666666515,
+      "seed": 1038819972,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.00000000000023,
+          -21.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1325,
+      "versionNonce": 1274077572,
+      "isDeleted": false,
+      "id": "cCa2mEJf2Kji-al6R9DrI",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 913.2500000000002,
+      "y": -309.08333333333275,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 52.00000000000023,
+      "height": 56.333333333333485,
+      "seed": 1135687612,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -52.00000000000023,
+          56.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1373,
+      "versionNonce": 1361766588,
+      "isDeleted": false,
+      "id": "qhOk9mfkjMiF5yjW-E9AQ",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 973.2500000000002,
+      "y": -258.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 62.00000000000023,
+      "height": 53.666666666666515,
+      "seed": 1961636356,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -62.00000000000023,
+          -53.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1406,
+      "versionNonce": 1507987716,
+      "isDeleted": false,
+      "id": "rcuV-MfeYOz0OmpKqBO4K",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1034.2500000000002,
+      "y": -253.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 60.00000000000023,
+      "height": 1.666666666666515,
+      "seed": 1152049212,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -60.00000000000023,
+          -1.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1457,
+      "versionNonce": 969306428,
+      "isDeleted": false,
+      "id": "gOK3Z9GgQxFsU4A295i2o",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1101.2500000000005,
+      "y": -259.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 58.00000000000023,
+      "height": 6.333333333333485,
+      "seed": 1478447492,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -58.00000000000023,
+          6.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1499,
+      "versionNonce": 16863364,
+      "isDeleted": false,
+      "id": "r9FejQehirTlYvREYFIsm",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1158.25,
+      "y": -261.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 56.00000000000023,
+      "height": 2.333333333333485,
+      "seed": 962295996,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206989922,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -56.00000000000023,
+          2.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 378,
+      "versionNonce": 57224196,
+      "isDeleted": false,
+      "id": "3P9ykcnwecang56KdBMHo",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": -65.74999999999909,
+      "y": -728.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 82.00000000000023,
+      "height": 47.666666666666515,
+      "seed": 115141052,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994550,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -82.00000000000023,
+          -47.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 359,
+      "versionNonce": 605193788,
+      "isDeleted": false,
+      "id": "mGz82noMlbJiopecCY0bh",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": -13.749999999999545,
+      "y": -654.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 48.00000000000023,
+      "height": 71.66666666666629,
+      "seed": 286040068,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -48.00000000000023,
+          -71.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 365,
+      "versionNonce": 1089276804,
+      "isDeleted": false,
+      "id": "YQNO_IH4g_Km73agPlA0u",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 33.25000000000057,
+      "y": -602.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 57.66666666666629,
+      "seed": 1267749436,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          -57.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 417,
+      "versionNonce": 1490968252,
+      "isDeleted": false,
+      "id": "06lFnPvk7rnBdthKpheQ8",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 88.25000000000057,
+      "y": -577.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 48.00000000000023,
+      "height": 23.666666666666288,
+      "seed": 838991748,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -48.00000000000023,
+          -23.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 493,
+      "versionNonce": 1908996868,
+      "isDeleted": false,
+      "id": "-dF0KQvy6M3WVC70y0hw-",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 137.25000000000057,
+      "y": -590.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 46.00000000000023,
+      "height": 6.333333333333712,
+      "seed": 1724033724,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -46.00000000000023,
+          6.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 540,
+      "versionNonce": 664479548,
+      "isDeleted": false,
+      "id": "iOQUl_TrZN4jQpwWaHdHw",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 196.25000000000057,
+      "y": -575.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 58.00000000000023,
+      "height": 19.666666666666288,
+      "seed": 1545563908,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -58.00000000000023,
+          -19.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 568,
+      "versionNonce": 276494980,
+      "isDeleted": false,
+      "id": "Eyz6GgCCs6NUqEbllLSUz",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 258.25000000000057,
+      "y": -570.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 68.00000000000023,
+      "height": 3.6666666666662877,
+      "seed": 185987900,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.00000000000023,
+          -3.6666666666662877
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 619,
+      "versionNonce": 78307260,
+      "isDeleted": false,
+      "id": "M7aaM7wufK45TGBE3acyW",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 325.25000000000057,
+      "y": -558.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 72.00000000000023,
+      "height": 11.666666666666288,
+      "seed": 1340272260,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -72.00000000000023,
+          -11.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 686,
+      "versionNonce": 398526980,
+      "isDeleted": false,
+      "id": "FQoTBni0gwxeK6AuxtRBo",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 387.25000000000057,
+      "y": -536.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 66.00000000000023,
+      "height": 21.666666666666288,
+      "seed": 1189882812,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -66.00000000000023,
+          -21.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 802,
+      "versionNonce": 137332796,
+      "isDeleted": false,
+      "id": "jAsWQlZvSAaZ8nA28HJTm",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 440.25000000000057,
+      "y": -565.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 56.00000000000023,
+      "height": 32.33333333333371,
+      "seed": 235568644,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -56.00000000000023,
+          32.33333333333371
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 940,
+      "versionNonce": 2056857988,
+      "isDeleted": false,
+      "id": "LXiH91kFENdigTUn85jtJ",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 513.2500000000006,
+      "y": -530.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 72.00000000000023,
+      "height": 33.66666666666629,
+      "seed": 208098364,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -72.00000000000023,
+          -33.66666666666629
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 989,
+      "versionNonce": 1980440764,
+      "isDeleted": false,
+      "id": "E7ec3g_XVVcoZ_J7dXIKY",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 567.2500000000006,
+      "y": -555.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 54.00000000000023,
+      "height": 26.333333333333712,
+      "seed": 1335106948,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -54.00000000000023,
+          26.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1023,
+      "versionNonce": 474044676,
+      "isDeleted": false,
+      "id": "-T6d4OuKtFOtrOHs4VGN4",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 616.2500000000006,
+      "y": -573.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 20.333333333333712,
+      "seed": 963210428,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          20.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1088,
+      "versionNonce": 1172285756,
+      "isDeleted": false,
+      "id": "4rOJ6Y23yICIBTk9kiXXM",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 666.2500000000006,
+      "y": -550.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 23.666666666666288,
+      "seed": 2109590788,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          -23.666666666666288
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1115,
+      "versionNonce": 1211603076,
+      "isDeleted": false,
+      "id": "fP6TxyHo8Q58M5YWkhFxf",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 720.2500000000006,
+      "y": -558.0833333333328,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 50.00000000000023,
+      "height": 4.333333333333712,
+      "seed": 529823036,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -50.00000000000023,
+          4.333333333333712
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1228,
+      "versionNonce": 722411964,
+      "isDeleted": false,
+      "id": "POPqBgKT9V9l4a72QtcoG",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 786.2500000000006,
+      "y": -794.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 62.00000000000023,
+      "height": 234.33333333333348,
+      "seed": 705006724,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -62.00000000000023,
+          234.33333333333348
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1332,
+      "versionNonce": 708438020,
+      "isDeleted": false,
+      "id": "SGHnkd1VhZg-GMkOkSy1Q",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 856.2500000000006,
+      "y": -773.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 68.00000000000023,
+      "height": 21.666666666666515,
+      "seed": 206204348,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.00000000000023,
+          -21.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1381,
+      "versionNonce": 482052668,
+      "isDeleted": false,
+      "id": "NZH1bmhyjaItqRXeCxrk0",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 911.2500000000006,
+      "y": -831.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 52.00000000000023,
+      "height": 56.333333333333485,
+      "seed": 103373828,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -52.00000000000023,
+          56.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1429,
+      "versionNonce": 1179010948,
+      "isDeleted": false,
+      "id": "7uFqcKb1-36zivtDmruQ6",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 971.2500000000005,
+      "y": -780.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 62.00000000000023,
+      "height": 53.666666666666515,
+      "seed": 1353537084,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -62.00000000000023,
+          -53.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1462,
+      "versionNonce": 1573315260,
+      "isDeleted": false,
+      "id": "XDOgjPTdTsDVaKOMV4-en",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1032.2500000000005,
+      "y": -775.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 60.00000000000023,
+      "height": 1.666666666666515,
+      "seed": 1546075012,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -60.00000000000023,
+          -1.666666666666515
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1513,
+      "versionNonce": 1889187588,
+      "isDeleted": false,
+      "id": "4S6wFPEC2FbZ_d9AVFZPp",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1099.250000000001,
+      "y": -781.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 58.00000000000023,
+      "height": 6.333333333333485,
+      "seed": 1235168956,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -58.00000000000023,
+          6.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 1555,
+      "versionNonce": 80948028,
+      "isDeleted": false,
+      "id": "tGpCGCSeXvgMpKisapj27",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1156.2500000000005,
+      "y": -783.0833333333326,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 56.00000000000023,
+      "height": 2.333333333333485,
+      "seed": 104672004,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666206994551,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -56.00000000000023,
+          2.333333333333485
+        ]
+      ]
+    }
+  ],
+  "appState": {
+    "gridSize": null,
+    "viewBackgroundColor": "#ffffff"
+  },
+  "files": {}
+}
\ No newline at end of file
diff --git a/docs/resources/images/flagJumpsPic.excalidraw.license b/docs/resources/images/flagJumpsPic.excalidraw.license
new file mode 100644
index 0000000000000000000000000000000000000000..f8c6bf8cd36fb9a9a0a0dd474407f40908bf5d1f
--- /dev/null
+++ b/docs/resources/images/flagJumpsPic.excalidraw.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+
+SPDX-License-Identifier: GPL-3.0-or-later
\ No newline at end of file
diff --git a/docs/resources/images/flagJumpsPic.png b/docs/resources/images/flagJumpsPic.png
new file mode 100644
index 0000000000000000000000000000000000000000..19bdac97e25adb90713d718000c0df54d65ae40b
Binary files /dev/null and b/docs/resources/images/flagJumpsPic.png differ
diff --git a/docs/resources/images/flagJumpsPic.png.license b/docs/resources/images/flagJumpsPic.png.license
new file mode 100644
index 0000000000000000000000000000000000000000..f8c6bf8cd36fb9a9a0a0dd474407f40908bf5d1f
--- /dev/null
+++ b/docs/resources/images/flagJumpsPic.png.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+
+SPDX-License-Identifier: GPL-3.0-or-later
\ No newline at end of file
diff --git a/docs/resources/images/flagOffsetPic.excalidraw b/docs/resources/images/flagOffsetPic.excalidraw
new file mode 100644
index 0000000000000000000000000000000000000000..9ce7c1f05b86247daa9ea32981a20938c9420d70
--- /dev/null
+++ b/docs/resources/images/flagOffsetPic.excalidraw
@@ -0,0 +1,1902 @@
+{
+  "type": "excalidraw",
+  "version": 2,
+  "source": "https://excalidraw.com",
+  "elements": [
+    {
+      "id": "6n0VhQC7wT6sQINZmCg5u",
+      "type": "ellipse",
+      "x": 1388.3333333333335,
+      "y": 446.66666666666663,
+      "width": 20,
+      "height": 20,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 29926416,
+      "version": 258,
+      "versionNonce": 1784003312,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 196,
+      "versionNonce": 1529588752,
+      "isDeleted": false,
+      "id": "oYwqckhAQF9yrxAHr9zA4",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 2045.0000000000005,
+      "y": 748.3333333333336,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 111903248,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 144,
+      "versionNonce": 2143345904,
+      "isDeleted": false,
+      "id": "XL5f1DxC2KzI5XW8wZlO3",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1113.3333333333335,
+      "y": 810.0000000000002,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 760309488,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 218,
+      "versionNonce": 2130422288,
+      "isDeleted": false,
+      "id": "Hv6f-Lnt1vrsLa6uMgtcP",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1205.0000000000002,
+      "y": 760,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 403750928,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 142,
+      "versionNonce": 1064812272,
+      "isDeleted": false,
+      "id": "yOYL3C6X_2Wum36YKrW7s",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1848.3333333333333,
+      "y": 803.3333333333335,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1046939888,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 166,
+      "versionNonce": 1679550480,
+      "isDeleted": false,
+      "id": "OwcDRVBKqBVpAceLRQFuu",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1320.0000000000002,
+      "y": 811.6666666666667,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 102645264,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 212,
+      "versionNonce": 1778540784,
+      "isDeleted": false,
+      "id": "oZHDLkCj0-hx2pGKbCgsk",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1469.9999999999998,
+      "y": 400.00000000000006,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 80992784,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 217,
+      "versionNonce": 1988388368,
+      "isDeleted": false,
+      "id": "MFwjMT9k_aFmDvI4XSiXG",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1616.6666666666672,
+      "y": 441.6666666666665,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 845731856,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 398,
+      "versionNonce": 1860748016,
+      "isDeleted": false,
+      "id": "d85wTiI9aI639N612BtyO",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1685,
+      "y": 386.66666666666697,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 257211920,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 282,
+      "versionNonce": 1663902736,
+      "isDeleted": false,
+      "id": "y6D8wraZUHJXITIX3Foqo",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1548.3333333333337,
+      "y": 413.33333333333337,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 826474512,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 385,
+      "versionNonce": 213050608,
+      "isDeleted": false,
+      "id": "7fXXE2m_lCveMRUVbWSrg",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1756.6666666666667,
+      "y": 855,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 397016080,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 430,
+      "versionNonce": 2014192144,
+      "isDeleted": false,
+      "id": "_NoxfWRurseG72I7CEmpF",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1936.6666666666665,
+      "y": 696.6666666666667,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1920849648,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "K1YqN2omh9_6-_nR3Pe6w",
+      "type": "rectangle",
+      "x": 1370,
+      "y": 541.6666666666667,
+      "width": 614.9999999999999,
+      "height": 276.66666666666663,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#e64980",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 988040720,
+      "version": 217,
+      "versionNonce": 69165072,
+      "isDeleted": false,
+      "boundElements": [
+        {
+          "id": "5INmjSKU_xWcxQd4UrPVB",
+          "type": "arrow"
+        },
+        {
+          "id": "mErkVWLdxbifTrkK9mX5A",
+          "type": "arrow"
+        }
+      ],
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "mErkVWLdxbifTrkK9mX5A",
+      "type": "arrow",
+      "x": 1350.0000000000002,
+      "y": 818.3333333333334,
+      "width": 0,
+      "height": 276.66666666666663,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 726409744,
+      "version": 349,
+      "versionNonce": 1477601520,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          0,
+          -276.66666666666663
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": {
+        "elementId": "K1YqN2omh9_6-_nR3Pe6w",
+        "focus": -1.0650406504065035,
+        "gap": 19.999999999999773
+      },
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow"
+    },
+    {
+      "id": "hM315HW1de2uN0AkOUGqk",
+      "type": "text",
+      "x": 1286.6666666666667,
+      "y": 629.6666666666666,
+      "width": 88,
+      "height": 36,
+      "angle": 4.722662591522264,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 294475280,
+      "version": 203,
+      "versionNonce": 767019760,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399761,
+      "link": null,
+      "locked": false,
+      "text": "thresh",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "thresh"
+    },
+    {
+      "id": "5INmjSKU_xWcxQd4UrPVB",
+      "type": "arrow",
+      "x": 1374.9999999999998,
+      "y": 521.6666666666666,
+      "width": 611.6666666666667,
+      "height": 5.684341886080802e-14,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 691520752,
+      "version": 242,
+      "versionNonce": 45259792,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          611.6666666666667,
+          5.684341886080802e-14
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": {
+        "elementId": "K1YqN2omh9_6-_nR3Pe6w",
+        "focus": -1.1445783132530116,
+        "gap": 20.000000000000057
+      },
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow"
+    },
+    {
+      "id": "javh5gQYaRC6k7FIYZuHP",
+      "type": "line",
+      "x": 1375,
+      "y": 510,
+      "width": 1.666666666666515,
+      "height": 26.666666666666742,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 712926736,
+      "version": 43,
+      "versionNonce": 744549616,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -1.666666666666515,
+          26.666666666666742
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "WvR2EwXDCtgPTvUtcbjv2",
+      "type": "line",
+      "x": 1336.6666666666667,
+      "y": 816.6666666666667,
+      "width": 25,
+      "height": 1.6666666666667425,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 279238384,
+      "version": 39,
+      "versionNonce": 2015664656,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          25,
+          -1.6666666666667425
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "s_KWappildj1TRX_bPcW5",
+      "type": "text",
+      "x": 1741.666666666667,
+      "y": 477.5,
+      "width": 87,
+      "height": 36,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 532219120,
+      "version": 130,
+      "versionNonce": 339328752,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086469877,
+      "link": null,
+      "locked": false,
+      "text": "window",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "window"
+    },
+    {
+      "id": "EBxeHJLMtV-iHLj2_GSKN",
+      "type": "rectangle",
+      "x": 1368.3333333333333,
+      "y": 748.3333333333335,
+      "width": 613.333333333333,
+      "height": 146.66666666666677,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 2,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 60,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 2085872368,
+      "version": 382,
+      "versionNonce": 715577360,
+      "isDeleted": false,
+      "boundElements": [],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "--MZNnWLhjKCtrm8n3BfP",
+      "type": "line",
+      "x": 1366.666666666667,
+      "y": 816.6666666666665,
+      "width": 398.33333333333326,
+      "height": 4.999999999999773,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dashed",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 502351888,
+      "version": 367,
+      "versionNonce": 683856624,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          398.33333333333326,
+          4.999999999999773
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "NKoD62eJxcfhjS63zLKTi",
+      "type": "line",
+      "x": 1768.3333333333335,
+      "y": 820,
+      "width": 0,
+      "height": 46.66666666666674,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 4,
+      "strokeStyle": "dotted",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 966626320,
+      "version": 41,
+      "versionNonce": 1564414480,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          0,
+          46.66666666666674
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "EnjIu3Z7GFUAgADybAK86",
+      "type": "arrow",
+      "x": 1930.0000000000002,
+      "y": 815.0000000000002,
+      "width": 0,
+      "height": 68.33333333333348,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 124673264,
+      "version": 502,
+      "versionNonce": 828799216,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          0,
+          68.33333333333348
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": {
+        "elementId": "w_EP7bkhj7djd37hd0vy_",
+        "focus": 1.695281120074737,
+        "gap": 13.1034892126753
+      },
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow"
+    },
+    {
+      "type": "arrow",
+      "version": 598,
+      "versionNonce": 1494076944,
+      "isDeleted": false,
+      "id": "KmT8_bYSEJBuLgOvx2KK7",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1929.1666666666667,
+      "y": 831.6666666666667,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "width": 2.2737367544323206e-13,
+      "height": 71.66666666666674,
+      "seed": 1225209872,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "startBinding": {
+        "elementId": "w_EP7bkhj7djd37hd0vy_",
+        "focus": -1.7406094922692483,
+        "gap": 14.036209535388139
+      },
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": "arrow",
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          2.2737367544323206e-13,
+          -71.66666666666674
+        ]
+      ]
+    },
+    {
+      "id": "w_EP7bkhj7djd37hd0vy_",
+      "type": "text",
+      "x": 1896.6666666666665,
+      "y": 807.5,
+      "width": 129,
+      "height": 36,
+      "angle": 1.5648321828010552,
+      "strokeColor": "#000000",
+      "backgroundColor": "#7950f2",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1826040048,
+      "version": 201,
+      "versionNonce": 90016272,
+      "isDeleted": false,
+      "boundElements": [
+        {
+          "id": "EnjIu3Z7GFUAgADybAK86",
+          "type": "arrow"
+        },
+        {
+          "id": "KmT8_bYSEJBuLgOvx2KK7",
+          "type": "arrow"
+        }
+      ],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false,
+      "text": "tolerance",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "tolerance"
+    },
+    {
+      "type": "ellipse",
+      "version": 174,
+      "versionNonce": 54642704,
+      "isDeleted": false,
+      "id": "9rvkJ4xNTpoZpBGVQr1-_",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1011.6666666666669,
+      "y": 806.6666666666667,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1252648176,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 250,
+      "versionNonce": 548045040,
+      "isDeleted": false,
+      "id": "7HcJkbEGd5zA0X3TGluMF",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 2138.333333333334,
+      "y": 728.3333333333333,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 160005136,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 266,
+      "versionNonce": 1978549776,
+      "isDeleted": false,
+      "id": "HMTIxBjnDAc79IUCnmsly",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 2220.000000000001,
+      "y": 776.6666666666667,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1168571920,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 499,
+      "versionNonce": 442172432,
+      "isDeleted": false,
+      "id": "8WKIIPfFjqiFXARDL8dtL",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "dotted",
+      "roughness": 2,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1378.3333333333333,
+      "y": 436.6666666666668,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 40.00000000000003,
+      "height": 36.66666666666663,
+      "seed": 737882128,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399762,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 570,
+      "versionNonce": 1575794704,
+      "isDeleted": false,
+      "id": "hp3AxOlf5YDvqUBjcY9MJ",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "dotted",
+      "roughness": 2,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1458.3333333333333,
+      "y": 390,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 40.00000000000003,
+      "height": 36.66666666666663,
+      "seed": 1167475440,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399763,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 615,
+      "versionNonce": 1315344624,
+      "isDeleted": false,
+      "id": "4-X1DH9az5fEIw47p_RGB",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "dotted",
+      "roughness": 2,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1541.6666666666667,
+      "y": 403.33333333333337,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 40.00000000000003,
+      "height": 36.66666666666663,
+      "seed": 1595464720,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399763,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 648,
+      "versionNonce": 498808336,
+      "isDeleted": false,
+      "id": "kSkZ9pD21rj9XXA6LlpkC",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "dotted",
+      "roughness": 2,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1610,
+      "y": 431.66666666666686,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 40.00000000000003,
+      "height": 36.66666666666663,
+      "seed": 1830009584,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086399763,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 679,
+      "versionNonce": 1003549200,
+      "isDeleted": false,
+      "id": "04Lp3S2corYUiDyYcwi06",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "dotted",
+      "roughness": 0,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1678.3333333333337,
+      "y": 381.66666666666674,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 40.00000000000003,
+      "height": 36.66666666666663,
+      "seed": 1774916848,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086401797,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "b1waF_9TBl_MVStscT8Rn",
+      "type": "line",
+      "x": 1018.3333333333334,
+      "y": 820,
+      "width": 101.66666666666663,
+      "height": 0,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1534785264,
+      "version": 32,
+      "versionNonce": 1252184592,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086407096,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          101.66666666666663,
+          0
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "YTyHQXvJy6OP5gnXI3lN6",
+      "type": "line",
+      "x": 1215,
+      "y": 770,
+      "width": 90,
+      "height": 48.333333333333485,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1127683824,
+      "version": 28,
+      "versionNonce": 2106521104,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086412055,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -90,
+          48.333333333333485
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "iRkafVSYsGm7iOqxdIlkV",
+      "type": "line",
+      "x": 1218.3333333333335,
+      "y": 771.6666666666667,
+      "width": 113.33333333333326,
+      "height": 48.33333333333326,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 529478384,
+      "version": 36,
+      "versionNonce": 2030473744,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086415463,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          113.33333333333326,
+          48.33333333333326
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "r6eYUDbeykCKHx13C5hZS",
+      "type": "line",
+      "x": 1396.6666666666667,
+      "y": 453.33333333333337,
+      "width": 63.33333333333326,
+      "height": 365.0000000000001,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1844401904,
+      "version": 47,
+      "versionNonce": 800302320,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086420216,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -63.33333333333326,
+          365.0000000000001
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "KJ_4ncvAHoobZXevap8wr",
+      "type": "line",
+      "x": 1476.6666666666667,
+      "y": 411.66666666666674,
+      "width": 80,
+      "height": 45,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 309944848,
+      "version": 33,
+      "versionNonce": 174801424,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086423477,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -80,
+          45
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "puGbdzEBqpDZf1ZOemlFV",
+      "type": "line",
+      "x": 1560,
+      "y": 425,
+      "width": 85,
+      "height": 18.333333333333258,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 376284912,
+      "version": 29,
+      "versionNonce": 1024049904,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086426519,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -85,
+          -18.333333333333258
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "MqbLSuK3wyBQSHbXYvqTh",
+      "type": "line",
+      "x": 1630,
+      "y": 448.33333333333337,
+      "width": 71.66666666666652,
+      "height": 25,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1134306320,
+      "version": 22,
+      "versionNonce": 908556528,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086429015,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -71.66666666666652,
+          -25
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "uO5_iUDObbEsucforDUjP",
+      "type": "line",
+      "x": 1695,
+      "y": 396.66666666666674,
+      "width": 68.33333333333326,
+      "height": 58.33333333333326,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1021103632,
+      "version": 29,
+      "versionNonce": 2036331024,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086431291,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -68.33333333333326,
+          58.33333333333326
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "TU8PBsP6eRc_DXgLLPa2S",
+      "type": "line",
+      "x": 1696.6666666666667,
+      "y": 396.66666666666674,
+      "width": 73.33333333333326,
+      "height": 470,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1113323248,
+      "version": 30,
+      "versionNonce": 1543951376,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086436304,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          73.33333333333326,
+          470
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "29Zx12RKnzdy1GvwfOCN6",
+      "type": "line",
+      "x": 1855,
+      "y": 811.6666666666667,
+      "width": 86.66666666666652,
+      "height": 51.66666666666674,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1804350704,
+      "version": 30,
+      "versionNonce": 115209744,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086440794,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -86.66666666666652,
+          51.66666666666674
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "weth6y_srf3nYxfr6qeWW",
+      "type": "line",
+      "x": 1948.3333333333335,
+      "y": 710,
+      "width": 90,
+      "height": 101.66666666666674,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1454387952,
+      "version": 34,
+      "versionNonce": 2145555472,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086445742,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -90,
+          101.66666666666674
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "DqgHyF5XqVQ_j-wkpEXEm",
+      "type": "line",
+      "x": 2053.3333333333335,
+      "y": 756.6666666666667,
+      "width": 110,
+      "height": 53.33333333333326,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1414296816,
+      "version": 40,
+      "versionNonce": 26078224,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086449734,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -110,
+          -53.33333333333326
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "6--2rF6FaIjJIg7PxsQqG",
+      "type": "line",
+      "x": 2151.666666666667,
+      "y": 735,
+      "width": 95,
+      "height": 25,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 396532976,
+      "version": 21,
+      "versionNonce": 1452290288,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086455039,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -95,
+          25
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "KApLuvMdVHC5SorMDEETK",
+      "type": "line",
+      "x": 2231.666666666667,
+      "y": 783.3333333333335,
+      "width": 85,
+      "height": 45,
+      "angle": 0,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1048286736,
+      "version": 34,
+      "versionNonce": 205566704,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086458099,
+      "link": null,
+      "locked": false,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -85,
+          -45
+        ]
+      ],
+      "lastCommittedPoint": null,
+      "startBinding": null,
+      "endBinding": null,
+      "startArrowhead": null,
+      "endArrowhead": null
+    },
+    {
+      "id": "aLDREFOyyyEerjGWCr0l6",
+      "type": "rectangle",
+      "x": 988.3333333333335,
+      "y": 203.33333333333337,
+      "width": 338.3333333333332,
+      "height": 145,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 1788482576,
+      "version": 299,
+      "versionNonce": 1024099344,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086884093,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 407,
+      "versionNonce": 1373340912,
+      "isDeleted": false,
+      "id": "22A7zki5z8yjgUmlTYslq",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1008.333333333333,
+      "y": 226.66666666666663,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 1419970288,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086884093,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 457,
+      "versionNonce": 1866433040,
+      "isDeleted": false,
+      "id": "2-8VheBd2S9xOltaDm1dl",
+      "fillStyle": "solid",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "angle": 0,
+      "x": 1009.9999999999998,
+      "y": 286.66666666666674,
+      "strokeColor": "#000000",
+      "backgroundColor": "#000",
+      "width": 20,
+      "height": 20,
+      "seed": 836076784,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086884093,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "ellipse",
+      "version": 815,
+      "versionNonce": 1207789296,
+      "isDeleted": false,
+      "id": "f7e-87z2kZlMlHT3JipkK",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "dotted",
+      "roughness": 2,
+      "opacity": 100,
+      "angle": 0,
+      "x": 998.3333333333333,
+      "y": 276.66666666666674,
+      "strokeColor": "#000000",
+      "backgroundColor": "#fa5252",
+      "width": 40.00000000000003,
+      "height": 36.66666666666663,
+      "seed": 602243600,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086884093,
+      "link": null,
+      "locked": false
+    },
+    {
+      "id": "fJUpHXq0KQdkzp24oPD9U",
+      "type": "text",
+      "x": 1063.3333333333335,
+      "y": 219.16666666666663,
+      "width": 216,
+      "height": 36,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 2111011344,
+      "version": 270,
+      "versionNonce": 1311031312,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086884093,
+      "link": null,
+      "locked": false,
+      "text": "unflagged value",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "unflagged value"
+    },
+    {
+      "id": "wRIFRYte5B8UdwuFnnAFK",
+      "type": "text",
+      "x": 1056.6666666666667,
+      "y": 280.83333333333326,
+      "width": 252,
+      "height": 36,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 2059429904,
+      "version": 251,
+      "versionNonce": 596838640,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086884093,
+      "link": null,
+      "locked": false,
+      "text": "flagged as offset",
+      "fontSize": 28,
+      "fontFamily": 1,
+      "textAlign": "left",
+      "verticalAlign": "top",
+      "baseline": 25,
+      "containerId": null,
+      "originalText": "flagged as offset"
+    },
+    {
+      "id": "TGTnsFkzVfxpbXKzlRY9Q",
+      "type": "rectangle",
+      "x": 941.6666666666667,
+      "y": 146.66666666666674,
+      "width": 1366.6666666666667,
+      "height": 826.6666666666667,
+      "angle": 0,
+      "strokeColor": "#000000",
+      "backgroundColor": "transparent",
+      "fillStyle": "hachure",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 1,
+      "opacity": 100,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "seed": 2386672,
+      "version": 117,
+      "versionNonce": 1141694192,
+      "isDeleted": false,
+      "boundElements": null,
+      "updated": 1666086953213,
+      "link": null,
+      "locked": false
+    },
+    {
+      "type": "line",
+      "version": 162,
+      "versionNonce": 1942913552,
+      "isDeleted": false,
+      "id": "hZOydP7WRhqDurSDb5efj",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 2310.833333333333,
+      "y": 800.8333333333337,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 81.66666666666652,
+      "height": 13.333333333333485,
+      "seed": 1593016560,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086972578,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -81.66666666666652,
+          -13.333333333333485
+        ]
+      ]
+    },
+    {
+      "type": "line",
+      "version": 75,
+      "versionNonce": 1101283568,
+      "isDeleted": false,
+      "id": "t4dVl0e2nk9RtP9pgVtQ7",
+      "fillStyle": "cross-hatch",
+      "strokeWidth": 1,
+      "strokeStyle": "solid",
+      "roughness": 0,
+      "opacity": 40,
+      "angle": 0,
+      "x": 1022.4999999999995,
+      "y": 819.1666666666667,
+      "strokeColor": "#495057",
+      "backgroundColor": "#fa5252",
+      "width": 83.33333333333348,
+      "height": 21.666666666666742,
+      "seed": 257165552,
+      "groupIds": [],
+      "strokeSharpness": "sharp",
+      "boundElements": [],
+      "updated": 1666086986701,
+      "link": null,
+      "locked": false,
+      "startBinding": null,
+      "endBinding": null,
+      "lastCommittedPoint": null,
+      "startArrowhead": null,
+      "endArrowhead": null,
+      "points": [
+        [
+          0,
+          0
+        ],
+        [
+          -83.33333333333348,
+          21.666666666666742
+        ]
+      ]
+    }
+  ],
+  "appState": {
+    "gridSize": null,
+    "viewBackgroundColor": "#ffffff"
+  },
+  "files": {}
+}
\ No newline at end of file
diff --git a/docs/resources/images/flagOffsetPic.excalidraw.license b/docs/resources/images/flagOffsetPic.excalidraw.license
new file mode 100644
index 0000000000000000000000000000000000000000..f8c6bf8cd36fb9a9a0a0dd474407f40908bf5d1f
--- /dev/null
+++ b/docs/resources/images/flagOffsetPic.excalidraw.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+
+SPDX-License-Identifier: GPL-3.0-or-later
\ No newline at end of file
diff --git a/docs/resources/images/flagOffsetPic.png b/docs/resources/images/flagOffsetPic.png
new file mode 100644
index 0000000000000000000000000000000000000000..2dd724e9553949392be2957c827482b6351bf05b
Binary files /dev/null and b/docs/resources/images/flagOffsetPic.png differ
diff --git a/docs/resources/images/flagOffsetPic.png.license b/docs/resources/images/flagOffsetPic.png.license
new file mode 100644
index 0000000000000000000000000000000000000000..f8c6bf8cd36fb9a9a0a0dd474407f40908bf5d1f
--- /dev/null
+++ b/docs/resources/images/flagOffsetPic.png.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+
+SPDX-License-Identifier: GPL-3.0-or-later
\ No newline at end of file
diff --git a/docs/resources/temp/SM1processingResults.png b/docs/resources/temp/SM1processingResults.png
index faa80afa331ad35e5828f6f2c726223c742e0c8e..84134babc58aaa1d93a39f9d3d836854d26f0228 100644
Binary files a/docs/resources/temp/SM1processingResults.png and b/docs/resources/temp/SM1processingResults.png differ
diff --git a/docs/resources/temp/SM2processingResults.png b/docs/resources/temp/SM2processingResults.png
index 644eaadfa21b5c9031ad779e71c010f9e2003fb3..d9c0dbde9d6bc93f730c763f4938396917f414c8 100644
Binary files a/docs/resources/temp/SM2processingResults.png and b/docs/resources/temp/SM2processingResults.png differ
diff --git a/requirements.txt b/requirements.txt
index 8a6bb169e96fa31961a0fa9302a933ce7472b3b1..42bd4d01c17378852f8829c0e768a60e96e5ca41 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,13 +4,13 @@
 
 Click==8.1.3
 dtw==1.4.0
-hypothesis==6.46.11
-matplotlib==3.5.2
-numba==0.55.2
+hypothesis==6.55.0
+matplotlib==3.5.3
+numba==0.56.3
 numpy==1.21.6
 outlier-utils==0.0.3
-pyarrow==8.0.0
+pyarrow==9.0.0
 pandas==1.3.5
 scikit-learn==1.0.2
 scipy==1.7.3
-typing_extensions==4.2.0
+typing_extensions==4.3.0
diff --git a/saqc/constants.py b/saqc/constants.py
index d425a9111abac83d5cf0ab3757624088d529dc40..839795e0e02e1d8cc1b3f3a3cefb7119e9e01eda 100644
--- a/saqc/constants.py
+++ b/saqc/constants.py
@@ -59,6 +59,9 @@ FILTER_NONE = np.inf
 # ----------------------------------------------------------------------
 # other
 # ----------------------------------------------------------------------
+def clip(series, lower=None, upper=None):
+    return series.clip(lower=lower, upper=upper)
+
 
 ENVIRONMENT = {
     # Infinity constant
@@ -105,6 +108,7 @@ ENVIRONMENT = {
     "madScore": ts_ops.standardizeByMedian,
     # Standardize with Median and inter quantile range.
     "iqsScore": ts_ops.standardizeByIQR,
+    "clip": clip,
     "GOOD": GOOD,
     "BAD": BAD,
     "UNFLAGGED": UNFLAGGED,
diff --git a/saqc/core/core.py b/saqc/core/core.py
index b1ea076b9c4a3a58fc92b4ac152db56a4747e7bc..abb47c7a511ef1aefac7a732e77ff83bd28cedf1 100644
--- a/saqc/core/core.py
+++ b/saqc/core/core.py
@@ -10,19 +10,15 @@ from __future__ import annotations
 import warnings
 from copy import copy as shallowcopy
 from copy import deepcopy
-from typing import Any, Callable, Hashable, List, MutableMapping, Sequence, Tuple
+from typing import Any, Hashable, MutableMapping
 
 import numpy as np
 import pandas as pd
 
-# the import is needed to trigger the registration
-# of the built-in (test-)functions
-import saqc.funcs  # noqa
 from dios import DictOfSeries, to_dios
 from saqc.core.flags import Flags, initFlagsLike
 from saqc.core.history import History
-from saqc.core.modules import FunctionsMixin
-from saqc.core.register import FUNC_MAP, FunctionWrapper
+from saqc.core.register import FUNC_MAP
 from saqc.core.translation import (
     DmpScheme,
     FloatScheme,
@@ -30,8 +26,8 @@ from saqc.core.translation import (
     SimpleScheme,
     TranslationScheme,
 )
-from saqc.lib.tools import concatDios, toSequence
-from saqc.lib.types import ExternalFlag, OptionalNone
+from saqc.funcs import FunctionsMixin
+from saqc.lib.tools import concatDios
 
 # warnings
 pd.set_option("mode.chained_assignment", "warn")
@@ -60,13 +56,13 @@ class SaQC(FunctionsMixin):
         flags=None,
         scheme: str | TranslationScheme = "float",
     ):
-        self._data = self._initData(data)
-        self._flags = self._initFlags(flags)
-        self._scheme = self._initTranslationScheme(scheme)
-        self._attrs = {}
+        self._data: DictOfSeries = self._initData(data)
+        self._flags: Flags = self._initFlags(flags)
+        self._scheme: TranslationScheme = self._initTranslationScheme(scheme)
+        self._attrs: dict = {}
         self._validate(reason="init")
 
-    def _construct(self, **attributes) -> SaQC:
+    def _construct(self, **attributes) -> "SaQC":
         """
         Construct a new `SaQC`-Object from `self` and optionally inject
         attributes without any checking and overhead.
@@ -118,132 +114,17 @@ class SaQC(FunctionsMixin):
         flags.attrs = self._attrs.copy()
         return flags
 
-    def _expandFields(
-        self,
-        regex: bool,
-        field: str | Sequence[str],
-        target: str | Sequence[str] = None,
-    ) -> Tuple[List[str], List[str]]:
-        """
-        check and expand `field` and `target`
-        """
-
-        # expand regular expressions
-        if regex:
-            fmask = self._data.columns.str.match(field)
-            fields = self._data.columns[fmask].tolist()
-        else:
-            fields = toSequence(field)
-
-        targets = fields if target is None else toSequence(target)
-
-        return fields, targets
-
-    def _wrap(self, func: FunctionWrapper):
-        """
-        prepare user function input:
-          - expand fields and targets
-          - translate user given ``flag`` values or set the default ``BAD``
-          - translate user given ``dfilter`` values or set the scheme default
-          - depending on the workflow: initialize ``target`` variables
-
-        Here we add the following parameters to all registered functions, regardless
-        of their respective definition:
-          - ``regex``
-          - ``target``
-
-        """
-
-        def inner(
-            field: str | Sequence[str],
-            *args,
-            target: str | Sequence[str] = None,
-            regex: bool = False,
-            flag: ExternalFlag | OptionalNone = OptionalNone(),
-            **kwargs,
-        ) -> SaQC:
-
-            if "dfilter" not in kwargs:
-                # let's see if the function has a default value
-                default = func.func_signature.parameters.get("dfilter")
-                if default:
-                    default = default.default
-                kwargs["dfilter"] = default or self._scheme.DFILTER_DEFAULT
-
-            if not isinstance(flag, OptionalNone):
-                # translation schemes might want to use a flag
-                # `None` so we introduce a special class here
-                kwargs["flag"] = self._scheme(flag)
-
-            fields, targets = self._expandFields(
-                regex=regex, field=field, target=target
-            )
-            out = self.copy(deep=True)
-
-            if not func.handles_target:
-                if len(fields) != len(targets):
-                    raise ValueError(
-                        "expected the same number of 'field' and 'target' values"
-                    )
-
-                # initialize all target variables
-                for src, trg in zip(fields, targets):
-                    if src != trg:
-                        out = out._callFunction(
-                            FUNC_MAP["copyField"],
-                            field=src,
-                            target=trg,
-                            overwrite=True,
-                        )
-
-            if func.multivariate:
-                # pass all fields and targets
-                out = out._callFunction(
-                    func,
-                    field=fields,
-                    target=targets,
-                    *args,
-                    **kwargs,
-                )
-            else:
-                # call the function on target
-                for src, trg in zip(fields, targets):
-                    fkwargs = {**kwargs, "field": src, "target": trg}
-                    if not func.handles_target:
-                        fkwargs["field"] = fkwargs.pop("target")
-                    out = out._callFunction(func, *args, **fkwargs)
-
-            return out
-
-        return inner
-
-    def _callFunction(
-        self,
-        function: Callable,
-        field: str | Sequence[str],
-        *args: Any,
-        **kwargs: Any,
-    ) -> SaQC:
-
-        res = function(data=self._data, flags=self._flags, field=field, *args, **kwargs)
-
-        # keep consistency: if we modify data and flags inplace in a function,
-        # but data is the original and flags is a copy (as currently implemented),
-        # data and flags of the original saqc obj may change inconsistently.
-        self._data, self._flags = res
-        self._validate(reason=f"call to {repr(function.__name__)}")
-
-        return self._construct(_data=self._data, _flags=self._flags)
-
     def __getattr__(self, key):
         """
         All failing attribute accesses are redirected to __getattr__.
         We use this mechanism to make the registered functions appear
         as `SaQC`-methods without actually implementing them.
         """
+        from functools import partial
+
         if key not in FUNC_MAP:
             raise AttributeError(f"SaQC has no attribute {repr(key)}")
-        return self._wrap(FUNC_MAP[key])
+        return partial(FUNC_MAP[key], self)
 
     def copy(self, deep=True):
         copyfunc = deepcopy if deep else shallowcopy
@@ -286,7 +167,7 @@ class SaQC(FunctionsMixin):
 
         raise TypeError(
             "'data' must be of type pandas.Series, "
-            "pandas.DataFrame or dios.DictOfSeries or"
+            "pandas.DataFrame or dios.DictOfSeries or "
             "a list of those."
         )
 
diff --git a/saqc/core/flags.py b/saqc/core/flags.py
index c5acb68c43df3317e316043aa733e7b1fac5acb5..48482a703e22089967f15ad4c95675e47062a2c0 100644
--- a/saqc/core/flags.py
+++ b/saqc/core/flags.py
@@ -25,6 +25,7 @@ DictLike = Union[
 _Field = str
 SelectT = Union[
     _Field,
+    Tuple[np.ndarray, _Field],
     Tuple[pd.Series, _Field],
     Tuple[pd.Index, _Field],
     Tuple[slice, _Field],
@@ -194,6 +195,8 @@ class Flags:
         self, raw_data: Optional[Union[DictLike, Flags]] = None, copy: bool = False
     ):
 
+        self._data: dict[str, History]
+
         if raw_data is None:
             raw_data = {}
 
diff --git a/saqc/core/modules/__init__.py b/saqc/core/modules/__init__.py
deleted file mode 100644
index 1c9f1b6a35911fa0258044f3062beaa93600bc90..0000000000000000000000000000000000000000
--- a/saqc/core/modules/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from saqc.core.modules.breaks import Breaks
-from saqc.core.modules.changepoints import ChangePoints
-from saqc.core.modules.constants import Constants
-from saqc.core.modules.curvefit import Curvefit
-from saqc.core.modules.drift import Drift
-from saqc.core.modules.flagtools import FlagTools
-from saqc.core.modules.generic import Generic
-from saqc.core.modules.interpolation import Interpolation
-from saqc.core.modules.noise import Noise
-from saqc.core.modules.outliers import Outliers
-from saqc.core.modules.pattern import Pattern
-from saqc.core.modules.resampling import Resampling
-from saqc.core.modules.residuals import Residuals
-from saqc.core.modules.rolling import Rolling
-from saqc.core.modules.scores import Scores
-from saqc.core.modules.tools import Tools
-from saqc.core.modules.transformation import Transformation
-from saqc.core.register import FUNC_MAP
-
-
-class FunctionsMixin(
-    Breaks,
-    Noise,
-    ChangePoints,
-    Constants,
-    Curvefit,
-    Drift,
-    FlagTools,
-    Generic,
-    Interpolation,
-    Outliers,
-    Pattern,
-    Resampling,
-    Residuals,
-    Rolling,
-    Scores,
-    Tools,
-    Transformation,
-):
-    def _defer(self, fname, flocals):
-        flocals.pop("self", None)
-        fkwargs = flocals.pop("kwargs", {})
-        return self._wrap(FUNC_MAP[fname])(**flocals, **fkwargs)
diff --git a/saqc/core/modules/breaks.py b/saqc/core/modules/breaks.py
deleted file mode 100644
index 76ffee5a937a84060efa8f51bdf66924431bb11d..0000000000000000000000000000000000000000
--- a/saqc/core/modules/breaks.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD, FILTER_ALL
-from saqc.lib.docurator import doc
-
-
-class Breaks:
-    @doc(saqc.funcs.breaks.flagMissing.__doc__)
-    def flagMissing(
-        self, field: str, flag: float = BAD, dfilter: float = FILTER_ALL, **kwargs
-    ) -> saqc.SaQC:
-        return self._defer("flagMissing", locals())
-
-    @doc(saqc.funcs.breaks.flagIsolated.__doc__)
-    def flagIsolated(
-        self,
-        field: str,
-        gap_window: str,
-        group_window: str,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagIsolated", locals())
-
-    @doc(saqc.funcs.breaks.flagJumps.__doc__)
-    def flagJumps(
-        self,
-        field: str,
-        thresh: float,
-        window: str,
-        min_periods: int = 1,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagJumps", locals())
diff --git a/saqc/core/modules/changepoints.py b/saqc/core/modules/changepoints.py
deleted file mode 100644
index dd1db109676b2d5d126d9c5a370fe2224c358916..0000000000000000000000000000000000000000
--- a/saqc/core/modules/changepoints.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Tuple
-
-import numpy as np
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class ChangePoints:
-    @doc(saqc.funcs.changepoints.flagChangePoints.__doc__)
-    def flagChangePoints(
-        self,
-        field: str,
-        stat_func: Callable[[np.ndarray, np.ndarray], float],
-        thresh_func: Callable[[np.ndarray, np.ndarray], float],
-        window: str | Tuple[str, str],
-        min_periods: int | Tuple[int, int],
-        closed: Literal["right", "left", "both", "neither"] = "both",
-        reduce_window: str = None,
-        reduce_func: Callable[[np.ndarray, np.ndarray], int] = lambda x, _: x.argmax(),
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagChangePoints", locals())
-
-    @doc(saqc.funcs.changepoints.assignChangePointCluster.__doc__)
-    def assignChangePointCluster(
-        self,
-        field: str,
-        stat_func: Callable[[np.array, np.array], float],
-        thresh_func: Callable[[np.array, np.array], float],
-        window: str | Tuple[str, str],
-        min_periods: int | Tuple[int, int],
-        closed: Literal["right", "left", "both", "neither"] = "both",
-        reduce_window: str = None,
-        reduce_func: Callable[
-            [np.ndarray, np.ndarray], float
-        ] = lambda x, _: x.argmax(),
-        model_by_resids: bool = False,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("assignChangePointCluster", locals())
diff --git a/saqc/core/modules/constants.py b/saqc/core/modules/constants.py
deleted file mode 100644
index 6391bb4e903bfd4278f3ece68d63f03c773deb2d..0000000000000000000000000000000000000000
--- a/saqc/core/modules/constants.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Constants:
-    @doc(saqc.funcs.constants.flagByVariance.__doc__)
-    def flagByVariance(
-        self,
-        field: str,
-        window: str,
-        thresh: float,
-        maxna: int | None = None,
-        maxna_group: int | None = None,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagByVariance", locals())
-
-    @doc(saqc.funcs.constants.flagConstants.__doc__)
-    def flagConstants(
-        self, field: str, thresh: float, window: int | str, flag: float = BAD, **kwargs
-    ) -> saqc.SaQC:
-        return self._defer("flagConstants", locals())
diff --git a/saqc/core/modules/curvefit.py b/saqc/core/modules/curvefit.py
deleted file mode 100644
index f915ba5ce56d5a8f9c0095969529a9a7039f4995..0000000000000000000000000000000000000000
--- a/saqc/core/modules/curvefit.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Union
-
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from dios import DictOfSeries
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Curvefit:
-    @doc(saqc.funcs.curvefit.fitPolynomial.__doc__)
-    def fitPolynomial(
-        self,
-        field: str,
-        window: int | str,
-        order: int,
-        min_periods: int = 0,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("fitPolynomial", locals())
diff --git a/saqc/core/modules/drift.py b/saqc/core/modules/drift.py
deleted file mode 100644
index 2e152d7629ea9589fc4aeae3fc895d2246e4e617..0000000000000000000000000000000000000000
--- a/saqc/core/modules/drift.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Optional, Sequence, Union
-
-import numpy as np
-from scipy.spatial.distance import pdist
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.funcs import LinkageString
-from saqc.lib.docurator import doc
-from saqc.lib.types import CurveFitter
-
-
-class Drift:
-    @doc(saqc.funcs.drift.flagDriftFromNorm.__doc__)
-    def flagDriftFromNorm(
-        self,
-        field: Sequence[str],
-        freq: str,
-        spread: float,
-        frac: float = 0.5,
-        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(
-            np.array([x, y]), metric="cityblock"
-        )
-        / len(x),
-        method: LinkageString = "single",
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagDriftFromNorm", locals())
-
-    @doc(saqc.funcs.drift.flagDriftFromReference.__doc__)
-    def flagDriftFromReference(
-        self,
-        field: Sequence[str],
-        reference: str,
-        freq: str,
-        thresh: float,
-        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(
-            np.array([x, y]), metric="cityblock"
-        )
-        / len(x),
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagDriftFromReference", locals())
-
-    @doc(saqc.funcs.drift.correctDrift.__doc__)
-    def correctDrift(
-        self,
-        field: str,
-        maintenance_field: str,
-        model: Callable[..., float] | Literal["linear", "exponential"],
-        cal_range: int = 5,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("correctDrift", locals())
-
-    @doc(saqc.funcs.drift.correctRegimeAnomaly.__doc__)
-    def correctRegimeAnomaly(
-        self,
-        field: str,
-        cluster_field: str,
-        model: CurveFitter,
-        tolerance: Optional[str] = None,
-        epoch: bool = False,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("correctRegimeAnomaly", locals())
-
-    @doc(saqc.funcs.drift.correctOffset.__doc__)
-    def correctOffset(
-        self,
-        field: str,
-        max_jump: float,
-        spread: float,
-        window: str,
-        min_periods: int,
-        tolerance: Optional[str] = None,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("correctOffset", locals())
-
-    @doc(saqc.funcs.drift.flagRegimeAnomaly.__doc__)
-    def flagRegimeAnomaly(
-        self,
-        field: str,
-        cluster_field: str,
-        spread: float,
-        method: LinkageString = "single",
-        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
-            np.nanmean(x) - np.nanmean(y)
-        ),
-        frac: float = 0.5,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagRegimeAnomaly", locals())
-
-    @doc(saqc.funcs.drift.assignRegimeAnomaly.__doc__)
-    def assignRegimeAnomaly(
-        self,
-        field: str,
-        cluster_field: str,
-        spread: float,
-        method: LinkageString = "single",
-        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
-            np.nanmean(x) - np.nanmean(y)
-        ),
-        frac: float = 0.5,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("assignRegimeAnomaly", locals())
diff --git a/saqc/core/modules/flagtools.py b/saqc/core/modules/flagtools.py
deleted file mode 100644
index 66efc28d70f0a56bdfa12c157380fb3093628bc0..0000000000000000000000000000000000000000
--- a/saqc/core/modules/flagtools.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Any, Sequence, Union
-
-import numpy as np
-import pandas as pd
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from dios import DictOfSeries
-from saqc.constants import BAD, FILTER_ALL
-from saqc.lib.docurator import doc
-
-
-class FlagTools:
-    @doc(saqc.funcs.flagtools.clearFlags.__doc__)
-    def clearFlags(self, field: str, **kwargs) -> saqc.SaQC:
-        return self._defer("clearFlags", locals())
-
-    @doc(saqc.funcs.flagtools.forceFlags.__doc__)
-    def forceFlags(self, field: str, flag: float = BAD, **kwargs) -> saqc.SaQC:
-        return self._defer("forceFlags", locals())
-
-    @doc(saqc.funcs.flagtools.forceFlags.__doc__)
-    def flagDummy(self, field: str, **kwargs) -> saqc.SaQC:
-        return self._defer("flagDummy", locals())
-
-    @doc(saqc.funcs.flagtools.flagUnflagged.__doc__)
-    def flagUnflagged(self, field: str, flag: float = BAD, **kwargs) -> saqc.SaQC:
-        return self._defer("flagUnflagged", locals())
-
-    @doc(saqc.funcs.flagtools.flagManual.__doc__)
-    def flagManual(
-        self,
-        field: str,
-        mdata: Union[pd.Series, pd.DataFrame, DictOfSeries, list, np.array],
-        method: Literal[
-            "left-open", "right-open", "closed", "plain", "ontime"
-        ] = "left-open",
-        mformat: Literal["start-end", "mflag"] = "start-end",
-        mflag: Any = 1,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagManual", locals())
-
-    @doc(saqc.funcs.flagtools.transferFlags.__doc__)
-    def transferFlags(
-        self,
-        field: str | Sequence[str],
-        target: str | Sequence[str],
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("transferFlags", locals())
-
-    @doc(saqc.funcs.flagtools.propagateFlags.__doc__)
-    def propagateFlags(
-        self,
-        field: str | Sequence[str],
-        window: Union[str, int],
-        method: Literal["ffill", "bfill"] = "ffill",
-        flag: float = BAD,
-        dfilter: float = FILTER_ALL,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("propagateFlags", locals())
diff --git a/saqc/core/modules/generic.py b/saqc/core/modules/generic.py
deleted file mode 100644
index 8759bebd74d5583c66c8ba62a899b975306febea..0000000000000000000000000000000000000000
--- a/saqc/core/modules/generic.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Sequence, Union
-
-import numpy as np
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD, FILTER_ALL
-from saqc.lib.docurator import doc
-from saqc.lib.types import GenericFunction
-
-
-class Generic:
-    @doc(saqc.funcs.generic.processGeneric.__doc__)
-    def processGeneric(
-        self,
-        field: str | Sequence[str],
-        func: GenericFunction,
-        target: str | Sequence[str] | None = None,
-        dfilter: float = FILTER_ALL,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("processGeneric", locals())
-
-    @doc(saqc.funcs.generic.flagGeneric.__doc__)
-    def flagGeneric(
-        self,
-        field: Union[str, Sequence[str]],
-        func: GenericFunction,
-        target: Union[str, Sequence[str]] = None,
-        flag: float = BAD,
-        dfilter: float = FILTER_ALL,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagGeneric", locals())
diff --git a/saqc/core/modules/interpolation.py b/saqc/core/modules/interpolation.py
deleted file mode 100644
index 0a949a9bbe9efbf56a9621afc14b5996986810d6..0000000000000000000000000000000000000000
--- a/saqc/core/modules/interpolation.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Union
-
-import numpy as np
-import pandas as pd
-
-import saqc
-import saqc.funcs
-from saqc.constants import UNFLAGGED
-from saqc.funcs.interpolation import _SUPPORTED_METHODS
-from saqc.lib.docurator import doc
-
-
-class Interpolation:
-    @doc(saqc.funcs.interpolation.interpolateByRolling.__doc__)
-    def interpolateByRolling(
-        self,
-        field: str,
-        window: Union[str, int],
-        func: Callable[[pd.Series], float] = np.median,
-        center: bool = True,
-        min_periods: int = 0,
-        flag: float = UNFLAGGED,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("interpolateByRolling", locals())
-
-    @doc(saqc.funcs.interpolation.interpolateInvalid.__doc__)
-    def interpolateInvalid(
-        self,
-        field: str,
-        method: _SUPPORTED_METHODS,
-        order: int = 2,
-        limit: int = 2,
-        downgrade: bool = False,
-        flag: float = UNFLAGGED,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("interpolateInvalid", locals())
-
-    @doc(saqc.funcs.interpolation.interpolateIndex.__doc__)
-    def interpolateIndex(
-        self,
-        field: str,
-        freq: str,
-        method: _SUPPORTED_METHODS,
-        order: int = 2,
-        limit: int = 2,
-        downgrade: bool = False,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("interpolateIndex", locals())
diff --git a/saqc/core/modules/noise.py b/saqc/core/modules/noise.py
deleted file mode 100644
index 9b1209ad37b6860903a53542498343ad447cb919..0000000000000000000000000000000000000000
--- a/saqc/core/modules/noise.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable
-
-import numpy as np
-import pandas as pd
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Noise:
-    @doc(saqc.funcs.noise.flagByStatLowPass.__doc__)
-    def flagByStatLowPass(
-        self,
-        field: str,
-        func: Callable[[np.ndarray, pd.Series], float],
-        window: str | pd.Timedelta,
-        thresh: float,
-        sub_window: str | pd.Timedelta = None,
-        sub_thresh: float = None,
-        min_periods: int = None,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagByStatLowPass", locals())
diff --git a/saqc/core/modules/outliers.py b/saqc/core/modules/outliers.py
deleted file mode 100644
index 261c8146e734ff94c6f191a8839d0904f4eda12b..0000000000000000000000000000000000000000
--- a/saqc/core/modules/outliers.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Optional, Sequence, Union
-
-import numpy as np
-import pandas as pd
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Outliers:
-    @doc(saqc.funcs.outliers.flagByStray.__doc__)
-    def flagByStray(
-        self,
-        field: str,
-        window: Optional[Union[int, str]] = None,
-        min_periods: int = 11,
-        iter_start: float = 0.5,
-        alpha: float = 0.05,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagByStray", locals())
-
-    @doc(saqc.funcs.outliers.flagMVScores.__doc__)
-    def flagMVScores(
-        self,
-        field: Sequence[str],
-        trafo: Callable[[pd.Series], pd.Series] = lambda x: x,
-        alpha: float = 0.05,
-        n: int = 10,
-        func: Callable[[pd.Series], float] = np.sum,
-        iter_start: float = 0.5,
-        partition: Optional[Union[int, str]] = None,
-        partition_min: int = 11,
-        stray_range: Optional[str] = None,
-        drop_flagged: bool = False,  # TODO: still a case ?
-        thresh: float = 3.5,
-        min_periods: int = 1,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagMVScores", locals())
-
-    @doc(saqc.funcs.outliers.flagRaise.__doc__)
-    def flagRaise(
-        self,
-        field: str,
-        thresh: float,
-        raise_window: str,
-        freq: str,
-        average_window: Optional[str] = None,
-        raise_factor: float = 2.0,
-        slope: Optional[float] = None,
-        weight: float = 0.8,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagRaise", locals())
-
-    @doc(saqc.funcs.outliers.flagMAD.__doc__)
-    def flagMAD(
-        self,
-        field: str,
-        window: str,
-        z: float = 3.5,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagMAD", locals())
-
-    @doc(saqc.funcs.outliers.flagOffset.__doc__)
-    def flagOffset(
-        self,
-        field: str,
-        tolerance: float,
-        window: Union[int, str],
-        thresh: Optional[float] = None,
-        thresh_relative: Optional[float] = None,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagOffset", locals())
-
-    @doc(saqc.funcs.outliers.flagByGrubbs.__doc__)
-    def flagByGrubbs(
-        self,
-        field: str,
-        window: Union[str, int],
-        alpha: float = 0.05,
-        min_periods: int = 8,
-        pedantic: bool = False,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagByGrubbs", locals())
-
-    @doc(saqc.funcs.outliers.flagRange.__doc__)
-    def flagRange(
-        self,
-        field: str,
-        min: float = -np.inf,
-        max: float = np.inf,
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagRange", locals())
-
-    @doc(saqc.funcs.outliers.flagCrossStatistics.__doc__)
-    def flagCrossStatistics(
-        self,
-        field: Sequence[str],
-        thresh: float,
-        method: Literal["modZscore", "Zscore"] = "modZscore",
-        flag: float = BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagCrossStatistics", locals())
diff --git a/saqc/core/modules/pattern.py b/saqc/core/modules/pattern.py
deleted file mode 100644
index 2c37f51d88b31b483711e43f0931b32d103104ab..0000000000000000000000000000000000000000
--- a/saqc/core/modules/pattern.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Pattern:
-    @doc(saqc.funcs.pattern.flagPatternByDTW.__doc__)
-    def flagPatternByDTW(
-        self,
-        field,
-        reference,
-        max_distance=0.0,
-        normalize=True,
-        plot=False,
-        flag=BAD,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("flagPatternByDTW", locals())
diff --git a/saqc/core/modules/resampling.py b/saqc/core/modules/resampling.py
deleted file mode 100644
index 1dfa0b780d1d4121bb68f88d0aa47faec739751d..0000000000000000000000000000000000000000
--- a/saqc/core/modules/resampling.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Optional
-
-import numpy as np
-import pandas as pd
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.funcs.interpolation import _SUPPORTED_METHODS
-from saqc.lib.docurator import doc
-
-
-class Resampling:
-    @doc(saqc.funcs.resampling.linear.__doc__)
-    def linear(
-        self,
-        field: str,
-        freq: str,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("linear", locals())
-
-    @doc(saqc.funcs.resampling.interpolate.__doc__)
-    def interpolate(
-        self,
-        field: str,
-        freq: str,
-        method: _SUPPORTED_METHODS,
-        order: int = 1,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("interpolate", locals())
-
-    @doc(saqc.funcs.resampling.shift.__doc__)
-    def shift(
-        self,
-        field: str,
-        freq: str,
-        method: Literal["fshift", "bshift", "nshift"] = "nshift",
-        freq_check: Optional[Literal["check", "auto"]] = None,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("shift", locals())
-
-    @doc(saqc.funcs.resampling.resample.__doc__)
-    def resample(
-        self,
-        field: str,
-        freq: str,
-        func: Callable[[pd.Series], pd.Series] = np.mean,
-        method: Literal["fagg", "bagg", "nagg"] = "bagg",
-        maxna: Optional[int] = None,
-        maxna_group: Optional[int] = None,
-        maxna_flags: Optional[int] = None,  # TODO: still a case ??
-        maxna_group_flags: Optional[int] = None,
-        flag_func: Callable[[pd.Series], float] = max,
-        freq_check: Optional[Literal["check", "auto"]] = None,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("resample", locals())
-
-    @doc(saqc.funcs.resampling.concatFlags.__doc__)
-    def concatFlags(
-        self,
-        field: str,
-        target: str,
-        method: Literal[
-            "inverse_fagg",
-            "inverse_bagg",
-            "inverse_nagg",
-            "inverse_fshift",
-            "inverse_bshift",
-            "inverse_nshift",
-            "inverse_interpolation",
-            "match",
-        ] = "match",
-        freq: Optional[str] = None,
-        drop: Optional[bool] = False,
-        squeeze: Optional[bool] = False,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("concatFlags", locals())
diff --git a/saqc/core/modules/residuals.py b/saqc/core/modules/residuals.py
deleted file mode 100644
index e764099d1c4f0f13c4ac12d836f0f5cc6ed2242c..0000000000000000000000000000000000000000
--- a/saqc/core/modules/residuals.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Optional, Union
-
-import numpy as np
-import pandas as pd
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Residuals:
-    @doc(saqc.funcs.residuals.calculatePolynomialResiduals.__doc__)
-    def calculatePolynomialResiduals(
-        self,
-        field: str,
-        window: Union[str, int],
-        order: int,
-        min_periods: Optional[int] = 0,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("calculatePolynomialResiduals", locals())
-
-    @doc(saqc.funcs.residuals.calculateRollingResiduals.__doc__)
-    def calculateRollingResiduals(
-        self,
-        field: str,
-        window: Union[str, int],
-        func: Callable[[pd.Series], np.ndarray] = np.mean,
-        min_periods: Optional[int] = 0,
-        center: bool = True,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("calculateRollingResiduals", locals())
diff --git a/saqc/core/modules/rolling.py b/saqc/core/modules/rolling.py
deleted file mode 100644
index 108453d4e04f760f07eb35a1486afb1a7cd0cd9b..0000000000000000000000000000000000000000
--- a/saqc/core/modules/rolling.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-
-from typing import Callable, Union
-
-import numpy as np
-import pandas as pd
-
-import saqc.funcs
-from saqc.constants import BAD
-from saqc.lib.docurator import doc
-
-
-class Rolling:
-    @doc(saqc.funcs.rolling.roll.__doc__)
-    def roll(
-        self,
-        field: str,
-        window: Union[str, int],
-        func: Callable[[pd.Series], np.ndarray] = np.mean,
-        min_periods: int = 0,
-        center: bool = True,
-        **kwargs
-    ):
-        return self._defer("roll", locals())
diff --git a/saqc/core/modules/scores.py b/saqc/core/modules/scores.py
deleted file mode 100644
index 9fd3466efb716b0c088051f99876b470a41018a8..0000000000000000000000000000000000000000
--- a/saqc/core/modules/scores.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Sequence, Union
-
-import numpy as np
-import pandas as pd
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.lib.docurator import doc
-
-
-class Scores:
-    @doc(saqc.funcs.scores.assignKNNScore.__doc__)
-    def assignKNNScore(
-        self,
-        field: Sequence[str],
-        target: str,
-        n: int = 10,
-        func: Callable[[pd.Series], float] = np.sum,
-        freq: Union[float, str] = np.inf,
-        min_periods: int = 2,
-        method: Literal["ball_tree", "kd_tree", "brute", "auto"] = "ball_tree",
-        metric: str = "minkowski",
-        p: int = 2,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("assignKNNScore", locals())
diff --git a/saqc/core/modules/tools.py b/saqc/core/modules/tools.py
deleted file mode 100644
index 3eeb930feac622b810c3e187ba6508f25bc8e2ca..0000000000000000000000000000000000000000
--- a/saqc/core/modules/tools.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Optional
-
-import numpy as np
-from typing_extensions import Literal
-
-import saqc
-import saqc.funcs
-from saqc.constants import FILTER_NONE
-from saqc.lib.docurator import doc
-
-
-class Tools:
-    @doc(saqc.funcs.tools.copyField.__doc__)
-    def copyField(
-        self, field: str, target: str, overwrite: bool = False, **kwargs
-    ) -> saqc.SaQC:
-        return self._defer("copyField", locals())
-
-    @doc(saqc.funcs.tools.dropField.__doc__)
-    def dropField(self, field: str, **kwargs) -> saqc.SaQC:
-        return self._defer("dropField", locals())
-
-    @doc(saqc.funcs.tools.renameField.__doc__)
-    def renameField(self, field: str, new_name: str, **kwargs) -> saqc.SaQC:
-        return self._defer("renameField", locals())
-
-    @doc(saqc.funcs.tools.selectTime.__doc__)
-    def selectTime(
-        self,
-        field: str,
-        mode: Literal["periodic", "selection_field"],
-        selection_field: Optional[str] = None,
-        start: Optional[str] = None,
-        end: Optional[str] = None,
-        closed: bool = True,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("selectTime", locals())
-
-    @doc(saqc.funcs.tools.plot.__doc__)
-    def plot(
-        self,
-        field: str,
-        path: Optional[str] = None,
-        max_gap: Optional[str] = None,
-        history: Optional[Literal["valid", "complete"] | list] = "valid",
-        xscope: Optional[slice] = None,
-        phaseplot: Optional[str] = None,
-        store_kwargs: Optional[dict] = None,
-        ax_kwargs: Optional[dict] = None,
-        dfilter: Optional[float] = FILTER_NONE,
-        **kwargs,
-    ) -> saqc.SaQC:
-
-        return self._defer("plot", locals())
diff --git a/saqc/core/modules/transformation.py b/saqc/core/modules/transformation.py
deleted file mode 100644
index 3952f89c423339d8aef3c184e1f375f4a07c6fde..0000000000000000000000000000000000000000
--- a/saqc/core/modules/transformation.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-from __future__ import annotations
-
-from typing import Callable, Optional, Union
-
-import pandas as pd
-
-import saqc
-import saqc.funcs
-from saqc.lib.docurator import doc
-
-
-class Transformation:
-    @doc(saqc.funcs.transformation.transform.__doc__)
-    def transform(
-        self,
-        field: str,
-        func: Callable[[pd.Series], pd.Series],
-        freq: Optional[Union[float, str]] = None,
-        **kwargs,
-    ) -> saqc.SaQC:
-        return self._defer("transform", locals())
diff --git a/saqc/core/register.py b/saqc/core/register.py
index 29ddcce9765156a3b34459a51449e6178f9f2184..148f37d8d3be7ed197745b62bfd1934ab944ab4b 100644
--- a/saqc/core/register.py
+++ b/saqc/core/register.py
@@ -9,15 +9,21 @@ from __future__ import annotations
 import functools
 import inspect
 import warnings
-from typing import Any, Callable, Dict, Sequence, Tuple
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Sequence, Tuple, TypeVar
 
 import numpy as np
 import pandas as pd
+from typing_extensions import ParamSpec
 
 import dios
-from saqc.constants import FILTER_ALL, UNFLAGGED
+from saqc.constants import FILTER_ALL, FILTER_NONE, UNFLAGGED
 from saqc.core.flags import Flags, History
+from saqc.core.translation.basescheme import TranslationScheme
 from saqc.lib.tools import squeezeSequence, toSequence
+from saqc.lib.types import ExternalFlag, OptionalNone
+
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
 
 # NOTE:
 # the global SaQC function store,
@@ -26,329 +32,237 @@ FUNC_MAP: Dict[str, Callable] = {}
 
 _is_list_like = pd.api.types.is_list_like
 
+T = TypeVar("T")
+P = ParamSpec("P")
 
-class FunctionWrapper:
-    def __init__(
-        self,
-        func: Callable,
-        mask: list,
-        demask: list,
-        squeeze: list,
-        multivariate: bool = False,
-        handles_target: bool = False,
-    ):
-        # todo:
-        #  - meta only is written with squeeze
-
-        self.func = func
-        self.func_name = func.__name__
-        self.func_signature = inspect.signature(func)
-
-        # ensure type and all elements exist in signature
-        self._checkDecoratorKeywords(mask, demask, squeeze)
-
-        self.decorator_mask = mask
-        self.decorator_demask = demask
-        self.decorator_squeeze = squeeze
-        self.multivariate = multivariate
-        self.handles_target = handles_target
-
-        # set in __call__
-        self.data = None
-        self.flags = None
-        self.fields = None
-        self.args = None
-        self.kwargs = None
-        self.mask_thresh = None
-        self.stored_data = None
-
-        # make ourself look like the wrapped function, especially the docstring
-        functools.update_wrapper(self, func)
-
-    def _checkDecoratorKeywords(self, mask, demask, squeeze):
-        params = self.func_signature.parameters.keys()
-        for dec_arg, name in zip(
-            [mask, demask, squeeze], ["mask", "demask", "squeeze"]
-        ):
-            typeerr = TypeError(
-                f"type of decorator argument '{name}' must "
-                f"be a list of strings, not {repr(type(dec_arg))}"
-            )
-            if not isinstance(dec_arg, list):
-                raise typeerr
-            for elem in dec_arg:
-                if not isinstance(elem, str):
-                    raise typeerr
-                if elem not in params:
-                    raise ValueError(
-                        f"passed value {repr(elem)} in {repr(name)} is not an "
-                        f"parameter in decorated function {repr(self.func_name)}"
-                    )
-
-    @staticmethod
-    def _argnamesToColumns(names: list, values: dict):
-        clist = []
-        for name in names:
-            value = values.get(name)  # eg. the value behind 'field'
-
-            # NOTE: do not change order of the tests
-            if value is None:
-                pass
-            elif isinstance(value, str):
-                clist.append(value)
-            # we ignore DataFrame, Series, DictOfSeries
-            # and high order types alike
-            elif hasattr(value, "columns"):
-                pass
-            elif _is_list_like(value) and all([isinstance(e, str) for e in value]):
-                clist += value
-        return pd.Index(clist)
-
-    @staticmethod
-    def _warn(missing, source):
-        if len(missing) == 0:
-            return
-        action = source + "ed"
-        obj = "flags" if source == "squeeze" else "data"
-        warnings.warn(
-            f"Column(s) {repr(missing)} cannot not be {action} "
-            f"because they are not present in {obj}. ",
-            RuntimeWarning,
-        )
 
-    def __call__(
-        self, data: dios.DictOfSeries, field: str, flags: Flags, *args, **kwargs
-    ) -> Tuple[dios.DictOfSeries, Flags]:
-        """
-        This wraps a call to a saqc function.
-
-        Before the saqc function call it copies flags and maybe mask data (inplace).
-        After the call it maybe squeezes modified histories and maybe reinsert the
-        masked data locations.
-
-        If the squeezing and/or the masking and/or the demasking will happen, depends
-        on the decorator keywords `handles` and `datamask`. See ``_determineActions``,
-        for that.
-        """
-        # keep this the original values
-        self.data = data
-        self.flags = flags
-        self.fields = toSequence(field)
-        self.args = args
-        self.kwargs = self._checkKwargs(kwargs)
-
-        self.mask_thresh = self._getMaskingThresh()
-
-        # skip (data, field, flags)
-        names = list(self.func_signature.parameters.keys())[3 : 3 + len(args)]
-        all_args = {"field": field, **dict(zip(names, args)), **kwargs}
-
-        # find columns that need masking
-        columns = self._argnamesToColumns(self.decorator_mask, all_args)
-        self._warn(columns.difference(self.data.columns).to_list(), source="mask")
-        columns = columns.intersection(self.data.columns)
-
-        masked, stored = self._maskData(
-            data=self.data,
-            flags=self.flags,
-            columns=columns,
-            thresh=self.mask_thresh,
+def _checkDecoratorKeywords(
+    func_signature, func_name, mask, demask, squeeze, handles_target
+):
+    params = func_signature.parameters.keys()
+    if "target" in params and not handles_target:
+        raise TypeError(
+            "functions defining a parameter named 'target' "
+            "need to decorated with 'handles_target=True'"
+        )
+    for dec_arg, name in zip([mask, demask, squeeze], ["mask", "demask", "squeeze"]):
+        typeerr = TypeError(
+            f"type of decorator argument '{name}' must "
+            f"be a list of strings, not {repr(type(dec_arg))}"
         )
-        self.data = masked
-        self.stored_data = stored
-
-        args, kwargs = self._prepareArgs()
-        data, flags = self.func(*args, **kwargs)
-
-        # find columns that need squeezing
-        columns = self._argnamesToColumns(self.decorator_squeeze, all_args)
-        self._warn(columns.difference(flags.columns).to_list(), source="squeeze")
-        columns = columns.intersection(flags.columns)
-
-        # if the function did not want to set any flags at all,
-        # we assume a processing function that altered the flags
-        # in an unpredictable manner or do nothing with the flags.
-        # in either case we take the returned flags as the new truth.
-        if columns.empty:
-            result_flags = flags
-        else:
-            # even if this looks like a noop for columns=[],
-            # it returns the old instead the new flags and
-            # therefore ignores any possible processing changes
-            result_flags = self._squeezeFlags(flags, columns)
-
-        # find columns that need demasking
-        columns = self._argnamesToColumns(self.decorator_demask, all_args)
-        self._warn(columns.difference(data.columns).to_list(), source="demask")
-        columns = columns.intersection(data.columns)
-
-        result_data = self._unmaskData(data, self.stored_data, columns=columns)
-
-        return result_data, result_flags
-
-    @staticmethod
-    def _checkKwargs(kwargs: dict) -> dict[str, Any]:
-        if "dfilter" in kwargs and not isinstance(
-            kwargs["dfilter"], (bool, float, int)
-        ):
-            raise TypeError(f"'dfilter' must be of type bool or float")
-        return kwargs
-
-    def _prepareArgs(self) -> Tuple[tuple, dict[str, Any]]:
-        """
-        Prepare the args and kwargs passed to the function
-        Returns
-        -------
-        args: tuple
-            arguments to be passed to the actual call
-        kwargs: dict
-            keyword-arguments to be passed to the actual call
-        """
-        kwargs = self.kwargs.copy()
-        kwargs["dfilter"] = self.mask_thresh
-
-        # always pass a list to multivariate functions and
-        # unpack single element lists for univariate functions
-        if self.multivariate:
-            field = self.fields
-        else:
-            field = squeezeSequence(self.fields)
-
-        args = self.data, field, self.flags.copy(), *self.args
-        return args, kwargs
-
-    def _getMaskingThresh(self) -> float:
-        """
-        Generate a float threshold by the value of the `dfilter` keyword
-
-        Returns
-        -------
-        threshold: float
-            All data gets masked, if the flags are equal or worse than the threshold.
-
-        Notes
-        -----
-        If ``dfilter`` is **not** in the kwargs, the threshold defaults to `FILTER_ALL`.
-        For any floatish value, it is taken as the threshold.
-        """
-        if "dfilter" not in self.kwargs:
-            return FILTER_ALL
-        return float(self.kwargs["dfilter"])  # handle int
-
-    def _createMeta(self) -> dict:
-        return {
-            "func": self.func_name,
-            "args": self.args,
-            "kwargs": self.kwargs,
-        }
-
-    def _squeezeFlags(self, flags: Flags, columns: pd.Index) -> Flags:
-        """
-        Generate flags from the temporary result-flags and the original flags.
-
-        Parameters
-        ----------
-        flags : Flags
-            The flags-frame, which is the result from a saqc-function
-
-        Returns
-        -------
-        Flags
-        """
-        out = self.flags.copy()  # the old flags
-        meta = self._createMeta()
-        for col in columns:
-
-            # todo: shouldn't we fail or warn here or even have a explicit test upstream
-            #  because the function should ensure consistence, especially because
-            #  a empty history maybe issnt what is expected, but this happens silently
-            if col not in out:  # ensure existence
-                out.history[col] = History(index=flags.history[col].index)
-
-            old_history = out.history[col]
-            new_history = flags.history[col]
-
-            # We only want to add new columns, that were appended during the last
-            # function call. If no such columns exist, we end up with an empty
-            # new_history.
-            start = len(old_history.columns)
-            new_history = self._sliceHistory(new_history, slice(start, None))
-
-            squeezed = new_history.squeeze(raw=True)
-            out.history[col] = out.history[col].append(squeezed, meta=meta)
-
-        return out
-
-    @staticmethod
-    def _sliceHistory(history: History, sl: slice) -> History:
-        history.hist = history.hist.iloc[:, sl]
-        history.meta = history.meta[sl]
-        return history
-
-    @staticmethod
-    def _maskData(
-        data: dios.DictOfSeries, flags: Flags, columns: Sequence[str], thresh: float
-    ) -> Tuple[dios.DictOfSeries, dios.DictOfSeries]:
-        """
-        Mask data with Nans, if the flags are worse than a threshold.
-            - mask only passed `columns` (preselected by `datamask`-kw from decorator)
-
-        Returns
-        -------
-        masked : dios.DictOfSeries
-            masked data, same dim as original
-        mask : dios.DictOfSeries
-            dios holding iloc-data-pairs for every column in `data`
-        """
-        mask = dios.DictOfSeries(columns=columns)
-
-        # we use numpy here because it is faster
-        for c in columns:
-            col_mask = _isflagged(flags[c].to_numpy(), thresh)
-
-            if col_mask.any():
-                col_data = data[c].to_numpy(dtype=np.float64)
-
-                mask[c] = pd.Series(col_data[col_mask], index=np.where(col_mask)[0])
-
-                col_data[col_mask] = np.nan
-                data[c] = col_data
-
-        return data, mask
-
-    @staticmethod
-    def _unmaskData(
-        data: dios.DictOfSeries, mask: dios.DictOfSeries, columns: pd.Index = None
-    ) -> dios.DictOfSeries:
-        """
-        Restore the masked data.
-
-        Notes
-        -----
-        - Even if this returns data, it works inplace !
-        - `mask` is not a boolean mask, instead it holds the original values.
-          The index of mask is numeric and represent the integer location
-          in the original data.
-        """
-        if columns is None:
-            columns = data.columns  # field was in old, is in mask and is in new
-        columns = mask.columns.intersection(columns)
-
-        for c in columns:
-
-            # ignore
-            if data[c].empty or mask[c].empty:
-                continue
-
-            # get the positions of values to unmask
-            candidates = mask[c]
-            # if the mask was removed during the function call, don't replace
-            unmask = candidates[data[c].iloc[candidates.index].isna().to_numpy()]
-            if unmask.empty:
-                continue
-            data[c].iloc[unmask.index] = unmask
-
-        return data
+        if not isinstance(dec_arg, list):
+            raise typeerr
+        for elem in dec_arg:
+            if not isinstance(elem, str):
+                raise typeerr
+            if elem not in params:
+                raise ValueError(
+                    f"passed value {repr(elem)} in {repr(name)} is not an "
+                    f"parameter in decorated function {repr(func_name)}"
+                )
+
+
+def _argnamesToColumns(names: list, values: dict):
+    clist = []
+    for name in names:
+        value = values.get(name)  # eg. the value behind 'field'
+
+        # NOTE: do not change order of the tests
+        if value is None:
+            pass
+        elif isinstance(value, str):
+            clist.append(value)
+        # we ignore DataFrame, Series, DictOfSeries
+        # and high order types alike
+        elif hasattr(value, "columns"):
+            pass
+        elif _is_list_like(value) and all([isinstance(e, str) for e in value]):
+            clist += value
+    return pd.Index(clist)
+
+
+def _warn(missing, source):
+    if len(missing) == 0:
+        return
+    action = source + "ed"
+    obj = "flags" if source == "squeeze" else "data"
+    warnings.warn(
+        f"Column(s) {repr(missing)} cannot not be {action} "
+        f"because they are not present in {obj}. ",
+        RuntimeWarning,
+    )
+
+
+def _getDfilter(
+    func_signature: inspect.Signature,
+    translation_scheme: TranslationScheme,
+    kwargs: Dict[str, Any],
+) -> float:
+    """
+    Find a default value for dfilter, either from the chosen translation scheme
+    or from a default defined by the function itself. Translate the value, if necessary.
+    """
+    dfilter = kwargs.get("dfilter")
+    if dfilter is None or isinstance(dfilter, OptionalNone):
+        # let's see if the function defines a default value
+        default = func_signature.parameters.get("dfilter")
+        if default:
+            default = default.default
+        if default == inspect.Signature.empty:
+            # the function does not define a default value for 'dfilter'
+            default = None
+        dfilter = translation_scheme.DFILTER_DEFAULT or default
+    else:
+        # try to translate dfilter
+        if dfilter not in {FILTER_ALL, FILTER_NONE, translation_scheme.DFILTER_DEFAULT}:
+            dfilter = translation_scheme(dfilter)
+    return float(dfilter)
+
+
+def _squeezeFlags(old_flags, new_flags: Flags, columns: pd.Index, meta) -> Flags:
+    """
+    Generate flags from the temporary result-flags and the original flags.
+
+    Parameters
+    ----------
+    old_flags : Flags
+        The flags as they were before the saqc-function call.
+    new_flags : Flags
+        The (temporary) flags-frame returned by the saqc-function call.
+    columns : pd.Index
+        Columns whose newly appended history entries get squeezed.
+
+    Returns
+    -------
+    Flags
+    """
+    out = old_flags.copy()  # the old flags
+
+    for col in columns.union(
+        new_flags.columns.difference(old_flags.columns)
+    ):  # account for newly added columns
+
+        if col not in out:  # ensure existence
+            out.history[col] = History(index=new_flags.history[col].index)
+
+        old_history = out.history[col]
+        new_history = new_flags.history[col]
+
+        # We only want to add new columns that were appended during the last
+        # function call. If no such columns exist, we end up with an empty
+        # new_history.
+        start = len(old_history.columns)
+        new_history = _sliceHistory(new_history, slice(start, None))
+
+        squeezed = new_history.squeeze(raw=True)
+        out.history[col] = out.history[col].append(squeezed, meta=meta)
+
+    return out
+
+
+def _sliceHistory(history: History, sl: slice) -> History:
+    history.hist = history.hist.iloc[:, sl]
+    history.meta = history.meta[sl]
+    return history
+
+
+def _maskData(
+    data: dios.DictOfSeries, flags: Flags, columns: Sequence[str], thresh: float
+) -> Tuple[dios.DictOfSeries, dios.DictOfSeries]:
+    """
+    Mask data with NaNs, if the flags are worse than a threshold.
+        - mask only the passed `columns` (preselected via the decorator's `mask` keyword)
+
+    Returns
+    -------
+    masked : dios.DictOfSeries
+        masked data, same dim as original
+    mask : dios.DictOfSeries
+        dios holding iloc-data-pairs for every column in `data`
+    """
+    mask = dios.DictOfSeries(columns=columns)
+
+    # we use numpy here because it is faster
+    for c in columns:
+        col_mask = _isflagged(flags[c], thresh)
+
+        if col_mask.any():
+            col_data = data[c].to_numpy(dtype=np.float64)
+
+            mask[c] = pd.Series(col_data[col_mask], index=np.where(col_mask)[0])
+
+            col_data[col_mask] = np.nan
+            data[c] = col_data
+
+    return data, mask
+
+
+def _unmaskData(
+    data: dios.DictOfSeries, mask: dios.DictOfSeries, columns: pd.Index | None = None
+) -> dios.DictOfSeries:
+    """
+    Restore the masked data.
+
+    Notes
+    -----
+    - Even though this returns data, it works in place!
+    - `mask` is not a boolean mask; instead it holds the original values.
+      The index of `mask` is numeric and represents the integer locations
+      in the original data.
+    """
+    if columns is None:
+        columns = data.columns  # field was in old, is in mask and is in new
+    columns = mask.columns.intersection(columns)
+
+    for c in columns:
+
+        # ignore
+        if data[c].empty or mask[c].empty:
+            continue
+
+        # get the positions of values to unmask
+        candidates = mask[c]
+        # if the mask was removed during the function call, don't replace
+        unmask = candidates[data[c].iloc[candidates.index].isna().to_numpy()]
+        if unmask.empty:
+            continue
+        data[c].iloc[unmask.index] = unmask
+
+    return data
+
+
+def _expandField(regex, columns, field) -> List[str]:
+    """
+    Expand regular expressions to concrete field names.
+    """
+    if regex:
+        fmask = columns.str.match(field)
+        return columns[fmask].tolist()
+    return toSequence(field)
+
+
+def _homogenizeFieldsTargets(
+    multivariate,
+    handles_target,
+    fields,
+    targets,
+):
+    """
+    Ensure that fields and targets are of identical length.
+
+    Note
+    ----
+    We have four cases to consider:
+    1. multivariate=False & handles_target=False
+    2. multivariate=False & handles_target=True
+    3. multivariate=True  & handles_target=False
+    4. multivariate=True  & handles_target=True
+    """
+
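+    # only fully multivariate functions that handle `target` themselves (case 4)
+    # may receive differing numbers of fields and targets; all other cases
+    # require a one-to-one mapping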
+    if not (multivariate and handles_target):
+        if len(fields) != len(targets):
+            raise ValueError("expected the same number of 'field' and 'target' values")
+
+    if multivariate:
+        fields, targets = [fields], [targets]
+
+    return fields, targets
 
 
 def register(
@@ -409,14 +323,112 @@ def register(
         itself. Mandatory for multivariate functions.
     """
 
-    def inner(func):
-        wrapper = FunctionWrapper(
-            func, mask, demask, squeeze, multivariate, handles_target
+    def outer(func: Callable[P, SaQC]) -> Callable[P, SaQC]:
+
+        func_signature = inspect.signature(func)
+        _checkDecoratorKeywords(
+            func_signature, func.__name__, mask, demask, squeeze, handles_target
         )
-        FUNC_MAP[wrapper.func_name] = wrapper
-        return wrapper
 
-    return inner
+        @functools.wraps(func)
+        def inner(
+            saqc,
+            field,
+            *args,
+            regex: bool = False,
+            flag: ExternalFlag | OptionalNone = OptionalNone(),
+            **kwargs,
+        ) -> "SaQC":
+
+            # args -> kwargs
+            paramnames = tuple(func_signature.parameters.keys())[
+                2:
+            ]  # skip (self, field)
+            kwargs = {**dict(zip(paramnames, args)), **kwargs}
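+            # resolve the data filter threshold, either from a user supplied
+            # value, the translation scheme or the function's own default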
+            kwargs["dfilter"] = _getDfilter(func_signature, saqc._scheme, kwargs)
+
+            # translate flag
+            if not isinstance(flag, OptionalNone):
+                # translation schemes might want to use a flag value of
+                # `None`, so we introduce a special sentinel class here
+                kwargs["flag"] = saqc._scheme(flag)
+
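+            # expand regular expressions in `field` and align the resulting
+            # field names with the given (or implicit) `target` names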
+            fields = _expandField(regex, saqc._data.columns, field)
+            targets = toSequence(kwargs.pop("target", fields))
+
+            fields, targets = _homogenizeFieldsTargets(
+                multivariate, handles_target, fields, targets
+            )
+
+            out = saqc.copy(deep=True)
+
+            # initialize target fields
+            if not handles_target:
+                # initialize all target variables
+                for src, trg in zip(fields, targets):
+                    if src != trg:
+                        out = out.copyField(field=src, target=trg)
+
+            for src, trg in zip(fields, targets):
+                kwargs = {**kwargs, "field": src, "target": trg}
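+                # functions that do not handle `target` themselves operate directly
+                # on the already initialized target column (copied above)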
+                if not handles_target:
+                    kwargs["field"] = kwargs.pop("target")
+
+                # find columns that need masking
+                # func_signature = func_signature.bind(field=field)
+                columns = _argnamesToColumns(mask, kwargs)
+                _warn(columns.difference(out._data.columns).to_list(), source="mask")
+                columns = columns.intersection(out._data.columns)
+
+                out._data, stored_data = _maskData(
+                    data=out._data,
+                    flags=out._flags,
+                    columns=columns,
+                    thresh=kwargs["dfilter"],
+                )
+
+                # always pass a list to multivariate functions and
+                # unpack single element lists for univariate functions
+                if not multivariate:
+                    kwargs["field"] = squeezeSequence(kwargs["field"])
+
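+                # keep the pre-call flags, so that later only the newly
+                # appended history columns get squeezed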
+                old_flags = out._flags.copy()
+
+                out = func(out, **kwargs)
+
+                # find columns that need squeezing
+                columns = _argnamesToColumns(squeeze, kwargs)
+                _warn(
+                    columns.difference(out._flags.columns).to_list(), source="squeeze"
+                )
+                columns = columns.intersection(out._flags.columns)
+
+                # if the function did not want to set any flags at all,
+                # we assume a processing function that either altered the flags
+                # in an unpredictable manner or did nothing with them.
+                # In either case we take the returned flags as the new truth.
+                if not columns.empty:
+                    meta = {
+                        "func": func.__name__,
+                        "args": args,
+                        "kwargs": kwargs,
+                    }
+                    out._flags = _squeezeFlags(old_flags, out._flags, columns, meta)
+
+                # find columns that need demasking
+                columns = _argnamesToColumns(demask, kwargs)
+                _warn(columns.difference(out._data.columns).to_list(), source="demask")
+                columns = columns.intersection(out._data.columns)
+
+                out._data = _unmaskData(out._data, stored_data, columns=columns)
+                out._validate(reason=f"call to {repr(func.__name__)}")
+
+            return out
+
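+        # expose the wrapped function in the global function store,
+        # so it can be looked up by name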
+        FUNC_MAP[func.__name__] = inner
+        return inner
+
+    return outer
 
 
 def flagging(**kwargs):
@@ -465,9 +477,10 @@ def processing(**kwargs):
     return register(mask=[], demask=[], squeeze=[])
 
 
-def _isflagged(
-    flagscol: np.ndarray | pd.Series, thresh: float
-) -> np.ndarray | pd.Series:
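+# constrained type variable: `_isflagged` returns the same type it receives
+# (np.ndarray in -> np.ndarray out, pd.Series in -> pd.Series out)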
+A = TypeVar("A", np.ndarray, pd.Series)
+
+
+def _isflagged(flagscol: A, thresh: float) -> A:
     """
     Return a mask of flags accordingly to `thresh`. Return type is same as flags.
     """
diff --git a/saqc/core/translation/basescheme.py b/saqc/core/translation/basescheme.py
index c9889cd2043c351744ae7c6c0ff47995135cbc3f..1b42a8effeb2aa52e3706ef4b6e02add7deb1282 100644
--- a/saqc/core/translation/basescheme.py
+++ b/saqc/core/translation/basescheme.py
@@ -8,7 +8,7 @@
 
 from __future__ import annotations
 
-from typing import Any, Dict, MutableMapping, Union
+from typing import Any, Dict
 
 import numpy as np
 import pandas as pd
@@ -41,8 +41,11 @@ class TranslationScheme:
     - The scheme must be well definied, i.e. we need a backward translation for
       every forward translation (each value in `self._forward` needs a key in
       `self._backward`).
-    - We need translations for the special flags `saqc.constants.UNFLAGGED` and
-      `saqc.constants.BAD`. That implies, that every valid translation scheme
+    - We need translations for the special flags:
+      * `saqc.constants.UNFLAGGED`
+      * `saqc.constants.BAD`
+
+      That implies that every valid translation scheme
       provides at least one user flag that maps to `BAD` and one that maps to
       `UNFLAGGED`.
     """
diff --git a/saqc/funcs/__init__.py b/saqc/funcs/__init__.py
index 5aa3335ada54797b09eab01d0e7a18b1f22447f5..f2f9253242df6f06fcb516c0728084d92e54f714 100644
--- a/saqc/funcs/__init__.py
+++ b/saqc/funcs/__init__.py
@@ -6,22 +6,42 @@
 
 # -*- coding: utf-8 -*-
 
-# imports needed to make the functions register themself
-from saqc.core.register import register
-from saqc.funcs.breaks import *
-from saqc.funcs.changepoints import *
-from saqc.funcs.constants import *
-from saqc.funcs.curvefit import *
-from saqc.funcs.drift import *
-from saqc.funcs.flagtools import *
-from saqc.funcs.generic import *
-from saqc.funcs.interpolation import *
-from saqc.funcs.noise import *
-from saqc.funcs.outliers import *
-from saqc.funcs.pattern import *
-from saqc.funcs.resampling import *
-from saqc.funcs.residuals import *
-from saqc.funcs.rolling import *
-from saqc.funcs.scores import *
-from saqc.funcs.tools import *
-from saqc.funcs.transformation import *
+from saqc.funcs.breaks import BreaksMixin
+from saqc.funcs.changepoints import ChangepointsMixin
+from saqc.funcs.constants import ConstantsMixin
+from saqc.funcs.curvefit import CurvefitMixin
+from saqc.funcs.drift import DriftMixin
+from saqc.funcs.flagtools import FlagtoolsMixin
+from saqc.funcs.generic import GenericMixin
+from saqc.funcs.interpolation import InterpolationMixin
+from saqc.funcs.noise import NoiseMixin
+from saqc.funcs.outliers import OutliersMixin
+from saqc.funcs.pattern import PatternMixin
+from saqc.funcs.resampling import ResamplingMixin
+from saqc.funcs.residuals import ResidualsMixin
+from saqc.funcs.rolling import RollingMixin
+from saqc.funcs.scores import ScoresMixin
+from saqc.funcs.tools import ToolsMixin
+from saqc.funcs.transformation import TransformationMixin
+
+
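+# bundle all per-module function mixins into a single base class, so that every
+# implemented function can be exposed as a method on the main SaQC object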
+class FunctionsMixin(
+    BreaksMixin,
+    ChangepointsMixin,
+    ConstantsMixin,
+    CurvefitMixin,
+    DriftMixin,
+    FlagtoolsMixin,
+    GenericMixin,
+    InterpolationMixin,
+    NoiseMixin,
+    OutliersMixin,
+    PatternMixin,
+    ResamplingMixin,
+    ResidualsMixin,
+    RollingMixin,
+    ScoresMixin,
+    ToolsMixin,
+    TransformationMixin,
+):
+    pass
diff --git a/saqc/funcs/breaks.py b/saqc/funcs/breaks.py
index 754d77231e3332d6401da242c23673dea0360f00..0b700d6ae5a3c80c38e5987468fe8642d91ce195 100644
--- a/saqc/funcs/breaks.py
+++ b/saqc/funcs/breaks.py
@@ -17,205 +17,213 @@ isolated values (:py:func:`flagIsolated`).
 
 from __future__ import annotations
 
-from typing import Tuple
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pandas as pd
 
-from dios import DictOfSeries
 from saqc.constants import BAD, FILTER_ALL
-from saqc.core.flags import Flags
 from saqc.core.register import _isflagged, flagging, register
 from saqc.funcs.changepoints import _assignChangePointCluster
-from saqc.lib.tools import groupConsecutives
-
-
-@register(mask=[], demask=[], squeeze=["field"])
-def flagMissing(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    flag: float = BAD,
-    dfilter: float = FILTER_ALL,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag NaNs in data.
-
-    By default only NaNs are flagged, that not already have a flag.
-    `to_mask` can be used to pass a flag that is used as threshold.
-    Each flag worse than the threshold is replaced by the function.
-    This is, because the data gets masked (with NaNs) before the
-    function evaluates the NaNs.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        Column(s) in flags and data.
-
-    flags : saqc.Flags
-        The flags container.
-
-    flag : float, default BAD
-        Flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Unmodified data container
-    flags : saqc.Flags
-        The flags container
-    """
-
-    datacol = data[field]
-    mask = datacol.isna()
-
-    mask = ~_isflagged(flags[field], dfilter) & mask
-
-    flags[mask, field] = flag
-    return data, flags
-
-
-@flagging()
-def flagIsolated(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    gap_window: str,
-    group_window: str,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Find and flag temporal isolated groups of data.
-
-    The function flags arbitrarily large groups of values, if they are surrounded by
-    sufficiently large data gaps. A gap is a timespan containing either no data at all
-    or NaNs only.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        Column(s) in flags and data.
-
-    flags : saqc.Flags
-        The flags container.
-
-    gap_window : str
-        Minimum gap size required before and after a data group to consider it
-        isolated. See condition (2) and (3)
-
-    group_window : str
-        Maximum size of a data chunk to consider it a candidate for an isolated group.
-        Data chunks that are bigger than the ``group_window`` are ignored.
-        This does not include the possible gaps surrounding it.
-        See condition (1).
-
-    flag : float, default BAD
-        Flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Unmodified data container
-    flags : saqc.Flags
-        The flags container
-
-    Notes
-    -----
-    A series of values :math:`x_k,x_{k+1},...,x_{k+n}`, with associated
-    timestamps :math:`t_k,t_{k+1},...,t_{k+n}`, is considered to be isolated, if:
-
-    1. :math:`t_{k+1} - t_n <` `group_window`
-    2. None of the :math:`x_j` with :math:`0 < t_k - t_j <` `gap_window`,
-        is valid (preceeding gap).
-    3. None of the :math:`x_j` with :math:`0 < t_j - t_(k+n) <` `gap_window`,
-        is valid (succeding gap).
-    """
-    gap_window = pd.tseries.frequencies.to_offset(gap_window)
-    group_window = pd.tseries.frequencies.to_offset(group_window)
-
-    mask = data[field].isna()
-
-    bools = pd.Series(data=0, index=mask.index, dtype=bool)
-    for srs in groupConsecutives(mask):
-        if np.all(~srs):
-            # we found a chunk of non-nan values
-            start = srs.index[0]
-            stop = srs.index[-1]
-            if stop - start <= group_window:
-                # the chunk is large enough
-                left = mask[start - gap_window : start].iloc[:-1]
-                if left.all():
-                    # the section before our chunk is nan-only
-                    right = mask[stop : stop + gap_window].iloc[1:]
-                    if right.all():
-                        # the section after our chunk is nan-only
-                        # -> we found a chunk of isolated non-values
-                        bools[start:stop] = True
-
-    flags[bools, field] = flag
-    return data, flags
-
-
-@flagging()
-def flagJumps(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    thresh: float,
-    window: str,
-    min_periods: int = 1,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag jumps and drops in data.
-
-    Flag data where the mean of its values significantly changes (the data "jumps").
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        Column(s) in flags and data.
-
-    flags : saqc.Flags
-        The flags container.
-
-    thresh : float
-        Threshold value by which the mean of data has to change to trigger flagging.
-
-    window : str
-        Size of the moving window. This is the number of observations used
-        for calculating the statistic.
-
-    min_periods : int, default 1
-        Minimum number of observations in window required to calculate a valid
-        mean value.
-
-    flag : float, default BAD
-        Flag to set.
-    """
-    return _assignChangePointCluster(
-        data,
-        field,
-        flags,
-        stat_func=lambda x, y: np.abs(np.mean(x) - np.mean(y)),
-        thresh_func=lambda x, y: thresh,
-        window=window,
-        min_periods=min_periods,
-        set_flags=True,
-        model_by_resids=False,
-        assign_cluster=False,
-        flag=flag,
+
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
+class BreaksMixin:
+    @register(mask=[], demask=[], squeeze=["field"])
+    def flagMissing(
+        self: "SaQC",
+        field: str,
+        flag: float = BAD,
+        dfilter: float = FILTER_ALL,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag NaNs in data.
+
+        By default, only NaNs that do not already carry a flag are flagged.
+        `dfilter` can be used to pass a flag level that serves as a threshold:
+        every value flagged worse than this threshold is masked (set to NaN)
+        before the function runs and therefore gets (re-)flagged by it.
+
+        Parameters
+        ----------
+        field : str
+            Column(s) in flags and data.
+
+        flag : float, default BAD
+            Flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+
+        datacol = self._data[field]
+        mask = datacol.isna()
+
+        mask = ~_isflagged(self._flags[field], dfilter) & mask
+
+        self._flags[mask, field] = flag
+        return self
+
+    @flagging()
+    def flagIsolated(
+        self: "SaQC",
+        field: str,
+        gap_window: str,
+        group_window: str,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Find and flag temporal isolated groups of data.
+
+        The function flags arbitrarily large groups of values if they are surrounded by
+        sufficiently large data gaps. A gap is a timespan containing either no data at all
+        or NaNs only.
+
+        Parameters
+        ----------
+        field : str
+            Column(s) in flags and data.
+
+        gap_window : str
+            Minimum gap size required before and after a data group to consider it
+            isolated. See conditions (2) and (3).
+
+        group_window : str
+            Maximum size of a data chunk to consider it a candidate for an isolated group.
+            Data chunks that are bigger than ``group_window`` are ignored.
+            This does not include the possible gaps surrounding it.
+            See condition (1).
+
+        flag : float, default BAD
+            Flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        A series of values :math:`x_k,x_{k+1},...,x_{k+n}`, with associated
+        timestamps :math:`t_k,t_{k+1},...,t_{k+n}`, is considered to be isolated, if:
+
+        1. :math:`t_{k+n} - t_k <` `group_window`
+        2. None of the :math:`x_j` with :math:`0 < t_k - t_j <` `gap_window`
+            is valid (preceding gap).
+        3. None of the :math:`x_j` with :math:`0 < t_j - t_{k+n} <` `gap_window`
+            is valid (succeeding gap).
+        """
+
+        dat = self._data[field].dropna()
+        if dat.empty:
+            return self
+
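+        # a rolling count of exactly 1 over ``gap_window`` marks values that are the
+        # only observation within that window, i.e. values directly preceded
+        # (respectively followed, for the reversed rolling below) by a gap of at
+        # least ``gap_window``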
+        gap_ends = dat.rolling(gap_window).count() == 1
+        gap_ends.iloc[0] = False
+        gap_ends = gap_ends[gap_ends]
+        gap_starts = dat[::-1].rolling(gap_window).count()[::-1] == 1
+        gap_starts.iloc[-1] = False
+        gap_starts = gap_starts[gap_starts]
+        if gap_starts.empty:
+            return self
+
+        gap_starts = gap_starts[1:]
+        gap_ends = gap_ends[:-1]
+        isolated_groups = gap_starts.index - gap_ends.index < group_window
+        gap_starts = gap_starts[isolated_groups]
+        gap_ends = gap_ends[isolated_groups]
+        to_flag = pd.Series(False, index=dat.index)
+        for s, e in zip(gap_starts.index, gap_ends.index):
+            # what gets flagged are the groups between the gaps, those range from
+            # the end of one gap (gap_end) to the beginning of the next (gap_start)
+            to_flag[e:s] = True
+
+        to_flag = to_flag.reindex(self._data[field].index, fill_value=False)
+        self._flags[to_flag.to_numpy(), field] = flag
+        return self
+
+    @flagging()
+    def flagJumps(
+        self: "SaQC",
+        field: str,
+        thresh: float,
+        window: str,
+        min_periods: int = 1,
+        flag: float = BAD,
         **kwargs,
-    )
+    ) -> "SaQC":
+        """
+        Flag jumps and drops in data.
+
+        Flag data where the mean of its values significantly changes (i.e., where the data "jumps" from one value
+        level to another).
+        The changes in value level are detected by comparing the means of two adjacent rolling windows.
+        Whenever the difference between the means in the two windows exceeds `thresh`, the value between the windows
+        is flagged as a jump.
+
+
+        Parameters
+        ----------
+        field : str
+            Column(s) in flags and data.
+
+        thresh : float
+            Threshold value by which the mean of the data has to jump to trigger flagging.
+
+        window : str
+            Size of the two moving windows. This determines the number of observations used
+            for calculating the mean in every window.
+            The window size should be big enough to yield enough samples for a reliable mean calculation,
+            but it should also not be arbitrarily big, since it also limits the density of jumps that can be detected:
+            jumps that are not separated from each other by more than three fourths (3/4) of the
+            selected window size will not be detected reliably.
+
+        min_periods : int, default 1
+            The minimum number of observations in window required to calculate a valid
+            mean value.
+
+        flag : float, default BAD
+            Flag to set.
+
+        Examples
+        --------
+
+        The picture below gives an abstract interpretation of the parameter interplay in case of a positive value
+        jump that initialises a new mean level.
+
+        .. figure:: /resources/images/flagJumpsPic.png
+
+           The two adjacent windows of size `window` roll through the whole data series. Whenever the mean values in
+           the two windows differ by more than `thresh`, flagging is triggered.
+
+        Notes
+        -----
+
+        Jumps that are not separated from each other by more than three fourths (3/4) of the
+        selected window size will not be detected reliably.
+
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        self._data, self._flags = _assignChangePointCluster(
+            self._data,
+            field,
+            self._flags,
+            stat_func=lambda x, y: np.abs(np.mean(x) - np.mean(y)),
+            thresh_func=lambda x, y: thresh,
+            window=window,
+            min_periods=min_periods,
+            set_flags=True,
+            model_by_resids=False,
+            assign_cluster=False,
+            flag=flag,
+            **kwargs,
+        )
+        return self
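
A short usage sketch of the refactored, chainable interface this file now exposes. The parameter values are purely illustrative, and it assumes the DataFrame-accepting ``saqc.SaQC`` constructor and its ``flags`` accessor from the public frontend (not part of this hunk):

```python
import numpy as np
import pandas as pd
import saqc

# toy series: one NaN and a mean jump halfway through
idx = pd.date_range("2021-01-01", periods=200, freq="10min")
values = np.concatenate([np.zeros(100), np.full(100, 5.0)])
values[17] = np.nan
data = pd.DataFrame({"sensor": values}, index=idx)

qc = saqc.SaQC(data)
qc = (
    qc.flagMissing("sensor")
    .flagIsolated("sensor", gap_window="3h", group_window="1h")
    .flagJumps("sensor", thresh=2.0, window="6h")
)
print(qc.flags["sensor"].value_counts())
```
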
diff --git a/saqc/funcs/changepoints.py b/saqc/funcs/changepoints.py
index 08af9cc0b399d16b95e8f2858649a7e1bf543613..c4d6db0955e555b5728b4c531487cf56f10125c7 100644
--- a/saqc/funcs/changepoints.py
+++ b/saqc/funcs/changepoints.py
@@ -7,7 +7,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Callable, Tuple
+from typing import TYPE_CHECKING, Callable, Tuple
 
 import numba
 import numpy as np
@@ -20,235 +20,211 @@ from saqc.core.flags import Flags
 from saqc.core.register import flagging, register
 from saqc.lib.tools import customRoller, filterKwargs
 
-
-@flagging()
-def flagChangePoints(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    stat_func: Callable[[np.ndarray, np.ndarray], float],
-    thresh_func: Callable[[np.ndarray, np.ndarray], float],
-    window: str | Tuple[str, str],
-    min_periods: int | Tuple[int, int],
-    closed: Literal["right", "left", "both", "neither"] = "both",
-    reduce_window: str = None,
-    reduce_func: Callable[[np.ndarray, np.ndarray], int] = lambda x, _: x.argmax(),
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag data where it significantly changes.
-
-    Flag data points, where the parametrization of the process, the data is assumed to
-    generate by, significantly changes.
-
-    The change points detection is based on a sliding window search.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        A column in flags and data.
-
-    flags : saqc.Flags
-        The flags container.
-
-    stat_func : Callable
-         A function that assigns a value to every twin window. The backward-facing
-         window content will be passed as the first array, the forward-facing window
-         content as the second.
-
-    thresh_func : Callable
-        A function that determines the value level, exceeding wich qualifies a
-        timestamps func value as denoting a change-point.
-
-    window : str, tuple of str
-        Size of the moving windows. This is the number of observations used for
-        calculating the statistic.
-
-        If it is a single frequency offset, it applies for the backward- and the
-        forward-facing window.
-
-        If two offsets (as a tuple) is passed the first defines the size of the
-        backward facing window, the second the size of the forward facing window.
-
-    min_periods : int or tuple of int
-        Minimum number of observations in a window required to perform the changepoint
-        test. If it is a tuple of two int, the first refer to the backward-,
-        the second to the forward-facing window.
-
-    closed : {'right', 'left', 'both', 'neither'}, default 'both'
-        Determines the closure of the sliding windows.
-
-    reduce_window : str or None, default None
-        The sliding window search method is not an exact CP search method and usually
-        there wont be detected a single changepoint, but a "region" of change around
-        a changepoint.
-
-        If `reduce_window` is given, for every window of size `reduce_window`, there
-        will be selected the value with index `reduce_func(x, y)` and the others will
-        be dropped.
-
-        If `reduce_window` is None, the reduction window size equals the twin window
-        size, the changepoints have been detected with.
-
-    reduce_func : Callable, default ``lambda x, y: x.argmax()``
-        A function that must return an index value upon input of two arrays x and y.
-        First input parameter will hold the result from the stat_func evaluation for
-        every reduction window. Second input parameter holds the result from the
-        `thresh_func` evaluation.
-        The default reduction function just selects the value that maximizes the
-        `stat_func`.
-
-    flag : float, default BAD
-        flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Unmodified data container
-    flags : saqc.Flags
-        The flags container
-    """
-    return _assignChangePointCluster(
-        data,
-        field,
-        flags,
-        stat_func=stat_func,
-        thresh_func=thresh_func,
-        window=window,
-        min_periods=min_periods,
-        closed=closed,
-        reduce_window=reduce_window,
-        reduce_func=reduce_func,
-        set_flags=True,
-        model_by_resids=False,
-        assign_cluster=False,
-        flag=flag,
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
+class ChangepointsMixin:
+    @flagging()
+    def flagChangePoints(
+        self: "SaQC",
+        field: str,
+        stat_func: Callable[[np.ndarray, np.ndarray], float],
+        thresh_func: Callable[[np.ndarray, np.ndarray], float],
+        window: str | Tuple[str, str],
+        min_periods: int | Tuple[int, int],
+        reduce_window: str | None = None,
+        reduce_func: Callable[[np.ndarray, np.ndarray], int] = lambda x, _: x.argmax(),
+        flag: float = BAD,
         **kwargs,
-    )
-
-
-@register(mask=["field"], demask=[], squeeze=[])
-def assignChangePointCluster(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    stat_func: Callable[[np.array, np.array], float],
-    thresh_func: Callable[[np.array, np.array], float],
-    window: str | Tuple[str, str],
-    min_periods: int | Tuple[int, int],
-    closed: Literal["right", "left", "both", "neither"] = "both",
-    reduce_window: str = None,
-    reduce_func: Callable[[np.ndarray, np.ndarray], float] = lambda x, _: x.argmax(),
-    model_by_resids: bool = False,
-    **kwargs,
-):
-    """
-    Label data where it changes significantly.
-
-    The labels will be stored in data. Unless `target` is given the labels will
-    overwrite the data in `field`. The flags will always set to `UNFLAGGED`.
-
-    Assigns label to the data, aiming to reflect continuous regimes of the processes
-    the data is assumed to be generated by. The regime change points detection is
-    based on a sliding window search.
-
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        The reference variable, the deviation from wich determines the flagging.
-
-    flags : saqc.flags
-        A flags object, holding flags and additional informations related to `data`.
-
-    stat_func : Callable[[numpy.array, numpy.array], float]
-        A function that assigns a value to every twin window. Left window content will
-        be passed to first variable,
-        right window content will be passed to the second.
-
-    thresh_func : Callable[numpy.array, numpy.array], float]
-        A function that determines the value level, exceeding wich qualifies a
-        timestamps func func value as denoting a changepoint.
-
-    window : str, tuple of string
-        Size of the rolling windows the calculation is performed in. If it is a single
-        frequency offset, it applies for the backward- and the forward-facing window.
-
-        If two offsets (as a tuple) is passed the first defines the size of the
-        backward facing window, the second the size of the forward facing window.
-
-    min_periods : int or tuple of int
-        Minimum number of observations in a window required to perform the changepoint
-        test. If it is a tuple of two int, the first refer to the backward-,
-        the second to the forward-facing window.
-
-    closed : {'right', 'left', 'both', 'neither'}, default 'both'
-        Determines the closure of the sliding windows.
-
-    reduce_window : {None, str}, default None
-        The sliding window search method is not an exact CP search method and usually
-        there wont be detected a single changepoint, but a "region" of change around
-        a changepoint. If `reduce_window` is given, for every window of size
-        `reduce_window`, there will be selected the value with index `reduce_func(x,
-        y)` and the others will be dropped. If `reduce_window` is None, the reduction
-        window size equals the twin window size, the changepoints have been detected
-        with.
-
-    reduce_func : callable, default lambda x,y: x.argmax()
-        A function that must return an index value upon input of two arrays x and y.
-        First input parameter will hold the result from the stat_func evaluation for
-        every reduction window. Second input parameter holds the result from the
-        thresh_func evaluation. The default reduction function just selects the value
-        that maximizes the stat_func.
-
-    model_by_resids : bool, default False
-        If True, the results of `stat_funcs` are written, otherwise the regime labels.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Modified data.
-    flags : saqc.Flags
-        The flags container
-    """
-    reserved = ["assign_cluster", "set_flags", "flag"]
-    kwargs = filterKwargs(kwargs, reserved)
-    return _assignChangePointCluster(
-        data=data,
-        field=field,
-        flags=flags,
-        stat_func=stat_func,
-        thresh_func=thresh_func,
-        window=window,
-        min_periods=min_periods,
-        closed=closed,
-        reduce_window=reduce_window,
-        reduce_func=reduce_func,
-        model_by_resids=model_by_resids,
+    ) -> "SaQC":
+        """
+        Flag data where it significantly changes.
+
+        Flag data points where the parametrization of the process that is assumed
+        to generate the data changes significantly.
+
+        The change points detection is based on a sliding window search.
+
+        Parameters
+        ----------
+        field : str
+            A column in flags and data.
+
+        stat_func : Callable
+             A function that assigns a value to every twin window. The backward-facing
+             window content will be passed as the first array, the forward-facing window
+             content as the second.
+
+        thresh_func : Callable
+            A function that determines the threshold level; a timestamp whose `stat_func`
+            value exceeds this level qualifies as a change point.
+
+        window : str, tuple of str
+            Size of the moving windows. This is the number of observations used for
+            calculating the statistic.
+
+            If it is a single frequency offset, it applies to both the backward- and the
+            forward-facing window.
+
+            If two offsets are passed (as a tuple), the first defines the size of the
+            backward-facing window, the second the size of the forward-facing window.
+
+        min_periods : int or tuple of int
+            Minimum number of observations in a window required to perform the changepoint
+            test. If it is a tuple of two int, the first refers to the backward-facing,
+            the second to the forward-facing window.
+
+        reduce_window : str or None, default None
+            The sliding window search is not an exact change point search method; usually
+            it does not detect a single changepoint, but a "region" of change around
+            a changepoint.
+
+            If `reduce_window` is given, for every window of size `reduce_window` only the
+            value with index `reduce_func(x, y)` is kept and the others are
+            dropped.
+
+            If `reduce_window` is None, the reduction window size equals the twin window
+            size the changepoints have been detected with.
+
+        reduce_func : Callable, default ``lambda x, y: x.argmax()``
+            A function that must return an index value upon input of two arrays x and y.
+            First input parameter will hold the result from the stat_func evaluation for
+            every reduction window. Second input parameter holds the result from the
+            `thresh_func` evaluation.
+            The default reduction function just selects the value that maximizes the
+            `stat_func`.
+
+        flag : float, default BAD
+            Flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        self._data, self._flags = _assignChangePointCluster(
+            self._data,
+            field,
+            self._flags,
+            stat_func=stat_func,
+            thresh_func=thresh_func,
+            window=window,
+            min_periods=min_periods,
+            reduce_window=reduce_window,
+            reduce_func=reduce_func,
+            set_flags=True,
+            model_by_resids=False,
+            assign_cluster=False,
+            flag=flag,
+            **kwargs,
+        )
+        return self
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def assignChangePointCluster(
+        self: "SaQC",
+        field: str,
+        stat_func: Callable[[np.ndarray, np.ndarray], float],
+        thresh_func: Callable[[np.ndarray, np.ndarray], float],
+        window: str | Tuple[str, str],
+        min_periods: int | Tuple[int, int],
+        reduce_window: str | None = None,
+        reduce_func: Callable[
+            [np.ndarray, np.ndarray], float
+        ] = lambda x, _: x.argmax(),
+        model_by_resids: bool = False,
         **kwargs,
-        # control args
-        assign_cluster=True,
-        set_flags=False,
-    )
+    ) -> "SaQC":
+        """
+        Label data where it changes significantly.
+
+        The labels will be stored in data. Unless `target` is given, the labels will
+        overwrite the data in `field`. The flags will always be set to `UNFLAGGED`.
+
+        Assigns labels to the data, aiming to reflect continuous regimes of the process
+        the data is assumed to be generated by. The regime change point detection is
+        based on a sliding window search.
+
+
+        Parameters
+        ----------
+        field : str
+            A column in flags and data. The labels are computed from this column and
+            written to it (or to `target`, if given).
+
+        stat_func : Callable[[numpy.ndarray, numpy.ndarray], float]
+            A function that assigns a value to every twin window. The backward-facing
+            window content is passed as the first argument, the forward-facing window
+            content as the second.
+
+        thresh_func : Callable[[numpy.ndarray, numpy.ndarray], float]
+            A function that determines the threshold level; a timestamp whose `stat_func`
+            value exceeds this level qualifies as a changepoint.
+
+        window : str, tuple of str
+            Size of the rolling windows the calculation is performed in. If it is a single
+            frequency offset, it applies to both the backward- and the forward-facing window.
+
+            If two offsets are passed (as a tuple), the first defines the size of the
+            backward-facing window, the second the size of the forward-facing window.
+
+        min_periods : int or tuple of int
+            Minimum number of observations in a window required to perform the changepoint
+            test. If it is a tuple of two int, the first refers to the backward-facing,
+            the second to the forward-facing window.
+
+        reduce_window : {None, str}, default None
+            The sliding window search is not an exact change point search method; usually
+            it does not detect a single changepoint, but a "region" of change around
+            a changepoint. If `reduce_window` is given, for every window of size
+            `reduce_window` only the value with index `reduce_func(x, y)` is kept and the
+            others are dropped. If `reduce_window` is None, the reduction window size
+            equals the twin window size the changepoints have been detected with.
+
+        reduce_func : callable, default lambda x,y: x.argmax()
+            A function that must return an index value upon input of two arrays x and y.
+            First input parameter will hold the result from the stat_func evaluation for
+            every reduction window. Second input parameter holds the result from the
+            thresh_func evaluation. The default reduction function just selects the value
+            that maximizes the stat_func.
+
+        model_by_resids : bool, default False
+            If True, the results of `stat_func` are written, otherwise the regime labels.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        reserved = ["assign_cluster", "set_flags", "flag"]
+        kwargs = filterKwargs(kwargs, reserved)
+        self._data, self._flags = _assignChangePointCluster(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            stat_func=stat_func,
+            thresh_func=thresh_func,
+            window=window,
+            min_periods=min_periods,
+            reduce_window=reduce_window,
+            reduce_func=reduce_func,
+            model_by_resids=model_by_resids,
+            **kwargs,
+            # control args
+            assign_cluster=True,
+            set_flags=False,
+        )
+        return self
 
 
 def _assignChangePointCluster(
     data: DictOfSeries,
     field: str,
     flags: Flags,
-    stat_func: Callable[[np.array, np.array], float],
-    thresh_func: Callable[[np.array, np.array], float],
+    stat_func: Callable[[np.ndarray, np.ndarray], float],
+    thresh_func: Callable[[np.ndarray, np.ndarray], float],
     window: str | Tuple[str, str],
     min_periods: int | Tuple[int, int],
-    closed: Literal["right", "left", "both", "neither"] = "both",
-    reduce_window: str = None,
+    reduce_window: str | None = None,
     reduce_func: Callable[[np.ndarray, np.ndarray], float] = lambda x, _: x.argmax(),
     model_by_resids: bool = False,
     set_flags: bool = False,
@@ -274,22 +250,16 @@ def _assignChangePointCluster(
         )
         reduce_window = f"{s}s"
 
-    roller = customRoller(data_ser, window=bwd_window, min_periods=bwd_min_periods)
-    bwd_start, bwd_end = roller.window_indexer.get_window_bounds(
-        len(data_ser), min_periods=bwd_min_periods, closed=closed
-    )
+    roller = customRoller(data_ser, window=bwd_window, min_periods=0)
+    bwd_start, bwd_end = roller.window_indexer.get_window_bounds(len(data_ser))
 
-    roller = customRoller(
-        data_ser, window=fwd_window, forward=True, min_periods=fwd_min_periods
-    )
-    fwd_start, fwd_end = roller.window_indexer.get_window_bounds(
-        len(data_ser), min_periods=fwd_min_periods, closed=closed
-    )
+    roller = customRoller(data_ser, window=fwd_window, forward=True, min_periods=0)
+    fwd_start, fwd_end = roller.window_indexer.get_window_bounds(len(data_ser))
 
-    min_mask = ~(
-        (fwd_end - fwd_start <= fwd_min_periods)
-        | (bwd_end - bwd_start <= bwd_min_periods)
+    min_mask = (fwd_end - fwd_start >= fwd_min_periods) & (
+        bwd_end - bwd_start >= bwd_min_periods
     )
+
     fwd_end = fwd_end[min_mask]
     split = bwd_end[min_mask]
     bwd_start = bwd_start[min_mask]
@@ -338,6 +308,15 @@ def _assignChangePointCluster(
         )
         det_index = det_index[detected]
 
+    # the changepoint is the point "after" the change - so the detected index has to be
+    # shifted by one position with regard to the data index:
+    shifted = (
+        pd.Series(True, index=det_index)
+        .reindex(data_ser.index, fill_value=False)
+        .shift(fill_value=False)
+    )
+    det_index = shifted.index[shifted]
+
     if assign_cluster:
         cluster = pd.Series(False, index=data[field].index)
         cluster[det_index] = True
@@ -385,7 +364,7 @@ def _reduceCPCluster(stat_arr, thresh_arr, start, end, obj_func, num_val):
         s, e = start[win_i], end[win_i]
         x = stat_arr[s:e]
         y = thresh_arr[s:e]
-        pos = s + obj_func(x, y) + 1
+        pos = s + obj_func(x, y)
         out_arr[s:e] = False
         out_arr[pos] = True
 
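
A sketch of how the two changepoint methods might be called; the toy data, windows, thresholds and the ``target`` column name are illustrative only:

```python
import numpy as np
import pandas as pd
import saqc

# piecewise-constant toy signal with two regime changes
idx = pd.date_range("2021-01-01", periods=300, freq="10min")
values = np.concatenate([np.zeros(100), np.full(100, 10.0), np.full(100, -5.0)])
data = pd.DataFrame({"sensor": values}, index=idx)

qc = saqc.SaQC(data)

# flag a change point whenever the means of the two adjacent 3h windows
# differ by more than 5 units
qc = qc.flagChangePoints(
    "sensor",
    stat_func=lambda x, y: np.abs(np.mean(x) - np.mean(y)),
    thresh_func=lambda x, y: 5.0,
    window="3h",
    min_periods=5,
)

# alternatively, write regime labels into a new column instead of flagging
qc = qc.assignChangePointCluster(
    "sensor",
    target="sensor_regime",
    stat_func=lambda x, y: np.abs(np.mean(x) - np.mean(y)),
    thresh_func=lambda x, y: 5.0,
    window="3h",
    min_periods=5,
)
```
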
diff --git a/saqc/funcs/constants.py b/saqc/funcs/constants.py
index d7a98d20c4f0f88d422dac6a49efa926c99bd3b1..fc1a77f99b0435496ea607c1922522848038c464 100644
--- a/saqc/funcs/constants.py
+++ b/saqc/funcs/constants.py
@@ -9,173 +9,155 @@
 from __future__ import annotations
 
 import operator
-from typing import Tuple
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pandas as pd
 
-from dios import DictOfSeries
 from saqc.constants import BAD
-from saqc.core.flags import Flags
 from saqc.core.register import flagging
 from saqc.lib.tools import customRoller, getFreqDelta, statPass
 from saqc.lib.ts_operators import varQC
 
-
-@flagging()
-def flagConstants(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    thresh: float,
-    window: int | str,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag constant data values.
-
-    Flags plateaus of constant data if their maximum total change in
-    a rolling window does not exceed a certain threshold.
-
-    Any interval of values y(t),...,y(t+n) is flagged, if:
-     - (1): n > ``window``
-     - (2): abs(y(t + i) - (t + j)) < `thresh`, for all i,j in [0, 1, ..., n]
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        A column in flags and data.
-
-    flags : saqc.Flags
-        The flags container.
-
-    thresh : float
-        Maximum total change allowed per window.
-
-    window : str | int
-        Size of the moving window. This is the number of observations used
-        for calculating the statistic. Each window will be a fixed size.
-        If its an offset then this will be the time period of each window.
-        Each window will be a variable sized based on the observations included
-        in the time-period.
-
-    flag : float, default BAD
-        Flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Unmodified data container
-    flags : saqc.Flags
-        The flags container
-    """
-    if not isinstance(window, (str, int)):
-        raise TypeError("window must be offset string or int.")
-
-    d = data[field]
-
-    # min_periods=2 ensures that at least two non-nan values are present
-    # in each window and also min() == max() == d[i] is not possible.
-    kws = dict(window=window, min_periods=2, expand=False)
-
-    # 1. find starting points of consecutive constant values as a boolean mask
-    # 2. fill the whole window with True's
-    rolling = customRoller(d, **kws)
-    starting_points_mask = rolling.max() - rolling.min() <= thresh
-    rolling = customRoller(starting_points_mask, **kws, forward=True)
-    # mimic any()
-    mask = (rolling.sum() > 0) & d.notna()
-
-    flags[mask, field] = flag
-    return data, flags
-
-
-@flagging()
-def flagByVariance(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: str,
-    thresh: float,
-    maxna: int | None = None,
-    maxna_group: int | None = None,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag low-variance data.
-
-    Flags plateaus of constant data if the variance in a rolling window does not
-    exceed a certain threshold.
-
-    Any interval of values y(t),..y(t+n) is flagged, if:
-
-    (1) n > `window`
-    (2) variance(y(t),...,y(t+n) < `thresh`
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        A column in flags and data.
-
-    flags : saqc.Flags
-        The flags container.
-
-    window : str | int
-        Size of the moving window. This is the number of observations used
-        for calculating the statistic. Each window will be a fixed size.
-        If its an offset then this will be the time period of each window.
-        Each window will be sized, based on the number of observations included
-        in the time-period.
-
-    thresh : float, default 0.0005
-        Maximum total variance allowed per window.
-
-    maxna : int, default None
-        Maximum number of NaNs allowed in window.
-        If more NaNs are present, the window is not flagged.
-
-    maxna_group : int, default None
-        Same as `maxna` but for consecutive NaNs.
-
-    flag : float, default BAD
-        Flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Unmodified data container
-    flags : saqc.Flags
-        The flags container
-    """
-    dataseries = data[field]
-    delta = getFreqDelta(dataseries.index)
-    if not delta:
-        raise IndexError("Timeseries irregularly sampled!")
-
-    if maxna is None:
-        maxna = np.inf
-
-    if maxna_group is None:
-        maxna_group = np.inf
-
-    min_periods = int(np.ceil(pd.Timedelta(window) / pd.Timedelta(delta)))
-    window = pd.Timedelta(window)
-    to_set = statPass(
-        dataseries,
-        lambda x: varQC(x, maxna, maxna_group),
-        window,
-        thresh,
-        operator.lt,
-        min_periods=min_periods,
-    )
-
-    flags[to_set, field] = flag
-    return data, flags
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
+class ConstantsMixin:
+    @flagging()
+    def flagConstants(
+        self: "SaQC",
+        field: str,
+        thresh: float,
+        window: int | str,
+        min_periods: int = 2,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag constant data values.
+
+        Flags plateaus of constant data if their maximum total change in
+        a rolling window does not exceed a certain threshold.
+
+        Any interval of values y(t),...,y(t+n) is flagged, if:
+         - (1): n > ``window``
+         - (2): abs(y(t + i) - y(t + j)) <= `thresh`, for all i, j in [0, 1, ..., n]
+
+        Parameters
+        ----------
+        field : str
+            A column in flags and data.
+
+        thresh : float
+            Maximum total change allowed per window.
+
+        window : str | int
+            Size of the moving window. If an integer is passed, it is the number of
+            observations used for calculating the statistic and each window has a fixed
+            size. If an offset string is passed, it is the time period of each window,
+            and each window is variably sized, based on the observations included
+            in that time period.
+
+        min_periods : int, default 2
+            Minimum number of observations in a window required to calculate the
+            statistic. The default of 2 ensures that at least two non-NaN values are
+            present in each window.
+
+        flag : float, default BAD
+            Flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        if not isinstance(window, (str, int)):
+            raise TypeError("window must be offset string or int.")
+
+        d = self._data[field]
+
+        # with the default min_periods=2, every window contains at least two non-nan
+        # values, so min() == max() == d[i] is not possible.
+        kws = dict(window=window, min_periods=min_periods, expand=False)
+
+        # 1. find starting points of consecutive constant values as a boolean mask
+        # 2. fill the whole window with True's
+        rolling = customRoller(d, **kws)
+        starting_points_mask = rolling.max() - rolling.min() <= thresh
+        rolling = customRoller(starting_points_mask, **kws, forward=True)
+        # mimic any()
+        mask = (rolling.sum() > 0) & d.notna()
+
+        self._flags[mask, field] = flag
+        return self
+
+    @flagging()
+    def flagByVariance(
+        self: "SaQC",
+        field: str,
+        window: str,
+        thresh: float,
+        maxna: int | None = None,
+        maxna_group: int | None = None,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag low-variance data.
+
+        Flags plateaus of constant data if the variance in a rolling window does not
+        exceed a certain threshold.
+
+        Any interval of values y(t),...,y(t+n) is flagged, if:
+
+        (1) n > `window`
+        (2) variance(y(t),...,y(t+n)) < `thresh`
+
+        Parameters
+        ----------
+        field : str
+            A column in flags and data.
+
+        window : str
+            Size of the moving window as a temporal extension (offset string). Each
+            window is variably sized, based on the number of observations included
+            in that time period.
+
+        thresh : float
+            Maximum total variance allowed per window.
+
+        maxna : int, default None
+            Maximum number of NaNs allowed in window.
+            If more NaNs are present, the window is not flagged.
+
+        maxna_group : int, default None
+            Same as `maxna` but for consecutive NaNs.
+
+        flag : float, default BAD
+            Flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        dataseries = self._data[field]
+        delta = getFreqDelta(dataseries.index)
+        if not delta:
+            raise IndexError("Timeseries irregularly sampled!")
+
+        if maxna is None:
+            maxna = np.inf
+
+        if maxna_group is None:
+            maxna_group = np.inf
+
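+        # require fully populated windows: the number of sampling periods ``delta``
+        # that fit into ``window``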
+        min_periods = int(np.ceil(pd.Timedelta(window) / pd.Timedelta(delta)))
+        window = pd.Timedelta(window)
+        to_set = statPass(
+            dataseries,
+            lambda x: varQC(x, maxna, maxna_group),
+            window,
+            thresh,
+            operator.lt,
+            min_periods=min_periods,
+        )
+
+        self._flags[to_set, field] = flag
+        return self
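
An illustrative sketch of both constants tests on a regularly sampled toy series; the thresholds and window sizes are made up, and ``flagByVariance`` relies on the regular sampling assumed here:

```python
import numpy as np
import pandas as pd
import saqc

# regularly sampled toy signal with a 12h plateau of (nearly) constant values
idx = pd.date_range("2021-01-01", periods=288, freq="10min")
values = np.sin(np.linspace(0, 12, 288))
values[100:172] = 0.5  # the plateau
data = pd.DataFrame({"sensor": values}, index=idx)

qc = saqc.SaQC(data)

# flag windows whose total change stays below 0.01
qc = qc.flagConstants("sensor", thresh=0.01, window="6h")

# flag windows whose variance stays below 1e-4
qc = qc.flagByVariance("sensor", window="6h", thresh=1e-4)
```
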
diff --git a/saqc/funcs/curvefit.py b/saqc/funcs/curvefit.py
index aecdfa674477f256fc9ae9b00c21ce6639bf13cf..bed9088ae7accd2b0163dbd2d4a3d4d0d6d630a5 100644
--- a/saqc/funcs/curvefit.py
+++ b/saqc/funcs/curvefit.py
@@ -7,16 +7,18 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Tuple, Union
+from typing import TYPE_CHECKING, Tuple, Union
 
 import numpy as np
 import pandas as pd
+from typing_extensions import Literal
 
 from dios import DictOfSeries
 from saqc.core.flags import Flags
 from saqc.core.register import register
 from saqc.lib.tools import getFreqDelta
 from saqc.lib.ts_operators import (
+    butterFilter,
     polyRoller,
     polyRollerIrregular,
     polyRollerNoMissing,
@@ -24,83 +26,138 @@ from saqc.lib.ts_operators import (
     polyRollerNumba,
 )
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
 
-@register(mask=["field"], demask=[], squeeze=[])
-def fitPolynomial(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: int | str,
-    order: int,
-    min_periods: int = 0,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Fits a polynomial model to the data.
-
-    The fit is calculated by fitting a polynomial of degree `order` to a data slice
-    of size `window`, that has x at its center.
-
-    Note that the result is stored in `field` and overwrite it unless a
-    `target` is given.
-
-    In case your data is sampled at an equidistant frequency grid:
-
-    (1) If you know your data to have no significant number of missing values,
-    or if you do not want to calculate residuals for windows containing missing values
-    any way, performance can be increased by setting min_periods=window.
-
-    Note, that the initial and final window/2 values do not get fitted.
-
-    Each residual gets assigned the worst flag present in the interval of
-    the original data.
-
-    Parameters
-    ----------
-    data : DictOfSeries
-        The data container.
-
-    field : str
-        A column in flags and data.
-
-    flags : Flags
-        The flags container.
-
-    window : str, int
-        Size of the window you want to use for fitting. If an integer is passed,
-        the size refers to the number of periods for every fitting window. If an
-        offset string is passed, the size refers to the total temporal extension. The
-        window will be centered around the vaule-to-be-fitted. For regularly sampled
-        data always a odd number of periods will be used for the fit (periods-1 if
-        periods is even).
-
-    order : int
-        Degree of the polynomial used for fitting
-
-    min_periods : int or None, default 0
-        Minimum number of observations in a window required to perform the fit,
-        otherwise NaNs will be assigned.
-        If ``None``, `min_periods` defaults to 1 for integer windows and to the
-        size of the window for offset based windows.
-        Passing 0, disables the feature and will result in over-fitting for too
-        sparse windows.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        Modified data
-    flags : saqc.Flags
-        Flags
-    """
-    return _fitPolynomial(
-        data=data,
-        field=field,
-        flags=flags,
-        window=window,
-        order=order,
-        min_periods=min_periods,
+_FILL_METHODS = Literal[
+    "linear",
+    "nearest",
+    "zero",
+    "slinear",
+    "quadratic",
+    "cubic",
+    "spline",
+    "barycentric",
+    "polynomial",
+]
+
+
+class CurvefitMixin:
+    @register(mask=["field"], demask=[], squeeze=[])
+    def fitPolynomial(
+        self: "SaQC",
+        field: str,
+        window: int | str,
+        order: int,
+        min_periods: int = 0,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Fits a polynomial model to the data.
+
+        The fit is calculated by fitting a polynomial of degree `order` to a data slice
+        of size `window`, that has x at its center.
+
+        Note that the result is stored in `field` and overwrites it unless a
+        `target` is given.
+
+        In case your data is sampled at an equidistant frequency grid:
+
+        (1) If you know your data to have no significant number of missing values,
+        or if you do not want to calculate residuals for windows containing missing values
+        anyway, performance can be increased by setting min_periods=window.
+
+        Note that the initial and final window/2 values do not get fitted.
+
+        Each residual gets assigned the worst flag present in the interval of
+        the original data.
+
+        Parameters
+        ----------
+        field : str
+             A column in flags and data.
+
+        window : str, int
+            Size of the window you want to use for fitting. If an integer is passed,
+            the size refers to the number of periods for every fitting window. If an
+            offset string is passed, the size refers to the total temporal extension. The
+            window will be centered around the value-to-be-fitted. For regularly sampled
+            data an odd number of periods will always be used for the fit (periods-1 if
+            periods is even).
+
+        order : int
+            Degree of the polynomial used for fitting
+
+        min_periods : int or None, default 0
+            Minimum number of observations in a window required to perform the fit,
+            otherwise NaNs will be assigned.
+            If ``None``, `min_periods` defaults to 1 for integer windows and to the
+            size of the window for offset based windows.
+            Passing 0, disables the feature and will result in over-fitting for too
+            sparse windows.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        self._data, self._flags = _fitPolynomial(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            window=window,
+            order=order,
+            min_periods=min_periods,
+            **kwargs,
+        )
+        return self
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def fitLowpassFilter(
+        self: "SaQC",
+        field: str,
+        cutoff: float | str,
+        nyq: float = 0.5,
+        filter_order: int = 2,
+        fill_method: _FILL_METHODS = "linear",
         **kwargs,
-    )
+    ) -> "SaQC":
+        """
+        Fits the data using the Butterworth filter.
+
+        Note
+        ----
+        The data is expected to be regularly sampled.
+
+        Parameters
+        ----------
+        field: str
+            A column in flags and data.
+
+        cutoff: {float, str}
+            The cutoff-frequency, either an offset freq string, or expressed in multiples of the sampling rate.
+
+        nyq: float
+            The Nyquist frequency, expressed in multiples of the sampling rate.
+
+        fill_method: Literal["linear", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric", "polynomial"]
+            Fill method to be applied on the data before filtering (the Butterworth filter
+            cannot handle ``np.nan``). See the documentation of the pandas.Series.interpolate
+            method for details on the methods associated with the different keywords.
+
+        filter_order: int, default 2
+            The order of the Butterworth filter.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+
+        self._data[field] = butterFilter(
+            self._data[field],
+            cutoff=cutoff,
+            nyq=nyq,
+            filter_order=filter_order,
+            fill_method=fill_method,
+            filter_type="lowpass",
+        )
+        return self
 
 
 def _fitPolynomial(
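
A usage sketch for the two fitting methods, with illustrative parameters; the ``target`` keyword is the generic output-redirection argument handled by ``register``, and the offset-string ``cutoff`` follows the docstring above:

```python
import numpy as np
import pandas as pd
import saqc

idx = pd.date_range("2021-01-01", periods=500, freq="10min")
rng = np.random.default_rng(0)
noisy = np.sin(np.linspace(0, 20, 500)) + rng.normal(0, 0.2, 500)
data = pd.DataFrame({"sensor": noisy}, index=idx)

qc = saqc.SaQC(data)

# centered polynomial fit, written to a new column instead of overwriting "sensor"
qc = qc.fitPolynomial(
    "sensor", target="sensor_poly", window="5h", order=3, min_periods=10
)

# Butterworth low-pass fit; the cutoff frequency is given as an offset string
qc = qc.fitLowpassFilter("sensor", target="sensor_lowpass", cutoff="6h")
```
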
diff --git a/saqc/funcs/drift.py b/saqc/funcs/drift.py
index 6b07c3ef0f95b2a0226ae472b7d5b14001fe49de..9e8cf172aa8591fa4ad0663ee19f9dcc53431b66 100644
--- a/saqc/funcs/drift.py
+++ b/saqc/funcs/drift.py
@@ -11,7 +11,7 @@ from __future__ import annotations
 
 import functools
 import inspect
-from typing import Callable, Optional, Sequence, Tuple
+from typing import TYPE_CHECKING, Callable, Optional, Sequence, Tuple
 
 import numpy as np
 import pandas as pd
@@ -23,11 +23,14 @@ from dios import DictOfSeries
 from saqc.constants import BAD
 from saqc.core.register import Flags, flagging, register
 from saqc.funcs.changepoints import _assignChangePointCluster
-from saqc.funcs.tools import copyField, dropField
 from saqc.lib.tools import detectDeviants, filterKwargs, toSequence
 from saqc.lib.ts_operators import expDriftModel, linearDriftModel
 from saqc.lib.types import CurveFitter
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
 LinkageString = Literal[
     "single", "complete", "average", "weighted", "centroid", "median", "ward"
 ]
@@ -35,556 +38,674 @@ LinkageString = Literal[
 MODELDICT = {"linear": linearDriftModel, "exponential": expDriftModel}
 
 
-@register(
-    mask=["field"],
-    demask=["field"],
-    squeeze=["field"],  # reference is written !
-    multivariate=True,
-    handles_target=False,
-)
-def flagDriftFromNorm(
-    data: DictOfSeries,
-    field: Sequence[str],
-    flags: Flags,
-    freq: str,
-    spread: float,
-    frac: float = 0.5,
-    metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(
-        np.array([x, y]), metric="cityblock"
+class DriftMixin:
+    @register(
+        mask=["field"],
+        demask=["field"],
+        squeeze=["field"],  # reference is written !
+        multivariate=True,
+        handles_target=False,
     )
-    / len(x),
-    method: LinkageString = "single",
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flags data that deviates from an avarage data course.
-
-    "Normality" is determined in terms of a maximum spreading distance,
-    that members of a normal group must not exceed. In addition, only a group is considered
-    "normal" if it contains more then `frac` percent of the variables in "field".
-
-    See the Notes section for a more detailed presentation of the algorithm
-
-    Parameters
-    ----------
-    data : DictOfSeries
-        The data container.
-
-    field : str
-        A column in flags and data.
-
-    flags : Flags
-        The flags container.
-
-    freq : str
-        Frequency, that split the data in chunks.
-
-    spread : float
-        Maximum spread allowed in the group of *normal* data. See Notes section for more details.
-
-    frac : float, default 0.5
-        Fraction defining the normal group. Use a value from the interval [0,1].
-        The higher the value, the more stable the algorithm will be. For values below
-        0.5 the results are undefined.
-
-    metric : Callable, default ``lambda x,y:pdist(np.array([x,y]),metric="cityblock")/len(x)``
-        Distance function that takes two arrays as input and returns a scalar float.
-        This value is interpreted as the distance of the two input arrays.
-        Defaults to the `averaged manhattan metric` (see Notes).
-
-    method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
-        Linkage method used for hierarchical (agglomerative) clustering of the data.
-        `method` is directly passed to ``scipy.hierarchy.linkage``. See its documentation [1] for
-        more details. For a general introduction on hierarchical clustering see [2].
-
-    flag : float, default BAD
-        flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-    flags : saqc.Flags
-
-    Notes
-    -----
-    following steps are performed for every data "segment" of length `freq` in order to find the
-    "abnormal" data:
-
-    1. Calculate distances :math:`d(x_i,x_j)` for all :math:`x_i` in parameter `field`.
-       (with :math:`d` denoting the distance function, specified by `metric`.
-    2. Calculate a dendogram with a hierarchical linkage algorithm, specified by `method`.
-    3. Flatten the dendogram at the level, the agglomeration costs exceed `spread`
-    4. check if a cluster containing more than `frac` variables.
-
-        1. if yes: flag all the variables that are not in that cluster (inside the segment)
-        2. if no: flag nothing
-
-    The main parameter giving control over the algorithms behavior is the `spread` parameter,
-    that determines the maximum spread of a normal group by limiting the costs, a cluster
-    agglomeration must not exceed in every linkage step.
-    For singleton clusters, that costs just equal half the distance, the data in the
-    clusters, have to each other. So, no data can be clustered together, that are more then
-    2*`spread` distances away from each other. When data get clustered together, this new
-    clusters distance to all the other data/clusters is calculated according to the linkage
-    method specified by `method`. By default, it is the minimum distance, the members of the
-    clusters have to each other. Having that in mind, it is advisable to choose a distance
-    function, that can be well interpreted in the units dimension of the measurement and where
-    the interpretation is invariant over the length of the data. That is, why,
-    the "averaged manhattan metric" is set as the metric default, since it corresponds to the
-    averaged value distance, two data sets have (as opposed by euclidean, for example).
-
-    References
-    ----------
-    Documentation of the underlying hierarchical clustering algorithm:
-        [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
-    Introduction to Hierarchical clustering:
-        [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
-    """
-    fields = toSequence(field)
-
-    data_to_flag = data[fields].to_df()
-    data_to_flag.dropna(inplace=True)
-
-    segments = data_to_flag.groupby(pd.Grouper(freq=freq))
-    for segment in segments:
-
-        if segment[1].shape[0] <= 1:
-            continue
-
-        drifters = detectDeviants(segment[1], metric, spread, frac, method, "variables")
-
-        for var in drifters:
-            flags[segment[1].index, fields[var]] = flag
+    def flagDriftFromNorm(
+        self: "SaQC",
+        field: Sequence[str],
+        freq: str,
+        spread: float,
+        frac: float = 0.5,
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(
+            np.array([x, y]), metric="cityblock"
+        )
+        / len(x),
+        method: LinkageString = "single",
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flags data that deviates from an average data course.
+
+        "Normality" is determined in terms of a maximum spreading distance
+        that members of a normal group must not exceed. In addition, a group is only considered
+        "normal" if it contains more than the fraction `frac` of the variables in "field".
+
+        See the Notes section for a more detailed presentation of the algorithm.
+
+        Parameters
+        ----------
+        field : str or list of str
+            The columns in flags and data to check for drift.
+
+        freq : str
+            Frequency (offset string) that splits the data into chunks.
+
+        spread : float
+            Maximum spread allowed in the group of *normal* data. See Notes section for more details.
+
+        frac : float, default 0.5
+            Fraction defining the normal group. Use a value from the interval [0,1].
+            The higher the value, the more stable the algorithm will be. For values below
+            0.5 the results are undefined.
+
+        metric : Callable, default ``lambda x,y:pdist(np.array([x,y]),metric="cityblock")/len(x)``
+            Distance function that takes two arrays as input and returns a scalar float.
+            This value is interpreted as the distance of the two input arrays.
+            Defaults to the `averaged manhattan metric` (see Notes).
+
+        method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+            Linkage method used for hierarchical (agglomerative) clustering of the data.
+            `method` is directly passed to ``scipy.hierarchy.linkage``. See its documentation [1] for
+            more details. For a general introduction on hierarchical clustering see [2].
+
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        The following steps are performed for every data "segment" of length `freq` in order to find the
+        "abnormal" data:
+
+        1. Calculate the distances :math:`d(x_i,x_j)` for all :math:`x_i` in parameter `field`
+           (with :math:`d` denoting the distance function specified by `metric`).
+        2. Calculate a dendrogram with a hierarchical linkage algorithm, specified by `method`.
+        3. Flatten the dendrogram at the level where the agglomeration costs exceed `spread`.
+        4. Check if there is a cluster containing more than the fraction `frac` of the variables.
+
+            1. if yes: flag all the variables that are not in that cluster (inside the segment)
+            2. if no: flag nothing
+
+        The main parameter giving control over the algorithms behavior is the `spread` parameter,
+        that determines the maximum spread of a normal group by limiting the costs, a cluster
+        agglomeration must not exceed in every linkage step.
+        For singleton clusters, that costs just equal half the distance, the data in the
+        clusters, have to each other. So, no data can be clustered together, that are more then
+        2*`spread` distances away from each other. When data get clustered together, this new
+        clusters distance to all the other data/clusters is calculated according to the linkage
+        method specified by `method`. By default, it is the minimum distance, the members of the
+        clusters have to each other. Having that in mind, it is advisable to choose a distance
+        function, that can be well interpreted in the units dimension of the measurement and where
+        the interpretation is invariant over the length of the data. That is, why,
+        the "averaged manhattan metric" is set as the metric default, since it corresponds to the
+        averaged value distance, two data sets have (as opposed by euclidean, for example).
+
+        References
+        ----------
+        Documentation of the underlying hierarchical clustering algorithm:
+            [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
+        Introduction to Hierarchical clustering:
+            [2] https://en.wikipedia.org/wiki/Hierarchical_clustering
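+
+        Examples
+        --------
+        A minimal, illustrative sketch (column names and values are made up): three co-recorded
+        temperature series are checked segment-wise (here: daily); one of them clearly deviates
+        from the other two.
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=144, freq="10min")
+        >>> data = pd.DataFrame(
+        ...     {"temp1": 20.0, "temp2": 20.1, "temp3": 25.0}, index=idx
+        ... )
+        >>> qc = saqc.SaQC(data)
+        >>> qc = qc.flagDriftFromNorm(
+        ...     field=["temp1", "temp2", "temp3"], freq="1D", spread=1.0
+        ... )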
+        """
+        fields = toSequence(field)
+
+        data_to_flag = self._data[fields].to_df()
+        data_to_flag.dropna(inplace=True)
+
+        segments = data_to_flag.groupby(pd.Grouper(freq=freq))
+        for segment in segments:
+
+            if segment[1].shape[0] <= 1:
+                continue
+
+            drifters = detectDeviants(
+                segment[1], metric, spread, frac, method, "variables"
+            )
 
-    return data, flags
+            for var in drifters:
+                self._flags[segment[1].index, fields[var]] = flag
 
+        return self
 
-@register(
-    mask=["field", "reference"],
-    demask=["field", "reference"],
-    squeeze=["field", "reference"],  # reference is written !
-    multivariate=True,
-    handles_target=False,
-)
-def flagDriftFromReference(
-    data: DictOfSeries,
-    field: Sequence[str],
-    flags: Flags,
-    reference: str,
-    freq: str,
-    thresh: float,
-    metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(
-        np.array([x, y]), metric="cityblock"
+    @register(
+        mask=["field", "reference"],
+        demask=["field", "reference"],
+        squeeze=["field", "reference"],  # reference is written !
+        multivariate=True,
+        handles_target=False,
     )
-    / len(x),
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flags data that deviates from a reference course.
+    def flagDriftFromReference(
+        self: "SaQC",
+        field: Sequence[str],
+        reference: str,
+        freq: str,
+        thresh: float,
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: pdist(
+            np.array([x, y]), metric="cityblock"
+        )
+        / len(x),
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flags data that deviates from a reference course.
 
-    The deviation is measured by a passed distance function.
+        The deviation is measured by a passed distance function.
 
-    Parameters
-    ----------
-    data : DictOfSeries
-        The data container.
+        Parameters
+        ----------
+        field : list of str
+            List of columns in flags and data that are checked against the reference.
 
-    field : str
-        A column in flags and data.
+        freq : str
+            Frequency that splits the data into chunks (segments).
 
-    flags : Flags
-        The flags container.
+        reference : str
+            Reference variable that the deviation is calculated from.
 
-    freq : str
-        Frequency, that split the data in chunks.
+        thresh : float
+            Maximum deviation from reference.
 
-    reference : str
-        Reference variable, the deviation is calculated from.
+        metric : Callable
+            Distance function. Takes two arrays as input and returns a scalar float.
+            This value is interpreted as the mutual distance of the two input arrays.
+            Defaults to the `averaged manhattan metric` (see Notes).
 
-    thresh : float
-        Maximum deviation from reference.
+        target : None
+            Ignored.
 
-    metric : Callable
-        Distance function. Takes two arrays as input and returns a scalar float.
-        This value is interpreted as the mutual distance of the two input arrays.
-        Defaults to the `averaged manhattan metric` (see Notes).
+        flag : float, default BAD
+            Flag to set.
 
-    target : None
-        Ignored.
+        Returns
+        -------
+        saqc.SaQC
 
-    flag : float, default BAD
-        Flag to set.
+        Notes
+        -----
+        It is advisable to choose a distance function that can be well interpreted in
+        the unit of the measurement and whose interpretation is invariant with respect to the
+        length of the data. That is why the "averaged Manhattan metric" is the default,
+        since it corresponds to the average value distance of two data sets (as opposed to,
+        e.g., the Euclidean distance).
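+
+        Examples
+        --------
+        A minimal, illustrative sketch (column names and values are made up): two series are
+        compared day-wise against a reference series; days on which their averaged Manhattan
+        distance to the reference exceeds `thresh` get flagged.
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=144, freq="10min")
+        >>> data = pd.DataFrame(
+        ...     {"ref": 20.0, "temp1": 20.2, "temp2": 23.0}, index=idx
+        ... )
+        >>> qc = saqc.SaQC(data)
+        >>> qc = qc.flagDriftFromReference(
+        ...     field=["temp1", "temp2"], reference="ref", freq="1D", thresh=1.0
+        ... )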
+        """
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-    flags : saqc.Flags
+        fields = toSequence(field)
 
-    Notes
-    -----
-    It is advisable to choose a distance function, that can be well interpreted in
-    the units dimension of the measurement and where the interpretation is invariant over the
-    length of the data. That is, why, the "averaged manhatten metric" is set as the metric
-    default, since it corresponds to the averaged value distance, two data sets have (as opposed
-    by euclidean, for example).
-    """
+        if reference not in fields:
+            fields.append(reference)
 
-    fields = toSequence(field)
+        data_to_flag = self._data[fields].to_df().dropna()
 
-    if reference not in fields:
-        fields.append(reference)
+        segments = data_to_flag.groupby(pd.Grouper(freq=freq))
+        for segment in segments:
 
-    data_to_flag = data[fields].to_df().dropna()
+            if segment[1].shape[0] <= 1:
+                continue
 
-    segments = data_to_flag.groupby(pd.Grouper(freq=freq))
-    for segment in segments:
+            for i in range(len(fields)):
+                dist = metric(
+                    segment[1].iloc[:, i].values, segment[1].loc[:, reference].values
+                )
 
-        if segment[1].shape[0] <= 1:
-            continue
+                if dist > thresh:
+                    self._flags[segment[1].index, fields[i]] = flag
 
-        for i in range(len(fields)):
-            dist = metric(
-                segment[1].iloc[:, i].values, segment[1].loc[:, reference].values
-            )
+        return self
 
-            if dist > thresh:
-                flags[segment[1].index, fields[i]] = flag
+    @register(mask=["field"], demask=[], squeeze=[])
+    def correctDrift(
+        self: "SaQC",
+        field: str,
+        maintenance_field: str,
+        model: Callable[..., float] | Literal["linear", "exponential"],
+        cal_range: int = 5,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        The function corrects drifting behavior.
 
-    return data, flags
+        See the Notes section for an overview of the correction algorithm.
 
+        Parameters
+        ----------
+        field : str
+            Column in data and flags.
 
-@register(mask=["field"], demask=[], squeeze=[])
-def correctDrift(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    maintenance_field: str,
-    model: Callable[..., float] | Literal["linear", "exponential"],
-    cal_range: int = 5,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The function corrects drifting behavior.
+        maintenance_field : str
+            Column holding the support-points information.
+            The data is expected to have the following form:
+            the index of the series represents the beginning of a maintenance
+            event, whereas the values represent its end.
 
-    See the Notes section for an overview over the correction algorithm.
+        model : Callable or {'exponential', 'linear'}
+            A model function describing the drift behavior that is to be corrected.
+            Either use the built-in exponential or linear drift model by passing a string, or pass a custom callable.
+            The model function must always contain the keyword parameters 'origin' and 'target'.
+            The first parameter must always be the one by which the data is passed to the model.
+            After the data parameter, an arbitrary number of model calibration arguments may occur in
+            the signature.
+            See the Notes section for an extensive description.
 
-    Parameters
-    ----------
-    data : DictOfSeries
-        The data container.
+        cal_range : int, default 5
+            Number of values to calculate the mean of, for obtaining the value level directly
+            after and directly before a maintenance event. Needed for shift calibration.
 
-    field : str
-        Column in data and flags.
+        Returns
+        -------
+        saqc.SaQC
 
-    flags : saqc.Flags
-        Flags container.
+        Notes
+        -----
+        It is assumed that, between support points, there is a drift effect shifting the
+        measurements in a way that can be described by a model function M(t, p, origin, target)
+        (with 0 <= t <= 1, p being a parameter set, and origin, target being floats).
 
-    maintenance_field : str
-        Column holding the support-points information.
-        The data is expected to have the following form:
-        The index of the series represents the beginning of a maintenance
-        event, wheras the values represent its endings.
+        Note that it is possible for the model to have no free parameters p at all (mainly for linear drift).
 
-    model : Callable or {'exponential', 'linear'}
-        A modelfunction describing the drift behavior, that is to be corrected.
-        Either use built-in exponential or linear drift model by passing a string, or pass a custom callable.
-        The model function must always contain the keyword parameters 'origin' and 'target'.
-        The starting parameter must always be the parameter, by wich the data is passed to the model.
-        After the data parameter, there can occure an arbitrary number of model calibration arguments in
-        the signature.
-        See the Notes section for an extensive description.
+        Directly after the last support point (t=0), the drift model should evaluate to the
+        origin calibration level (origin), and directly before the next support point (t=1),
+        it should evaluate to the target calibration level (target)::
 
-    cal_range : int, default 5
-        Number of values to calculate the mean of, for obtaining the value level directly
-        after and directly before a maintenance event. Needed for shift calibration.
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
+            M(0, p, origin, target) = origin
+            M(1, p, origin, target) = target
 
-    Notes
-    -----
-    It is assumed, that between support points, there is a drift effect shifting the
-    meassurements in a way, that can be described, by a model function M(t, p, origin, target).
-    (With 0<=t<=1, p being a parameter set, and origin, target being floats).
 
-    Note, that its possible for the model to have no free parameters p at all. (linear drift mainly)
+        The model is then fitted to every data chunk in between support points by optimizing
+        the parameters p, thus obtaining an optimal parameter set P.
 
-    The drift model, directly after the last support point (t=0),
-    should evaluate to the origin - calibration level (origin), and directly before the next
-    support point (t=1), it should evaluate to the target calibration level (target).
+        The new values at t are computed via::
 
+            new_vals(t) = old_vals(t) + M(t, P, origin, target) - M_drift(t, P, origin, new_target)
 
-        M(0, p, origin, target) = origin
-        M(1, p, origin, target) = target
+        Here, ``new_target`` represents the value level immediately after the next support point.
 
+        Examples
+        --------
+        Some examples of meaningful drift models.
 
-    The model is than fitted to any data chunk in between support points, by optimizing
-    the parameters p, and thus, obtaining optimal parameterset P.
+        Linear drift model (no free parameters):
 
-    The new values at t are computed via:::
 
-        new_vals(t) = old_vals(t) + M(t, P, origin, target) - M_drift(t, P, origin, new_target)
+        >>> Model = lambda t, origin, target: origin + t * (target - origin)
 
-    Wheras ``new_target`` represents the value level immediately after the next support point.
+        Exponential drift model (exponential rise), with one free parameter ``c``:
 
-    Examples
-    --------
-    Some examples of meaningful driftmodels.
+        >>> expFunc = lambda t, a, b, c: a + b * (np.exp(c * t) - 1)
+        >>> Model = lambda t, c, origin, target: expFunc(t, origin, (target - origin) / (np.exp(abs(c)) - 1), abs(c))
 
-    Linear drift modell (no free parameters).
+        Exponential and linear drift models are part of the ``ts_operators`` library, under the names
+        ``expDriftModel`` and ``linearDriftModel``.
 
+        """
+        # extract model func:
+        if isinstance(model, str):
+            if model not in MODELDICT:
+                raise ValueError(
+                    f"invalid model '{model}', choose one of '{MODELDICT.keys()}'"
+                )
+            model = MODELDICT[model]
 
-    >>> Model = lambda t, origin, target: origin + t*target
+        # 1: extract fit intervals:
+        if self._data[maintenance_field].empty:
+            return self
 
-    exponential drift model (exponential raise!)
+        to_correct = self._data[field].copy()
+        maint_data = self._data[maintenance_field].copy()
 
-    >>> expFunc = lambda t, a, b, c: a + b * (np.exp(c * x) - 1)
-    >>> Model = lambda t, p, origin, target: expFunc(t, (target - origin) / (np.exp(abs(c)) - 1), abs(c))
+        to_correct_clean = to_correct.dropna()
+        d = {"drift_group": np.nan, to_correct.name: to_correct_clean.values}
+        drift_frame = pd.DataFrame(d, index=to_correct_clean.index)
 
-    Exponential and linear driftmodels are part of the ``ts_operators`` library, under the names
-    ``expDriftModel`` and ``linearDriftModel``.
+        # group the drift frame
+        for k in range(0, maint_data.shape[0] - 1):
+            # assign group numbers for the timespans in between one maintenance ending and the beginning of the next;
+            # the maintenance period itself remains assigned np.nan
+            drift_frame.loc[
+                maint_data.values[k] : pd.Timestamp(maint_data.index[k + 1]),
+                "drift_group",
+            ] = k
 
-    """
-    # extract model func:
-    if isinstance(model, str):
-        if model not in MODELDICT:
-            raise ValueError(
-                f"invalid model '{model}', choose one of '{MODELDICT.keys()}'"
-            )
-        model = MODELDICT[model]
-
-    # 1: extract fit intervals:
-    if data[maintenance_field].empty:
-        return data, flags
-
-    to_correct = data[field].copy()
-    maint_data = data[maintenance_field].copy()
-
-    to_correct_clean = to_correct.dropna()
-    d = {"drift_group": np.nan, to_correct.name: to_correct_clean.values}
-    drift_frame = pd.DataFrame(d, index=to_correct_clean.index)
-
-    # group the drift frame
-    for k in range(0, maint_data.shape[0] - 1):
-        # assign group numbers for the timespans in between one maintenance ending and the beginning of the next
-        # maintenance time itself remains np.nan assigned
-        drift_frame.loc[
-            maint_data.values[k] : pd.Timestamp(maint_data.index[k + 1]), "drift_group"
-        ] = k
-
-    # define target values for correction
-    drift_grouper = drift_frame.groupby("drift_group")
-    shift_targets = drift_grouper.aggregate(lambda x: x[:cal_range].mean()).shift(-1)
-
-    for k, group in drift_grouper:
-        data_series = group[to_correct.name]
-        data_fit, data_shiftTarget = _driftFit(
-            data_series, shift_targets.loc[k, :][0], cal_range, model
+        # define target values for correction
+        drift_grouper = drift_frame.groupby("drift_group")
+        shift_targets = drift_grouper.aggregate(lambda x: x[:cal_range].mean()).shift(
+            -1
         )
-        data_fit = pd.Series(data_fit, index=group.index)
-        data_shiftTarget = pd.Series(data_shiftTarget, index=group.index)
-        data_shiftVektor = data_shiftTarget - data_fit
-        shiftedData = data_series + data_shiftVektor
-        to_correct[shiftedData.index] = shiftedData
-
-    data[field] = to_correct
-
-    return data, flags
 
-
-@register(mask=["field", "cluster_field"], demask=["cluster_field"], squeeze=[])
-def correctRegimeAnomaly(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    cluster_field: str,
-    model: CurveFitter,
-    tolerance: Optional[str] = None,
-    epoch: bool = False,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function fits the passed model to the different regimes in data[field] and tries to correct
-    those values, that have assigned a negative label by data[cluster_field].
-
-    Currently, the only correction mode supported is the "parameter propagation."
-
-    This means, any regime :math:`z`, labeled negatively and being modeled by the parameters p, gets corrected via:
-
-    :math:`z_{correct} = z + (m(p^*) - m(p))`,
-
-    where :math:`p^*` denotes the parameter set belonging to the fit of the nearest not-negatively labeled cluster.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to correct.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    cluster_field : str
-        A string denoting the field in data, holding the cluster label for the data you want to correct.
-    model : Callable
-        The model function to be fitted to the regimes.
-        It must be a function of the form :math:`f(x, *p)`, where :math:`x` is the ``numpy.array`` holding the
-        independent variables and :math:`p` are the model parameters that are to be obtained by fitting.
-        Depending on the `x_date` parameter, independent variable x will either be the timestamps
-        of every regime transformed to seconds from epoch, or it will be just seconds, counting the regimes length.
-    tolerance : {None, str}, default None:
-        If an offset string is passed, a data chunk of length `offset` right at the
-        start and right at the end is ignored when fitting the model. This is to account for the
-        unreliability of data near the changepoints of regimes.
-    epoch : bool, default False
-        If True, use "seconds from epoch" as x input to the model func, instead of "seconds from regime start".
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    cluster_ser = data[cluster_field]
-    unique_successive = pd.unique(cluster_ser.values)
-    data_ser = data[field]
-    regimes = data_ser.groupby(cluster_ser)
-    para_dict = {}
-    x_dict = {}
-    x_mask = {}
-    if tolerance is not None:
-        # get seconds
-        tolerance = pd.Timedelta(tolerance).total_seconds()
-    for label, regime in regimes:
-        if epoch is False:
-            # get seconds data:
-            xdata = (regime.index - regime.index[0]).to_numpy(dtype=float) * 10 ** (-9)
-        else:
-            # get seconds from epoch data
-            xdata = regime.index.to_numpy(dtype=float) * 10 ** (-9)
-        ydata = regime.values
-        valid_mask = ~np.isnan(ydata)
+        for k, group in drift_grouper:
+            data_series = group[to_correct.name]
+            data_fit, data_shiftTarget = _driftFit(
+                data_series, shift_targets.loc[k, :][0], cal_range, model
+            )
+            data_fit = pd.Series(data_fit, index=group.index)
+            data_shiftTarget = pd.Series(data_shiftTarget, index=group.index)
+            data_shiftVektor = data_shiftTarget - data_fit
+            shiftedData = data_series + data_shiftVektor
+            to_correct[shiftedData.index] = shiftedData
+
+        self._data[field] = to_correct
+
+        return self
+
+    @register(mask=["field", "cluster_field"], demask=["cluster_field"], squeeze=[])
+    def correctRegimeAnomaly(
+        self: "SaQC",
+        field: str,
+        cluster_field: str,
+        model: CurveFitter,
+        tolerance: Optional[str] = None,
+        epoch: bool = False,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function fits the passed model to the different regimes in data[field] and tries to correct
+        those values that are assigned a negative label in data[cluster_field].
+
+        Currently, the only correction mode supported is the "parameter propagation."
+
+        This means that any regime :math:`z`, labeled negatively and modeled by the parameters p, gets corrected via:
+
+        :math:`z_{correct} = z + (m(p^*) - m(p))`,
+
+        where :math:`p^*` denotes the parameter set belonging to the fit of the nearest not-negatively labeled cluster.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the data column, you want to correct.
+
+        cluster_field : str
+            A string denoting the field in data, holding the cluster label for the data you want to correct.
+
+        model : Callable
+            The model function to be fitted to the regimes.
+            It must be a function of the form :math:`f(x, *p)`, where :math:`x` is the ``numpy.array`` holding the
+            independent variables and :math:`p` are the model parameters that are to be obtained by fitting.
+            Depending on the `epoch` parameter, the independent variable x will either be the timestamps
+            of every regime transformed to seconds from epoch, or it will be just seconds, counting the regime's length.
+
+        tolerance : {None, str}, default None
+            If an offset string is passed, a data chunk of length `tolerance` right at the
+            start and right at the end of every regime is ignored when fitting the model. This is to
+            account for the unreliability of data near the changepoints of regimes.
+
+        epoch : bool, default False
+            If True, use "seconds from epoch" as x input to the model func, instead of "seconds from regime start".
+
+        Returns
+        -------
+        saqc.SaQC
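+
+        Examples
+        --------
+        A minimal, illustrative sketch (names and values are made up): a series with a constant
+        level shift in its second half, whose regime labels were computed beforehand and stored
+        in ``"sm_regimes"`` (the anomalous regime carries a negative label). A constant model is
+        fitted per regime, and the anomalous regime should get shifted onto the level of its
+        neighbouring regime.
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=20, freq="1D")
+        >>> sm = pd.Series(np.r_[np.zeros(10), np.full(10, 5.0)], index=idx, name="sm")
+        >>> labels = pd.Series(
+        ...     np.r_[np.full(10, 1.0), np.full(10, -1.0)], index=idx, name="sm_regimes"
+        ... )
+        >>> qc = saqc.SaQC(pd.concat([sm, labels], axis=1))
+        >>> qc = qc.correctRegimeAnomaly(
+        ...     "sm", cluster_field="sm_regimes", model=lambda x, c: c + 0 * x
+        ... )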
+        """
+        cluster_ser = self._data[cluster_field]
+        unique_successive = pd.unique(cluster_ser.values)
+        data_ser = self._data[field]
+        regimes = data_ser.groupby(cluster_ser)
+        para_dict = {}
+        x_dict = {}
+        x_mask = {}
         if tolerance is not None:
-            valid_mask &= xdata > xdata[0] + tolerance
-            valid_mask &= xdata < xdata[-1] - tolerance
-        try:
-            p, *_ = curve_fit(model, xdata[valid_mask], ydata[valid_mask])
-        except (RuntimeError, ValueError):
-            p = np.array([np.nan])
-        para_dict[label] = p
-        x_dict[label] = xdata
-        x_mask[label] = valid_mask
-
-    first_normal = unique_successive > 0
-    first_valid = np.array(
-        [
-            ~pd.isna(para_dict[unique_successive[i]]).any()
-            for i in range(0, unique_successive.shape[0])
-        ]
-    )
-    first_valid = np.where(first_normal & first_valid)[0][0]
-    last_valid = 1
-
-    for k in range(0, unique_successive.shape[0]):
-        if unique_successive[k] < 0 & (
-            not pd.isna(para_dict[unique_successive[k]]).any()
-        ):
-            ydata = data_ser[regimes.groups[unique_successive[k]]].values
-            xdata = x_dict[unique_successive[k]]
-            ypara = para_dict[unique_successive[k]]
-            if k > 0:
-                target_para = para_dict[unique_successive[k - last_valid]]
+            # get seconds
+            tolerance = pd.Timedelta(tolerance).total_seconds()
+        for label, regime in regimes:
+            if epoch is False:
+                # get seconds data:
+                xdata = (regime.index - regime.index[0]).to_numpy(dtype=float) * 10 ** (
+                    -9
+                )
             else:
-                # first regime has no "last valid" to its left, so we use first valid to the right:
-                target_para = para_dict[unique_successive[k + first_valid]]
-            y_shifted = ydata + (model(xdata, *target_para) - model(xdata, *ypara))
-            data_ser[regimes.groups[unique_successive[k]]] = y_shifted
-            if k > 0:
+                # get seconds from epoch data
+                xdata = regime.index.to_numpy(dtype=float) * 10 ** (-9)
+            ydata = regime.values
+            valid_mask = ~np.isnan(ydata)
+            if tolerance is not None:
+                valid_mask &= xdata > xdata[0] + tolerance
+                valid_mask &= xdata < xdata[-1] - tolerance
+            try:
+                p, *_ = curve_fit(model, xdata[valid_mask], ydata[valid_mask])
+            except (RuntimeError, ValueError):
+                p = np.array([np.nan])
+            para_dict[label] = p
+            x_dict[label] = xdata
+            x_mask[label] = valid_mask
+
+        first_normal = unique_successive > 0
+        first_valid = np.array(
+            [
+                ~pd.isna(para_dict[unique_successive[i]]).any()
+                for i in range(0, unique_successive.shape[0])
+            ]
+        )
+        first_valid = np.where(first_normal & first_valid)[0][0]
+        last_valid = 1
+
+        for k in range(0, unique_successive.shape[0]):
+            if unique_successive[k] < 0 & (
+                not pd.isna(para_dict[unique_successive[k]]).any()
+            ):
+                ydata = data_ser[regimes.groups[unique_successive[k]]].values
+                xdata = x_dict[unique_successive[k]]
+                ypara = para_dict[unique_successive[k]]
+                if k > 0:
+                    target_para = para_dict[unique_successive[k - last_valid]]
+                else:
+                    # first regime has no "last valid" to its left, so we use first valid to the right:
+                    target_para = para_dict[unique_successive[k + first_valid]]
+                y_shifted = ydata + (model(xdata, *target_para) - model(xdata, *ypara))
+                data_ser[regimes.groups[unique_successive[k]]] = y_shifted
+                if k > 0:
+                    last_valid += 1
+            elif pd.isna(para_dict[unique_successive[k]]).any() & (k > 0):
                 last_valid += 1
-        elif pd.isna(para_dict[unique_successive[k]]).any() & (k > 0):
-            last_valid += 1
-        else:
-            last_valid = 1
-
-    data[field] = data_ser
-    return data, flags
-
-
-@register(mask=["field"], demask=[], squeeze=[])
-def correctOffset(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    max_jump: float,
-    spread: float,
-    window: str,
-    min_periods: int,
-    tolerance: Optional[str] = None,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to correct.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    max_jump : float
-        when searching for changepoints in mean - this is the threshold a mean difference in the
-        sliding window search must exceed to trigger changepoint detection.
-    spread : float
-        threshold denoting the maximum, regimes are allowed to abolutely differ in their means
-        to form the "normal group" of values.
-    window : str
-        Size of the adjacent windows that are used to search for the mean changepoints.
-    min_periods : int
-        Minimum number of periods a search window has to contain, for the result of the changepoint
-        detection to be considered valid.
-    tolerance : {None, str}, default None:
-        If an offset string is passed, a data chunk of length `offset` right from the
-        start and right before the end of any regime is ignored when calculating a regimes mean for data correcture.
-        This is to account for the unrelyability of data near the changepoints of regimes.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    data, flags = copyField(data, field, flags, field + "_CPcluster")
-    data, flags = _assignChangePointCluster(
-        data,
-        field + "_CPcluster",
-        flags,
-        lambda x, y: np.abs(np.mean(x) - np.mean(y)),
-        lambda x, y: max_jump,
-        window=window,
-        min_periods=min_periods,
-    )
-    data, flags = _assignRegimeAnomaly(data, field, flags, field + "_CPcluster", spread)
-    data, flags = correctRegimeAnomaly(
-        data,
-        field,
-        flags,
-        field + "_CPcluster",
-        lambda x, p1: np.array([p1] * x.shape[0]),
-        tolerance=tolerance,
-    )
-    data, flags = dropField(data, field + "_CPcluster", flags)
-
-    return data, flags
+            else:
+                last_valid = 1
+
+        self._data[field] = data_ser
+        return self
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def correctOffset(
+        self: "SaQC",
+        field: str,
+        max_jump: float,
+        spread: float,
+        window: str,
+        min_periods: int,
+        tolerance: Optional[str] = None,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Corrects data offsets (level shifts) between value regimes.
+
+        Changepoints in the mean are detected with a sliding-window search, the data is
+        clustered into regimes, and anomalous regimes are shifted onto the level of the
+        "normal" group by a constant correction.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the data column, you want to correct.
+
+        max_jump : float
+            When searching for changepoints in the mean, this is the threshold a mean difference in the
+            sliding window search must exceed to trigger changepoint detection.
+
+        spread : float
+            Threshold denoting the maximum the regimes are allowed to differ absolutely in their means
+            in order to form the "normal group" of values.
+
+        window : str
+            Size of the adjacent windows that are used to search for the mean changepoints.
+
+        min_periods : int
+            Minimum number of periods a search window has to contain, for the result of the changepoint
+            detection to be considered valid.
+
+        tolerance : {None, str}, default None
+            If an offset string is passed, a data chunk of length `tolerance` right after the
+            start and right before the end of any regime is ignored when calculating the regime's mean for the
+            data correction. This is to account for the unreliability of data near the changepoints of regimes.
+
+        Returns
+        -------
+        saqc.SaQC
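+
+        Examples
+        --------
+        A hedged usage sketch (column name and parameter values are made up; it assumes ``qc``
+        already holds a column ``"sm"`` containing a temporary level shift):
+
+        >>> qc = qc.correctOffset(
+        ...     "sm", max_jump=5, spread=1, window="12h", min_periods=6
+        ... )  # doctest: +SKIP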
+        """
+        self = self.copyField(field, field + "_CPcluster")
+        self._data, self._flags = _assignChangePointCluster(
+            self._data,
+            field + "_CPcluster",
+            self._flags,
+            lambda x, y: np.abs(np.mean(x) - np.mean(y)),
+            lambda x, y: max_jump,
+            window=window,
+            min_periods=min_periods,
+        )
+        self._data, self._flags = _assignRegimeAnomaly(
+            self._data, field, self._flags, field + "_CPcluster", spread
+        )
+        self = self.correctRegimeAnomaly(
+            field,
+            field + "_CPcluster",
+            lambda x, p1: np.array([p1] * x.shape[0]),
+            tolerance=tolerance,
+        )
+        self = self.dropField(field + "_CPcluster")
+        return self
+
+    @flagging()
+    def flagRegimeAnomaly(
+        self: "SaQC",
+        field: str,
+        cluster_field: str,
+        spread: float,
+        method: LinkageString = "single",
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
+            np.nanmean(x) - np.nanmean(y)
+        ),
+        frac: float = 0.5,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flags anomalous value regimes of `field`.
+
+        "Normality" is determined in terms of a maximum spreading distance that regimes
+        must not exceed with respect to a certain metric and linkage method.
+
+        In addition, a group of regimes is only considered "normal" if it comprises
+        more than a fraction of `frac` of the valid samples in `field`.
+
+        Note that the regime changepoints must be detected prior to calling this function.
+
+        Note that it is possible to perform hypothesis tests for regime equality
+        by passing a p-value calculating function as `metric` and selecting linkage
+        method "complete".
+
+        Parameters
+        ----------
+        field : str
+            Name of the column to process
+
+        cluster_field : str
+            Column in data holding the cluster labels for the samples in `field`
+            (has to be indexed like `field`).
+
+        spread : float
+            A threshold denoting the value level up to which clusters are agglomerated.
+
+        method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+            The linkage method for hierarchical (agglomerative) clustering of the variables.
+
+        metric : Callable, default lambda x,y: np.abs(np.nanmean(x) - np.nanmean(y))
+            A metric function for calculating the dissimilarity between 2 regimes.
+            Defaults to the difference in mean.
+
+        frac : float
+            Has to be in [0, 1]. Determines the minimum fraction of samples the
+            "normal" group has to comprise in order to actually be the normal group.
+
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
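+
+        Examples
+        --------
+        A minimal, illustrative sketch (names and values are made up): regime labels are assumed
+        to have been assigned beforehand and stored in ``"sm_regimes"``. The small high-level
+        regime should get flagged, since its mean differs from the majority regime by more than
+        `spread`.
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=20, freq="1D")
+        >>> sm = pd.Series(np.r_[np.zeros(15), np.full(5, 10.0)], index=idx, name="sm")
+        >>> labels = pd.Series(
+        ...     np.r_[np.full(15, 1.0), np.full(5, 2.0)], index=idx, name="sm_regimes"
+        ... )
+        >>> qc = saqc.SaQC(pd.concat([sm, labels], axis=1))
+        >>> qc = qc.flagRegimeAnomaly("sm", cluster_field="sm_regimes", spread=1.0)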
+        """
+        reserverd = ["set_cluster", "set_flags"]
+        kwargs = filterKwargs(kwargs, reserverd)
+        self._data, self._flags = _assignRegimeAnomaly(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            cluster_field=cluster_field,
+            spread=spread,
+            method=method,
+            metric=metric,
+            frac=frac,
+            flag=flag,
+            **kwargs,
+            set_cluster=False,
+            set_flags=True,
+        )
+        return self
+
+    @register(mask=["field", "cluster_field"], demask=["cluster_field"], squeeze=[])
+    def assignRegimeAnomaly(
+        self: "SaQC",
+        field: str,
+        cluster_field: str,
+        spread: float,
+        method: LinkageString = "single",
+        metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
+            np.nanmean(x) - np.nanmean(y)
+        ),
+        frac: float = 0.5,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        A function to detect values belonging to an anomalous regime of `field`.
+
+        The function changes the cluster labels of anomalous regimes to negative values.
+        "Normality" is determined in terms of a maximum spreading distance that regimes must
+        not exceed with respect to a certain metric and linkage method. In addition,
+        a group of regimes is only considered "normal" if it comprises more than a fraction
+        of `frac` of the valid samples in `field`. Note that the regime changepoints must be
+        detected prior to calling this function. (They are expected to be stored in the
+        parameter `cluster_field`.)
+
+        Note that it is possible to perform hypothesis tests for regime equality by
+        passing a p-value calculating function as `metric` and selecting linkage
+        method "complete".
+
+        Parameters
+        ----------
+        field : str
+            Name of the column to process
+
+        cluster_field : str
+            Column in data holding the cluster labels for the samples in `field`
+            (has to be indexed like `field`).
+
+        spread : float
+            A threshold denoting the value level up to which clusters are agglomerated.
+
+        method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
+            The linkage method for hierarchical (agglomerative) clustering of the variables.
+
+        metric : Callable, default lambda x,y: np.abs(np.nanmean(x) - np.nanmean(y))
+            A metric function for calculating the dissimilarity between 2 regimes.
+            Defaults to the difference in mean.
+
+        frac : float
+            Has to be in [0, 1]. Determines the minimum fraction of samples the
+            "normal" group has to comprise in order to actually be the normal group.
+
+        Returns
+        -------
+        saqc.SaQC
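+
+        Examples
+        --------
+        A minimal, illustrative sketch (names and values are made up), mirroring the
+        ``flagRegimeAnomaly`` example: instead of being flagged, the labels of the anomalous
+        regime in ``"sm_regimes"`` should afterwards be negative.
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=20, freq="1D")
+        >>> sm = pd.Series(np.r_[np.zeros(15), np.full(5, 10.0)], index=idx, name="sm")
+        >>> labels = pd.Series(
+        ...     np.r_[np.full(15, 1.0), np.full(5, 2.0)], index=idx, name="sm_regimes"
+        ... )
+        >>> qc = saqc.SaQC(pd.concat([sm, labels], axis=1))
+        >>> qc = qc.assignRegimeAnomaly("sm", cluster_field="sm_regimes", spread=1.0)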
+        """
+        reserverd = ["set_cluster", "set_flags", "flag"]
+        kwargs = filterKwargs(kwargs, reserverd)
+        self._data, self._flags = _assignRegimeAnomaly(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            cluster_field=cluster_field,
+            spread=spread,
+            method=method,
+            metric=metric,
+            frac=frac,
+            **kwargs,
+            # control args
+            set_cluster=True,
+            set_flags=False,
+        )
+        return self
 
 
 def _driftFit(x, shift_target, cal_mean, driftModel):
@@ -617,165 +738,6 @@ def _driftFit(x, shift_target, cal_mean, driftModel):
     return data_fit, data_shift
 
 
-@flagging()
-def flagRegimeAnomaly(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    cluster_field: str,
-    spread: float,
-    method: LinkageString = "single",
-    metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
-        np.nanmean(x) - np.nanmean(y)
-    ),
-    frac: float = 0.5,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flags anomalous regimes regarding to modelling regimes of field.
-
-    "Normality" is determined in terms of a maximum spreading distance,
-    regimes must not exceed in respect to a certain metric and linkage method.
-
-    In addition, only a range of regimes is considered "normal", if it models
-    more then `frac` percentage of the valid samples in "field".
-
-    Note, that you must detect the regime changepoints prior to calling this function.
-
-    Note, that it is possible to perform hypothesis tests for regime equality
-    by passing the metric a function for p-value calculation and selecting linkage
-    method "complete".
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        Data to process
-    field : str
-        Name of the column to process
-    flags : saqc.Flags
-        Container to store flags of the data.
-    cluster_field : str
-        Column in data, holding the cluster labels for the samples in field.
-        (has to be indexed equal to field)
-    spread : float
-        A threshold denoting the value level, up to wich clusters a agglomerated.
-    method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
-        The linkage method for hierarchical (agglomerative) clustering of the variables.
-    metric : Callable, default lambda x,y: np.abs(np.nanmean(x) - np.nanmean(y))
-        A metric function for calculating the dissimilarity between 2 regimes.
-        Defaults to the difference in mean.
-    frac : float
-        Has to be in [0,1]. Determines the minimum percentage of samples,
-        the "normal" group has to comprise to be the normal group actually.
-    flag : float, default BAD
-        flag to set.
-
-    Returns
-    -------
-
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The flags object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flags input.
-    """
-    reserverd = ["set_cluster", "set_flags"]
-    kwargs = filterKwargs(kwargs, reserverd)
-    return _assignRegimeAnomaly(
-        data=data,
-        field=field,
-        flags=flags,
-        cluster_field=cluster_field,
-        spread=spread,
-        method=method,
-        metric=metric,
-        frac=frac,
-        flag=flag,
-        **kwargs,
-        set_cluster=False,
-        set_flags=True,
-    )
-
-
-@register(mask=["field", "cluster_field"], demask=["cluster_field"], squeeze=[])
-def assignRegimeAnomaly(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    cluster_field: str,
-    spread: float,
-    method: LinkageString = "single",
-    metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
-        np.nanmean(x) - np.nanmean(y)
-    ),
-    frac: float = 0.5,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    A function to detect values belonging to an anomalous regime regarding modelling
-    regimes of field.
-
-    The function changes the value of the regime cluster labels to be negative.
-    "Normality" is determined in terms of a maximum spreading distance, regimes must
-    not exceed in respect to a certain metric and linkage method. In addition,
-    only a range of regimes is considered "normal", if it models more then `frac`
-    percentage of the valid samples in "field". Note, that you must detect the regime
-    changepoints prior to calling this function. (They are expected to be stored
-    parameter `cluster_field`.)
-
-    Note, that it is possible to perform hypothesis tests for regime equality by
-    passing the metric a function for p-value calculation and selecting linkage
-    method "complete".
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        Data to process
-    field : str
-        Name of the column to process
-    flags : saqc.Flags
-        Container to store flags of the data.
-    cluster_field : str
-        Column in data, holding the cluster labels for the samples in field.
-        (has to be indexed equal to field)
-    spread : float
-        A threshold denoting the value level, up to wich clusters a agglomerated.
-    method : {"single", "complete", "average", "weighted", "centroid", "median", "ward"}, default "single"
-        The linkage method for hierarchical (agglomerative) clustering of the variables.
-    metric : Callable, default lambda x,y: np.abs(np.nanmean(x) - np.nanmean(y))
-        A metric function for calculating the dissimilarity between 2 regimes.
-        Defaults to the difference in mean.
-    frac : float
-        Has to be in [0,1]. Determines the minimum percentage of samples,
-        the "normal" group has to comprise to be the normal group actually.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The flags object, holding flags and additional informations related to `data`.
-        Flags values may have changed, relatively to the flags input.
-    """
-    reserverd = ["set_cluster", "set_flags", "flag"]
-    kwargs = filterKwargs(kwargs, reserverd)
-    return _assignRegimeAnomaly(
-        data=data,
-        field=field,
-        flags=flags,
-        cluster_field=cluster_field,
-        spread=spread,
-        method=method,
-        metric=metric,
-        frac=frac,
-        **kwargs,
-        # control args
-        set_cluster=True,
-        set_flags=False,
-    )
-
-
 def _assignRegimeAnomaly(
     data: DictOfSeries,
     field: str,
@@ -783,7 +745,7 @@ def _assignRegimeAnomaly(
     cluster_field: str,
     spread: float,
     method: LinkageString = "single",
-    metric: Callable[[np.array, np.array], float] = lambda x, y: np.abs(
+    metric: Callable[[np.ndarray, np.ndarray], float] = lambda x, y: np.abs(
         np.nanmean(x) - np.nanmean(y)
     ),
     frac: float = 0.5,
diff --git a/saqc/funcs/flagtools.py b/saqc/funcs/flagtools.py
index effa0e1dbae6bc3934c7ad1428f62ee6b740aa25..d253dc8ffaa12eaceb8f90af35f65c764a88b0cf 100644
--- a/saqc/funcs/flagtools.py
+++ b/saqc/funcs/flagtools.py
@@ -8,7 +8,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Any, Union
 
 import numpy as np
 import pandas as pd
@@ -16,553 +16,510 @@ from typing_extensions import Literal
 
 from dios import DictOfSeries
 from saqc.constants import BAD, FILTER_ALL, UNFLAGGED
-from saqc.core.flags import Flags
 from saqc.core.register import _isflagged, flagging, register
-from saqc.funcs.resampling import concatFlags
-
-
-@register(mask=[], demask=[], squeeze=["field"])
-def forceFlags(
-    data: DictOfSeries, field: str, flags: Flags, flag: float = BAD, **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Set whole column to a flag value.
-
-    Parameters
-    ----------
-    data : DictOfSeries
-        data container
-    field : str
-        columns name that holds the data
-    flags : saqc.Flags
-        flags object
-    flag : float, default BAD
-        flag to set
-    kwargs : dict
-        unused
-
-    Returns
-    -------
-    data : DictOfSeries
-    flags : saqc.Flags
-
-    See Also
-    --------
-    clearFlags : set whole column to UNFLAGGED
-    flagUnflagged : set flag value at all unflagged positions
-    """
-    flags[:, field] = flag
-    return data, flags
-
-
-@register(mask=[], demask=[], squeeze=["field"])
-def clearFlags(
-    data: DictOfSeries, field: str, flags: Flags, **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Set whole column to UNFLAGGED.
-
-    Parameters
-    ----------
-    data : DictOfSeries
-        data container
-    field : str
-        columns name that holds the data
-    flags : saqc.Flags
-        flags object
-    kwargs : dict
-        unused
-
-    Returns
-    -------
-    data : DictOfSeries
-    flags : saqc.Flags
-
-    Notes
-    -----
-    This function ignores the ``dfilter`` keyword, because the data is not relevant
-    for processing.
-    A warning is triggered if the ``flag`` keyword is given, because the flags are
-    always set to `UNFLAGGED`.
-
-
-    See Also
-    --------
-    forceFlags : set whole column to a flag value
-    flagUnflagged : set flag value at all unflagged positions
-    """
-    # NOTE: do we really need this?
-    if "flag" in kwargs:
-        kwargs = {**kwargs}  # copy
-        flag = kwargs.pop("flag")
-        warnings.warn(f"`flag={flag}` is ignored here.")
-
-    return forceFlags(data, field, flags, flag=UNFLAGGED, **kwargs)
-
-
-@register(mask=[], demask=[], squeeze=["field"])
-def flagUnflagged(
-    data: DictOfSeries, field: str, flags: Flags, flag: float = BAD, **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function sets a flag at all unflagged positions.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        A flags object, holding flags and additional informations related to `data`.
-    flag : float, default BAD
-        flag value to set
-    kwargs : Dict
-        unused
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-
-    Notes
-    -----
-    This function ignores the ``dfilter`` keyword, because the data is not relevant
-    for processing.
-
-    See Also
-    --------
-    clearFlags : set whole column to UNFLAGGED
-    forceFlags : set whole column to a flag value
-    """
-    unflagged = flags[field].isna() | (flags[field] == UNFLAGGED)
-    flags[unflagged, field] = flag
-    return data, flags
-
-
-@register(mask=["field"], demask=["field"], squeeze=["field"])
-def flagManual(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    mdata: Union[pd.Series, pd.DataFrame, DictOfSeries, list, np.array],
-    method: Literal[
-        "left-open", "right-open", "closed", "plain", "ontime"
-    ] = "left-open",
-    mformat: Literal["start-end", "mflag"] = "start-end",
-    mflag: Any = 1,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag data by given, "manually generated" data.
-
-    The data is flagged at locations where `mdata` is equal to a provided flag (`mflag`).
-    The format of mdata can be an indexed object, like pd.Series, pd.Dataframe or dios.DictOfSeries,
-    but also can be a plain list- or array-like.
-    How indexed mdata is aligned to data is specified via the `method` parameter.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        A flags object, holding flags and additional informations related to `data`.
-    mdata : pd.Series, pd.DataFrame, DictOfSeries, str, list or np.ndarray
-        The Data determining, wich intervals are to be flagged, or a string, denoting under which field the data is
-        accessable.
-    method : {'plain', 'ontime', 'left-open', 'right-open', 'closed'}, default 'plain'
-        Defines how mdata is projected on data. Except for the 'plain' method, the methods assume mdata to have an
-        index.
-
-        * 'plain': mdata must have the same length as data and is projected one-to-one on data.
-        * 'ontime': works only with indexed mdata. mdata entries are matched with data entries that have the same index.
-        * 'right-open': mdata defines intervals, values are to be projected on.
-          The intervals are defined,
-
-          (1) Either, by any two consecutive timestamps t_1 and 1_2 where t_1 is valued with mflag, or by a series,
-          (2) Or, a Series, where the index contains in the t1 timestamps nd the values the respective t2 stamps.
-
-          The value at t_1 gets projected onto all data timestamps t with t_1 <= t < t_2.
-
-        * 'left-open': like 'right-open', but the projected interval now covers all t with t_1 < t <= t_2.
-        * 'closed': like 'right-open', but the projected interval now covers all t with t_1 <= t <= t_2.
-
-    mformat : {"start-end", "mflag"}, default "start-end"
-
-        * "start-end": mdata is a Series, where every entry indicates an interval to-flag. The index defines the left
-          bound, the value defines the right bound.
-        * "mflag": mdata is an array like, with entries containing 'mflag',where flags shall be set. See documentation
-          for examples.
-
-    mflag : scalar
-        The flag that indicates data points in `mdata`, of wich the projection in data should be flagged.
-    flag : float, default BAD
-        flag to set.
-
-    Returns
-    -------
-    data : original data
-    flags : modified flags
-
-    Examples
-    --------
-    An example for mdata
-
-    .. doctest:: ExampleFlagManual
-
-       >>> mdata = pd.Series([1, 0, 1], index=pd.to_datetime(['2000-02-01', '2000-03-01', '2000-05-01']))
-       >>> mdata
-       2000-02-01    1
-       2000-03-01    0
-       2000-05-01    1
-       dtype: int64
-
-    On *dayly* data, with the 'ontime' method, only the provided timestamps are used.
-    Bear in mind that only exact timestamps apply, any offset will result in ignoring
-    the timestamp.
-
-    .. doctest:: ExampleFlagManual
-
-       >>> data = pd.Series(0, index=pd.to_datetime(['2000-01-31', '2000-02-01', '2000-02-02', '2000-03-01', '2000-05-01']), name='daily_data')
-       >>> qc = saqc.SaQC(data)
-       >>> qc = qc.flagManual('daily_data', mdata, mflag=1, mformat='mdata', method='ontime')
-       >>> qc.flags['daily_data'] > UNFLAGGED
-       2000-01-31    False
-       2000-02-01     True
-       2000-02-02    False
-       2000-03-01    False
-       2000-05-01     True
-       Name: daily_data, dtype: bool
-
-    With the 'right-open' method, the mdata is forward fill:
-
-    .. doctest:: ExampleFlagManual
-
-       >>> qc = qc.flagManual('daily_data', mdata, mflag=1, mformat='mdata', method='right-open')
-       >>> qc.flags['daily_data'] > UNFLAGGED
-       2000-01-31    False
-       2000-02-01     True
-       2000-02-02     True
-       2000-03-01    False
-       2000-05-01     True
-       Name: daily_data, dtype: bool
-
-    With the 'left-open' method, backward filling is used:
-
-    .. doctest:: ExampleFlagManual
-
-       >>> qc = qc.flagManual('daily_data', mdata, mflag=1, mformat='mdata', method='left-open')
-       >>> qc.flags['daily_data'] > UNFLAGGED
-       2000-01-31    False
-       2000-02-01     True
-       2000-02-02     True
-       2000-03-01     True
-       2000-05-01     True
-       Name: daily_data, dtype: bool
-    """
-    dat = data[field]
-    # internal not-mflag-value -> cant go for np.nan
-    not_mflag = -1 if mflag == 0 else 0
-    if isinstance(mdata, str):
-        mdata = data[mdata]
-
-    if isinstance(mdata, (pd.DataFrame, DictOfSeries)):
-        mdata = mdata[field]
-
-    hasindex = isinstance(mdata, (pd.Series, pd.DataFrame, DictOfSeries))
-    if not hasindex:
-        if method != "plain":
-            raise ValueError("mdata has no index")
-        else:
-            mdata = pd.Series(mdata, index=dat.index)
-
-    # check, if intervals where passed in format (index:start-time, data:end-time)
-    if mformat == "start-end":
-        if method in ["plain", "ontime"]:
-            raise ValueError(
-                "'Start-End' formatting not compatible to 'plain' or 'ontime' methods"
-            )
+
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
+class FlagtoolsMixin:
+    @flagging()
+    def flagDummy(self: "SaQC", field: str, **kwargs) -> "SaQC":
+        """
+        Function does nothing but return the unmodified SaQC object.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        return self
+
+    @register(mask=[], demask=[], squeeze=["field"])
+    def forceFlags(self: "SaQC", field: str, flag: float = BAD, **kwargs) -> "SaQC":
+        """
+        Set whole column to a flag value.
+
+        Parameters
+        ----------
+        field : str
+            Name of the column that holds the data.
+
+        flag : float, default BAD
+            flag to set
+
+        kwargs : dict
+            unused
+
+        Returns
+        -------
+        saqc.SaQC
+
+        See Also
+        --------
+        clearFlags : set whole column to UNFLAGGED
+        flagUnflagged : set flag value at all unflagged positions
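+
+        Examples
+        --------
+        A minimal, illustrative sketch (the column name ``"sm"`` is made up):
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> data = pd.DataFrame(
+        ...     {"sm": [1.0, 2.0, 3.0]}, index=pd.date_range("2021-01-01", periods=3)
+        ... )
+        >>> qc = saqc.SaQC(data)
+        >>> qc = qc.forceFlags("sm")  # set every timestamp of "sm" to the BAD flag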
+        """
+        self._flags[:, field] = flag
+        return self
+
+    @register(mask=[], demask=[], squeeze=["field"])
+    def clearFlags(self: "SaQC", field: str, **kwargs) -> "SaQC":
+        """
+        Set whole column to UNFLAGGED.
+
+        Parameters
+        ----------
+        field : str
+            column name that holds the data
+
+        kwargs : dict
+            unused
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        This function ignores the ``dfilter`` keyword, because the data is not relevant
+        for processing.
+        A warning is triggered if the ``flag`` keyword is given, because the flags are
+        always set to `UNFLAGGED`.
+
+        See Also
+        --------
+        forceFlags : set whole column to a flag value
+        flagUnflagged : set flag value at all unflagged positions
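+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only, not a verified doctest; the column
+        name ``'a'`` is made up):
+
+        .. code-block:: python
+
+           import pandas as pd
+           import saqc
+
+           qc = saqc.SaQC(pd.DataFrame({'a': [1, 2, 3]}))
+           qc = qc.flagRange('a', max=1.5)  # flag some values first
+           qc = qc.clearFlags('a')          # reset the whole column to UNFLAGGED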
+        """
+        # NOTE: do we really need this?
+        if "flag" in kwargs:
+            kwargs = {**kwargs}  # copy
+            flag = kwargs.pop("flag")
+            warnings.warn(f"`flag={flag}` is ignored here.")
+
+        return self.forceFlags(field, flag=UNFLAGGED, **kwargs)
+
+    @register(mask=[], demask=[], squeeze=["field"])
+    def flagUnflagged(self: "SaQC", field: str, flag: float = BAD, **kwargs) -> "SaQC":
+        """
+        Function sets a flag at all unflagged positions.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+
+        flag : float, default BAD
+            flag value to set
+
+        kwargs : Dict
+            unused
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        This function ignores the ``dfilter`` keyword, because the data is not relevant
+        for processing.
+
+        See Also
+        --------
+        clearFlags : set whole column to UNFLAGGED
+        forceFlags : set whole column to a flag value
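+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only, not a verified doctest; the column
+        name ``'a'`` is made up):
+
+        .. code-block:: python
+
+           import pandas as pd
+           import saqc
+
+           qc = saqc.SaQC(pd.DataFrame({'a': [1, 2, 3]}))
+           qc = qc.flagRange('a', max=1.5)       # flags the values 2 and 3
+           qc = qc.flagUnflagged('a', flag=111)  # the so far unflagged value 1 receives 111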
+        """
+        unflagged = self._flags[field].isna() | (self._flags[field] == UNFLAGGED)
+        self._flags[unflagged, field] = flag
+        return self
+
+    @register(mask=["field"], demask=["field"], squeeze=["field"])
+    def flagManual(
+        self: "SaQC",
+        field: str,
+        mdata: Union[pd.Series, pd.DataFrame, DictOfSeries, list, np.ndarray],
+        method: Literal[
+            "left-open", "right-open", "closed", "plain", "ontime"
+        ] = "left-open",
+        mformat: Literal["start-end", "mflag"] = "start-end",
+        mflag: Any = 1,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag data by given, "manually generated" data.
+
+        The data is flagged at locations where `mdata` is equal to a provided flag (`mflag`).
+        The format of `mdata` can be an indexed object, like pd.Series, pd.DataFrame or dios.DictOfSeries,
+        but it can also be a plain list- or array-like.
+        How indexed `mdata` is aligned to the data is specified via the `method` parameter.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+
+        mdata : pd.Series, pd.DataFrame, DictOfSeries, str, list or np.ndarray
+            The data determining which intervals are to be flagged, or a string denoting the field under which such
+            data is accessible.
+
+        method : {'plain', 'ontime', 'left-open', 'right-open', 'closed'}, default 'left-open'
+            Defines how mdata is projected on data. Except for the 'plain' method, the methods assume mdata to have an
+            index.
+
+            * 'plain': mdata must have the same length as data and is projected one-to-one on data.
+            * 'ontime': works only with indexed mdata. mdata entries are matched with data entries that have the same index.
+            * 'right-open': mdata defines intervals, values are to be projected on.
+              The intervals are defined,
+
+              (1) either by any two consecutive timestamps t_1 and t_2, where t_1 is valued with mflag, or
+              (2) by a Series, where the index holds the t_1 timestamps and the values hold the respective t_2 timestamps.
+
+              The value at t_1 gets projected onto all data timestamps t with t_1 <= t < t_2.
+
+            * 'left-open': like 'right-open', but the projected interval now covers all t with t_1 < t <= t_2.
+            * 'closed': like 'right-open', but the projected interval now covers all t with t_1 <= t <= t_2.
+
+        mformat : {"start-end", "mflag"}, default "start-end"
+
+            * "start-end": mdata is a Series, where every entry indicates an interval to-flag. The index defines the left
+              bound, the value defines the right bound.
+            * "mflag": mdata is an array like, with entries containing 'mflag',where flags shall be set. See documentation
+              for examples.
+
+        mflag : scalar
+            The flag that indicates data points in `mdata` whose projection onto the data should be flagged.
+
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Examples
+        --------
+        An example for mdata
+
+        .. doctest:: ExampleFlagManual
+
+           >>> mdata = pd.Series([1, 0, 1], index=pd.to_datetime(['2000-02-01', '2000-03-01', '2000-05-01']))
+           >>> mdata
+           2000-02-01    1
+           2000-03-01    0
+           2000-05-01    1
+           dtype: int64
+
+        On *daily* data, with the 'ontime' method, only the provided timestamps are used.
+        Bear in mind that only exact timestamps apply; any offset will result in ignoring
+        the timestamp.
+
+        .. doctest:: ExampleFlagManual
+
+           >>> data = pd.Series(0, index=pd.to_datetime(['2000-01-31', '2000-02-01', '2000-02-02', '2000-03-01', '2000-05-01']), name='daily_data')
+           >>> qc = saqc.SaQC(data)
+           >>> qc = qc.flagManual('daily_data', mdata, mflag=1, mformat='mflag', method='ontime')
+           >>> qc.flags['daily_data'] > UNFLAGGED
+           2000-01-31    False
+           2000-02-01     True
+           2000-02-02    False
+           2000-03-01    False
+           2000-05-01     True
+           Name: daily_data, dtype: bool
+
+        With the 'right-open' method, the mdata is forward filled:
+
+        .. doctest:: ExampleFlagManual
+
+           >>> qc = qc.flagManual('daily_data', mdata, mflag=1, mformat='mflag', method='right-open')
+           >>> qc.flags['daily_data'] > UNFLAGGED
+           2000-01-31    False
+           2000-02-01     True
+           2000-02-02     True
+           2000-03-01    False
+           2000-05-01     True
+           Name: daily_data, dtype: bool
+
+        With the 'left-open' method, backward filling is used:
+
+        .. doctest:: ExampleFlagManual
+
+           >>> qc = qc.flagManual('daily_data', mdata, mflag=1, mformat='mflag', method='left-open')
+           >>> qc.flags['daily_data'] > UNFLAGGED
+           2000-01-31    False
+           2000-02-01     True
+           2000-02-02     True
+           2000-03-01     True
+           2000-05-01     True
+           Name: daily_data, dtype: bool
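+
+        The 'start-end' format expects a Series whose index holds the interval start
+        timestamps and whose values hold the corresponding interval ends. A sketch
+        continuing the example above (not a verified doctest; the interval bounds are
+        made up):
+
+        .. code-block:: python
+
+           intervals = pd.Series(
+               pd.to_datetime(['2000-02-02', '2000-05-01']),         # interval ends
+               index=pd.to_datetime(['2000-02-01', '2000-03-01']),   # interval starts
+           )
+           qc = qc.flagManual('daily_data', intervals, mformat='start-end', method='closed')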
+        """
+        dat = self._data[field]
+        # internal not-mflag-value -> cant go for np.nan
+        not_mflag = -1 if mflag == 0 else 0
+        if isinstance(mdata, str):
+            mdata = self._data[mdata]
+
+        if isinstance(mdata, (pd.DataFrame, DictOfSeries)):
+            mdata = mdata[field]
+
+        hasindex = isinstance(mdata, (pd.Series, pd.DataFrame, DictOfSeries))
+        if not hasindex:
+            if method != "plain":
+                raise ValueError("mdata has no index")
+            else:
+                mdata = pd.Series(mdata, index=dat.index)
+
+        # check, if intervals where passed in format (index:start-time, data:end-time)
+        if mformat == "start-end":
+            if method in ["plain", "ontime"]:
+                raise ValueError(
+                    "'Start-End' formatting not compatible to 'plain' or 'ontime' methods"
+                )
+            else:
+                mdata = pd.Series(
+                    not_mflag,
+                    index=mdata.index.join(pd.DatetimeIndex(mdata.values), how="outer"),
+                )
+                mdata[::2] = mflag
+
+        # get rid of values that are neither mflag nor not_mflag (for backwards compatibility mainly)
+        mdata[mdata != mflag] = not_mflag
+
+        # evaluate methods
+        if method == "plain":
+            pass
+        # reindex will do the job later
+        elif method == "ontime":
+            pass
+
+        elif method in ["left-open", "right-open", "closed"]:
+            mdata = mdata.drop(mdata.index[mdata.diff() == 0])
+            app_entry = pd.Series(mdata[-1], dat.index.shift(freq="1min")[-1:])
+            mdata = mdata.reindex(dat.index.union(mdata.index))
+
+            if method == "right-open":
+                mdata = mdata.ffill()
+
+            if method == "left-open":
+                mdata = pd.concat(
+                    [mdata.replace({mflag: not_mflag, not_mflag: mflag}), app_entry]
+                ).bfill()
+
+            if method == "closed":
+                mdata[mdata.ffill() == mflag] = mflag
+                mdata.replace({not_mflag: mflag}, inplace=True)
         else:
-            mdata = pd.Series(
-                not_mflag,
-                index=mdata.index.join(pd.DatetimeIndex(mdata.values), how="outer"),
-            )
-            mdata[::2] = mflag
-
-    # get rid of values that are neither mflag nor not_mflag (for bw-compatibillity mainly)
-    mdata[mdata != mflag] = not_mflag
-
-    # evaluate methods
-    if method == "plain":
-        pass
-    # reindex will do the job later
-    elif method == "ontime":
-        pass
-
-    elif method in ["left-open", "right-open", "closed"]:
-        mdata = mdata.drop(mdata.index[mdata.diff() == 0])
-        app_entry = pd.Series(mdata[-1], dat.index.shift(freq="1min")[-1:])
-        mdata = mdata.reindex(dat.index.union(mdata.index))
-
-        if method == "right-open":
-            mdata = mdata.ffill()
-
-        if method == "left-open":
-            mdata = pd.concat(
-                [mdata.replace({mflag: not_mflag, not_mflag: mflag}), app_entry]
-            ).bfill()
-
-        if method == "closed":
-            mdata[mdata.ffill() == mflag] = mflag
-            mdata.replace({not_mflag: mflag}, inplace=True)
-    else:
-        raise ValueError(method)
-
-    mask = mdata == mflag
-    mask = mask.reindex(dat.index).fillna(False)
-
-    flags[mask, field] = flag
-    return data, flags
-
-
-@flagging()
-def flagDummy(
-    data: DictOfSeries, field: str, flags: Flags, **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function does nothing but returning data and flags.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        A flags object, holding flags and additional informations related to `data`.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    return data, flags
-
-
-@register(
-    mask=[],
-    demask=[],
-    squeeze=["target"],
-    handles_target=True,
-)
-def transferFlags(
-    data: DictOfSeries,
-    field: str | Sequence[str],
-    flags: Flags,
-    target: str | Sequence[str],
-    **kwargs,
-):
-    """
-    Transfer Flags of one variable to another.
-
-    Parameters
-    ----------
-
-    data : {pd.DataFrame, dios.DictOfSeries}
-        data
-
-    field : str or List of str
-       Variable or list of variables, the flags of which are to be transferred.
-
-    flags : {pd.DataFrame, dios.DictOfSeries, saqc.flagger}
-        Flags or flagger object
-
-    target : str or List of str
-        Variable or list of variables, the flags of `field` are to be transferred to.
-
-    See Also
-    --------
-    * :py:meth:`saqc.SaQC.flagGeneric`
-    * :py:meth:`saqc.SaQC.concatFlags`
-
-    Examples
-    --------
-    First, generate some data with some flags:
-
-    .. doctest:: exampleTransfer
-
-       >>> data = pd.DataFrame({'a': [1, 2], 'b': [1, 2], 'c': [1, 2]})
-       >>> qc = saqc.SaQC(data)
-       >>> qc = qc.flagRange('a', max=1.5)
-       >>> qc.flags.to_df()
-       columns      a    b    c
-       0         -inf -inf -inf
-       1        255.0 -inf -inf
-
-    Now we can project the flag from `a` to `b` via
-
-    .. doctest:: exampleTransfer
-
-       >>> qc = qc.transferFlags('a', target='b')
-       >>> qc.flags.to_df()
-       columns      a      b    c
-       0         -inf   -inf -inf
-       1        255.0  255.0 -inf
-
-    You can skip the explicit target parameter designation:
-
-    .. doctest:: exampleTransfer
-
-       >>> qc = qc.transferFlags('a', 'b')
-
-    To project the flags of `a` to both the variables `b` and `c` in one call, align the field and target variables in
-    2 lists:
-
-    .. doctest:: exampleTransfer
-
-       >>> qc = qc.transferFlags(['a','a'], ['b', 'c'])
-       >>> qc.flags.to_df()
-       columns      a      b      c
-       0         -inf   -inf   -inf
-       1        255.0  255.0  255.0
-    """
-
-    data, flags = concatFlags(
-        data, field, flags, target=target, method="match", squeeze=False
-    )
-    return data, flags
-
-
-@flagging()
-def propagateFlags(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Union[str, int],
-    method: Literal["ffill", "bfill"] = "ffill",
-    flag: float = BAD,
-    dfilter: float = FILTER_ALL,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag values before or after flags set by the last test.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        A flags object, holding flags and additional informations related to `data`.
-    window : int, str
-        Size of the repetition window. An integer defines the exact number of repetitions,
-        strings are interpreted as time offsets to fill with .
-    method : {"ffill", "bfill"}
-        Direction of repetetion. With "ffill" the subsequent values receive the flag to
-        repeat, with "bfill" the previous values.
-    flag : float, default BAD
-        Flag to set.
-    dfilter : float, default FILTER_ALL
-        Threshold flag.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-
-    Examples
-    --------
-    First, generate some data and some flags:
-
-    .. doctest:: propagateFlags
-
-       >>> data = pd.DataFrame({"a": [-3, -2, -1, 0, 1, 2, 3]})
-       >>> flags = pd.DataFrame({"a": [-np.inf, -np.inf, -np.inf, 255.0, -np.inf, -np.inf, -np.inf]})
-       >>> qc = saqc.SaQC(data=data, flags=flags)
-       >>> qc.flags["a"]
-       0     -inf
-       1     -inf
-       2     -inf
-       3    255.0
-       4     -inf
-       5     -inf
-       6     -inf
-       Name: a, dtype: float64
-
-    Now, to repeat the flag '255.0' two times in direction of ascending indices, execute:
-
-    .. doctest:: propagateFlags
-
-       >>> qc.propagateFlags('a', window=2, method="ffill").flags["a"]
-       0     -inf
-       1     -inf
-       2     -inf
-       3    255.0
-       4    255.0
-       5    255.0
-       6     -inf
-       Name: a, dtype: float64
-
-    Choosing "bfill" will result in
-
-    .. doctest:: propagateFlags
-
-       >>> qc.propagateFlags('a', window=2, method="bfill").flags["a"]
-       0     -inf
-       1    255.0
-       2    255.0
-       3    255.0
-       4     -inf
-       5     -inf
-       6     -inf
-       Name: a, dtype: float64
-
-    If an explicit flag is passed, it will be used to fill the repetition window
-
-    .. doctest:: propagateFlags
-
-       >>> qc.propagateFlags('a', window=2, method="bfill", flag=111).flags["a"]
-       0     -inf
-       1    111.0
-       2    111.0
-       3    255.0
-       4     -inf
-       5     -inf
-       6     -inf
-       Name: a, dtype: float64
-    """
-
-    if method not in {"bfill", "ffill"}:
-        raise ValueError(f"supported methods are 'bfill', 'ffill', got '{method}'")
-
-    # get the last history column
-    hc = flags.history[field].hist.iloc[:, -1].astype(float)
-
-    if method == "bfill":
-        hc = hc[::-1]
-
-    flagged = _isflagged(hc, dfilter)
-
-    repeated = (
-        flagged.rolling(window, min_periods=1, closed="left")
-        .max()
-        .fillna(0)
-        .astype(bool)
-    )
+            raise ValueError(method)
+
+        mask = mdata == mflag
+        mask = mask.reindex(dat.index).fillna(False)
 
-    if method == "bfill":
-        repeated = repeated[::-1]
+        self._flags[mask, field] = flag
+        return self
 
-    flags[repeated, field] = flag
+    @register(
+        mask=[],
+        demask=[],
+        squeeze=["target"],
+        handles_target=True,  # function defines a target parameter, so it needs to handle it
+    )
+    def transferFlags(
+        self: "SaQC",
+        field: str,
+        target: str,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Transfer Flags of one variable to another.
+
+        Parameters
+        ----------
+
+        field : str or List of str
+           Variable or list of variables, the flags of which are to be transferred.
 
-    return data, flags
+        target : str or List of str
+            Variable or list of variables, the flags of `field` are to be transferred to.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        See Also
+        --------
+        * :py:meth:`saqc.SaQC.flagGeneric`
+        * :py:meth:`saqc.SaQC.concatFlags`
+
+        Examples
+        --------
+        First, generate some data with some flags:
+
+        .. doctest:: exampleTransfer
+
+           >>> data = pd.DataFrame({'a': [1, 2], 'b': [1, 2], 'c': [1, 2]})
+           >>> qc = saqc.SaQC(data)
+           >>> qc = qc.flagRange('a', max=1.5)
+           >>> qc.flags.to_df()
+           columns      a    b    c
+           0         -inf -inf -inf
+           1        255.0 -inf -inf
+
+        Now we can project the flag from `a` to `b` via
+
+        .. doctest:: exampleTransfer
+
+           >>> qc = qc.transferFlags('a', target='b')
+           >>> qc.flags.to_df()
+           columns      a      b    c
+           0         -inf   -inf -inf
+           1        255.0  255.0 -inf
+
+        You can skip the explicit target parameter designation:
+
+        .. doctest:: exampleTransfer
+
+           >>> qc = qc.transferFlags('a', 'b')
+
+        To project the flags of `a` to both the variables `b` and `c` in one call, align the field and target variables in
+        2 lists:
+
+        .. doctest:: exampleTransfer
+
+           >>> qc = qc.transferFlags(['a','a'], ['b', 'c'])
+           >>> qc.flags.to_df()
+           columns      a      b      c
+           0         -inf   -inf   -inf
+           1        255.0  255.0  255.0
+        """
+
+        return self.concatFlags(field, target=target, method="match", squeeze=False)
+
+    @flagging()
+    def propagateFlags(
+        self: "SaQC",
+        field: str,
+        window: Union[str, int],
+        method: Literal["ffill", "bfill"] = "ffill",
+        flag: float = BAD,
+        dfilter: float = FILTER_ALL,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag values before or after flags set by the last test.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+
+        window : int, str
+            Size of the repetition window. An integer defines the exact number of repetitions,
+            a string is interpreted as a time offset that determines the window to fill.
+
+        method : {"ffill", "bfill"}
+            Direction of repetition. With "ffill" the subsequent values receive the flag to
+            repeat, with "bfill" the previous values.
+
+        flag : float, default BAD
+            Flag to set.
+
+        dfilter : float, default FILTER_ALL
+            Threshold flag.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Examples
+        --------
+        First, generate some data and some flags:
+
+        .. doctest:: propagateFlags
+
+           >>> data = pd.DataFrame({"a": [-3, -2, -1, 0, 1, 2, 3]})
+           >>> flags = pd.DataFrame({"a": [-np.inf, -np.inf, -np.inf, 255.0, -np.inf, -np.inf, -np.inf]})
+           >>> qc = saqc.SaQC(data=data, flags=flags)
+           >>> qc.flags["a"]
+           0     -inf
+           1     -inf
+           2     -inf
+           3    255.0
+           4     -inf
+           5     -inf
+           6     -inf
+           Name: a, dtype: float64
+
+        Now, to repeat the flag '255.0' two times in the direction of ascending indices, execute:
+
+        .. doctest:: propagateFlags
+
+           >>> qc.propagateFlags('a', window=2, method="ffill").flags["a"]
+           0     -inf
+           1     -inf
+           2     -inf
+           3    255.0
+           4    255.0
+           5    255.0
+           6     -inf
+           Name: a, dtype: float64
+
+        Choosing "bfill" will result in
+
+        .. doctest:: propagateFlags
+
+           >>> qc.propagateFlags('a', window=2, method="bfill").flags["a"]
+           0     -inf
+           1    255.0
+           2    255.0
+           3    255.0
+           4     -inf
+           5     -inf
+           6     -inf
+           Name: a, dtype: float64
+
+        If an explicit flag is passed, it will be used to fill the repetition window
+
+        .. doctest:: propagateFlags
+
+           >>> qc.propagateFlags('a', window=2, method="bfill", flag=111).flags["a"]
+           0     -inf
+           1    111.0
+           2    111.0
+           3    255.0
+           4     -inf
+           5     -inf
+           6     -inf
+           Name: a, dtype: float64
+        """
+
+        if method not in {"bfill", "ffill"}:
+            raise ValueError(f"supported methods are 'bfill', 'ffill', got '{method}'")
+
+        # get the last history column
+        hc = self._flags.history[field].hist.iloc[:, -1].astype(float)
+
+        if method == "bfill":
+            hc = hc[::-1]
+
+        # get dfilter from meta or get rid of this and
+        # consider everything != np.nan as flag
+        flagged = _isflagged(hc, dfilter)
+
+        repeated = (
+            flagged.rolling(window, min_periods=1, closed="left")
+            .max()
+            .fillna(0)
+            .astype(bool)
+        )
+
+        if method == "bfill":
+            repeated = repeated[::-1]
+
+        self._flags[repeated, field] = flag
+
+        return self
diff --git a/saqc/funcs/generic.py b/saqc/funcs/generic.py
index 53b0d77c6e34dbe8905ff1ccf2f2c2771274048f..a4125acab22ab247d8a25404cd37f5266c66215e 100644
--- a/saqc/funcs/generic.py
+++ b/saqc/funcs/generic.py
@@ -7,7 +7,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Sequence, Tuple, Union
 
 import numpy as np
 import pandas as pd
@@ -16,10 +16,13 @@ from dios import DictOfSeries
 from saqc.constants import BAD, ENVIRONMENT, FILTER_ALL
 from saqc.core.flags import Flags
 from saqc.core.history import History
-from saqc.core.register import FunctionWrapper, _isflagged, register
+from saqc.core.register import _isflagged, _maskData, register
 from saqc.lib.tools import toSequence
 from saqc.lib.types import GenericFunction, PandasLike
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
 
 def _flagSelect(field, flags, label=None):
     if label is None:
@@ -48,7 +51,7 @@ def _prepare(
     fchunk = Flags({f: flags[f] for f in columns})
     for f in fchunk.columns:
         fchunk.history[f] = flags.history[f]
-    dchunk, _ = FunctionWrapper._maskData(
+    dchunk, _ = _maskData(
         data=data.loc[:, columns].copy(), flags=fchunk, columns=columns, thresh=dfilter
     )
     return dchunk, fchunk.copy()
@@ -83,257 +86,248 @@ def _execGeneric(
     return DictOfSeries(out)
 
 
-@register(
-    mask=[],
-    demask=[],
-    squeeze=[],
-    multivariate=True,
-    handles_target=True,
-)
-def processGeneric(
-    data: DictOfSeries,
-    field: str | Sequence[str],
-    flags: Flags,
-    func: GenericFunction,
-    target: str | Sequence[str] | None = None,
-    dfilter: float = FILTER_ALL,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Generate/process data with user defined functions.
-
-    Formally, what the function does, is the following:
-
-    1.  Let F be a Callable, depending on fields f_1, f_2,...f_K, (F = F(f_1, f_2,...f_K))
-        Than, for every timestamp t_i that occurs in at least one of the timeseries data[f_j] (outer join),
-        The value v_i is computed via:
-        v_i = data([f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]), if all data[f_j][t_i] do exist
-        v_i = ``np.nan``, if at least one of the data[f_j][t_i] is missing.
-    2.  The result is stored to ``data[target]``, if ``target`` is given or to ``data[field]`` otherwise
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str or list of str
-        The variable(s) passed to func.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    func : callable
-        Function to call on the variables given in ``field``. The return value will be written
-        to ``target`` or ``field`` if the former is not given. This implies, that the function
-        needs to accept the same number of arguments (of type pandas.Series) as variables given
-        in ``field`` and should return an iterable of array-like objects with the same number
-        of elements as given in ``target`` (or ``field`` if ``target`` is not specified).
-    target: str or list of str
-        The variable(s) to write the result of ``func`` to. If not given, the variable(s)
-        specified in ``field`` will be overwritten. If a ``target`` is not given, it will be
-        created.
-    flag: float, default ``np.nan``
-        The quality flag to set. The default ``np.nan`` states the general idea, that
-        ``processGeneric`` generates 'new' data without any flags.
-    dfilter: float, default ``FILTER_ALL``
-        Threshold flag. Flag values greater than ``dfilter`` indicate that the associated
-        data value is inappropiate for further usage.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        The shape of the data may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        The flags shape may have changed relatively to the input flags.
-
-    Note
-    -----
-    All the numpy functions are available within the generic expressions.
-
-    Examples
-    --------
-    Compute the sum of the variables 'rainfall' and 'snowfall' and save the result to
-    a (new) variable 'precipitation'
-
-    >>> from saqc import SaQC
-    >>> qc = SaQC(pd.DataFrame({'rainfall':[1], 'snowfall':[2]}, index=pd.DatetimeIndex([0])))
-    >>> qc = qc.processGeneric(field=["rainfall", "snowfall"], target="precipitation", func=lambda x, y: x + y)
-    >>> qc.data.to_df()
-    columns     rainfall  snowfall  precipitation
-    1970-01-01         1         2              3
-    """
-
-    fields = toSequence(field)
-    targets = fields if target is None else toSequence(target)
-
-    dchunk, fchunk = _prepare(data, flags, fields, dfilter)
-    result = _execGeneric(fchunk, dchunk, func, dfilter=dfilter)
-
-    meta = {
-        "func": "procGeneric",
-        "args": (field, target),
-        "kwargs": {
-            "func": func.__name__,
-            "dfilter": dfilter,
-            **kwargs,
-        },
-    }
+class GenericMixin:
+    @register(
+        mask=[],
+        demask=[],
+        squeeze=[],
+        multivariate=True,
+        handles_target=True,
+    )
+    def processGeneric(
+        self: "SaQC",
+        field: str | Sequence[str],
+        func: GenericFunction,
+        target: str | Sequence[str] | None = None,
+        dfilter: float = FILTER_ALL,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Generate/process data with user defined functions.
+
+        Formally, what the function does, is the following:
+
+        1.  Let F be a Callable, depending on fields f_1, f_2,...f_K, (F = F(f_1, f_2,...f_K)).
+            Then, for every timestamp t_i that occurs in at least one of the timeseries data[f_j] (outer join),
+            the value v_i is computed via:
+            v_i = F(data[f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]), if all data[f_j][t_i] do exist
+            v_i = ``np.nan``, if at least one of the data[f_j][t_i] is missing.
+        2.  The result is stored to ``data[target]``, if ``target`` is given, or to ``data[field]`` otherwise.
+
+        Parameters
+        ----------
+        field : str or list of str
+            The variable(s) passed to func.
+
+        func : callable
+            Function to call on the variables given in ``field``. The return value will be written
+            to ``target`` or ``field`` if the former is not given. This implies, that the function
+            needs to accept the same number of arguments (of type pandas.Series) as variables given
+            in ``field`` and should return an iterable of array-like objects with the same number
+            of elements as given in ``target`` (or ``field`` if ``target`` is not specified).
+
+        target: str or list of str
+            The variable(s) to write the result of ``func`` to. If not given, the variable(s)
+            specified in ``field`` will be overwritten. If a ``target`` is not given, it will be
+            created.
+
+        flag: float, default ``np.nan``
+            The quality flag to set. The default ``np.nan`` states the general idea, that
+            ``processGeneric`` generates 'new' data without any flags.
+
+        dfilter: float, default ``FILTER_ALL``
+            Threshold flag. Flag values greater than ``dfilter`` indicate that the associated
+            data value is inappropriate for further usage.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Note
+        -----
+        All the numpy functions are available within the generic expressions.
+
+        Examples
+        --------
+        Compute the sum of the variables 'rainfall' and 'snowfall' and save the result to
+        a (new) variable 'precipitation'
+
+        >>> from saqc import SaQC
+        >>> qc = SaQC(pd.DataFrame({'rainfall':[1], 'snowfall':[2]}, index=pd.DatetimeIndex([0])))
+        >>> qc = qc.processGeneric(field=["rainfall", "snowfall"], target="precipitation", func=lambda x, y: x + y)
+        >>> qc.data.to_df()
+        columns     rainfall  snowfall  precipitation
+        1970-01-01         1         2              3
+        """
+
+        fields = toSequence(field)
+        targets = fields if target is None else toSequence(target)
+
+        dchunk, fchunk = _prepare(self._data, self._flags, fields, dfilter)
+        result = _execGeneric(fchunk, dchunk, func, dfilter=dfilter)
+
+        meta = {
+            "func": "procGeneric",
+            "args": (field, target),
+            "kwargs": {
+                "func": func.__name__,
+                "dfilter": dfilter,
+                **kwargs,
+            },
+        }
+
+        # update data & flags
+        for i, col in enumerate(targets):
+
+            datacol = result.iloc[:, i]
+            self._data[col] = datacol
+
+            if col not in self._flags:
+                self._flags.history[col] = History(datacol.index)
+
+            if not self._flags[col].index.equals(datacol.index):
+                raise ValueError(
+                    f"cannot assign function result to the existing variable {repr(col)} "
+                    "because of incompatible indices, please choose another 'target'"
+                )
+
+            self._flags.history[col].append(
+                pd.Series(np.nan, index=datacol.index), meta
+            )
 
-    # update data & flags
-    for i, col in enumerate(targets):
+        return self
 
-        datacol = result.iloc[:, i]
-        data[col] = datacol
+    @register(
+        mask=[],
+        demask=[],
+        squeeze=[],
+        multivariate=True,
+        handles_target=True,
+    )
+    def flagGeneric(
+        self: "SaQC",
+        field: str | Sequence[str],
+        func: GenericFunction,
+        target: str | Sequence[str] | None = None,
+        flag: float = BAD,
+        dfilter: float = FILTER_ALL,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag data with user defined functions.
 
-        if col not in flags:
-            flags.history[col] = History(datacol.index)
+        Formally, what the function does, is the following:
+        Let X be a Callable, depending on fields f_1, f_2,...f_K, (X = X(f_1, f_2,...f_K))
+        Then, for every timestamp t_i in data[field]:
+        data[field][t_i] is flagged if X(data[f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]) is True.
 
-        if not flags[col].index.equals(datacol.index):
-            raise ValueError(
-                f"cannot assign function result to the existing variable {repr(col)} "
-                "because of incompatible indices, please choose another 'target'"
-            )
+        Parameters
+        ----------
+        field : str or list of str
+            The variable(s) passed to func.
 
-        flags.history[col].append(pd.Series(np.nan, index=datacol.index), meta)
+        func : callable
+            Function to call on the variables given in ``field``. The function needs to accept the same
+            number of arguments (of type pandas.Series) as variables given in ``field`` and return an
+            iterable of array-like objects with dtype bool and with the same number of elements as
+            given in ``target`` (or ``field`` if ``target`` is not specified). The function output
+            determines the values to flag.
 
-    return data, flags
+        target: str or list of str
+            The variable(s) to write the result of ``func`` to. If not given, the variable(s)
+            specified in ``field`` will be overwritten. If a ``target`` is not given, it will be
+            created.
 
+        flag: float, default ``BAD``
+            The quality flag to set. The default ``BAD`` states the general idea, that
+            ``flagGeneric`` sets flags without direct relation to the potentially
+            already present flags.
 
-@register(
-    mask=[],
-    demask=[],
-    squeeze=[],
-    multivariate=True,
-    handles_target=True,
-)
-def flagGeneric(
-    data: DictOfSeries,
-    field: Union[str, Sequence[str]],
-    flags: Flags,
-    func: GenericFunction,
-    target: Union[str, Sequence[str]] = None,
-    flag: float = BAD,
-    dfilter: float = FILTER_ALL,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag data with user defined functions.
+        dfilter: float, default ``FILTER_ALL``
+            Threshold flag. Flag values greater than ``dfilter`` indicate that the associated
+            data value is inappropriate for further usage.
 
-    Formally, what the function does, is the following:
-    Let X be a Callable, depending on fields f_1, f_2,...f_K, (X = X(f_1, f_2,...f_K))
-    Than for every timestamp t_i in data[field]:
-    data[field][t_i] is flagged if X(data[f_1][t_i], data[f_2][t_i], ..., data[f_K][t_i]) is True.
+        Returns
+        -------
+        saqc.SaQC
 
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str or list of str
-        The variable(s) passed to func.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    func : callable
-        Function to call on the variables given in ``field``. The function needs to accept the same
-        number of arguments (of type pandas.Series) as variables given in ``field`` and return an
-        iterable of array-like objects of with dtype bool and with the same number of elements as
-        given in ``target`` (or ``field`` if ``target`` is not specified). The function output
-        determines the values to flag.
-    target: str or list of str
-        The variable(s) to write the result of ``func`` to. If not given, the variable(s)
-        specified in ``field`` will be overwritten. If a ``target`` is not given, it will be
-        created.
-    flag: float, default ``BAD``
-        The quality flag to set. The default ``BAD`` states the general idea, that
-        ``processGeneric`` generates 'new' data without direct relation to the potentially
-        already present flags.
-    dfilter: float, default ``FILTER_ALL``
-        Threshold flag. Flag values greater than ``dfilter`` indicate that the associated
-        data value is inappropiate for further usage.
+        Note
+        -----
+        All the numpy functions are available within the generic expressions.
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed relatively to the flags input.
+        Examples
+        --------
 
-    Note
-    -----
-    All the numpy functions are available within the generic expressions.
+        .. testsetup:: exampleFlagGeneric
 
-    Examples
-    --------
+           qc = saqc.SaQC(pd.DataFrame({'temperature':[0], 'uncertainty':[0], 'rainfall':[0], 'fan':[0]}, index=pd.DatetimeIndex([0])))
 
-    .. testsetup:: exampleFlagGeneric
+        1. Flag the variable 'rainfall', if the sum of the variables 'temperature' and 'uncertainty' is below zero:
 
-       qc = saqc.SaQC(pd.DataFrame({'temperature':[0], 'uncertainty':[0], 'rainfall':[0], 'fan':[0]}, index=pd.DatetimeIndex([0])))
+        .. testcode:: exampleFlagGeneric
 
-    1. Flag the variable 'rainfall', if the sum of the variables 'temperature' and 'uncertainty' is below zero:
+           qc.flagGeneric(field=["temperature", "uncertainty"], target="rainfall", func= lambda x, y: x + y < 0)
 
-    .. testcode:: exampleFlagGeneric
+        2. Flag the variable 'temperature', where the variable 'fan' is flagged:
 
-       qc.flagGeneric(field=["temperature", "uncertainty"], target="rainfall", func= lambda x, y: x + y < 0)
+        .. testcode:: exampleFlagGeneric
 
-    2. Flag the variable 'temperature', where the variable 'fan' is flagged:
+           qc.flagGeneric(field="fan", target="temperature", func=lambda x: isflagged(x))
 
-    .. testcode:: exampleFlagGeneric
+        3. The generic functions also support all pandas and numpy functions:
 
-       qc.flagGeneric(field="fan", target="temperature", func=lambda x: isflagged(x))
+        .. testcode:: exampleFlagGeneric
 
-    3. The generic functions also support all pandas and numpy functions:
+           qc = qc.flagGeneric(field="fan", target="temperature", func=lambda x: np.sqrt(x) < 7)
+        """
 
-    .. testcode:: exampleFlagGeneric
+        fields = toSequence(field)
+        targets = fields if target is None else toSequence(target)
 
-       qc = qc.flagGeneric(field="fan", target="temperature", func=lambda x: np.sqrt(x) < 7)
-    """
+        dchunk, fchunk = _prepare(self._data, self._flags, fields, dfilter)
+        result = _execGeneric(fchunk, dchunk, func, dfilter=dfilter)
 
-    fields = toSequence(field)
-    targets = fields if target is None else toSequence(target)
+        if len(targets) != len(result.columns):
+            raise ValueError(
+                f"the generic function returned {len(result.columns)} field(s), but only {len(targets)} target(s) were given"
+            )
 
-    dchunk, fchunk = _prepare(data, flags, fields, dfilter)
-    result = _execGeneric(fchunk, dchunk, func, dfilter=dfilter)
+        if not result.empty and not (result.dtypes == bool).all():
+            raise TypeError(f"generic expression does not return a boolean array")
 
-    if len(targets) != len(result.columns):
-        raise ValueError(
-            f"the generic function returned {len(result.columns)} field(s), but only {len(targets)} target(s) were given"
-        )
-
-    if not result.empty and not (result.dtypes == bool).all():
-        raise TypeError(f"generic expression does not return a boolean array")
-
-    meta = {
-        "func": "flagGeneric",
-        "args": (field, target),
-        "kwargs": {
-            "func": func.__name__,
-            "flag": flag,
-            "dfilter": dfilter,
-            **kwargs,
-        },
-    }
+        meta = {
+            "func": "flagGeneric",
+            "args": (field, target),
+            "kwargs": {
+                "func": func.__name__,
+                "flag": flag,
+                "dfilter": dfilter,
+                **kwargs,
+            },
+        }
 
-    # update flags & data
-    for i, col in enumerate(targets):
+        # update flags & data
+        for i, col in enumerate(targets):
 
-        maskcol = result.iloc[:, i]
+            maskcol = result.iloc[:, i]
 
-        # make sure the column exists
-        if col not in flags:
-            flags.history[col] = History(maskcol.index)
+            # make sure the column exists
+            if col not in self._flags:
+                self._flags.history[col] = History(maskcol.index)
 
-        # dummy column to ensure consistency between flags and data
-        if col not in data:
-            data[col] = pd.Series(np.nan, index=maskcol.index)
+            # dummy column to ensure consistency between flags and data
+            if col not in self._data:
+                self._data[col] = pd.Series(np.nan, index=maskcol.index)
 
-        flagcol = maskcol.replace({False: np.nan, True: flag}).astype(float)
+            flagcol = maskcol.replace({False: np.nan, True: flag}).astype(float)
 
-        # we need equal indices to work on
-        if not flags[col].index.equals(maskcol.index):
-            raise ValueError(
-                f"cannot assign function result to the existing variable {repr(col)} "
-                "because of incompatible indices, please choose another 'target'"
-            )
+            # we need equal indices to work on
+            if not self._flags[col].index.equals(maskcol.index):
+                raise ValueError(
+                    f"cannot assign function result to the existing variable {repr(col)} "
+                    "because of incompatible indices, please choose another 'target'"
+                )
 
-        flags.history[col].append(flagcol, meta)
+            self._flags.history[col].append(flagcol, meta)
 
-    return data, flags
+        return self
diff --git a/saqc/funcs/interpolation.py b/saqc/funcs/interpolation.py
index 3a005a719f9c36172465bbe04619f5130e5f6d03..e254014291a9d5286eeedb95e38410ff85de0382 100644
--- a/saqc/funcs/interpolation.py
+++ b/saqc/funcs/interpolation.py
@@ -7,18 +7,20 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Callable, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Union
 
 import numpy as np
 import pandas as pd
 from typing_extensions import Literal
 
-from dios import DictOfSeries
 from saqc.constants import UNFLAGGED
-from saqc.core.flags import Flags
 from saqc.core.register import _isflagged, register
 from saqc.lib.ts_operators import interpolateNANs
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
 _SUPPORTED_METHODS = Literal[
     "linear",
     "time",
@@ -38,171 +40,6 @@ _SUPPORTED_METHODS = Literal[
 ]
 
 
-@register(
-    mask=["field"],
-    demask=["field"],
-    squeeze=[],  # func handles history by itself
-)
-def interpolateByRolling(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Union[str, int],
-    func: Callable[[pd.Series], float] = np.median,
-    center: bool = True,
-    min_periods: int = 0,
-    flag: float = UNFLAGGED,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Interpolates nan-values in the data by assigning them the aggregation result of the window surrounding them.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        Name of the column, holding the data-to-be-interpolated.
-
-    flags : saqc.Flags
-        A flags object, holding flags and additional Information related to `data`.
-
-    window : int, str
-        The size of the window, the aggregation is computed from. An integer define the number of periods to be used,
-        an string is interpreted as an offset. ( see `pandas.rolling` for more information).
-        Integer windows may result in screwed aggregations if called on none-harmonized or irregular data.
-
-    func : Callable
-        The function used for aggregation.
-
-    center : bool, default True
-        Center the window around the value. Can only be used with integer windows, otherwise it is silently ignored.
-
-    min_periods : int
-        Minimum number of valid (not np.nan) values that have to be available in a window for its aggregation to be
-        computed.
-
-    flag : float or None, default UNFLAGGED
-        Flag that is to be inserted for the interpolated values.
-        If `None` the old flags are kept, even if the data is valid now.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    datcol = data[field]
-    roller = datcol.rolling(window=window, center=center, min_periods=min_periods)
-    try:
-        func_name = func.__name__
-        if func_name[:3] == "nan":
-            func_name = func_name[3:]
-        rolled = getattr(roller, func_name)()
-    except AttributeError:
-        rolled = roller.apply(func)
-
-    na_mask = datcol.isna()
-    interpolated = na_mask & rolled.notna()
-    datcol[na_mask] = rolled[na_mask]
-    data[field] = datcol
-
-    new_col = pd.Series(np.nan, index=flags[field].index)
-    new_col.loc[interpolated] = np.nan if flag is None else flag
-
-    # todo kwargs must have all passed args except data,field,flags
-    flags.history[field].append(
-        new_col, {"func": "interpolateByRolling", "args": (), "kwargs": kwargs}
-    )
-
-    return data, flags
-
-
-@register(
-    mask=["field"],
-    demask=["field"],
-    squeeze=[],  # func handles history by itself
-)
-def interpolateInvalid(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    method: _SUPPORTED_METHODS,
-    order: int = 2,
-    limit: int = 2,
-    downgrade: bool = False,
-    flag: float = UNFLAGGED,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function to interpolate nan values in the data.
-
-    There are available all the interpolation methods from the pandas.interpolate method and they are applicable by
-    the very same key words, that you would pass to the ``pd.Series.interpolate``'s method parameter.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        Name of the column, holding the data-to-be-interpolated.
-
-    flags : saqc.Flags
-        A flags object, holding flags and additional Information related to `data`.
-
-    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}
-        The interpolation method to use.
-
-    order : int, default 2
-        If there your selected interpolation method can be performed at different 'orders' - here you pass the desired
-        order.
-
-    limit : int, default 2
-        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated. This really restricts the
-        interpolation to chunks, containing not more than `limit` successive nan entries.
-
-    flag : float or None, default UNFLAGGED
-        Flag that is set for interpolated values. If ``None``, no flags are set at all.
-
-    downgrade : bool, default False
-        If `True` and the interpolation can not be performed at current order, retry with a lower order.
-        This can happen, because the chosen ``method`` does not support the passed ``order``, or
-        simply because not enough values are present in a interval.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    inter_data = interpolateNANs(
-        data[field],
-        method,
-        order=order,
-        inter_limit=limit,
-        downgrade_interpolation=downgrade,
-    )
-
-    interpolated = data[field].isna() & inter_data.notna()
-    data[field] = inter_data
-    new_col = pd.Series(np.nan, index=flags[field].index)
-    new_col.loc[interpolated] = np.nan if flag is None else flag
-
-    # todo kwargs must have all passed args except data,field,flags
-    flags.history[field].append(
-        new_col, {"func": "interpolateInvalid", "args": (), "kwargs": kwargs}
-    )
-
-    return data, flags
-
-
 def _resampleOverlapping(data: pd.Series, freq: str, fill_value):
     """TODO: docstring needed"""
     dtype = data.dtype
@@ -214,111 +51,269 @@ def _resampleOverlapping(data: pd.Series, freq: str, fill_value):
     return data.fillna(fill_value).astype(dtype)
 
 
-@register(mask=["field"], demask=[], squeeze=[])
-def interpolateIndex(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    freq: str,
-    method: _SUPPORTED_METHODS,
-    order: int = 2,
-    limit: int = 2,
-    downgrade: bool = False,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function to interpolate the data at regular (equidistant) timestamps (or Grid points).
-
-    Note, that the interpolation will only be calculated, for grid timestamps that have a preceding AND a succeeding
-    valid data value within "freq" range.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data container.
-
-    field : str
-        Name of the column, holding the data-to-be-interpolated.
-
-    flags : saqc.Flags
-        A flags object, holding flags and additional Information related to `data`.
-
-    freq : str
-        An Offset String, interpreted as the frequency of
-        the grid you want to interpolate your data at.
-
-    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
-        The interpolation method you want to apply.
-
-    order : int, default 2
-        If there your selected interpolation method can be performed at different 'orders' - here you pass the desired
-        order.
-
-    limit : int, default 2
-        Maximum number of consecutive 'nan' values allowed for a gap to be interpolated. This really restricts the
-        interpolation to chunks, containing not more than `limit` successive nan entries.
-
-    downgrade : bool, default False
-        If `True` and the interpolation can not be performed at current order, retry with a lower order.
-        This can happen, because the chosen ``method`` does not support the passed ``order``, or
-        simply because not enough values are present in a interval.
-
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values and shape may have changed relatively to the flags input.
-    """
-    if data[field].empty:
-        return data, flags
-
-    datcol = data[field].copy()
-
-    start, end = datcol.index[0].floor(freq), datcol.index[-1].ceil(freq)
-    grid_index = pd.date_range(start=start, end=end, freq=freq, name=datcol.index.name)
-
-    # todo:
-    #  in future we could use `register(mask=[field], [], [])`
-    #  and dont handle masking manually here
-    flagged = _isflagged(flags[field], kwargs["dfilter"])
-
-    # drop all points that hold no relevant grid information
-    datcol = datcol[~flagged].dropna()
-
-    # account for annoying case of subsequent frequency aligned values,
-    # that differ exactly by the margin of 2*freq
-    gaps = datcol.index[1:] - datcol.index[:-1] == 2 * pd.Timedelta(freq)
-    gaps = datcol.index[1:][gaps]
-    gaps = gaps.intersection(grid_index).shift(-1, freq)
-
-    # prepare grid interpolation:
-    datcol = datcol.reindex(datcol.index.union(grid_index))
-
-    # do the grid interpolation
-    inter_data = interpolateNANs(
-        data=datcol,
-        method=method,
-        order=order,
-        inter_limit=limit,
-        downgrade_interpolation=downgrade,
+class InterpolationMixin:
+    @register(
+        mask=["field"],
+        demask=["field"],
+        squeeze=[],  # func handles history by itself
     )
-
-    # override falsely interpolated values:
-    inter_data[gaps] = np.nan
-
-    # store interpolated grid
-    data[field] = inter_data[grid_index]
-
-    history = flags.history[field].apply(
-        index=data[field].index,
-        func=_resampleOverlapping,
-        func_kws=dict(freq=freq, fill_value=UNFLAGGED),
+    def interpolateByRolling(
+        self: "SaQC",
+        field: str,
+        window: Union[str, int],
+        func: Callable[[pd.Series], float] = np.median,
+        center: bool = True,
+        min_periods: int = 0,
+        flag: float = UNFLAGGED,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Interpolates nan-values in the data by assigning them the aggregation result of the window surrounding them.
+
+        Parameters
+        ----------
+        field : str
+            Name of the column, holding the data-to-be-interpolated.
+
+        window : int, str
+            The size of the window the aggregation is computed from. An integer defines the number of periods to be used,
+            a string is interpreted as an offset (see `pandas.rolling` for more information).
+            Integer windows may result in skewed aggregations if called on non-harmonized or irregular data.
+
+        func : Callable
+            The function used for aggregation.
+
+        center : bool, default True
+            Center the window around the value. Can only be used with integer windows, otherwise it is silently ignored.
+
+        min_periods : int
+            Minimum number of valid (not np.nan) values that have to be available in a window for its aggregation to be
+            computed.
+
+        flag : float or None, default UNFLAGGED
+            Flag that is to be inserted for the interpolated values.
+            If `None` the old flags are kept, even if the data is valid now.
+
+        Returns
+        -------
+        saqc.SaQC
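+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only, not a verified doctest; the toy
+        series is made up):
+
+        .. code-block:: python
+
+           import numpy as np
+           import pandas as pd
+           import saqc
+
+           s = pd.Series(
+               [1.0, np.nan, 3.0, np.nan, 5.0],
+               index=pd.date_range('2000-01-01', periods=5, freq='D'),
+               name='a',
+           )
+           qc = saqc.SaQC(s)
+           # fill each NaN with the median of a centered window of 3 periods
+           qc = qc.interpolateByRolling('a', window=3, func=np.median, min_periods=1)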
+        """
+        datcol = self._data[field]
+        roller = datcol.rolling(window=window, center=center, min_periods=min_periods)
+        try:
+            func_name = func.__name__
+            if func_name[:3] == "nan":
+                func_name = func_name[3:]
+            rolled = getattr(roller, func_name)()
+        except AttributeError:
+            rolled = roller.apply(func)
+
+        na_mask = datcol.isna()
+        interpolated = na_mask & rolled.notna()
+        datcol[na_mask] = rolled[na_mask]
+        self._data[field] = datcol
+
+        flagcol = pd.Series(np.nan, index=self._flags[field].index)
+        flagcol.loc[interpolated] = np.nan if flag is None else flag
+
+        # todo kwargs must have all passed args except data,field,flags
+        meta = {
+            "func": "interpolateByRolling",
+            "args": (field,),
+            "kwargs": {
+                "window": window,
+                "func": func,
+                "center": center,
+                "min_periods": min_periods,
+                "flag": flag,
+                **kwargs,
+            },
+        }
+        self._flags.history[field].append(flagcol, meta)
+
+        return self
+
+    @register(
+        mask=["field"],
+        demask=["field"],
+        squeeze=[],  # func handles history by itself
     )
-
-    flags.history[field] = history
-    return data, flags
+    def interpolateInvalid(
+        self: "SaQC",
+        field: str,
+        method: _SUPPORTED_METHODS,
+        order: int = 2,
+        limit: int = 2,
+        downgrade: bool = False,
+        flag: float = UNFLAGGED,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function to interpolate nan values in the data.
+
+        All the interpolation methods supported by ``pandas.Series.interpolate`` are available and are selected via
+        the very same keywords you would pass to its ``method`` parameter.
+
+        Parameters
+        ----------
+        field : str
+            Name of the column, holding the data-to-be-interpolated.
+
+        method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+            "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}
+            The interpolation method to use.
+
+        order : int, default 2
+            If your selected interpolation method can be performed at different 'orders', pass the desired
+            order here.
+
+        limit : int, default 2
+            Maximum number of consecutive 'nan' values allowed for a gap to be interpolated. This restricts the
+            interpolation to gaps containing no more than `limit` successive nan entries.
+
+        flag : float or None, default UNFLAGGED
+            Flag that is set for interpolated values. If ``None``, no flags are set at all.
+
+        downgrade : bool, default False
+            If `True` and the interpolation cannot be performed at the given order, retry with a lower order.
+            This can happen if the chosen ``method`` does not support the passed ``order``, or
+            simply because not enough values are present in an interval.
+
+        Returns
+        -------
+        saqc.SaQC
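+
+        Examples
+        --------
+        A minimal usage sketch (the column name ``"x"`` and the sample data are
+        illustrative only):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=6, freq="10min")
+        >>> data = pd.DataFrame({"x": [0.0, 1.0, np.nan, np.nan, 4.0, 5.0]}, index=idx)
+        >>> qc = saqc.SaQC(data)
+        >>> qc = qc.interpolateInvalid("x", method="linear", limit=3)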
+        """
+        inter_data = interpolateNANs(
+            self._data[field],
+            method,
+            order=order,
+            inter_limit=limit,
+            downgrade_interpolation=downgrade,
+        )
+
+        interpolated = self._data[field].isna() & inter_data.notna()
+        self._data[field] = inter_data
+        new_col = pd.Series(np.nan, index=self._flags[field].index)
+        new_col.loc[interpolated] = np.nan if flag is None else flag
+
+        # todo kwargs must have all passed args except data,field,flags
+        self._flags.history[field].append(
+            new_col, {"func": "interpolateInvalid", "args": (), "kwargs": kwargs}
+        )
+
+        return self
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def interpolateIndex(
+        self: "SaQC",
+        field: str,
+        freq: str,
+        method: _SUPPORTED_METHODS,
+        order: int = 2,
+        limit: int = 2,
+        downgrade: bool = False,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function to interpolate the data at regular (equidistant) timestamps (or Grid points).
+
+        Note that the interpolation will only be calculated for grid timestamps that have a preceding AND a succeeding
+        valid data value within ``freq`` range.
+
+        Parameters
+        ----------
+        field : str
+            Name of the column, holding the data-to-be-interpolated.
+
+        freq : str
+            An Offset String, interpreted as the frequency of
+            the grid you want to interpolate your data at.
+
+        method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+            "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}: string
+            The interpolation method you want to apply.
+
+        order : int, default 2
+            If your selected interpolation method can be performed at different 'orders', pass the desired
+            order here.
+
+        limit : int, default 2
+            Maximum number of consecutive 'nan' values allowed for a gap to be interpolated. This restricts the
+            interpolation to gaps containing no more than `limit` successive nan entries.
+
+        downgrade : bool, default False
+            If `True` and the interpolation cannot be performed at the given order, retry with a lower order.
+            This can happen if the chosen ``method`` does not support the passed ``order``, or
+            simply because not enough values are present in an interval.
+
+
+        Returns
+        -------
+        saqc.SaQC
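+
+        Examples
+        --------
+        A minimal usage sketch (the column name ``"x"`` and the sample data are
+        illustrative only):
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.DatetimeIndex(
+        ...     ["2021-01-01 00:03", "2021-01-01 00:12", "2021-01-01 00:21"]
+        ... )
+        >>> data = pd.DataFrame({"x": [1.0, 2.0, 3.0]}, index=idx)
+        >>> qc = saqc.SaQC(data)
+        >>> qc = qc.interpolateIndex("x", freq="10min", method="time")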
+        """
+        if self._data[field].empty:
+            return self
+
+        datcol = self._data[field].copy()
+
+        start, end = datcol.index[0].floor(freq), datcol.index[-1].ceil(freq)
+        grid_index = pd.date_range(
+            start=start, end=end, freq=freq, name=datcol.index.name
+        )
+
+        # TODO:
+        # in future we could use `register(mask=[field], [], [])`
+        # and don't handle masking manually here
+        flagged = _isflagged(self._flags[field], kwargs["dfilter"])
+
+        # drop all points that hold no relevant grid information
+        datcol = datcol[~flagged].dropna()
+
+        # account for annoying case of subsequent frequency aligned values,
+        # that differ exactly by the margin of 2*freq
+        gaps = datcol.index[1:] - datcol.index[:-1] == 2 * pd.Timedelta(freq)
+        gaps = datcol.index[1:][gaps]
+        gaps = gaps.intersection(grid_index).shift(-1, freq)
+
+        # prepare grid interpolation:
+        datcol = datcol.reindex(datcol.index.union(grid_index))
+
+        # do the grid interpolation
+        inter_data = interpolateNANs(
+            data=datcol,
+            method=method,
+            order=order,
+            inter_limit=limit,
+            downgrade_interpolation=downgrade,
+        )
+
+        # override falsely interpolated values:
+        inter_data[gaps] = np.nan
+
+        # store interpolated grid
+        self._data[field] = inter_data[grid_index]
+
+        history = self._flags.history[field].apply(
+            index=self._data[field].index,
+            func=_resampleOverlapping,
+            func_kws=dict(freq=freq, fill_value=np.nan),
+        )
+
+        meta = {
+            "func": "interpolateIndex",
+            "args": (field,),
+            "kwargs": {
+                "freq": freq,
+                "method": method,
+                "order": order,
+                "limit": limit,
+                "downgrade": downgrade,
+                **kwargs,
+            },
+        }
+        flagcol = pd.Series(UNFLAGGED, index=history.index)
+        history.append(flagcol, meta)
+
+        self._flags.history[field] = history
+
+        return self
diff --git a/saqc/funcs/noise.py b/saqc/funcs/noise.py
index 6fe5fb8c2a8c7a2b5e627e54dee9ac0687030ce9..8945f22332a763794d6beb7c4179ac34767df7dd 100644
--- a/saqc/funcs/noise.py
+++ b/saqc/funcs/noise.py
@@ -8,77 +8,89 @@
 from __future__ import annotations
 
 import operator
-from typing import Callable
+from typing import TYPE_CHECKING, Callable
 
 import numpy as np
 import pandas as pd
 
-from dios import DictOfSeries
 from saqc.constants import BAD
-from saqc.core.flags import Flags
 from saqc.core.register import flagging
 from saqc.lib.tools import statPass
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
 
-@flagging()
-def flagByStatLowPass(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    func: Callable[[np.ndarray, pd.Series], float],
-    window: str | pd.Timedelta,
-    thresh: float,
-    sub_window: str | pd.Timedelta = None,
-    sub_thresh: float = None,
-    min_periods: int = None,
-    flag: float = BAD,
-    **kwargs,
-):
-    """
-    Flag *chunks* of length, `window`:
-
-    1. If they excexceed `thresh` with regard to `stat`:
-    2. If all (maybe overlapping) *sub-chunks* of *chunk*, with length `sub_window`,
-       `excexceed `sub_thresh` with regard to `stat`:
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        Container to store quality flags to data.
-    func: Callable[[np.array, pd.Series], float]
-        Function to aggregate chunk contnent with.
-    window: str
-        Temporal extension of the chunks to test
-    thresh: float
-        Threshold, that triggers flagging, if exceeded by stat value.
-    sub_window: str, default None,
-        Window size of the sub chunks, that are additionally tested for exceeding
-        `sub_thresh` with respect to `stat`.
-    sub_thresh: float, default None
-    min_periods: int, default None
-    flag : float, default BAD
-        flag to set
-
-    Returns
-    -------
-    """
-
-    datcol = data[field]
-    if not min_periods:
-        min_periods = 0
-    if not sub_thresh:
-        sub_thresh = thresh
-    window = pd.Timedelta(window)
-
-    if sub_window:
-        sub_window = pd.Timedelta(sub_window)
-
-    to_set = statPass(
-        datcol, func, window, thresh, operator.gt, sub_window, sub_thresh, min_periods
-    )
-    flags[to_set, field] = flag
-    return data, flags
+
+class NoiseMixin:
+    @flagging()
+    def flagByStatLowPass(
+        self: "SaQC",
+        field: str,
+        func: Callable[[np.ndarray, pd.Series], float],
+        window: str | pd.Timedelta,
+        thresh: float,
+        sub_window: str | pd.Timedelta | None = None,
+        sub_thresh: float | None = None,
+        min_periods: int | None = None,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag *chunks* of length `window`, if:
+
+        1. they exceed `thresh` with regard to `func` and
+        2. all (maybe overlapping) *sub-chunks* of the *chunk*, with length `sub_window`,
+           exceed `sub_thresh` with regard to `func`.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+
+        func: Callable[[np.array, pd.Series], float]
+            Function to aggregate the chunk content with.
+
+        window: str
+            Temporal extension of the chunks to test
+
+        thresh: float
+            Threshold that triggers flagging, if exceeded by the aggregation result.
+
+        sub_window: str, default None
+            Window size of the sub-chunks, that are additionally tested for exceeding
+            `sub_thresh` with respect to `func`.
+
+        sub_thresh: float, default None
+            Threshold the sub-chunk test is performed against. Defaults to `thresh` if not given.
+
+        min_periods: int, default None
+            Minimum number of values that have to be present in a chunk for its aggregation to be computed. Defaults to ``0``.
+
+        flag : float, default BAD
+            flag to set
+
+        Returns
+        -------
+        saqc.SaQC
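+
+        Examples
+        --------
+        A minimal usage sketch (the column name ``"x"`` and the sample data are
+        illustrative only):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=48, freq="30min")
+        >>> x = pd.Series(1.0, index=idx)
+        >>> x.iloc[20:30] = [1, 9, 1, 9, 1, 9, 1, 9, 1, 9]
+        >>> qc = saqc.SaQC(pd.DataFrame({"x": x}))
+        >>> qc = qc.flagByStatLowPass("x", func=np.std, window="6h", thresh=2.0)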
+        """
+
+        datcol = self._data[field]
+        if not min_periods:
+            min_periods = 0
+        if not sub_thresh:
+            sub_thresh = thresh
+        window = pd.Timedelta(window)
+
+        if sub_window is not None:
+            sub_window = pd.Timedelta(sub_window)
+
+        to_set = statPass(
+            datcol,
+            func,
+            window,
+            thresh,
+            operator.gt,
+            sub_window,
+            sub_thresh,
+            min_periods,
+        )
+        self._flags[to_set, field] = flag
+        return self
diff --git a/saqc/funcs/outliers.py b/saqc/funcs/outliers.py
index f7d137dcdd9ca75060c6bbde6e78b2818376ae79..7e0575e213f50cf1fcceb36a7c40abb3e5880382 100644
--- a/saqc/funcs/outliers.py
+++ b/saqc/funcs/outliers.py
@@ -9,1283 +9,1288 @@
 from __future__ import annotations
 
 import uuid
-from typing import Callable, Optional, Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Optional, Sequence, Tuple, Union
 
 import numba
 import numpy as np
 import numpy.polynomial.polynomial as poly
 import pandas as pd
 from outliers import smirnov_grubbs
+from scipy.stats import median_abs_deviation
 from typing_extensions import Literal
 
 from dios import DictOfSeries
 from saqc.constants import BAD, UNFLAGGED
 from saqc.core.flags import Flags
 from saqc.core.register import flagging, register
-from saqc.funcs.scores import assignKNNScore
-from saqc.funcs.tools import copyField, dropField
-from saqc.funcs.transformation import transform
+from saqc.funcs.scores import _univarScoring
 from saqc.lib.tools import customRoller, getFreqDelta, toSequence
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
 
-@flagging()
-def flagByStray(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Optional[Union[int, str]] = None,
-    min_periods: int = 11,
-    iter_start: float = 0.5,
-    alpha: float = 0.05,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Flag outliers in 1-dimensional (score) data with the STRAY Algorithm.
-
-    Find more information on the algorithm in References [1].
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        Container to store quality flags to data.
-
-    freq : str, int, or None, default None
-        Determines the segmentation of the data into partitions, the kNN algorithm is
-        applied onto individually.
-
-        * ``np.inf``: Apply Scoring on whole data set at once
-        * ``x`` > 0 : Apply scoring on successive data chunks of periods length ``x``
-        * Offset String : Apply scoring on successive partitions of temporal extension
-          matching the passed offset string
-
-    min_periods : int, default 11
-        Minimum number of periods per partition that have to be present for a valid
-        outlier dettection to be made in this partition. (Only of effect, if `freq`
-        is an integer.) Partition min value must always be greater then the
-        nn_neighbors value.
-
-    iter_start : float, default 0.5
-        Float in [0,1] that determines which percentage of data is considered
-        "normal". 0.5 results in the stray algorithm to search only the upper 50 % of
-        the scores for the cut off point. (See reference section for more information)
-
-    alpha : float, default 0.05
-        Level of significance by which it is tested, if a score might be drawn from
-        another distribution, than the majority of the data.
-
-    flag : float, default BAD
-        flag to set.
-
-    References
-    ----------
-    [1] Talagala, P. D., Hyndman, R. J., & Smith-Miles, K. (2019). Anomaly detection in
-        high dimensional data. arXiv preprint arXiv:1908.04000.
-    """
-    scores = data[field].dropna()
-
-    if scores.empty:
-        return data, flags
-
-    if not window:
-        window = scores.shape[0]
-
-    if isinstance(window, str):
-        partitions = scores.groupby(pd.Grouper(freq=window))
-
-    else:
-        grouper_series = pd.Series(
-            data=np.arange(0, scores.shape[0]), index=scores.index
-        )
-        grouper_series = grouper_series.transform(lambda x: int(np.floor(x / window)))
-        partitions = scores.groupby(grouper_series)
-
-    # calculate flags for every partition
-    for _, partition in partitions:
-
-        if partition.empty | (partition.shape[0] < min_periods):
-            continue
-
-        sample_size = partition.shape[0]
-
-        sorted_i = partition.values.argsort()
-        resids = partition.values[sorted_i]
-        gaps = np.append(0, np.diff(resids))
-
-        tail_size = int(max(min(50, np.floor(sample_size / 4)), 2))
-        tail_indices = np.arange(2, tail_size + 1)
-
-        i_start = int(max(np.floor(sample_size * iter_start), 1) + 1)
-        ghat = np.array([np.nan] * sample_size)
-
-        for i in range(i_start - 1, sample_size):
-            ghat[i] = sum((tail_indices / (tail_size - 1)) * gaps[i - tail_indices + 1])
-
-        log_alpha = np.log(1 / alpha)
-        for iter_index in range(i_start - 1, sample_size):
-            if gaps[iter_index] > log_alpha * ghat[iter_index]:
-                index = partition.index[sorted_i[iter_index:]]
-                flags[index, field] = flag
-                break
-
-    return data, flags
-
-
-def _evalStrayLabels(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    target: Sequence[str],
-    reduction_range: Optional[str] = None,
-    reduction_drop_flagged: bool = False,  # TODO: still a case ?
-    reduction_thresh: float = 3.5,
-    reduction_min_periods: int = 1,
-    at_least_one: bool = True,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The function "reduces" an observations flag to components of it, by applying MAD
-    (See references) test onto every components temporal surrounding.
 
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
+class OutliersMixin:
+    @flagging()
+    def flagRange(
+        self: "SaQC",
+        field: str,
+        min: float = -np.inf,
+        max: float = np.inf,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function flags values not covered by the closed interval [`min`, `max`].
+
+        Parameters
+        ----------
+        field : str
+            The field name of the column, holding the data-to-be-flagged.
+        min : float
+            Lower bound for valid data.
+        max : float
+            Upper bound for valid data.
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
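+
+        Examples
+        --------
+        A minimal usage sketch (the column name ``"x"`` and the sample data are
+        illustrative only):
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=5, freq="1h")
+        >>> data = pd.DataFrame({"x": [1.0, 2.0, 150.0, 3.0, -7.0]}, index=idx)
+        >>> qc = saqc.SaQC(data)
+        >>> qc = qc.flagRange("x", min=0.0, max=100.0)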
+        """
+
+        # using the raw numpy array is much faster
+        datacol = self._data[field].to_numpy()
+        mask = (datacol < min) | (datacol > max)
+        self._flags[mask, field] = flag
+        return self
+
+    @flagging()
+    def flagByStray(
+        self: "SaQC",
+        field: str,
+        window: int | str | None = None,
+        min_periods: int = 11,
+        iter_start: float = 0.5,
+        alpha: float = 0.05,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag outliers in 1-dimensional (score) data with the STRAY Algorithm.
 
-    field : str
-        The fieldname of the column, holding the labels to be evaluated.
+        Find more information on the algorithm in References [1].
 
-    flags : saqc.Flags
-        Container to store quality flags to data.
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
 
-    target : list of str
-        A list of strings, holding the column names of the variables, the stray labels
-        shall be projected onto.
+        window : str, int, or None, default None
+            Determines the segmentation of the data into partitions, the kNN algorithm is
+            applied onto individually.
 
-    val_frame : (N,M) pd.DataFrame
-        Input NxM DataFrame of observations, where N is the number of observations and
-        M the number of components per observation.
+            * ``None``: Apply scoring on the whole data set at once
+            * ``x`` > 0 : Apply scoring on successive data chunks of periods length ``x``
+            * Offset String : Apply scoring on successive partitions of temporal extension
+              matching the passed offset string
 
-    to_flag_frame : pandas.DataFrame
-        Input dataframe of observations to be tested, where N is the number of
-        observations and M the number of components per observation.
+        min_periods : int, default 11
+            Minimum number of periods per partition that have to be present for a valid
+            outlier detection to be made in this partition. (Only of effect, if `window`
+            is an integer.) The partition minimum must always be greater than the
+            number of nearest neighbors used for scoring.
 
-    reduction_range : {None, str}
-        An offset string, denoting the range of the temporal surrounding to include
-        into the MAD testing. If ``None`` is passed, no testing will be performed and
-        all targets will have the stray flag projected.
+        iter_start : float, default 0.5
+            Float in [0,1] that determines which percentage of data is considered
+            "normal". 0.5 results in the stray algorithm to search only the upper 50 % of
+            the scores for the cut off point. (See reference section for more information)
 
-    reduction_drop_flagged : bool, default False
-        Wheather or not to drop flagged values other than the value under test, from the
-        temporal surrounding before checking the value with MAD.
+        alpha : float, default 0.05
+            Level of significance by which it is tested, if a score might be drawn from
+            another distribution than the majority of the data.
 
-    reduction_thresh : float, default 3.5
-        The `critical` value, controlling wheather the MAD score is considered
-        referring to an outlier or not. Higher values result in less rigid flagging.
-        The default value is widely used in the literature. See references section
-        for more details ([1]).
+        flag : float, default BAD
+            flag to set.
 
-    at_least_one : bool, default True
-        If none of the variables, the outlier label shall be reduced to, is an outlier
-        with regard to the test, all (True) or none (False) of the variables are flagged
+        Returns
+        -------
+        saqc.SaQC
 
-    flag : float, default BAD
-        flag to set.
+        References
+        ----------
+        [1] Talagala, P. D., Hyndman, R. J., & Smith-Miles, K. (2019). Anomaly detection in
+            high dimensional data. arXiv preprint arXiv:1908.04000.
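+
+        Examples
+        --------
+        A minimal usage sketch (the column name ``"score"`` and the sample scores are
+        illustrative only):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=100, freq="10min")
+        >>> scores = pd.Series(np.linspace(0, 1, 100), index=idx)
+        >>> scores.iloc[-1] = 25.0
+        >>> qc = saqc.SaQC(pd.DataFrame({"score": scores}))
+        >>> qc = qc.flagByStray("score")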
+        """
+        scores = self._data[field].dropna()
 
-    References
-    ----------
-    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
-    """
-    val_frame = data[target].to_df()
-    stray_detects = flags[field] > UNFLAGGED
-    stray_detects = stray_detects[stray_detects]
-    to_flag_frame = pd.DataFrame(False, columns=target, index=stray_detects.index)
+        if scores.empty:
+            return self
 
-    if reduction_range is None:
-        for field in to_flag_frame.columns:
-            flags[to_flag_frame.index, field] = flag
-        return data, flags
+        if not window:
+            window = scores.shape[0]
 
-    for var in target:
-        for index in enumerate(to_flag_frame.index):
+        if isinstance(window, str):
+            partitions = scores.groupby(pd.Grouper(freq=window))
 
-            index_slice = slice(
-                index[1] - pd.Timedelta(reduction_range),
-                index[1] + pd.Timedelta(reduction_range),
+        else:
+            grouper_series = pd.Series(
+                data=np.arange(0, scores.shape[0]), index=scores.index
             )
-            test_slice = val_frame[var][index_slice].dropna()
-
-            # check, wheather value under test is sufficiently centered:
-            first = test_slice.first_valid_index()
-            last = test_slice.last_valid_index()
-            min_range = pd.Timedelta(reduction_range) / 4
-
-            if (
-                pd.Timedelta(index[1] - first) < min_range
-                or pd.Timedelta(last - index[1]) < min_range
-            ):
-                polydeg = 0
-            else:
-                polydeg = 2
+            grouper_series = grouper_series.transform(
+                lambda x: int(np.floor(x / window))
+            )
+            partitions = scores.groupby(grouper_series)
 
-            if reduction_drop_flagged:
-                test_slice = test_slice.drop(to_flag_frame.index, errors="ignore")
+        # calculate flags for every partition
+        for _, partition in partitions:
 
-            if test_slice.shape[0] < reduction_min_periods:
-                to_flag_frame.loc[index[1], var] = True
+            if partition.empty | (partition.shape[0] < min_periods):
                 continue
 
-            x = test_slice.index.values.astype(float)
-            x_0 = x[0]
-            x = (x - x_0) / 10**12
+            sample_size = partition.shape[0]
 
-            polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=polydeg)
+            sorted_i = partition.values.argsort()
+            resids = partition.values[sorted_i]
+            gaps = np.append(0, np.diff(resids))
 
-            testval = poly.polyval(
-                (float(index[1].to_numpy()) - x_0) / 10**12, polyfitted
-            )
-            testval = val_frame[var][index[1]] - testval
+            tail_size = int(max(min(50, np.floor(sample_size / 4)), 2))
+            tail_indices = np.arange(2, tail_size + 1)
 
-            resids = test_slice.values - poly.polyval(x, polyfitted)
-            med_resids = np.median(resids)
-            MAD = np.median(np.abs(resids - med_resids))
-            crit_val = 0.6745 * (abs(med_resids - testval)) / MAD
+            i_start = int(max(np.floor(sample_size * iter_start), 1) + 1)
+            ghat = np.array([np.nan] * sample_size)
 
-            if crit_val > reduction_thresh:
-                to_flag_frame.loc[index[1], var] = True
-
-    if at_least_one:
-        to_flag_frame[~to_flag_frame.any(axis=1)] = True
-
-    for field in to_flag_frame.columns:
-        col = to_flag_frame[field]
-        flags[col[col].index, field] = flag
+            for i in range(i_start - 1, sample_size):
+                ghat[i] = sum(
+                    (tail_indices / (tail_size - 1)) * gaps[i - tail_indices + 1]
+                )
 
-    return data, flags
+            log_alpha = np.log(1 / alpha)
+            for iter_index in range(i_start - 1, sample_size):
+                if gaps[iter_index] > log_alpha * ghat[iter_index]:
+                    index = partition.index[sorted_i[iter_index:]]
+                    self._flags[index, field] = flag
+                    break
+
+        return self
+
+    @register(
+        mask=["field"],
+        demask=["field"],
+        squeeze=["field"],
+        multivariate=True,
+        handles_target=False,
+    )
+    def flagMVScores(
+        self: "SaQC",
+        field: Sequence[str],
+        trafo: Callable[[pd.Series], pd.Series] = lambda x: x,
+        alpha: float = 0.05,
+        n: int = 10,
+        func: Callable[[pd.Series], float] = np.sum,
+        iter_start: float = 0.5,
+        partition: Optional[Union[int, str]] = None,
+        partition_min: int = 11,
+        stray_range: Optional[str] = None,
+        drop_flagged: bool = False,  # TODO: still a case ?
+        thresh: float = 3.5,
+        min_periods: int = 1,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        The algorithm implements a 3-step outlier detection procedure for simultaneously
+        flagging of higher dimensional data (dimensions > 3).
+
+        In references [1], the procedure is introduced and exemplified with an
+        application on hydrological data. See the notes section for an overview over the
+        algorithms basic steps.
+
+        Parameters
+        ----------
+        field : list of str
+            List of fieldnames, corresponding to the variables that are to be included
+            into the flagging process.
+
+        trafo : callable, default lambda x:x
+            Transformation to be applied onto every column before scoring. Will likely
+            get deprecated soon. It is better to transform the data in a processing step
+            preceding the call to ``flagMVScores``.
+
+        alpha : float, default 0.05
+            Level of significance by which it is tested, if an observation's score might
+            be drawn from another distribution than the majority of the observations.
+
+        n : int, default 10
+            Number of neighbors included in the scoring process for every datapoint.
+
+        func : Callable[numpy.array, float], default np.sum
+            The function that maps the set of every points k-nearest neighbor distances
+            onto a certain scoring.
+
+        iter_start : float, default 0.5
+            Float in [0,1] that determines which percentage of data is considered
+            "normal". 0.5 results in the threshing algorithm to search only the upper 50
+            % of the scores for the cut off point. (See reference section for more
+            information)
+
+        partition : {None, str, int}, default None
+            Only effective when `threshing` = 'stray'. Determines the size of the data
+            partitions, the data is decomposed into. Each partition is checked separately
+            for outliers. If a string is passed, it has to be an offset string and it
+            results in partitioning the data into parts of according temporal length. If
+            an integer is passed, the data is simply split up into continuous chunks of
+            `partition` periods. If ``None`` is passed (default), all the data will be tested
+            in one run.
+
+        partition_min : int, default 11
+            Only effective when `threshing` = 'stray'. Minimum number of periods per
+            partition that have to be present for a valid outlier detection to be made in
+            this partition. (Only of effect, if `partition` is an integer.)
+
+        stray_range : {None, str}, default None
+            If not None, the stray result is reduced to single outlier components of the
+            input fields, where possible. An offset string, denoting the range of the
+            temporal surrounding to include into the MAD testing while trying to reduce
+            flags.
+
+        drop_flagged : bool, default False
+            Only effective when `stray_range` is not ``None``. Whether or not to drop flagged
+            values other than the value under test from the temporal surrounding before
+            checking the value with MAD.
+
+        thresh : float, default 3.5
+            Only effective when `stray_range` is not ``None``. The `critical` value,
+            controlling whether the MAD score is considered referring to an outlier or
+            not. Higher values result in less rigid flagging. The default value is widely
+            considered appropriate in the literature.
+
+        min_periods : int, default 1
+            Only effective when `stray_range` is not ``None``. Minimum number of measurements
+            that must be present in a reduction interval for the reduction actually to be
+            performed.
+
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        The basic steps are:
+
+        1. transforming
+
+        The different data columns are transformed via timeseries transformations to
+        (a) make them comparable and
+        (b) make outliers more stand out.
+
+        This step is usually subject to a phase of research and trial and error. See [1] for more
+        details.
+
+        Note that the data transformation, as a built-in step of the algorithm,
+        will likely get deprecated soon. It is better to transform the data in a processing
+        step preceding the multivariate flagging process. Also, by doing so, one gets
+        much more control and variety in the transformation applied, since the `trafo`
+        parameter only allows for application of the same transformation to all of the
+        variables involved.
+
+        2. scoring
+
+        Every observation gets assigned a score depending on its k nearest neighbors. See
+        the `scoring_method` parameter description for details on the different scoring
+        methods. Furthermore [1], [2] may give some insight in the pro and cons of the
+        different methods.
+
+        3. threshing
+
+        The gaps between the (greatest) scores are tested for being drawn from the same
+        distribution as the majority of the scores. If a gap is encountered that,
+        with sufficient significance, can be said to not be drawn from the same
+        distribution as the one all the smaller gaps are drawn from, then the observation
+        belonging to this gap, and all the observations belonging to gaps larger than
+        this gap, get flagged as outliers. See the description of the `threshing` parameter for
+        more details, although [2] gives a fully detailed overview over the `stray`
+        algorithm.
+        """
+
+        fields = toSequence(field)
+
+        fields_ = []
+        for f in fields:
+            field_ = str(uuid.uuid4())
+            self = self.copyField(field=f, target=field_)
+            self = self.transform(field=field_, func=trafo, freq=partition)
+            fields_.append(field_)
+
+        knn_field = str(uuid.uuid4())
+        self = self.assignKNNScore(
+            field=fields_,
+            target=knn_field,
+            n=n,
+            func=func,
+            freq=partition,
+            method="ball_tree",
+            min_periods=partition_min,
+            **kwargs,
+        )
+        for field_ in fields_:
+            self = self.dropField(field_)
+
+        self = self.flagByStray(
+            field=knn_field,
+            window=partition,
+            min_periods=partition_min,
+            iter_start=iter_start,
+            alpha=alpha,
+            flag=flag,
+            **kwargs,
+        )
 
+        self._data, self._flags = _evalStrayLabels(
+            data=self._data,
+            field=knn_field,
+            target=fields,
+            flags=self._flags,
+            reduction_range=stray_range,
+            reduction_drop_flagged=drop_flagged,
+            reduction_thresh=thresh,
+            reduction_min_periods=min_periods,
+            flag=flag,
+            **kwargs,
+        )
+        return self.dropField(knn_field)
+
+    @flagging()
+    def flagRaise(
+        self: "SaQC",
+        field: str,
+        thresh: float,
+        raise_window: str,
+        freq: str,
+        average_window: Optional[str] = None,
+        raise_factor: float = 2.0,
+        slope: Optional[float] = None,
+        weight: float = 0.8,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        The function flags raises and drops in value courses, that exceed a certain threshold
+        within a certain timespan.
 
-@register(
-    mask=["field"],
-    demask=["field"],
-    squeeze=["field"],
-    multivariate=True,
-    handles_target=False,
-)
-def flagMVScores(
-    data: DictOfSeries,
-    field: Sequence[str],
-    flags: Flags,
-    trafo: Callable[[pd.Series], pd.Series] = lambda x: x,
-    alpha: float = 0.05,
-    n: int = 10,
-    func: Callable[[pd.Series], float] = np.sum,
-    iter_start: float = 0.5,
-    partition: Optional[Union[int, str]] = None,
-    partition_min: int = 11,
-    stray_range: Optional[str] = None,
-    drop_flagged: bool = False,  # TODO: still a case ?
-    thresh: float = 3.5,
-    min_periods: int = 1,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The algorithm implements a 3-step outlier detection procedure for simultaneously
-    flagging of higher dimensional data (dimensions > 3).
+        The parameter variety of the function is owed to the intriguing
+        case of values that "return" from outlierish or anomalous value levels and
+        thus exceed the threshold, while actually being usual values.
 
-    In references [1], the procedure is introduced and exemplified with an
-    application on hydrological data. See the notes section for an overview over the
-    algorithms basic steps.
+        NOTE: the dataset is NOT supposed to be harmonized to a time series with an
+        equidistant frequency grid.
 
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
 
-    field : list of str
-        List of fieldnames, corresponding to the variables that are to be included
-        into the flagging process.
+        thresh : float
+            The threshold for the total rise (thresh > 0), or total drop (thresh < 0),
+            that value courses must not exceed within a timespan of length `raise_window`.
 
-    flags : saqc.Flags
-        Container to store quality flags to data.
+        raise_window : str
+            An offset string, determining the timespan, the rise/drop thresholding refers
+            to. Window is inclusively defined.
 
-    trafo : callable, default lambda x:x
-        Transformation to be applied onto every column before scoring. Will likely
-        get deprecated soon. Its better to transform the data in a processing step,
-        preceeeding the call to ``flagMVScores``.
-
-    alpha : float, default 0.05
-        Level of significance by which it is tested, if an observations score might
-        be drawn from another distribution than the majority of the observation.
-
-    n : int, default 10
-        Number of neighbors included in the scoring process for every datapoint.
-
-    func : Callable[numpy.array, float], default np.sum
-        The function that maps the set of every points k-nearest neighbor distances
-        onto a certain scoring.
-
-    iter_start : float, default 0.5
-        Float in [0,1] that determines which percentage of data is considered
-        "normal". 0.5 results in the threshing algorithm to search only the upper 50
-        % of the scores for the cut off point. (See reference section for more
-        information)
-
-    partition : {None, str, int}, default None
-        Only effective when `threshing` = 'stray'. Determines the size of the data
-        partitions, the data is decomposed into. Each partition is checked seperately
-        for outliers. If a String is passed, it has to be an offset string and it
-        results in partitioning the data into parts of according temporal length. If
-        an integer is passed, the data is simply split up into continous chunks of
-        `freq` periods. if ``None`` is passed (default), all the data will be tested
-        in one run.
-
-    partition_min : int, default 11
-        Only effective when `threshing` = 'stray'. Minimum number of periods per
-        partition that have to be present for a valid outlier detection to be made in
-        this partition. (Only of effect, if `stray_partition` is an integer.)
-
-    partition_trafo : bool, default True
-        Whether or not to apply the passed transformation on every partition the
-        algorithm is applied on, separately.
-
-    stray_range : {None, str}, default None
-        If not None, it is tried to reduce the stray result onto single outlier
-        components of the input fields. An offset string, denoting the range of the
-        temporal surrounding to include into the MAD testing while trying to reduce
-        flags.
-
-    drop_flagged : bool, default False
-        Only effective when `range` is not ``None``. Whether or not to drop flagged
-        values other than the value under test from the temporal surrounding before
-        checking the value with MAD.
-
-    thresh : float, default 3.5
-        Only effective when `range` is not ``None``. The `critical` value,
-        controlling wheather the MAD score is considered referring to an outlier or
-        not. Higher values result in less rigid flagging. The default value is widely
-        considered apropriate in the literature.
-
-    min_periods : int, 1
-        Only effective when `range` is not ``None``. Minimum number of meassurements
-        necessarily present in a reduction interval for reduction actually to be
-        performed.
+        freq : str
+            An offset string, determining the frequency, the timeseries to-be-flagged is
+            supposed to be sampled at. The window is inclusively defined.
 
-    flag : float, default BAD
-        flag to set.
+        average_window : {None, str}, default None
+            See condition (2) of the description linked in the references. Window is
+            inclusively defined. The window defaults to 1.5 times the size of `raise_window`
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed, relatively to the flags input.
-
-    Notes
-    -----
-    The basic steps are:
-
-    1. transforming
-
-    The different data columns are transformed via timeseries transformations to
-    (a) make them comparable and
-    (b) make outliers more stand out.
-
-    This step is usually subject to a phase of research/try and error. See [1] for more
-    details.
-
-    Note, that the data transformation as an built-in step of the algorithm,
-    will likely get deprecated soon. Its better to transform the data in a processing
-    step, preceeding the multivariate flagging process. Also, by doing so, one gets
-    mutch more control and variety in the transformation applied, since the `trafo`
-    parameter only allows for application of the same transformation to all of the
-    variables involved.
-
-    2. scoring
-
-    Every observation gets assigned a score depending on its k nearest neighbors. See
-    the `scoring_method` parameter description for details on the different scoring
-    methods. Furthermore [1], [2] may give some insight in the pro and cons of the
-    different methods.
-
-    3. threshing
-
-    The gaps between the (greatest) scores are tested for beeing drawn from the same
-    distribution as the majority of the scores. If a gap is encountered, that,
-    with sufficient significance, can be said to not be drawn from the same
-    distribution as the one all the smaller gaps are drawn from, than the observation
-    belonging to this gap, and all the observations belonging to gaps larger then
-    this gap, get flagged outliers. See description of the `threshing` parameter for
-    more details. Although [2] gives a fully detailed overview over the `stray`
-    algorithm.
-    """
+        raise_factor : float, default 2
+            See second condition listed in the notes below.
 
-    fields = toSequence(field)
+        slope : {None, float}, default None
+            See third condition listed in the notes below.
 
-    fields_ = []
-    for f in fields:
-        field_ = str(uuid.uuid4())
-        data, flags = copyField(data, field=f, flags=flags, target=field_)
-        data, flags = transform(
-            data, field=field_, flags=flags, func=trafo, freq=partition
-        )
-        fields_.append(field_)
-
-    knn_field = str(uuid.uuid4())
-    data, flags = assignKNNScore(
-        data=data,
-        field=fields_,
-        flags=flags,
-        target=knn_field,
-        n=n,
-        func=func,
-        freq=partition,
-        method="ball_tree",
-        min_periods=partition_min,
-        **kwargs,
-    )
-    for field_ in fields_:
-        data, flags = dropField(data, field_, flags)
-
-    data, flags = flagByStray(
-        data=data,
-        field=knn_field,
-        flags=flags,
-        freq=partition,
-        min_periods=partition_min,
-        iter_start=iter_start,
-        alpha=alpha,
-        flag=flag,
-        **kwargs,
-    )
+        weight : float, default 0.8
+            See third condition listed in the notes below.
 
-    data, flags = _evalStrayLabels(
-        data=data,
-        field=knn_field,
-        target=fields,
-        flags=flags,
-        reduction_range=stray_range,
-        reduction_drop_flagged=drop_flagged,
-        reduction_thresh=thresh,
-        reduction_min_periods=min_periods,
-        flag=flag,
-        **kwargs,
-    )
-    data, flags = dropField(data, knn_field, flags)
+        flag : float, default BAD
+            flag to set.
 
-    return data, flags
+        Returns
+        -------
+        saqc.SaQC
 
+        Notes
+        -----
+        The value :math:`x_{k}` of a time series :math:`x` with associated
+        timestamps :math:`t_i`, is flagged a raise, if:
 
-@flagging()
-def flagRaise(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    thresh: float,
-    raise_window: str,
-    freq: str,
-    average_window: Optional[str] = None,
-    raise_factor: float = 2.0,
-    slope: Optional[float] = None,
-    weight: float = 0.8,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The function flags raises and drops in value courses, that exceed a certain threshold
-    within a certain timespan.
+        * There is any value :math:`x_{s}`, preceeding :math:`x_{k}` within `raise_window`
+          range, so that:
 
-    The parameter variety of the function is owned to the intriguing
-    case of values, that "return" from outlierish or anomalious value levels and
-    thus exceed the threshold, while actually being usual values.
+          * :math:`M = |x_k - x_s | >`  `thresh` :math:`> 0`
 
-    NOTE, the dataset is NOT supposed to be harmonized to a time series with an
-    equidistant frequency grid.
+        * The weighted average :math:`\\mu^{*}` of the values, preceding :math:`x_{k}`
+          within `average_window`
+          range indicates, that :math:`x_{k}` does not return from an "outlierish" value
+          course, meaning that:
 
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    thresh : float
-        The threshold, for the total rise (thresh > 0), or total drop (thresh < 0),
-        value courses must not exceed within a timespan of length `raise_window`.
-    raise_window : str
-        An offset string, determining the timespan, the rise/drop thresholding refers
-        to. Window is inclusively defined.
-    freq : str
-        An offset string, determining The frequency, the timeseries to-be-flagged is
-        supposed to be sampled at. The window is inclusively defined.
-    average_window : {None, str}, default None
-        See condition (2) of the description linked in the references. Window is
-        inclusively defined. The window defaults to 1.5 times the size of `raise_window`
-    raise_factor : float, default 2
-        See second condition listed in the notes below.
-    slope : {None, float}, default None
-        See third condition listed in the notes below.
-    weight : float, default 0.8
-        See third condition listed in the notes below.
-    flag : float, default BAD
-        flag to set.
+          * :math:`x_k > \\mu^* + ( M` / `raise_factor` :math:`)`
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed, relatively to the flags input.
+        * Additionally, if ``slope`` is not `None`, :math:`x_{k}` is checked for being
+          sufficiently divergent from its very predecessor :math:`x_{k-1}`, meaning that, it
+          is additionally checked if:
 
-    Notes
-    -----
-    The value :math:`x_{k}` of a time series :math:`x` with associated
-    timestamps :math:`t_i`, is flagged a raise, if:
+          * :math:`x_k - x_{k-1} >` `slope`
+          * :math:`t_k - t_{k-1} >` `weight` :math:`\\times` `freq`
 
-    * There is any value :math:`x_{s}`, preceeding :math:`x_{k}` within `raise_window`
-      range, so that:
+        """
 
-      * :math:`M = |x_k - x_s | >`  `thresh` :math:`> 0`
+        # prepare input args
+        dataseries = self._data[field].dropna()
+        raise_window_td = pd.Timedelta(raise_window)
+        freq_dt = pd.Timedelta(freq)
+        if slope is not None:
+            slope = np.abs(slope)
 
-    * The weighted average :math:`\\mu^{*}` of the values, preceding :math:`x_{k}`
-      within `average_window`
-      range indicates, that :math:`x_{k}` does not return from an "outlierish" value
-      course, meaning that:
+        if average_window is None:
+            average_window = 1.5 * raise_window_td
 
-      * :math:`x_k > \\mu^* + ( M` / `mean_raise_factor` :math:`)`
+        if thresh < 0:
+            dataseries *= -1
+            thresh *= -1
 
-    * Additionally, if ``min_slope`` is not `None`, :math:`x_{k}` is checked for being
-      sufficiently divergent from its very predecessor :math:`x_{k-1}`, meaning that, it
-      is additionally checked if:
+        def raise_check(x, thresh):
+            test_set = x[-1] - x[0:-1]
+            max_val = np.max(test_set)
+            if max_val >= thresh:
+                return max_val
+            else:
+                return np.nan
 
-      * :math:`x_k - x_{k-1} >` `min_slope`
-      * :math:`t_k - t_{k-1} >` `weight` :math:`\\times` `freq`
+        def custom_rolling_mean(x):
+            return np.sum(x[:-1])
 
-    """
+        # get invalid-raise/drop mask:
+        raise_series = dataseries.rolling(raise_window_td, min_periods=2, closed="both")
 
-    # prepare input args
-    dataseries = data[field].dropna()
-    raise_window = pd.Timedelta(raise_window)
-    freq = pd.Timedelta(freq)
-    if slope is not None:
-        slope = np.abs(slope)
-
-    if average_window is None:
-        average_window = 1.5 * pd.Timedelta(raise_window)
-
-    if thresh < 0:
-        dataseries *= -1
-        thresh *= -1
-
-    def raise_check(x, thresh):
-        test_set = x[-1] - x[0:-1]
-        max_val = np.max(test_set)
-        if max_val >= thresh:
-            return max_val
+        numba_boost = True
+        if numba_boost:
+            raise_check_boosted = numba.jit(raise_check, nopython=True)
+            raise_series = raise_series.apply(
+                raise_check_boosted, args=(thresh,), raw=True, engine="numba"
+            )
         else:
-            return np.nan
-
-    def custom_rolling_mean(x):
-        return np.sum(x[:-1])
-
-    # get invalid-raise/drop mask:
-    raise_series = dataseries.rolling(raise_window, min_periods=2, closed="both")
-
-    numba_boost = True
-    if numba_boost:
-        raise_check = numba.jit(raise_check, nopython=True)
-        raise_series = raise_series.apply(
-            raise_check, args=(thresh,), raw=True, engine="numba"
+            raise_series = raise_series.apply(raise_check, args=(thresh,), raw=True)
+
+        if raise_series.isna().all():
+            return self
+
+        # "unflag" values of insufficient deviation to their predecessors
+        if slope is not None:
+            w_mask = (
+                pd.Series(dataseries.index).diff().dt.total_seconds()
+                / freq_dt.total_seconds()
+            ) > weight
+            slope_mask = np.abs(dataseries.diff()) < slope
+            to_unflag = raise_series.notna() & w_mask.values & slope_mask
+            raise_series[to_unflag] = np.nan
+
+        # calculate and apply the weighted mean weights (pseudo-harmonization):
+        weights = (
+            pd.Series(dataseries.index).diff(periods=2).shift(-1).dt.total_seconds()
+            / freq_dt.total_seconds()
+            / 2
         )
-    else:
-        raise_series = raise_series.apply(raise_check, args=(thresh,), raw=True)
 
-    if raise_series.isna().all():
-        return data, flags
+        weights.iloc[0] = 0.5 + (
+            pd.Timestamp(dataseries.index[1]) - pd.Timestamp(dataseries.index[0])
+        ).total_seconds() / (freq_dt.total_seconds() * 2)
 
-    # "unflag" values of insufficient deviation to their predecessors
-    if slope is not None:
-        w_mask = (
-            pd.Series(dataseries.index).diff().dt.total_seconds() / freq.total_seconds()
-        ) > weight
-        slope_mask = np.abs(dataseries.diff()) < slope
-        to_unflag = raise_series.notna() & w_mask.values & slope_mask
-        raise_series[to_unflag] = np.nan
-
-    # calculate and apply the weighted mean weights (pseudo-harmonization):
-    weights = (
-        pd.Series(dataseries.index).diff(periods=2).shift(-1).dt.total_seconds()
-        / freq.total_seconds()
-        / 2
-    )
-
-    weights.iloc[0] = 0.5 + (
-        dataseries.index[1] - dataseries.index[0]
-    ).total_seconds() / (freq.total_seconds() * 2)
+        weights.iloc[-1] = 0.5 + (
+            pd.Timestamp(dataseries.index[-1]) - pd.Timestamp(dataseries.index[-2])
+        ).total_seconds() / (freq_dt.total_seconds() * 2)
 
-    weights.iloc[-1] = 0.5 + (
-        dataseries.index[-1] - dataseries.index[-2]
-    ).total_seconds() / (freq.total_seconds() * 2)
+        weights[weights > 1.5] = 1.5
+        weights.index = dataseries.index
+        weighted_data = dataseries.mul(weights)
 
-    weights[weights > 1.5] = 1.5
-    weights.index = dataseries.index
-    weighted_data = dataseries.mul(weights)
-
-    # rolling weighted mean calculation
-    weighted_rolling_mean = weighted_data.rolling(
-        average_window, min_periods=2, closed="both"
-    )
-    weights_rolling_sum = weights.rolling(average_window, min_periods=2, closed="both")
-    if numba_boost:
-        custom_rolling_mean = numba.jit(custom_rolling_mean, nopython=True)
-        weighted_rolling_mean = weighted_rolling_mean.apply(
-            custom_rolling_mean, raw=True, engine="numba"
+        # rolling weighted mean calculation
+        weighted_rolling_mean = weighted_data.rolling(
+            average_window, min_periods=2, closed="both"
         )
-        weights_rolling_sum = weights_rolling_sum.apply(
-            custom_rolling_mean, raw=True, engine="numba"
+        weights_rolling_sum = weights.rolling(
+            average_window, min_periods=2, closed="both"
         )
-    else:
-        weighted_rolling_mean = weighted_rolling_mean.apply(
-            custom_rolling_mean, raw=True
-        )
-        weights_rolling_sum = weights_rolling_sum.apply(
-            custom_rolling_mean, raw=True, engine="numba"
-        )
-
-    weighted_rolling_mean = weighted_rolling_mean / weights_rolling_sum
-    # check means against critical raise value:
-    to_flag = dataseries >= weighted_rolling_mean + (raise_series / raise_factor)
-    to_flag &= raise_series.notna()
-    flags[to_flag[to_flag].index, field] = flag
-
-    return data, flags
-
-
-@flagging()
-def flagMAD(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: str,
-    z: float = 3.5,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The function represents an implementation of the modyfied Z-score outlier detection method.
-
-    See references [1] for more details on the algorithm.
-
-    Note, that the test needs the input data to be sampled regularly (fixed sampling rate).
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged. (Here a dummy, for structural reasons)
-    flags : saqc.Flags
-        Container to store flags of the data.
-    window : str
-       Offset string. Denoting the windows size that the "Z-scored" values have to lie in.
-    z: float, default 3.5
-        The value the Z-score is tested against. Defaulting to 3.5 (Recommendation of [1])
-    flag : float, default BAD
-        flag to set.
+        if numba_boost:
+            custom_rolling_mean_boosted = numba.jit(custom_rolling_mean, nopython=True)
+            weighted_rolling_mean = weighted_rolling_mean.apply(
+                custom_rolling_mean_boosted, raw=True, engine="numba"
+            )
+            weights_rolling_sum = weights_rolling_sum.apply(
+                custom_rolling_mean_boosted, raw=True, engine="numba"
+            )
+        else:
+            weighted_rolling_mean = weighted_rolling_mean.apply(
+                custom_rolling_mean, raw=True
+            )
+            weights_rolling_sum = weights_rolling_sum.apply(
+                custom_rolling_mean, raw=True
+            )
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed, relatively to the flags input.
+        weighted_rolling_mean = weighted_rolling_mean / weights_rolling_sum
+        # check means against critical raise value:
+        to_flag = dataseries >= weighted_rolling_mean + (raise_series / raise_factor)
+        to_flag &= raise_series.notna()
+        self._flags[to_flag[to_flag].index, field] = flag
+
+        return self
+
+    @flagging()
+    def flagMAD(
+        self: "SaQC",
+        field: str,
+        window: Optional[str, int] = None,
+        z: float = 3.5,
+        min_residuals: Optional[int] = None,
+        min_periods: Optional[int] = None,
+        center: bool = False,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        The function implements the modified Z-score outlier detection method.
+
+        See reference [1] for more details on the algorithm.
+
+        Note that the test expects the input data to be sampled regularly (fixed sampling rate).
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+        window : {str, int}, default None
+            Size of the window. Either determined via an offset string, denoting the window's temporal extension, or
+            by an integer, denoting the window's number of periods.
+            `NaN` measurements also count as periods.
+            If `None` is passed, all data points share the same scoring window, which then equals the whole
+            data.
+        z : float, default 3.5
+            The value the Z-score is tested against. Defaults to 3.5 (recommendation of [1]).
+        min_residuals : int, default None
+            Minimum residual level points must have to be considered outliers.
+        min_periods
+            Minimum number of valid measurements in a scoring window, to consider the resulting score valid.
+        center
+            Whether or not to center the target value in the scoring window. If `False`, the
+            target value is the last value in the window.
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        References
+        ----------
+        [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
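+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only; the column name and parameter values
+        are assumptions, not recommendations):
+
+        .. code-block:: python
+
+           import numpy as np
+           import pandas as pd
+           import saqc
+
+           data = pd.DataFrame(
+               {"data": np.random.normal(size=200)},
+               index=pd.date_range("2000", freq="10min", periods=200),
+           )
+           qc = saqc.SaQC(data)
+           # score each value against the median/MAD of a rolling two-hour window
+           qc = qc.flagMAD("data", window="2H", z=3.5)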
+        """
+
+        self = self.flagZScore(
+            field,
+            window=window,
+            thresh=z,
+            min_residuals=min_residuals,
+            model_func=np.median,
+            norm_func=lambda x: median_abs_deviation(
+                x, scale="normal", nan_policy="omit"
+            ),
+            center=center,
+            min_periods=min_periods,
+            flag=flag,
+        )
 
-    References
-    ----------
-    [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
-    """
-    d = data[field]
-    if d.empty:
-        return data, flags
+        return self
+
+    @flagging()
+    def flagOffset(
+        self: "SaQC",
+        field: str,
+        tolerance: float,
+        window: Union[int, str],
+        thresh: Optional[float] = None,
+        thresh_relative: Optional[float] = None,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        A basic outlier test that works on regularly and irregularly sampled data.
+
+        The test classifies values/value courses as outliers by detecting not only a rise
+        in value, but also, by checking for a return to the initial value level.
+
+        Values :math:`x_n, x_{n+1}, .... , x_{n+k}` of a timeseries :math:`x` with
+        associated timestamps :math:`t_n, t_{n+1}, .... , t_{n+k}` are considered spikes, if
+
+        1. :math:`|x_{n-1} - x_{n + s}| >` `thresh`, for all :math:`s \\in [0,1,2,...,k]`
+
+        2. if `thresh_relative` > 0, :math:`x_{n + s} > x_{n - 1}*(1+` `thresh_relative` :math:`)`
+
+        3. if `thresh_relative` < 0, :math:`x_{n + s} < x_{n - 1}*(1+` `thresh_relative` :math:`)`
+
+        4. :math:`|x_{n-1} - x_{n+k+1}| <` `tolerance`
+
+        5. :math:`|t_{n-1} - t_{n+k+1}| <` `window`
+
+        Note, that this definition of a "spike" not only includes one-value outliers, but
+        also plateau-ish value courses.
+
+        Parameters
+        ----------
+        field : str
+            The field in data.
+        tolerance : float
+            Maximum difference allowed between the value directly preceding and the value directly succeeding an offset,
+            to trigger flagging of the values forming the offset.
+            See condition (4).
+        window : {str, int}
+            Maximum length allowed for offset value courses, to trigger flagging of the values forming the offset.
+            See condition (5). Integer defined window lengths are only allowed for regularly sampled timeseries.
+        thresh : {float, None}, default None
+            Minimum difference between a value and its successors, to consider the successors an anomalous offset group.
+            See condition (1). If None is passed, condition (1) is not tested.
+        thresh_relative : {float, None}, default None
+            Minimum relative change between a value and its successors, to consider the successors an anomalous offset group.
+            See condition (2). If None is passed, condition (2) is not tested.
+        flag : float, default BAD
+            flag to set.
 
-    median = d.rolling(window=window, closed="both").median()
-    diff = (d - median).abs()
-    mad = diff.rolling(window=window, closed="both").median()
-    mask = (mad > 0) & (0.6745 * diff > z * mad)
-    # NOTE:
-    # In pandas <= 0.25.3, the window size is not fixed if the
-    # window-argument to rolling is a frequency. That implies,
-    # that during the first iterations the window has a size of
-    # 1, 2, 3, ... until it eventually covers the desired time
-    # span. For stuff like the calculation of median, that is rather
-    # unfortunate, as the size of the calculation base might differ
-    # heavily. So don't flag something until, the window reaches
-    # its target size
-    if not isinstance(window, int):
-        index = mask.index
-        mask.loc[index < index[0] + pd.to_timedelta(window)] = False
-
-    flags[mask, field] = flag
-    return data, flags
+        Returns
+        -------
+        saqc.SaQC
 
+        Examples
+        --------
+        The picture below gives an abstract interpretation of the parameter interplay in case of a positive value jump,
+        initialising an offset course.
 
-@flagging()
-def flagOffset(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    tolerance: float,
-    window: Union[int, str],
-    thresh: Optional[float] = None,
-    thresh_relative: Optional[float] = None,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    A basic outlier test that works on regularly and irregularly sampled data.
+        .. figure:: /resources/images/flagOffsetPic.png
 
-    The test classifies values/value courses as outliers by detecting not only a rise
-    in value, but also, by checking for a return to the initial value level.
+           The four values marked red are flagged, because (1) the initial value jump *exceeds* the value given by `thresh`,
+           (2) the temporal extension of the group does *not exceed* the range given by `window` and (3) the returning
+           value after the group lies *within* the value range determined by `tolerance`.
 
-    Values :math:`x_n, x_{n+1}, .... , x_{n+k}` of a timeseries :math:`x` with
-    associated timestamps :math:`t_n, t_{n+1}, .... , t_{n+k}` are considered spikes, if
 
-    1. :math:`|x_{n-1} - x_{n + s}| >` `thresh`, for all :math:`s \\in [0,1,2,...,k]`
+        .. plot::
+           :context:
+           :include-source: False
 
-    2. :math:`(x_{n + s} - x_{n - 1}) / x_{n - 1} >` `thresh_relative`
+           import matplotlib
+           import numpy as np
+           import pandas as pd
+           import saqc
+           data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1H', periods=12))
 
-    3. :math:`|x_{n-1} - x_{n+k+1}| <` `tolerance`
 
-    4. :math:`|t_{n-1} - t_{n+k+1}| <` `window`
+        Let's generate a simple, regularly sampled timeseries with an hourly sampling rate and create a
+        :py:class:`saqc.SaQC` instance from it.
 
-    Note, that this definition of a "spike" not only includes one-value outliers, but
-    also plateau-ish value courses.
+        .. doctest:: flagOffsetExample
 
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The field in data.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    tolerance : float
-        Maximum difference allowed, between the value, directly preceding and the value, directly succeeding an offset,
-        to trigger flagging of the values forming the offset.
-        See condition (3).
-    window : {str, int}, default '15min'
-        Maximum length allowed for offset value courses, to trigger flagging of the values forming the offset.
-        See condition (4). Integer defined window length are only allowed for regularly sampled timeseries.
-    thresh : float: {float, None}, default None
-        Minimum difference between a value and its successors, to consider the successors an anomalous offset group.
-        See condition (1). If None is passed, condition (1) is not tested.
-    thresh_relative : {float, None}, default None
-        Minimum relative change between and its successors, to consider the successors an anomalous offset group.
-        See condition (2). If None is passed, condition (2) is not tested.
-    flag : float, default BAD
-        flag to set.
+           >>> data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1H', periods=12))
+           >>> data
+                                data
+           2000-01-01 00:00:00     5
+           2000-01-01 01:00:00     5
+           2000-01-01 02:00:00     8
+           2000-01-01 03:00:00    16
+           2000-01-01 04:00:00    17
+           2000-01-01 05:00:00     7
+           2000-01-01 06:00:00     4
+           2000-01-01 07:00:00     4
+           2000-01-01 08:00:00     4
+           2000-01-01 09:00:00     1
+           2000-01-01 10:00:00     1
+           2000-01-01 11:00:00     4
+           >>> qc = saqc.SaQC(data)
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed, relatively to the flags input.
-
-    Examples
-    --------
+        Now we apply :py:meth:`~saqc.SaQC.flagOffset` and try to flag offset courses that don't extend longer than
+        *6 hours* in time (``window``), that have an initial value jump higher than *2* (``thresh``), and that return
+        to the initial value level within a tolerance of *1.5* (``tolerance``).
 
-    .. plot::
-       :context:
-       :include-source: False
+        .. doctest:: flagOffsetExample
 
-       import matplotlib
-       import saqc
-       import pandas as pd
-       data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1H', periods=12))
+           >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6H')
+           >>> qc.plot('data') # doctest:+SKIP
 
+        .. plot::
+           :context: close-figs
+           :include-source: False
 
+           >>> qc = saqc.SaQC(data)
+           >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6H')
+           >>> qc.plot('data')
 
-    Lets generate a simple, regularly sampled timeseries with an hourly sampling rate and generate an
-    :py:class:`saqc.SaQC` instance from it.
+        Note that both negative and positive jumps are considered starting points of negative or positive offsets.
+        If you want to impose the additional condition that the initial value jump must exceed *+90%* of the value level,
+        you can additionally set the ``thresh_relative`` parameter:
+
+        .. doctest:: flagOffsetExample
+
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6H')
+           >>> qc.plot('data') # doctest:+SKIP
+
+        .. plot::
+           :context: close-figs
+           :include-source: False
+
+           >>> qc = saqc.SaQC(data)
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6H')
+           >>> qc.plot('data')
+
+        Now, only positive jumps that exceed a value gain of *+90%* are considered starting points of offsets.
+
+        In the same way, you can aim for only negative offsets by setting a negative relative threshold. The below
+        example only flags offsets that fall off by at least *50%* in value, with an absolute value drop of at least *2*.
 
-    .. doctest:: flagOffsetExample
+        .. doctest:: flagOffsetExample
 
-       >>> data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1H', periods=12))
-       >>> data
-                            data
-       2000-01-01 00:00:00     5
-       2000-01-01 01:00:00     5
-       2000-01-01 02:00:00     8
-       2000-01-01 03:00:00    16
-       2000-01-01 04:00:00    17
-       2000-01-01 05:00:00     7
-       2000-01-01 06:00:00     4
-       2000-01-01 07:00:00     4
-       2000-01-01 08:00:00     4
-       2000-01-01 09:00:00     1
-       2000-01-01 10:00:00     1
-       2000-01-01 11:00:00     4
-       >>> qc = saqc.SaQC(data)
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6H')
+           >>> qc.plot('data') # doctest:+SKIP
 
-    Now we are applying :py:meth:`~saqc.SaQC.flagOffset` and try to flag offset courses, that dont extend longer than
-    *6 hours* in time (``window``) and that have an initial value jump higher than *2* (``thresh``), and that do return
-    to the initial value level within a tolerance of *1.5* (``tolerance``).
+        .. plot::
+           :context: close-figs
+           :include-source: False
 
-    .. doctest:: flagOffsetExample
+           >>> qc = saqc.SaQC(data)
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6H')
+           >>> qc.plot('data')
 
-       >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6H')
-       >>> qc.plot('data') # doctest:+SKIP
 
-    .. plot::
-       :context: close-figs
-       :include-source: False
+        References
+        ----------
+        The implementation is a time-window based version of an outlier test from the UFZ Python library,
+        which can be found here:
 
-       >>> qc = saqc.SaQC(data)
-       >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6H')
-       >>> qc.plot('data')
+        https://git.ufz.de/chs/python/blob/master/ufz/level1/spike.py
 
-    Note, that both, negative and positive jumps are considered starting points of negative or positive offsets.
-    If you want to impose the additional condition, that the initial value jump must exceed *+90%* of the value level,
-    you can additionally set the ``thresh_relative`` parameter:
+        """
+        if (thresh is None) and (thresh_relative is None):
+            raise ValueError(
+                "At least one of parameters 'thresh' and 'thresh_relative' has to be given. Got 'thresh'=None, "
+                "'thresh_relative'=None instead."
+            )
+        if thresh is None:
+            thresh = 0
+
+        dat = self._data[field].dropna()
+        if thresh_relative is not None:
+            rel_jumps = np.sign(thresh_relative) * dat > np.sign(
+                thresh_relative
+            ) * dat.shift(+1) * (1 + thresh_relative)
+
+        data_diff = dat.diff()
+        initial_jumps = data_diff.abs() > thresh
+        if thresh_relative:
+            initial_jumps &= rel_jumps
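+        # for every value, check whether any successor within `window` returns to
+        # within `tolerance` of it (candidates for conditions (4) and (5))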
+        return_in_time = (
+            dat[::-1]
+            .rolling(window, min_periods=2)
+            .apply(lambda x: np.abs(x[-1] - x[:-1]).min() < tolerance, raw=True)[::-1]
+            .astype(bool)
+        )
+        return_in_time = return_in_time & initial_jumps.reindex(
+            dat.index, fill_value=False
+        ).shift(-1, fill_value=False)
+        offset_start_candidates = dat[return_in_time]
+        win_delta = pd.Timedelta(window)
+        corners = pd.Series(False, index=dat.index)
+        to_flag = pd.Series(False, index=dat.index)
+        ns = pd.Timedelta("1ns")
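+        # walk through the candidate anchor points: for each one, find the first
+        # return to its value level and check conditions (1)-(3) on the enclosed chunk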
+        for c in zip(offset_start_candidates.index, offset_start_candidates.values):
+            ret = (dat[c[0]] - dat[c[0] + ns : c[0] + win_delta]).abs()[1:] < tolerance
+            if not ret.empty:
+                r = ret.idxmax()
+                chunk = dat[c[0] : r]
+                sgn = np.sign(chunk[1] - c[1])
+                t_val = ((chunk[1:-1] - c[1]) * sgn > thresh).all()
+                r_val = True
+                if thresh_relative:
+                    r_val = (
+                        np.sign(thresh_relative) * chunk[1:-1]
+                        > np.sign(thresh_relative) * c[1] * (1 + thresh_relative)
+                    ).all()
+                if t_val and r_val and (not corners[c[0]]):
+                    flag_i = dat[c[0] + ns : chunk.index[-1] - ns].index
+                    to_flag[flag_i] = True
+                    corners.loc[flag_i[-1]] = True
+        to_flag = to_flag.reindex(self._data[field].index, fill_value=False)
+
+        self._flags[to_flag, field] = flag
+        return self
+
+    @flagging()
+    def flagByGrubbs(
+        self: "SaQC",
+        field: str,
+        window: Union[str, int],
+        alpha: float = 0.05,
+        min_periods: int = 8,
+        pedantic: bool = False,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        The function flags values that are regarded as outliers according to the Grubbs test.
+
+        See reference [1] for more information on the Grubbs test's definition.
+
+        The (two-sided) test gets applied onto data chunks of size "window". The test's
+        application is iterated on each data chunk under test, until no more
+        outliers are detected in that chunk.
+
+        Note that the test performs poorly for small data chunks (resulting in heavy
+        overflagging). Therefore you should select "window" so that every window contains
+        at least 8 values and also adjust the min_periods value accordingly.
+
+        Note that the data to be tested by the Grubbs test are expected to be approximately
+        normally distributed.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+
+        window : {int, str}
+            The size of the window you want to use for outlier testing. If an integer is
+            passed, the size refers to the number of periods of every testing window. If a
+            string is passed, it has to be an offset string, and will denote the total
+            temporal extension of every window.
+
+        alpha : float, default 0.05
+            The level of significance, the grubbs test is to be performed at. (between 0 and 1)
+
+        min_periods : int, default 8
+            The minimum number of values that have to be present in an interval under test,
+            for a Grubbs test result to be accepted. Only makes sense in case `window` is
+            an offset string.
+
+        pedantic : bool, default False
+            If True, every value gets checked twice for being an outlier: once in the
+            initial rolling window and once more in a rolling window that is lagged
+            by half the window size (window/2). Recommended for avoiding false
+            positives at the window edges. Only available when rolling with an integer
+            defined window size.
+
+        flag : float, default BAD
+            flag to set.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        References
+        ----------
+        Introduction to the Grubbs test:
+
+        [1] https://en.wikipedia.org/wiki/Grubbs%27s_test_for_outliers
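+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only; the column name, the daily chunk
+        size and the noise data are assumptions):
+
+        .. code-block:: python
+
+           import numpy as np
+           import pandas as pd
+           import saqc
+
+           data = pd.DataFrame(
+               {"data": np.random.normal(size=240)},
+               index=pd.date_range("2000", freq="1H", periods=240),
+           )
+           qc = saqc.SaQC(data)
+           # test day-sized chunks; every day holds 24 values, well above `min_periods`
+           qc = qc.flagByGrubbs("data", window="1D", alpha=0.05, min_periods=8)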
+        """
+        datcol = self._data[field].copy()
+        rate = getFreqDelta(datcol.index)
+
+        # if timeseries that is analyzed, is regular,
+        # window size can be transformed to a number of periods:
+        if rate and isinstance(window, str):
+            window = pd.Timedelta(window) // rate
+
+        to_group = pd.DataFrame(data={"ts": datcol.index, "data": datcol})
+        to_flag = pd.Series(False, index=datcol.index)
+
+        # period number defined test intervals
+        if isinstance(window, int):
+            grouper_series = pd.Series(
+                data=np.arange(0, datcol.shape[0]), index=datcol.index
+            )
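+            # the lagged grouper shifts the chunk boundaries by half a window;
+            # it is only used for the second pass when `pedantic=True`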
+            grouper_series_lagged = grouper_series + (window / 2)
+            grouper_series = grouper_series.transform(lambda x: x // window)
+            grouper_series_lagged = grouper_series_lagged.transform(
+                lambda x: x // window
+            )
+            partitions = to_group.groupby(grouper_series)
+            partitions_lagged = to_group.groupby(grouper_series_lagged)
 
-    .. doctest:: flagOffsetExample
+        # offset defined test intervals:
+        else:
+            partitions = to_group.groupby(pd.Grouper(freq=window))
+            partitions_lagged = []
 
-       >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6H')
-       >>> qc.plot('data') # doctest:+SKIP
+        for _, partition in partitions:
+            if partition.shape[0] > min_periods:
+                detected = smirnov_grubbs.two_sided_test_indices(
+                    partition["data"].values, alpha=alpha
+                )
+                detected = partition["ts"].iloc[detected]
+                to_flag[detected.index] = True
+
+        if isinstance(window, int) and pedantic:
+            to_flag_lagged = pd.Series(False, index=datcol.index)
+
+            for _, partition in partitions_lagged:
+                if partition.shape[0] > min_periods:
+                    detected = smirnov_grubbs.two_sided_test_indices(
+                        partition["data"].values, alpha=alpha
+                    )
+                    detected = partition["ts"].iloc[detected]
+                    to_flag_lagged[detected.index] = True
+
+            to_flag &= to_flag_lagged
+
+        self._flags[to_flag, field] = flag
+        return self
+
+    @register(
+        mask=["field"],
+        demask=["field"],
+        squeeze=["field"],
+        multivariate=True,
+        handles_target=False,
+    )
+    def flagCrossStatistics(
+        self: "SaQC",
+        field: Sequence[str],
+        thresh: float,
+        method: Literal["modZscore", "Zscore"] = "modZscore",
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function checks for outliers relative to the "horizontal" input data axis.
 
-    .. plot::
-       :context: close-figs
-       :include-source: False
+        For `fields` :math:`=[f_1,f_2,...,f_N]` and timestamps :math:`[t_1,t_2,...,t_K]`, the following steps are taken
+        for outlier detection:
 
-       >>> qc = saqc.SaQC(data)
-       >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6H')
-       >>> qc.plot('data')
+        1. All timestamps :math:`t_i`, where there is one :math:`f_k`, with :math:`data[f_k]` having no entry at
+           :math:`t_i`, are excluded from the following process (inner join of the :math:`f_i` fields).
+        2. For every :math:`0 <= i <= K`, the value
+           :math:`m_i = median(\\{data[f_1][t_i], data[f_2][t_i], ..., data[f_N][t_i]\\})` is calculated.
+        3. For every :math:`0 <= i <= K`, the set
+           :math:`\\{data[f_1][t_i] - m_i, data[f_2][t_i] - m_i, ..., data[f_N][t_i] - m_i\\}` is tested for outliers with the
+           specified method (`method` parameter).
 
-    Now, only positive jumps, that exceed a value gain of *+90%* are considered starting points of offsets.
+        Parameters
+        ----------
+        field : list of str
+            List of fieldnames in data, determining which variables are to be included into the flagging process.
 
-    In the same way, you can aim for only negative offsets, by setting a negative relative threshold. The below
-    example only flags offsets, that fall off by at least *50 %* in value, with an absolute value drop of at least *2*.
+        thresh : float
+            Threshold which the outlier score of a value must exceed, for being flagged an outlier.
 
-    .. doctest:: flagOffsetExample
+        method : {'modZscore', 'Zscore'}, default 'modZscore'
+            Method used for calculating the outlier scores.
 
-       >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6H')
-       >>> qc.plot('data') # doctest:+SKIP
+            * ``'modZscore'``: Median based "sigma"-ish approach. See References [1].
+            * ``'Zscore'``: Scores values by the number of standard deviations they differ from the median.
+              See References [1].
 
-    .. plot::
-       :context: close-figs
-       :include-source: False
+        flag : float, default BAD
+            flag to set.
 
-       >>> qc = saqc.SaQC(data)
-       >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6H')
-       >>> qc.plot('data')
+        Returns
+        -------
+        saqc.SaQC
 
 
+        Notes
+        -----
 
+        The input variables don't necessarily have to be aligned. If the variables are unaligned, scoring
+        and flagging will only be performed on the subset of indices shared among all input variables.
 
 
+        References
+        ----------
+        [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
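+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only; the variable names, the common noise
+        data and the threshold are assumptions):
+
+        .. code-block:: python
+
+           import numpy as np
+           import pandas as pd
+           import saqc
+
+           idx = pd.date_range("2000", freq="1H", periods=100)
+           data = pd.DataFrame(
+               {
+                   "temp_1": np.random.normal(20, 1, size=100),
+                   "temp_2": np.random.normal(20, 1, size=100),
+                   "temp_3": np.random.normal(20, 1, size=100),
+               },
+               index=idx,
+           )
+           qc = saqc.SaQC(data)
+           # flag values whose modified Z-score across the three variables exceeds 3.5
+           qc = qc.flagCrossStatistics(
+               ["temp_1", "temp_2", "temp_3"], thresh=3.5, method="modZscore"
+           )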
+        """
 
-    References
-    ----------
-    The implementation is a time-window based version of an outlier test from the UFZ Python library,
-    that can be found here:
+        fields = toSequence(field)
 
-    https://git.ufz.de/chs/python/blob/master/ufz/level1/spike.py
+        df = self._data[fields].loc[self._data[fields].index_of("shared")].to_df()
 
-    """
-    if (thresh is None) and (thresh_relative is None):
-        raise ValueError(
-            "At least one of parameters 'thresh' and 'thresh_relative' has to be given. Got 'thresh'=None, "
-            "'thresh_relative'=None instead."
-        )
+        if isinstance(method, str):
 
-    dataseries = data[field].dropna()
-    if dataseries.empty:
-        return data, flags
+            if method == "modZscore":
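+                # modified Z-score per timestamp across variables:
+                # 0.6745 * |x - median| / MAD (see reference [1])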
+                MAD_series = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
+                diff_scores = (
+                    (0.6745 * (df.subtract(df.median(axis=1), axis=0)))
+                    .divide(MAD_series, axis=0)
+                    .abs()
+                )
 
-    # using reverted series - because ... long story.
-    ind = dataseries.index
-    rev_ind = ind[0] + ((ind[-1] - ind)[::-1])
-    map_i = pd.Series(ind, index=rev_ind)
-    dataseries = pd.Series(dataseries.values, index=rev_ind)
-
-    if isinstance(window, int):
-        delta = getFreqDelta(dataseries.index)
-        window = delta * window
-        if not delta:
-            raise TypeError(
-                "Only offset string defined window sizes allowed for timeseries not sampled regularly."
-            )
+            elif method == "Zscore":
+                diff_scores = (
+                    df.subtract(df.mean(axis=1), axis=0)
+                    .divide(df.std(axis=1), axis=0)
+                    .abs()
+                )
 
-    # get all the entries preceding a significant jump
-    if thresh is not None:
-        post_jumps = dataseries.diff().abs() > thresh
+            else:
+                raise ValueError(method)
 
-    if thresh_relative is not None:
-        s = np.sign(thresh_relative)
-        rel_jumps = s * (dataseries.shift(1) - dataseries).div(dataseries.abs()) > abs(
-            thresh_relative
-        )
-        if thresh is not None:
-            post_jumps = rel_jumps & post_jumps
         else:
-            post_jumps = rel_jumps
-
-    post_jumps = post_jumps[post_jumps]
-    if post_jumps.empty:
-        return data, flags
-
-    # get all the entries preceding a significant jump
-    # and its successors within "length" range
-    to_roll = post_jumps.reindex(
-        dataseries.index, method="bfill", tolerance=window, fill_value=False
-    ).dropna()
-    to_roll = dataseries[to_roll]
-
-    if thresh_relative is not None:
-
-        def spikeTester(
-            chunk, thresh_r=abs(thresh_relative), thresh_a=thresh or 0, tol=tolerance
-        ):
-            jump = chunk[-2] - chunk[-1]
-            thresh = max(thresh_r * abs(chunk[-1]), thresh_a)
-            chunk_stair = (np.sign(jump) * (chunk - chunk[-1]) < thresh)[::-1].cumsum()
-            initial = np.searchsorted(chunk_stair, 2)
-            if initial == len(chunk):
-                return 0
-            if np.abs(chunk[-initial - 1] - chunk[-1]) < tol:
-                return initial - 1
-            return 0
-
-    else:
-
-        # define spike testing function to roll with (no  rel_check):
-        def spikeTester(chunk, thresh=thresh, tol=tolerance):
-            # signum change!!!
-            chunk_stair = (
-                np.sign(chunk[-2] - chunk[-1]) * (chunk - chunk[-1]) < thresh
-            )[::-1].cumsum()
-            initial = np.searchsorted(chunk_stair, 2)
-            if initial == len(chunk):
-                return 0
-            if np.abs(chunk[-initial - 1] - chunk[-1]) < tol:
-                return initial - 1
-            return 0
-
-    roller = customRoller(to_roll, window=window, min_periods=2, closed="both")
-    engine = None if len(to_roll) < 200000 else "numba"
-    result = roller.apply(spikeTester, raw=True, engine=engine)
-
-    ignore = pd.Series(True, index=to_roll.index)
-    ignore[post_jumps.index] = False
-    result[ignore] = np.nan
-
-    result.index = map_i[result.index]
-
-    # correct the result: only those values define plateaus, that do not have
-    # values at their left starting point, that belong to other plateaus themself:
-    def calcResult(result):
-        var_num = result.shape[0]
-        flag_scopes = np.zeros(var_num, dtype=bool)
-        for k in range(var_num):
-            if result[k] > 0:
-                k_r = int(result[k])
-                # validity check: plateuas start isnt another plateaus end:
-                if not flag_scopes[k - k_r - 1]:
-                    flag_scopes[(k - k_r) : k] = True
-        return pd.Series(flag_scopes, index=result.index)
-
-    cresult = calcResult(result)
-    cresult = cresult[cresult].index
-    flags[cresult, field] = flag
-    return data, flags
-
-
-@flagging()
-def flagByGrubbs(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Union[str, int],
-    alpha: float = 0.05,
-    min_periods: int = 8,
-    pedantic: bool = False,
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The function flags values that are regarded outliers due to the grubbs test.
-
-    See reference [1] for more information on the grubbs tests definition.
-
-    The (two-sided) test gets applied onto data chunks of size "window". The tests
-    application  will be iterated on each data-chunk under test, till no more
-    outliers are detected in that chunk.
 
-    Note, that the test performs poorely for small data chunks (resulting in heavy
-    overflagging). Therefor you should select "window" so that every window contains
-    at least > 8 values and also adjust the min_periods values accordingly.
-
-    Note, that the data to be tested by the grubbs test are expected to be distributed
-    "normalish".
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    window : {int, str}
-        The size of the window you want to use for outlier testing. If an integer is
-        passed, the size refers to the number of periods of every testing window. If a
-        string is passed, it has to be an offset string, and will denote the total
-        temporal extension of every window.
-    alpha : float, default 0.05
-        The level of significance, the grubbs test is to be performed at. (between 0 and 1)
-    min_periods : int, default 8
-        The minimum number of values that have to be present in an interval under test,
-        for a grubbs test result to be accepted. Only makes sence in case `window` is
-        an offset string.
-    pedantic: boolean, default False
-        If True, every value gets checked twice for being an outlier. Ones in the
-        initial rolling window and one more time in a rolling window that is lagged
-        by half the windows delimeter (window/2). Recommended for avoiding false
-        positives at the window edges. Only available when rolling with integer
-        defined window size.
-    flag : float, default BAD
-        flag to set.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed relatively to the flags input.
-
-    References
-    ----------
-    introduction to the grubbs test:
-
-    [1] https://en.wikipedia.org/wiki/Grubbs%27s_test_for_outliers
-    """
-    datcol = data[field].copy()
-    rate = getFreqDelta(datcol.index)
-
-    # if timeseries that is analyzed, is regular,
-    # window size can be transformed to a number of periods:
-    if rate and isinstance(window, str):
-        window = pd.Timedelta(window) // rate
-
-    to_group = pd.DataFrame(data={"ts": datcol.index, "data": datcol})
-    to_flag = pd.Series(False, index=datcol.index)
-
-    # period number defined test intervals
-    if isinstance(window, int):
-        grouper_series = pd.Series(
-            data=np.arange(0, datcol.shape[0]), index=datcol.index
+            try:
+                stat = getattr(df, method.__name__)(axis=1)
+            except AttributeError:
+                stat = df.aggregate(method, axis=1)
+
+            diff_scores = df.subtract(stat, axis=0).abs()
+
+        mask = diff_scores > thresh
+        if mask.empty:
+            return self
+
+        for f in fields:
+            m = mask[f].reindex(index=self._flags[f].index, fill_value=False)
+            self._flags[m, f] = flag
+
+        return self
+
+    @flagging()
+    def flagZScore(
+        self: "SaQC",
+        field: str,
+        window: str | int | None = None,
+        thresh: float = 3,
+        min_residuals: int | None = None,
+        min_periods: int | None = None,
+        model_func: Callable = np.nanmean,
+        norm_func: Callable = np.nanstd,
+        center: bool = True,
+        flag: float = BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Flag data where its (rolling) Zscore exceeds a threshold.
+
+        The function implements flagging derived from a basic Zscore calculation.
+        To handle non-stationary data, the Zscoring can be applied with a rolling window.
+        Therefore, the function allows a minimum residual to be specified in order to mitigate overflagging in
+        local regimes of low variance.
+
+        See the Notes section for a detailed overview of the calculation.
+
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-flagged.
+        window : {str, int}, default None
+            Size of the window. Either determined via an offset string, denoting the window's temporal extension, or
+            by an integer, denoting the window's number of periods.
+            `NaN` measurements also count as periods.
+            If `None` is passed, all data points share the same scoring window, which then equals the whole
+            data.
+        thresh
+            Cutoff level for the Zscores, above which associated points get flagged.
+        min_residuals
+            Minimum residual level points must have to be considered outliers.
+        min_periods
+            Minimum number of valid measurements in a scoring window, to consider the resulting score valid.
+        model_func
+            Function to calculate the center moment in every window.
+        norm_func
+            Function to calculate the scaling for every window.
+        center
+            Whether or not to center the target value in the scoring window. If `False`, the
+            target value is the last value in the window.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        Steps of calculation:
+
+        1. Consider a window :math:`W` of successive points :math:`W = x_{1},...,x_{w}`
+           containing the value :math:`x_{k}` which is to be checked.
+           (The index :math:`k` depends on the selection of the parameter `center`.)
+
+        2. The "moment" :math:`M` for the window gets calculated via :math:`M =` ``model_func(W)``.
+
+        3. The "scaling" :math:`N` for the window gets calculated via :math:`N =` ``norm_func(W)``.
+
+        4. The "score" :math:`S` for the point :math:`x_{k}` gets calculated via :math:`S = (x_{k} - M) / N`.
+
+        5. Finally, :math:`x_{k}` gets flagged, if :math:`|S| >` `thresh` and :math:`|M - x_{k}| >=` `min_residuals`.
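+
+        Examples
+        --------
+        A minimal usage sketch (illustrative only; the column name, window choice and
+        noise data are assumptions):
+
+        .. code-block:: python
+
+           import numpy as np
+           import pandas as pd
+           import saqc
+
+           data = pd.DataFrame(
+               {"data": np.random.normal(size=500)},
+               index=pd.date_range("2000", freq="10min", periods=500),
+           )
+           qc = saqc.SaQC(data)
+           # rolling Zscore over one-day windows; scores above 3 get flagged
+           qc = qc.flagZScore("data", window="1D", thresh=3, min_periods=10)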
+        """
+        datser = self._data[field]
+        if min_residuals is None:
+            min_residuals = 0
+
+        score, model, _ = _univarScoring(
+            datser,
+            window=window,
+            norm_func=norm_func,
+            model_func=model_func,
+            center=center,
+            min_periods=min_periods,
         )
-        grouper_series_lagged = grouper_series + (window / 2)
-        grouper_series = grouper_series.transform(lambda x: x // window)
-        grouper_series_lagged = grouper_series_lagged.transform(lambda x: x // window)
-        partitions = to_group.groupby(grouper_series)
-        partitions_lagged = to_group.groupby(grouper_series_lagged)
-
-    # offset defined test intervals:
-    else:
-        partitions = to_group.groupby(pd.Grouper(freq=window))
-        partitions_lagged = []
-
-    for _, partition in partitions:
-        if partition.shape[0] > min_periods:
-            detected = smirnov_grubbs.two_sided_test_indices(
-                partition["data"].values, alpha=alpha
-            )
-            detected = partition["ts"].iloc[detected]
-            to_flag[detected.index] = True
-
-    if isinstance(window, int) and pedantic:
-        to_flag_lagged = pd.Series(False, index=datcol.index)
-
-        for _, partition in partitions_lagged:
-            if partition.shape[0] > min_periods:
-                detected = smirnov_grubbs.two_sided_test_indices(
-                    partition["data"].values, alpha=alpha
-                )
-                detected = partition["ts"].iloc[detected]
-                to_flag_lagged[detected.index] = True
+        to_flag = (score.abs() > thresh) & ((model - datser).abs() >= min_residuals)
+        self._flags[to_flag, field] = flag
+        return self
 
-        to_flag &= to_flag_lagged
-
-    flags[to_flag, field] = flag
-    return data, flags
 
-
-@flagging()
-def flagRange(
+def _evalStrayLabels(
     data: DictOfSeries,
     field: str,
     flags: Flags,
-    min: float = -np.inf,
-    max: float = np.inf,
+    target: Sequence[str],
+    reduction_range: Optional[str] = None,
+    reduction_drop_flagged: bool = False,  # TODO: still a case ?
+    reduction_thresh: float = 3.5,
+    reduction_min_periods: int = 1,
+    at_least_one: bool = True,
     flag: float = BAD,
     **kwargs,
 ) -> Tuple[DictOfSeries, Flags]:
     """
-    Function flags values not covered by the closed interval [`min`, `max`].
+    The function "reduces" an observations flag to components of it, by applying MAD
+    (See references) test onto every components temporal surrounding.
 
     Parameters
     ----------
     data : dios.DictOfSeries
         A dictionary of pandas.Series, holding all the data.
+
     field : str
-        The fieldname of the column, holding the data-to-be-flagged.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    min : float
-        Lower bound for valid data.
-    max : float
-        Upper bound for valid data.
-    flag : float, default BAD
-        flag to set.
+        The fieldname of the column, holding the labels to be evaluated.
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
     flags : saqc.Flags
-        The quality flags of data
-    """
+        Container to store quality flags to data.
 
-    # using .values is much faster
-    datacol = data[field].values
-    mask = (datacol < min) | (datacol > max)
-    flags[mask, field] = flag
-    return data, flags
+    target : list of str
+        A list of strings, holding the column names of the variables, the stray labels
+        shall be projected onto.
 
+    val_frame : (N,M) pd.DataFrame
+        Input NxM DataFrame of observations, where N is the number of observations and
+        M the number of components per observation.
 
-@register(
-    mask=["field"],
-    demask=["field"],
-    squeeze=["field"],
-    multivariate=True,
-    handles_target=False,
-)
-def flagCrossStatistics(
-    data: DictOfSeries,
-    field: Sequence[str],
-    flags: Flags,
-    thresh: float,
-    method: Literal["modZscore", "Zscore"] = "modZscore",
-    flag: float = BAD,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function checks for outliers relatively to the "horizontal" input data axis.
+    to_flag_frame : pandas.DataFrame
+        Input dataframe of observations to be tested, where N is the number of
+        observations and M the number of components per observation.
 
-    For `fields` :math:`=[f_1,f_2,...,f_N]` and timestamps :math:`[t_1,t_2,...,t_K]`, the following steps are taken
-    for outlier detection:
+    reduction_range : {None, str}
+        An offset string, denoting the range of the temporal surrounding to include
+        into the MAD testing. If ``None`` is passed, no testing will be performed and
+        all targets will have the stray flag projected.
 
-    1. All timestamps :math:`t_i`, where there is one :math:`f_k`, with :math:`data[f_K]` having no entry at
-       :math:`t_i`, are excluded from the following process (inner join of the :math:`f_i` fields.)
-    2. for every :math:`0 <= i <= K`, the value
-       :math:`m_j = median(\\{data[f_1][t_i], data[f_2][t_i], ..., data[f_N][t_i]\\})` is calculated
-    3. for every :math:`0 <= i <= K`, the set
-       :math:`\\{data[f_1][t_i] - m_j, data[f_2][t_i] - m_j, ..., data[f_N][t_i] - m_j\\}` is tested for outliers with the
-       specified method (`cross_stat` parameter).
+    reduction_drop_flagged : bool, default False
+        Whether or not to drop flagged values other than the value under test from the
+        temporal surrounding before checking the value with MAD.
 
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : list of str
-        List of fieldnames in data, determining wich variables are to be included into the flagging process.
-    flags : saqc.Flags
-        A flags object, holding flags and additional informations related to `data`.
-    thresh : float
-        Threshold which the outlier score of an value must exceed, for being flagged an outlier.
-    method : {'modZscore', 'Zscore'}, default 'modZscore'
-        Method used for calculating the outlier scores.
+    reduction_thresh : float, default 3.5
+        The `critical` value, controlling whether the MAD score is considered
+        referring to an outlier or not. Higher values result in less rigid flagging.
+        The default value is widely used in the literature. See references section
+        for more details ([1]).
 
-        * ``'modZscore'``: Median based "sigma"-ish approach. See Referenecs [1].
-        * ``'Zscore'``: Score values by how many times the standard deviation they differ from the median.
-          See References [1]
+    at_least_one : bool, default True
+        If none of the variables the outlier label shall be reduced to is an outlier
+        with regard to the test, all (True) or none (False) of the variables are flagged.
 
     flag : float, default BAD
         flag to set.
 
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed relatively to the input flags.
-
-
-    Notes
-    -----
-
-    The input variables dont necessarily have to be aligned. If the variables are unaligned, scoring
-    and flagging will be only performed on the subset of inices shared among all input variables.
-
-
     References
     ----------
     [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
     """
+    val_frame = data[target].to_df()
+    stray_detects = flags[field] > UNFLAGGED
+    stray_detects = stray_detects[stray_detects]
+    to_flag_frame = pd.DataFrame(False, columns=target, index=stray_detects.index)
 
-    fields = toSequence(field)
-
-    df = data[fields].loc[data[fields].index_of("shared")].to_df()
+    if reduction_range is None:
+        for field in to_flag_frame.columns:
+            flags[to_flag_frame.index, field] = flag
+        return data, flags
 
-    if isinstance(method, str):
+    for var in target:
+        for index in enumerate(to_flag_frame.index):
 
-        if method == "modZscore":
-            MAD_series = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
-            diff_scores = (
-                (0.6745 * (df.subtract(df.median(axis=1), axis=0)))
-                .divide(MAD_series, axis=0)
-                .abs()
+            index_slice = slice(
+                index[1] - pd.Timedelta(reduction_range),
+                index[1] + pd.Timedelta(reduction_range),
             )
+            test_slice = val_frame[var][index_slice].dropna()
 
-        elif method == "Zscore":
-            diff_scores = (
-                df.subtract(df.mean(axis=1), axis=0)
-                .divide(df.std(axis=1), axis=0)
-                .abs()
-            )
+            # check whether the value under test is sufficiently centered:
+            first = test_slice.first_valid_index()
+            last = test_slice.last_valid_index()
+            min_range = pd.Timedelta(reduction_range) / 4
 
-        else:
-            raise ValueError(method)
+            if (
+                pd.Timedelta(index[1] - first) < min_range
+                or pd.Timedelta(last - index[1]) < min_range
+            ):
+                polydeg = 0
+            else:
+                polydeg = 2
 
-    else:
+            if reduction_drop_flagged:
+                test_slice = test_slice.drop(to_flag_frame.index, errors="ignore")
 
-        try:
-            stat = getattr(df, method.__name__)(axis=1)
-        except AttributeError:
-            stat = df.aggregate(method, axis=1)
+            if test_slice.shape[0] < reduction_min_periods:
+                to_flag_frame.loc[index[1], var] = True
+                continue
 
-        diff_scores = df.subtract(stat, axis=0).abs()
+            x = test_slice.index.values.astype(float)
+            x_0 = x[0]
+            x = (x - x_0) / 10**12
 
-    mask = diff_scores > thresh
-    if mask.empty:
-        return data, flags
+            polyfitted = poly.polyfit(y=test_slice.values, x=x, deg=polydeg)
+
+            testval = poly.polyval(
+                (float(index[1].to_numpy()) - x_0) / 10**12, polyfitted
+            )
+            testval = val_frame[var][index[1]] - testval
 
-    for f in fields:
-        m = mask[f].reindex(index=flags[f].index, fill_value=False)
-        flags[m, f] = flag
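+            # modified Z-score of the value under test against the polynomial fit
+            # (0.6745 is the MAD consistency constant for normal data)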
+            resids = test_slice.values - poly.polyval(x, polyfitted)
+            med_resids = np.median(resids)
+            MAD = np.median(np.abs(resids - med_resids))
+            crit_val = 0.6745 * (abs(med_resids - testval)) / MAD
+
+            if crit_val > reduction_thresh:
+                to_flag_frame.loc[index[1], var] = True
+
+    if at_least_one:
+        to_flag_frame[~to_flag_frame.any(axis=1)] = True
+
+    for field in to_flag_frame.columns:
+        col = to_flag_frame[field]
+        flags[col[col].index, field] = flag
 
     return data, flags
diff --git a/saqc/funcs/pattern.py b/saqc/funcs/pattern.py
index 96a5b176c3aed3666470696ac785d00b6acdaa9c..43e4f5f97d26b27391d4d67a3754b9f8776b9019 100644
--- a/saqc/funcs/pattern.py
+++ b/saqc/funcs/pattern.py
@@ -5,6 +5,9 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 # -*- coding: utf-8 -*-
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
 
 import dtw
 import pandas as pd
@@ -13,6 +16,9 @@ from saqc.constants import BAD
 from saqc.core.register import flagging
 from saqc.lib.tools import customRoller
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
 
 def calculateDistanceByDTW(
     data: pd.Series, reference: pd.Series, forward=True, normalize=True
@@ -81,112 +87,104 @@ def calculateDistanceByDTW(
     return distances.reindex(index=data.index)  # reinsert NaNs
 
 
-# todo should we mask `reference` even if the func fail if reference has NaNs
-@flagging()
-def flagPatternByDTW(
-    data,
-    field,
-    flags,
-    reference,
-    max_distance=0.0,
-    normalize=True,
-    plot=False,
-    flag=BAD,
-    **kwargs,
-):
-    """
-    Pattern Recognition via Dynamic Time Warping.
-
-    The steps are:
-    1. work on a moving window
-
-    2. for each data chunk extracted from each window, a distance to the given pattern
-       is calculated, by the dynamic time warping algorithm [1]
-
-    3. if the distance is below the threshold, all the data in the window gets flagged
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        The name of the data column
-
-    flags : saqc.Flags
-        The flags belonging to `data`.
-
-    reference : str
-        The name in `data` which holds the pattern. The pattern must not have NaNs,
-        have a datetime index and must not be empty.
-
-    max_distance : float, default 0.0
-        Maximum dtw-distance between chunk and pattern, if the distance is lower than
-        ``max_distance`` the data gets flagged. With default, ``0.0``, only exact
-        matches are flagged.
-
-    normalize : bool, default True
-        If `False`, return unmodified distances.
-        If `True`, normalize distances by the number of observations of the reference.
-        This helps to make it easier to find a good cutoff threshold for further
-        processing. The distances then refer to the mean distance per datapoint,
-        expressed in the datas units.
-
-    plot: bool, default False
-        Show a calibration plot, which can be quite helpful to find the right threshold
-        for `max_distance`. It works best with `normalize=True`. Do not use in automatic
-        setups / pipelines. The plot show three lines:
-
-        - data: the data the function was called on
-        - distances: the calculated distances by the algorithm
-        - indicator: have to distinct levels: `0` and the value of `max_distance`.
-          If `max_distance` is `0.0` it defaults to `1`. Everywhere where the
-          indicator is not `0` the data will be flagged.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-
-    flags : saqc.Flags
-        The flags belonging to `data`.
-
-    Notes
-    -----
-    The window size of the moving window is set to equal the temporal extension of the
-    reference datas datetime index.
-
-    References
-    ----------
-    Find a nice description of underlying the Dynamic Time Warping Algorithm here:
-
-    [1] https://cran.r-project.org/web/packages/dtw/dtw.pdf
-    """
-    ref = data[reference]
-    dat = data[field]
-
-    distances = calculateDistanceByDTW(dat, ref, forward=True, normalize=normalize)
-    winsz = ref.index.max() - ref.index.min()
-
-    # prevent nan propagation
-    distances = distances.fillna(max_distance + 1)
-
-    # find minima filter by threshold
-    fw = customRoller(distances, window=winsz, forward=True, closed="both", expand=True)
-    bw = customRoller(distances, window=winsz, closed="both", expand=True)
-    minima = (fw.min() == bw.min()) & (distances <= max_distance)
-
-    # Propagate True's to size of pattern.
-    rolling = customRoller(minima, window=winsz, closed="both", expand=True)
-    mask = rolling.sum() > 0
-
-    if plot:
-        df = pd.DataFrame()
-        df["data"] = dat
-        df["distances"] = distances
-        df["indicator"] = mask.astype(float) * (max_distance or 1)
-        df.plot()
-
-    flags[mask, field] = flag
-    return data, flags
+class PatternMixin:
+
+    # todo should we mask `reference` even if the func fail if reference has NaNs
+    @flagging()
+    def flagPatternByDTW(
+        self: "SaQC",
+        field,
+        reference,
+        max_distance=0.0,
+        normalize=True,
+        plot=False,
+        flag=BAD,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Pattern Recognition via Dynamic Time Warping.
+
+        The steps are:
+        1. work on a moving window
+
+        2. for each data chunk extracted from each window, a distance to the given pattern
+           is calculated, by the dynamic time warping algorithm [1]
+
+        3. if the distance is below the threshold, all the data in the window gets flagged
+
+        Parameters
+        ----------
+        field : str
+            The name of the data column
+
+        reference : str
+            The name in `data` which holds the pattern. The pattern must not have NaNs,
+            must have a datetime index and must not be empty.
+
+        max_distance : float, default 0.0
+            Maximum dtw-distance between chunk and pattern, if the distance is lower than
+            ``max_distance`` the data gets flagged. With default, ``0.0``, only exact
+            matches are flagged.
+
+        normalize : bool, default True
+            If `False`, return unmodified distances.
+            If `True`, normalize distances by the number of observations of the reference.
+            This helps to make it easier to find a good cutoff threshold for further
+            processing. The distances then refer to the mean distance per datapoint,
+            expressed in the data's units.
+
+        plot : bool, default False
+            Show a calibration plot, which can be quite helpful to find the right threshold
+            for `max_distance`. It works best with `normalize=True`. Do not use in automatic
+            setups / pipelines. The plot shows three lines:
+
+            - data: the data the function was called on
+            - distances: the distances calculated by the algorithm
+            - indicator: has two distinct levels: `0` and the value of `max_distance`.
+              If `max_distance` is `0.0` it defaults to `1`. Everywhere the
+              indicator is not `0` the data will be flagged.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Notes
+        -----
+        The size of the moving window is set to the temporal extension of the
+        reference data's datetime index.
+
+        References
+        ----------
+        A nice description of the underlying Dynamic Time Warping algorithm can be found here:
+
+        [1] https://cran.r-project.org/web/packages/dtw/dtw.pdf
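+
+        Examples
+        --------
+        A minimal usage sketch (the example data, column names and object
+        construction below are illustrative assumptions, not part of this function):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> from dios import DictOfSeries
+        >>> s = pd.Series(np.sin(np.arange(100) / 5.0),
+        ...               index=pd.date_range("2021-01-01", periods=100, freq="10min"))
+        >>> data = DictOfSeries({"sensor": s, "pattern": s.iloc[20:30]})
+        >>> qc = saqc.SaQC(data=data)
+        >>> qc = qc.flagPatternByDTW(field="sensor", reference="pattern",
+        ...                          max_distance=1.0, normalize=True)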
+        """
+        ref = self._data[reference]
+        dat = self._data[field]
+
+        distances = calculateDistanceByDTW(dat, ref, forward=True, normalize=normalize)
+        winsz = ref.index.max() - ref.index.min()
+
+        # prevent nan propagation
+        distances = distances.fillna(max_distance + 1)
+
+        # find minima filter by threshold
+        fw = customRoller(
+            distances, window=winsz, forward=True, closed="both", expand=True
+        )
+        bw = customRoller(distances, window=winsz, closed="both", expand=True)
+        minima = (fw.min() == bw.min()) & (distances <= max_distance)
+
+        # Propagate True's to size of pattern.
+        rolling = customRoller(minima, window=winsz, closed="both", expand=True)
+        mask = rolling.sum() > 0
+
+        if plot:
+            df = pd.DataFrame()
+            df["data"] = dat
+            df["distances"] = distances
+            df["indicator"] = mask.astype(float) * (max_distance or 1)
+            df.plot()
+
+        self._flags[mask, field] = flag
+        return self
diff --git a/saqc/funcs/resampling.py b/saqc/funcs/resampling.py
index 9c2b4565f901ec384ea3bb38434c8e725ae8f0e9..4f227d8975e74e2af016b48437c8aa57d4aa4c05 100644
--- a/saqc/funcs/resampling.py
+++ b/saqc/funcs/resampling.py
@@ -8,20 +8,22 @@
 
 from __future__ import annotations
 
-from typing import Callable, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Optional, Union
 
 import numpy as np
 import pandas as pd
 from typing_extensions import Literal
 
-import saqc.funcs.tools as tools
-from dios import DictOfSeries, DtItype
-from saqc.core.flags import Flags
+from dios import DtItype
 from saqc.core.register import _isflagged, register
-from saqc.funcs.interpolation import _SUPPORTED_METHODS, interpolateIndex
+from saqc.funcs.interpolation import _SUPPORTED_METHODS
 from saqc.lib.tools import evalFreqStr, filterKwargs, getFreqDelta
 from saqc.lib.ts_operators import aggregate2Freq, shift2Freq
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
 METHOD2ARGS = {
     "inverse_fshift": ("backward", pd.Timedelta),
     "inverse_bshift": ("forward", pd.Timedelta),
@@ -33,336 +35,442 @@ METHOD2ARGS = {
 }
 
 
-@register(mask=["field"], demask=[], squeeze=[])
-def linear(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    freq: str,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    A method to "regularize" data by interpolating linearly the data at regular timestamp.
-
-    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
-
-    Interpolated values will get assigned the worst flag within freq-range.
-
-    Note, that the data only gets interpolated at those (regular) timestamps, that have a valid (existing and
-    not-na) datapoint preceeding them and one succeeding them within freq range.
-    Regular timestamp that do not suffice this condition get nan assigned AND The associated flag will be of value
-    ``UNFLAGGED``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        The fieldname of the column, holding the data-to-be-regularized.
-
-    flags : saqc.Flags
-        Container to store flags of the data.  freq
-
-    freq : str
-        An offset string. The frequency of the grid you want to interpolate your data at.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values and shape may have changed relatively to the flags input.
-    """
-    reserved = ["method", "order", "limit", "downgrade"]
-    kwargs = filterKwargs(kwargs, reserved)
-    return interpolateIndex(data, field, flags, freq, "time", **kwargs)
-
-
-@register(mask=["field"], demask=[], squeeze=[])
-def interpolate(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    freq: str,
-    method: _SUPPORTED_METHODS,
-    order: int = 1,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    A method to "regularize" data by interpolating the data at regular timestamp.
-
-    A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
-
-    Interpolated values will get assigned the worst flag within freq-range.
-
-    There are available all the interpolations from the pandas.Series.interpolate method and they are called by
-    the very same keywords.
-
-    Note, that, to perform a timestamp aware, linear interpolation, you have to pass ``'time'`` as `method`,
-    and NOT ``'linear'``.
-
-    Note, that the data only gets interpolated at those (regular) timestamps, that have a valid (existing and
-    not-na) datapoint preceeding them and one succeeding them within freq range.
-    Regular timestamp that do not suffice this condition get nan assigned AND The associated flag will be of value
-    ``UNFLAGGED``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        The fieldname of the column, holding the data-to-be-regularized.
-
-    flags : saqc.Flags
-        Container to store flags of the data.
-
-    freq : str
-        An offset string. The frequency of the grid you want to interpolate your data at.
-
-    method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
-        "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}
-        The interpolation method you want to apply.
-
-    order : int, default 1
-        If your selected interpolation method can be performed at different *orders* - here you pass the desired
-        order.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values and shape may have changed relatively to the flags input.
-    """
-    reserved = ["limit", "downgrade"]
-    kwargs = filterKwargs(kwargs, reserved)
-    return interpolateIndex(
-        data, field, flags, freq, method=method, order=order, **kwargs
-    )
+class ResamplingMixin:
+    @register(mask=["field"], demask=[], squeeze=[])
+    def linear(
+        self: "SaQC",
+        field: str,
+        freq: str,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        A method to "regularize" data by interpolating linearly the data at regular timestamp.
+
+        A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
+
+        Interpolated values will get assigned the worst flag within freq-range.
+
+        Note, that the data only gets interpolated at those (regular) timestamps, that have a valid (existing and
+        not-na) datapoint preceeding them and one succeeding them within freq range.
+        Regular timestamp that do not suffice this condition get nan assigned AND The associated flag will be of value
+        ``UNFLAGGED``.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-regularized.
+
+        freq : str
+            An offset string. The frequency of the grid you want to interpolate your data at.
+
+        Returns
+        -------
+        saqc.SaQC
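+
+        Examples
+        --------
+        A minimal sketch (the example data and the ``SaQC`` construction are
+        illustrative assumptions):
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.DatetimeIndex(["2021-01-01 00:00", "2021-01-01 00:07",
+        ...                         "2021-01-01 00:22", "2021-01-01 00:31"])
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": [1.0, 2.0, 3.0, 4.0]}, index=idx))
+        >>> qc = qc.linear(field="sensor", freq="10min")  # interpolate onto a 10min grid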
+        """
+        reserved = ["method", "order", "limit", "downgrade"]
+        kwargs = filterKwargs(kwargs, reserved)
+        return self.interpolateIndex(field, freq, "time", **kwargs)
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def interpolate(
+        self: "SaQC",
+        field: str,
+        freq: str,
+        method: _SUPPORTED_METHODS,
+        order: int = 1,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        A method to "regularize" data by interpolating the data at regular timestamp.
+
+        A series of data is considered "regular", if it is sampled regularly (= having uniform sampling rate).
+
+        Interpolated values will get assigned the worst flag within freq-range.
+
+        There are available all the interpolations from the pandas.Series.interpolate method and they are called by
+        the very same keywords.
+
+        Note, that, to perform a timestamp aware, linear interpolation, you have to pass ``'time'`` as `method`,
+        and NOT ``'linear'``.
+
+        Note, that the data only gets interpolated at those (regular) timestamps, that have a valid (existing and
+        not-na) datapoint preceeding them and one succeeding them within freq range.
+        Regular timestamp that do not suffice this condition get nan assigned AND The associated flag will be of value
+        ``UNFLAGGED``.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-regularized.
+
+        freq : str
+            An offset string. The frequency of the grid you want to interpolate your data at.
+
+        method : {"linear", "time", "nearest", "zero", "slinear", "quadratic", "cubic", "spline", "barycentric",
+            "polynomial", "krogh", "piecewise_polynomial", "spline", "pchip", "akima"}
+            The interpolation method you want to apply.
+
+        order : int, default 1
+            If the selected interpolation method can be performed at different *orders*, pass the desired
+            order here.
+
+        Returns
+        -------
+        saqc.SaQC
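+
+        Examples
+        --------
+        A minimal sketch (example data and ``SaQC`` construction are illustrative
+        assumptions); note the use of ``'time'`` for timestamp-aware linear interpolation:
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=20, freq="7min")
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": np.arange(20.0)}, index=idx))
+        >>> qc = qc.interpolate(field="sensor", freq="15min", method="time")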
+        """
+        reserved = ["limit", "downgrade"]
+        kwargs = filterKwargs(kwargs, reserved)
+        return self.interpolateIndex(field, freq, method=method, order=order, **kwargs)
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def shift(
+        self: "SaQC",
+        field: str,
+        freq: str,
+        method: Literal["fshift", "bshift", "nshift"] = "nshift",
+        freq_check: Optional[Literal["check", "auto"]] = None,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function to shift data and flags to a regular (equidistant) timestamp grid, according to ``method``.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-shifted.
+
+        freq : str
+            A frequency offset string that will be interpreted as the sampling rate you want the data to be shifted to.
+
+        method : {'fshift', 'bshift', 'nshift'}, default 'nshift'
+            Specifies how misaligned data-points get propagated to a grid timestamp.
+            Following choices are available:
+
+            * 'nshift' : every grid point gets assigned the nearest value in its range. (range = +/- 0.5 * `freq`)
+            * 'bshift' : every grid point gets assigned its first succeeding value, if one is available in
+              the succeeding sampling interval.
+            * 'fshift' : every grid point gets assigned its last preceding value, if one is available in
+              the preceding sampling interval.
+
+        freq_check : {None, 'check', 'auto'}, default None
+
+            * ``None`` : do not validate frequency-string passed to `freq`
+            * 'check' : estimate the sampling frequency and log a warning if the estimate does not match the
+              frequency string passed to `freq`, or if no uniform sampling rate could be estimated
+            * 'auto' : estimate frequency and use estimate. (Ignores `freq` parameter.)
+
+        Returns
+        -------
+        saqc.SaQC
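+
+        Examples
+        --------
+        A minimal sketch (data and ``SaQC`` construction are illustrative assumptions):
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.DatetimeIndex(["2021-01-01 00:01", "2021-01-01 00:14", "2021-01-01 00:29"])
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": [1.0, 2.0, 3.0]}, index=idx))
+        >>> qc = qc.shift(field="sensor", freq="15min", method="nshift")  # snap to a 15min grid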
+        """
+        datcol = self._data[field]
+        if datcol.empty:
+            return self
+
+        freq = evalFreqStr(freq, freq_check, datcol.index)
+
+        # do the shift
+        datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
+
+        # do the shift on the history
+        kws = dict(method=method, freq=freq)
+
+        history = self._flags.history[field].apply(
+            index=datcol.index,
+            func_handle_df=True,
+            func=shift2Freq,
+            func_kws={**kws, "fill_value": np.nan},
+        )
 
+        self._flags.history[field] = history
+        self._data[field] = datcol
+        return self
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def resample(
+        self: "SaQC",
+        field: str,
+        freq: str,
+        func: Callable[[pd.Series], pd.Series] = np.mean,
+        method: Literal["fagg", "bagg", "nagg"] = "bagg",
+        maxna: Optional[int] = None,
+        maxna_group: Optional[int] = None,
+        maxna_flags: Optional[int] = None,  # TODO: still a case ??
+        maxna_group_flags: Optional[int] = None,
+        flag_func: Callable[[pd.Series], float] = max,
+        freq_check: Optional[Literal["check", "auto"]] = None,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function to resample the data.
+
+        The data will be sampled at regular (equidistant) timestamps, aka grid points.
+        Sampling intervals therefore get aggregated with a function, specified by the
+        `func` parameter, and the result gets projected onto the new timestamps with a
+        method, specified by `method`. The following methods (keywords) are available:
+
+        * ``'nagg'``: all values in the range (+/- `freq`/2) of a grid point get
+            aggregated with func and assigned to it.
+        * ``'bagg'``: all values in a sampling interval get aggregated with func and
+            the result gets assigned to the last grid point.
+        * ``'fagg'``: all values in a sampling interval get aggregated with func and
+            the result gets assigned to the next grid point.
+
+
+        Note that, if possible, functions passed to `func` will get mapped
+        internally onto pandas.resample methods, which results in a reasonable
+        performance boost. For this to work, you should pass functions that
+        have the ``__name__`` attribute initialised with the name of the according
+        resample method. Furthermore, you should not pass numpy's nan-functions (``nansum``,
+        ``nanmean``, ...) because those, for example, have ``__name__ == 'nansum'`` and
+        will thus not trigger ``resample.func()``, but the slower ``resample.apply(
+        nanfunc)``. Also, internally, no NaNs get passed to the functions anyway,
+        so there is no point in passing the nan-functions.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-resampled.
+
+        freq : str
+            An Offset String, that will be interpreted as the frequency you want to
+            resample your data with.
+
+        func : Callable
+            The function you want to use for aggregation.
+
+        method : {'fagg', 'bagg', 'nagg'}, default 'bagg'
+            Specifies which intervals get aggregated for a certain timestamp (preceding,
+            succeeding or "surrounding" interval). See the description above for more details.
+
+        maxna : {None, int}, default None
+            Maximum number of NaNs allowed in a resampling interval. If `maxna` is exceeded, the interval
+            is set entirely to NaN.
+
+        maxna_group : {None, int}, default None
+            Same as `maxna` but for consecutive NaNs.
+
+        maxna_flags : {None, int}, default None
+            Same as `maxna`, only applying to the flags. The flag regarded as
+            "invalid" value is ``BAD``, which is also the flag assigned to
+            invalid/empty intervals.
+
+        maxna_group_flags : {None, int}, default None
+            Same as `maxna_group`, only applying to the flags. The flag regarded as
+            "invalid" value is ``BAD``, which is also the flag assigned to
+            invalid/empty intervals.
+
+        flag_func : Callable, default: max
+            The function you want to aggregate the flags with. It should be capable of
+            operating on the flags dtype (usually ordered categorical).
+
+        freq_check : {None, 'check', 'auto'}, default None
+
+            * ``None``: do not validate frequency-string passed to `freq`
+            * ``'check'``: estimate the sampling frequency and log a warning if the estimate does not
+                match the frequency string passed to `freq`, or if no uniform sampling rate could be
+                estimated
+            * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
+
+        Returns
+        -------
+        saqc.SaQC
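+
+        Examples
+        --------
+        A minimal sketch (data and ``SaQC`` construction are illustrative assumptions).
+        Aggregate 10-minute values to hourly means using the ``'bagg'`` projection:
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=48, freq="10min")
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": np.random.rand(48)}, index=idx))
+        >>> qc = qc.resample(field="sensor", freq="1h", func=np.mean, method="bagg", maxna=2)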
+        """
+
+        datcol = self._data[field]
+
+        # workaround for #GL-333
+        if datcol.empty and self._data.itype in [None, DtItype]:
+            datcol = pd.Series(index=pd.DatetimeIndex([]), dtype=datcol.dtype)
+
+        freq = evalFreqStr(freq, freq_check, datcol.index)
+
+        datcol = aggregate2Freq(
+            datcol,
+            method,
+            freq,
+            func,
+            fill_value=np.nan,
+            max_invalid_total=maxna,
+            max_invalid_consec=maxna_group,
+        )
 
-@register(mask=["field"], demask=[], squeeze=[])
-def shift(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    freq: str,
-    method: Literal["fshift", "bshift", "nshift"] = "nshift",
-    freq_check: Optional[Literal["check", "auto"]] = None,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function to shift data and flags to a regular (equidistant) timestamp grid, according to ``method``.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        The fieldname of the column, holding the data-to-be-shifted.
-
-    flags : saqc.Flags
-        Container to store flags of the data.
-
-    freq : str
-        An frequency Offset String that will be interpreted as the sampling rate you want the data to be shifted to.
-
-    method : {'fshift', 'bshift', 'nshift'}, default 'nshift'
-        Specifies how misaligned data-points get propagated to a grid timestamp.
-        Following choices are available:
-
-        * 'nshift' : every grid point gets assigned the nearest value in its range. (range = +/- 0.5 * `freq`)
-        * 'bshift' : every grid point gets assigned its first succeeding value, if one is available in
-          the succeeding sampling interval.
-        * 'fshift' : every grid point gets assigned its ultimately preceding value, if one is available in
-          the preceeding sampling interval.
-
-    freq_check : {None, 'check', 'auto'}, default None
-
-        * ``None`` : do not validate frequency-string passed to `freq`
-        * 'check' : estimate frequency and log a warning if estimate miss matches frequency string passed to `freq`,
-          or if no uniform sampling rate could be estimated
-        * 'auto' : estimate frequency and use estimate. (Ignores `freq` parameter.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values and shape may have changed relatively to the flags input.
-    """
-    datcol = data[field]
-    if datcol.empty:
-        return data, flags
-
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-
-    # do the shift
-    datcol = shift2Freq(datcol, method, freq, fill_value=np.nan)
-
-    # do the shift on the history
-    kws = dict(method=method, freq=freq)
-
-    history = flags.history[field].apply(
-        index=datcol.index,
-        func_handle_df=True,
-        func=shift2Freq,
-        func_kws={**kws, "fill_value": np.nan},
-    )
+        kws = dict(
+            method=method,
+            freq=freq,
+            agg_func=flag_func,
+            fill_value=np.nan,
+            max_invalid_total=maxna_flags,
+            max_invalid_consec=maxna_group_flags,
+        )
 
-    flags.history[field] = history
-    data[field] = datcol
-    return data, flags
+        history = self._flags.history[field].apply(
+            index=datcol.index,
+            func=aggregate2Freq,
+            func_kws=kws,
+        )
 
+        self._data[field] = datcol
+        self._flags.history[field] = history
+        return self
 
-@register(mask=["field"], demask=[], squeeze=[])
-def resample(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    freq: str,
-    func: Callable[[pd.Series], pd.Series] = np.mean,
-    method: Literal["fagg", "bagg", "nagg"] = "bagg",
-    maxna: Optional[int] = None,
-    maxna_group: Optional[int] = None,
-    maxna_flags: Optional[int] = None,  # TODO: still a case ??
-    maxna_group_flags: Optional[int] = None,
-    flag_func: Callable[[pd.Series], float] = max,
-    freq_check: Optional[Literal["check", "auto"]] = None,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function to resample the data.
-
-    The data will be sampled at regular (equidistant) timestamps aka. Grid points.
-    Sampling intervals therefore get aggregated with a function, specified by
-    'func' parameter and the result gets projected onto the new timestamps with a
-    method, specified by "method". The following method (keywords) are available:
-
-    * ``'nagg'``: all values in the range (+/- `freq`/2) of a grid point get
-        aggregated with func and assigned to it.
-    * ``'bagg'``: all values in a sampling interval get aggregated with func and
-        the result gets assigned to the last grid point.
-    * ``'fagg'``: all values in a sampling interval get aggregated with func and
-        the result gets assigned to the next grid point.
-
-
-    Note, that. if possible, functions passed to func will get projected
-    internally onto pandas.resample methods, wich results in some reasonable
-    performance boost - however, for this to work, you should pass functions that
-    have the __name__ attribute initialised and the according methods name assigned
-    to it. Furthermore, you shouldnt pass numpys nan-functions (``nansum``,
-    ``nanmean``,...) because those for example, have ``__name__ == 'nansum'`` and
-    they will thus not trigger ``resample.func()``, but the slower ``resample.apply(
-    nanfunc)``. Also, internally, no nans get passed to the functions anyway,
-    so that there is no point in passing the nan functions.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        The fieldname of the column, holding the data-to-be-resampled.
-
-    flags : saqc.Flags
-        Container to store flags of the data.
-
-    freq : str
-        An Offset String, that will be interpreted as the frequency you want to
-        resample your data with.
-
-    func : Callable
-        The function you want to use for aggregation.
-
-    method: {'fagg', 'bagg', 'nagg'}, default 'bagg'
-        Specifies which intervals to be aggregated for a certain timestamp. (preceding,
-        succeeding or "surrounding" interval). See description above for more details.
-
-    maxna : {None, int}, default None
-        Maximum number NaNs in a resampling interval. If maxna is exceeded, the interval
-        is set entirely to NaN.
-
-    maxna_group : {None, int}, default None
-        Same as `maxna` but for consecutive NaNs.
-
-    maxna_flags : {None, int}, default None
-        Same as `max_invalid`, only applying for the flags. The flag regarded
-        as "invalid" value, is the one passed to empty_intervals_flag (
-        default=``BAD``). Also this is the flag assigned to invalid/empty intervals.
-
-    maxna_group_flags : {None, int}, default None
-        Same as `maxna_flags`, only applying onto flags. The flag regarded as
-        "invalid" value, is the one passed to empty_intervals_flag. Also this is the
-        flag assigned to invalid/empty intervals.
-
-    flag_func : Callable, default: max
-        The function you want to aggregate the flags with. It should be capable of
-        operating on the flags dtype (usually ordered categorical).
-
-    freq_check : {None, 'check', 'auto'}, default None
-
-        * ``None``: do not validate frequency-string passed to `freq`
-        * ``'check'``: estimate frequency and log a warning if estimate miss matchs
-            frequency string passed to 'freq', or if no uniform sampling rate could be
-            estimated
-        * ``'auto'``: estimate frequency and use estimate. (Ignores `freq` parameter.)
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values and shape may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values and shape may have changed relatively to the flags input.
-    """
-
-    datcol = data[field]
-
-    # workaround for #GL-333
-    if datcol.empty and data.itype in [None, DtItype]:
-        datcol = pd.Series(index=pd.DatetimeIndex([]), dtype=datcol.dtype)
-
-    freq = evalFreqStr(freq, freq_check, datcol.index)
-
-    datcol = aggregate2Freq(
-        datcol,
-        method,
-        freq,
-        func,
-        fill_value=np.nan,
-        max_invalid_total=maxna,
-        max_invalid_consec=maxna_group,
+    @register(
+        mask=[],
+        demask=[],
+        squeeze=[],
+        handles_target=True,  # target is mandatory in func, so it's allowed
     )
+    def concatFlags(
+        self: "SaQC",
+        field: str,
+        target: str,
+        method: Literal[
+            "inverse_fagg",
+            "inverse_bagg",
+            "inverse_nagg",
+            "inverse_fshift",
+            "inverse_bshift",
+            "inverse_nshift",
+            "inverse_interpolation",
+            "match",
+        ] = "match",
+        freq: Optional[str] = None,
+        drop: Optional[bool] = False,
+        squeeze: Optional[bool] = False,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        The function appends the flags history of ``field`` to the flags history of ``target``.
+        Before appending, the columns of the ``field`` history are projected onto the ``target`` index via ``method``.
+
+        The following projection methods are available (``field_flag`` denotes a flag of ``field``,
+        ``target_flag`` a flag of ``target``):
+
+        * 'inverse_nagg' - all target_flags within the range +/- freq/2 of a field_flag get assigned this field flag's value.
+           (if field_flag > target_flag)
+
+        * 'inverse_bagg' - all target_flags succeeding a field_flag within the range of "freq" get assigned this field flag's
+           value. (if field_flag > target_flag)
+
+        * 'inverse_fagg' - all target_flags preceding a field_flag within the range of "freq" get assigned this field flag's
+           value. (if field_flag > target_flag)
+
+        * 'inverse_interpolation' - all target_flags within the range +/- freq of a field_flag get assigned this field flag's value.
+          (if field_flag > target_flag)
+
+        * 'inverse_nshift' - the target_flag within the range +/- freq/2 that is nearest to a field_flag gets assigned this
+          field flag's value. (if field_flag > target_flag)
+
+        * 'inverse_bshift' - the target_flag succeeding a field_flag within the range freq that is nearest to the
+           field_flag gets assigned this field flag's value. (if field_flag > target_flag)
+
+        * 'inverse_fshift' - the target_flag preceding a field_flag within the range freq that is nearest to the
+           field_flag gets assigned this field flag's value. (if field_flag > target_flag)
+
+        * 'match' - any target_flag with a timestamp matching a field_flag's timestamp gets this field_flag's value
+           (if field_flag > target_flag)
+
+        Note: to undo or backtrack a resampling/shifting/interpolation that has been performed with a certain method,
+        you can just pass the associated "inverse" method. You should also pass the same ``drop`` keyword.
+
+        Parameters
+        ----------
+        field : str
+            Fieldname of flags history to append.
+
+        target : str
+            Field name of flags history to append to.
+
+        method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift', 'match'}, default 'match'
+            The method used for projection of ``field`` flags onto ``target`` flags. See description above for more details.
+
+        freq : str or None, default None
+            The ``freq`` determines the projection range for the projection method. See the description above for more details.
+            By default (``None``), the sampling frequency of ``field`` is used.
+
+        drop : bool, default False
+            If set to `True`, the `field` column will be removed after processing.
+
+        squeeze : bool, default False
+            If set to `True`, the appended flags frame will be squeezed, resulting in function-specific flags information
+            getting lost.
+
+        Returns
+        -------
+        saqc.SaQC
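+
+        Examples
+        --------
+        A minimal sketch (columns and ``SaQC`` construction are illustrative
+        assumptions; both columns share the same timestamps here, so ``'match'``
+        can be used):
+
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=10, freq="1h")
+        >>> df = pd.DataFrame({"raw": range(10), "proc": range(10)}, index=idx, dtype=float)
+        >>> qc = saqc.SaQC(data=df)
+        >>> # project any flags set on "proc" back onto "raw"
+        >>> qc = qc.concatFlags(field="proc", target="raw", method="match")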
+        """
+        flagscol = self._flags[field]
+        target_datcol = self._data[target]
+        target_flagscol = self._flags[target]
+
+        if target_datcol.empty or flagscol.empty:
+            return self
+
+        dummy = pd.Series(np.nan, target_flagscol.index, dtype=float)
+
+        if freq is None:
+            freq = getFreqDelta(flagscol.index)
+            if freq is None and not method == "match":
+                raise ValueError(
+                    'To project irregularly sampled data, either use method="match", or '
+                    "pass custom projection range to freq parameter."
+                )
+
+        if method[-13:] == "interpolation":
+            ignore = _getChunkBounds(target_datcol, flagscol, freq)
+            func = _inverseInterpolation
+            func_kws = dict(freq=freq, chunk_bounds=ignore, target=dummy)
+
+        elif method[-3:] == "agg":
+            projection_method = METHOD2ARGS[method][0]
+            tolerance = METHOD2ARGS[method][1](freq)
+            func = _inverseAggregation
+            func_kws = dict(freq=tolerance, method=projection_method, target=dummy)
+
+        elif method[-5:] == "shift":
+            drop_mask = target_datcol.isna() | _isflagged(
+                target_flagscol, kwargs["dfilter"]
+            )
+            projection_method = METHOD2ARGS[method][0]
+            tolerance = METHOD2ARGS[method][1](freq)
+            func = _inverseShift
+            kws = dict(
+                freq=tolerance,
+                method=projection_method,
+                drop_mask=drop_mask,
+                target=dummy,
+            )
+            func_kws = {**kws, "fill_value": np.nan}
 
-    kws = dict(
-        method=method,
-        freq=freq,
-        agg_func=flag_func,
-        fill_value=np.nan,
-        max_invalid_total=maxna_flags,
-        max_invalid_consec=maxna_group_flags,
-    )
+        elif method == "match":
+            func = lambda x: x
+            func_kws = {}
 
-    history = flags.history[field].apply(
-        index=datcol.index,
-        func=aggregate2Freq,
-        func_kws=kws,
-    )
+        else:
+            raise ValueError(f"unknown method {method}")
+
+        history = self._flags.history[field].apply(dummy.index, func, func_kws)
+        if squeeze:
+            history = history.squeeze(raw=True)
+
+            meta = {
+                "func": f"concatFlags({field})",
+                "args": (field, target),
+                "kwargs": {
+                    "method": method,
+                    "freq": freq,
+                    "drop": drop,
+                    "squeeze": squeeze,
+                    **kwargs,
+                },
+            }
+            self._flags.history[target].append(history, meta)
+        else:
+            self._flags.history[target].append(history)
 
-    data[field] = datcol
-    flags.history[field] = history
-    return data, flags
+        if drop:
+            return self.dropField(field=field)
+
+        return self
 
 
 def _getChunkBounds(target: pd.Series, flagscol: pd.Series, freq: str):
@@ -421,170 +529,3 @@ def _inverseShift(
     source.loc[target_drops.index] = target_drops.values
 
     return source.fillna(fill_value).astype(dtype, copy=False)
-
-
-@register(
-    mask=[],
-    demask=[],
-    squeeze=[],
-    handles_target=True,  # target is mandatory in func, so its allowed
-)
-def concatFlags(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    target: str,
-    method: Literal[
-        "inverse_fagg",
-        "inverse_bagg",
-        "inverse_nagg",
-        "inverse_fshift",
-        "inverse_bshift",
-        "inverse_nshift",
-        "inverse_interpolation",
-        "match",
-    ] = "match",
-    freq: Optional[str] = None,
-    drop: Optional[bool] = False,
-    squeeze: Optional[bool] = False,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    The Function appends flags history of ``fields`` to flags history of ``target``.
-    Before appending, columns in ``field`` history are projected onto the target index via ``method``
-
-    method: (field_flag associated with "field", source_flags associated with "source")
-
-    * 'inverse_nagg' - all target_flags within the range +/- freq/2 of a field_flag, get assigned this field flags value.
-       (if field_flag > target_flag)
-
-    * 'inverse_bagg' - all target_flags succeeding a field_flag within the range of "freq", get assigned this field flags
-       value. (if field_flag > target_flag)
-
-    * 'inverse_fagg' - all target_flags preceeding a field_flag within the range of "freq", get assigned this field flags
-       value. (if field_flag > target_flag)
-
-    * 'inverse_interpolation' - all target_flags within the range +/- freq of a field_flag, get assigned this source flags value.
-      (if field_flag > target_flag)
-
-    * 'inverse_nshift' - That target_flag within the range +/- freq/2, that is nearest to a field_flag, gets the source
-      flags value. (if field_flag > target_flag)
-
-    * 'inverse_bshift' - That target_flag succeeding a field flag within the range freq, that is nearest to a
-       field_flag, gets assigned this field flags value. (if field_flag > target_flag)
-
-    * 'inverse_nshift' - That target_flag preceeding a field flag within the range freq, that is nearest to a
-       field_flag, gets assigned this field flags value. (if field_flag > target_flag)
-
-    * 'match' - any target_flag with a timestamp matching a field_flags timestamp gets this field_flags value
-       (if field_flag > target_flag)
-
-    Note, to undo or backtrack a resampling/shifting/interpolation that has been performed with a certain method,
-    you can just pass the associated "inverse" method. Also you should pass the same ``drop`` keyword.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-
-    field : str
-        Fieldname of flags history to append.
-
-    flags : saqc.Flags
-        Container to store flags of the data.
-
-    target : str
-        Field name of flags history to append to.
-
-    method : {'inverse_fagg', 'inverse_bagg', 'inverse_nagg', 'inverse_fshift', 'inverse_bshift', 'inverse_nshift', 'match'}, default 'match'
-        The method used for projection of ``field`` flags onto ``target`` flags. See description above for more details.
-
-    freq : str or None, default None
-        The ``freq`` determines the projection range for the projection method. See above description for more details.
-        Defaultly (None), the sampling frequency of ``field`` is used.
-
-    drop : bool, default False
-        If set to `True`, the `field` column will be removed after processing
-
-    squeeze : bool, default False
-        If set to `True`, the appended flags frame will be squeezed - resulting in function specific flags informations
-        getting lost.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values and shape may have changed relatively to the flags input.
-    """
-    flagscol = flags[field]
-    target_datcol = data[target]
-    target_flagscol = flags[target]
-
-    if target_datcol.empty or flagscol.empty:
-        return data, flags
-
-    dummy = pd.Series(np.nan, target_flagscol.index, dtype=float)
-
-    if freq is None:
-        freq = getFreqDelta(flagscol.index)
-        if freq is None and not method == "match":
-            raise ValueError(
-                'To project irregularly sampled data, either use method="match", or '
-                "pass custom projection range to freq parameter."
-            )
-
-    if method[-13:] == "interpolation":
-        ignore = _getChunkBounds(target_datcol, flagscol, freq)
-        func = _inverseInterpolation
-        func_kws = dict(freq=freq, chunk_bounds=ignore, target=dummy)
-
-    elif method[-3:] == "agg":
-        projection_method = METHOD2ARGS[method][0]
-        tolerance = METHOD2ARGS[method][1](freq)
-        func = _inverseAggregation
-        func_kws = dict(freq=tolerance, method=projection_method, target=dummy)
-
-    elif method[-5:] == "shift":
-        drop_mask = target_datcol.isna() | _isflagged(
-            target_flagscol, kwargs["dfilter"]
-        )
-        projection_method = METHOD2ARGS[method][0]
-        tolerance = METHOD2ARGS[method][1](freq)
-        func = _inverseShift
-        kws = dict(
-            freq=tolerance, method=projection_method, drop_mask=drop_mask, target=dummy
-        )
-        func_kws = {**kws, "fill_value": np.nan}
-
-    elif method == "match":
-        func = lambda x: x
-        func_kws = {}
-
-    else:
-        raise ValueError(f"unknown method {method}")
-
-    history = flags.history[field].apply(dummy.index, func, func_kws)
-    if squeeze:
-        history = history.squeeze(raw=True)
-
-        meta = {
-            "func": f"concatFlags({field})",
-            "args": (field, target),
-            "kwargs": {
-                "method": method,
-                "freq": freq,
-                "drop": drop,
-                "squeeze": squeeze,
-                **kwargs,
-            },
-        }
-        flags.history[target].append(history, meta)
-    else:
-        flags.history[target].append(history)
-
-    if drop:
-        data, flags = tools.dropField(data=data, flags=flags, field=field)
-
-    return data, flags
diff --git a/saqc/funcs/residuals.py b/saqc/funcs/residuals.py
index dc8f05e364ebea15cbd818e2911d0767940fd2ea..dec6681e2be5c2f3b16e112bbe2b4984703e99da 100644
--- a/saqc/funcs/residuals.py
+++ b/saqc/funcs/residuals.py
@@ -7,154 +7,145 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Callable, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Optional, Union
 
 import numpy as np
 import pandas as pd
 
-from dios import DictOfSeries
-from saqc.core.flags import Flags
 from saqc.core.register import register
 from saqc.funcs.curvefit import _fitPolynomial
 from saqc.funcs.rolling import _roll
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
 
-@register(mask=["field"], demask=[], squeeze=[])
-def calculatePolynomialResiduals(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Union[str, int],
-    order: int,
-    min_periods: Optional[int] = 0,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Fits a polynomial model to the data and calculate the residuals.
-
-    The residual  is calculated by fitting a polynomial of degree `order` to a data
-    slice of size `window`, that has x at its center.
-
-    Note, that calculating the residuals tends to be quite costy, because a function
-    fitting is performed for every sample. To improve performance, consider the
-    following possibilities:
-
-    In case your data is sampled at an equidistant frequency grid:
-
-    (1) If you know your data to have no significant number of missing values,
-    or if you do not want to calculate residuals for windows containing missing values
-    any way, performance can be increased by setting min_periods=window.
-
-    Note, that the initial and final window/2 values do not get fitted.
-
-    Each residual gets assigned the worst flag present in the interval of
-    the original data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data.
-
-    field : str
-        The column, holding the data-to-be-modelled.
-
-    flags : saqc.Flags
-        Container to store quality flags to data.
-
-    window : {str, int}
-        The size of the window you want to use for fitting. If an integer is passed,
-        the size refers to the number of periods for every fitting window. If an
-        offset string is passed, the size refers to the total temporal extension. The
-        window will be centered around the vaule-to-be-fitted. For regularly sampled
-        timeseries the period number will be casted down to an odd number if even.
-
-    order : int
-        The degree of the polynomial used for fitting
-
-    min_periods : int or None, default 0
-        The minimum number of periods, that has to be available in every values
-        fitting surrounding for the polynomial fit to be performed. If there are not
-        enough values, np.nan gets assigned. Default (0) results in fitting
-        regardless of the number of values present (results in overfitting for too
-        sparse intervals). To automatically set the minimum number of periods to the
-        number of values in an offset defined window size, pass np.nan.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-    flags : saqc.Flags
-    """
-    orig = data[field]
-    data, flags = _fitPolynomial(
-        data=data,
-        field=field,
-        flags=flags,
-        window=window,
-        order=order,
-        min_periods=min_periods,
+
+class ResidualsMixin:
+    @register(mask=["field"], demask=[], squeeze=[])
+    def calculatePolynomialResiduals(
+        self: "SaQC",
+        field: str,
+        window: str | int,
+        order: int,
+        min_periods: int = 0,
         **kwargs,
-    )
-    data[field] = orig - data[field]
-    return data, flags
-
-
-@register(mask=["field"], demask=[], squeeze=[])
-def calculateRollingResiduals(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Union[str, int],
-    func: Callable[[pd.Series], np.ndarray] = np.mean,
-    min_periods: Optional[int] = 0,
-    center: bool = True,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Calculate the diff of a rolling-window function and the data.
-
-    Note, that the data gets assigned the worst flag present in the original data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data.
-    field : str
-        The column to calculate on.
-    flags : saqc.Flags
-        Container to store quality flags to data.
-    window : {int, str}
-        The size of the window you want to roll with. If an integer is passed, the size
-        refers to the number of periods for every fitting window. If an offset string
-        is passed, the size refers to the total temporal extension. For regularly
-        sampled timeseries, the period number will be casted down to an odd number if
-        ``center=True``.
-    func : Callable, default np.mean
-        Function to roll with.
-    min_periods : int, default 0
-        The minimum number of periods to get a valid value
-    center : bool, default True
-        If True, center the rolling window.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    orig = data[field].copy()
-    data, flags = _roll(
-        data=data,
-        field=field,
-        flags=flags,
-        window=window,
-        func=func,
-        min_periods=min_periods,
-        center=center,
+    ) -> "SaQC":
+        """
+        Fits a polynomial model to the data and calculates the residuals.
+
+        The residual is calculated by fitting a polynomial of degree `order` to a data
+        slice of size `window` that has x at its center.
+
+        Note that calculating the residuals tends to be quite costly, because a function
+        fit is performed for every sample. To improve performance, consider the
+        following possibilities:
+
+        In case your data is sampled at an equidistant frequency grid:
+
+        (1) If you know your data to have no significant number of missing values,
+        or if you do not want to calculate residuals for windows containing missing values
+        anyway, performance can be increased by setting min_periods=window.
+
+        Note that the initial and final window/2 values do not get fitted.
+
+        Each residual gets assigned the worst flag present in the interval of
+        the original data.
+
+        Parameters
+        ----------
+        field : str
+            The column, holding the data-to-be-modelled.
+
+        window : {str, int}
+            The size of the window you want to use for fitting. If an integer is passed,
+            the size refers to the number of periods for every fitting window. If an
+            offset string is passed, the size refers to the total temporal extension. The
+            window will be centered around the value-to-be-fitted. For regularly sampled
+            timeseries the period number will be cast down to an odd number if even.
+
+        order : int
+            The degree of the polynomial used for fitting
+
+        min_periods : int or None, default 0
+            The minimum number of periods that have to be available in every value's
+            fitting window for the polynomial fit to be performed. If there are not
+            enough values, np.nan gets assigned. The default (0) results in fitting
+            regardless of the number of values present (which may result in overfitting for too
+            sparse intervals). To automatically set the minimum number of periods to the
+            number of values in an offset-defined window size, pass np.nan.
+
+        Returns
+        -------
+        saqc.SaQC
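+
+        Examples
+        --------
+        A minimal sketch (data and ``SaQC`` construction are illustrative assumptions):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=200, freq="10min")
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": np.random.rand(200)}, index=idx))
+        >>> qc = qc.calculatePolynomialResiduals(field="sensor", window="3h", order=2, min_periods=5)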
+        """
+        orig = self._data[field]
+        data, _ = _fitPolynomial(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            window=window,
+            order=order,
+            min_periods=min_periods,
+            **kwargs,
+        )
+        self._data[field] = orig - data[field]
+        return self
+
+    @register(mask=["field"], demask=[], squeeze=[])
+    def calculateRollingResiduals(
+        self: "SaQC",
+        field: str,
+        window: str | int,
+        func: Callable[[pd.Series], np.ndarray] = np.mean,
+        min_periods: int = 0,
+        center: bool = True,
         **kwargs,
-    )
-
-    # calculate residual
-    data[field] = orig - data[field]
-    return data, flags
+    ) -> "SaQC":
+        """
+        Calculate the difference between the data and a rolling-window function applied to it.
+
+        Note that the data gets assigned the worst flag present in the original data.
+
+        Parameters
+        ----------
+        field : str
+            The column to calculate on.
+
+        window : {int, str}
+            The size of the window you want to roll with. If an integer is passed, the size
+            refers to the number of periods for every fitting window. If an offset string
+            is passed, the size refers to the total temporal extension. For regularly
+            sampled timeseries, the period number will be cast down to an odd number if
+            ``center=True``.
+
+        func : Callable, default np.mean
+            Function to roll with.
+
+        min_periods : int, default 0
+            The minimum number of periods to get a valid value
+
+        center : bool, default True
+            If True, center the rolling window.
+
+        Returns
+        -------
+        saqc.SaQC
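+
+        Examples
+        --------
+        A minimal sketch (data and ``SaQC`` construction are illustrative assumptions):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=100, freq="10min")
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": np.random.rand(100)}, index=idx))
+        >>> qc = qc.calculateRollingResiduals(field="sensor", window="1h", func=np.median)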
+        """
+        orig = self._data[field].copy()
+        data, _ = _roll(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            window=window,
+            func=func,
+            min_periods=min_periods,
+            center=center,
+            **kwargs,
+        )
+
+        # calculate residual
+        self._data[field] = orig - data[field]
+        return self
diff --git a/saqc/funcs/rolling.py b/saqc/funcs/rolling.py
index fa5cabecdbd0036010080cee7b15222ce34d25a6..80699200d866e7bf498426d0c214de3e0e815fae 100644
--- a/saqc/funcs/rolling.py
+++ b/saqc/funcs/rolling.py
@@ -6,7 +6,7 @@
 
 # -*- coding: utf-8 -*-
 
-from typing import Callable, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Union
 
 import numpy as np
 import pandas as pd
@@ -16,62 +16,65 @@ from saqc.core.flags import Flags
 from saqc.core.register import register
 from saqc.lib.tools import getFreqDelta
 
-
-@register(mask=["field"], demask=[], squeeze=[])
-def roll(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    window: Union[str, int],
-    func: Callable[[pd.Series], np.ndarray] = np.mean,
-    min_periods: int = 0,
-    center: bool = True,
-    **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Calculate a rolling-window function on the data.
-
-    Note, that the data gets assigned the worst flag present in the original data.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        The data.
-    field : str
-        The column to calculate on.
-    flags : saqc.Flags
-        Container to store quality flags to data.
-    window : {int, str}
-        The size of the window you want to roll with. If an integer is passed, the size
-        refers to the number of periods for every fitting window. If an offset string
-        is passed, the size refers to the total temporal extension. For regularly
-        sampled timeseries, the period number will be casted down to an odd number if
-        ``center=True``.
-    func : Callable, default np.mean
-        Function to roll with.
-    min_periods : int, default 0
-        The minimum number of periods to get a valid value
-    center : bool, default True
-        If True, center the rolling window.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    return _roll(
-        data=data,
-        field=field,
-        flags=flags,
-        window=window,
-        func=func,
-        min_periods=min_periods,
-        center=center,
-        **kwargs,
-    )
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
+class RollingMixin:
+    @register(mask=["field"], demask=[], squeeze=[])
+    def roll(
+        self: "SaQC",
+        field: str,
+        window: Union[str, int],
+        func: Callable[[pd.Series], np.ndarray] = np.mean,
+        min_periods: int = 0,
+        center: bool = True,
+        **kwargs
+    ) -> "SaQC":
+        """
+        Calculate a rolling-window function on the data.
+
+        Note that the data gets assigned the worst flag present in the original data.
+
+        Parameters
+        ----------
+        field : str
+            The column to calculate on.
+
+        window : {int, str}
+            The size of the window you want to roll with. If an integer is passed, the size
+            refers to the number of periods for every fitting window. If an offset string
+            is passed, the size refers to the total temporal extension. For regularly
+            sampled timeseries, the period number will be cast down to an odd number if
+            ``center=True``.
+
+        func : Callable, default np.mean
+            Function to roll with.
+
+        min_periods : int, default 0
+            The minimum number of periods to get a valid value
+
+        center : bool, default True
+            If True, center the rolling window.
+
+        Returns
+        -------
+        saqc.SaQC
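+
+        Examples
+        --------
+        A minimal sketch (data and ``SaQC`` construction are illustrative assumptions):
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> import saqc
+        >>> idx = pd.date_range("2021-01-01", periods=100, freq="10min")
+        >>> qc = saqc.SaQC(data=pd.DataFrame({"sensor": np.random.rand(100)}, index=idx))
+        >>> qc = qc.roll(field="sensor", window="1h", func=np.std)  # centered rolling std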
+        """
+        self._data, self._flags = _roll(
+            data=self._data,
+            field=field,
+            flags=self._flags,
+            window=window,
+            func=func,
+            min_periods=min_periods,
+            center=center,
+            **kwargs,
+        )
+        return self
 
 
 def _roll(
diff --git a/saqc/funcs/scores.py b/saqc/funcs/scores.py
index 3af6854b7317a20dc7cd8d5d753d0c371b624fd5..8e0be0b36f6d2bad5810096798eaf23470b606ae 100644
--- a/saqc/funcs/scores.py
+++ b/saqc/funcs/scores.py
@@ -7,158 +7,293 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Callable, Sequence, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Optional, Sequence, Tuple
 
 import numpy as np
 import pandas as pd
 from typing_extensions import Literal
 
 import saqc.lib.ts_operators as ts_ops
-from dios import DictOfSeries
 from saqc.constants import UNFLAGGED
-from saqc.core.flags import Flags
 from saqc.core.register import register
-from saqc.lib.tools import toSequence
-
-
-@register(
-    mask=["field"],
-    demask=[],
-    squeeze=["target"],
-    multivariate=True,
-    handles_target=True,
-)
-def assignKNNScore(
-    data: DictOfSeries,
-    field: Sequence[str],
-    flags: Flags,
-    target: str,
-    n: int = 10,
-    func: Callable[[pd.Series], float] = np.sum,
-    freq: Union[float, str] = np.inf,
-    min_periods: int = 2,
-    method: Literal["ball_tree", "kd_tree", "brute", "auto"] = "ball_tree",
-    metric: str = "minkowski",
-    p: int = 2,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
+from saqc.lib.tools import getApply, toSequence
+
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
+def _univarScoring(
+    data: pd.Series,
+    window: str | int | None = None,
+    norm_func: Callable = np.nanstd,
+    model_func: Callable = np.nanmean,
+    center: bool = True,
+    min_periods: Optional[int] = None,
+) -> Tuple[pd.Series, pd.Series, pd.Series]:
     """
-    TODO: docstring need a rework
-    Score datapoints by an aggregation of the dictances to their k nearest neighbors.
-
-    The function is a wrapper around the NearestNeighbors method from pythons sklearn library (See reference [1]).
-
-    The steps taken to calculate the scores are as follows:
-
-    1. All the timeseries, given through ``field``, are combined to one feature space by an *inner* join on their
-       date time indexes. thus, only samples, that share timestamps across all ``field`` will be included in the
-       feature space.
-    2. Any datapoint/sample, where one ore more of the features is invalid (=np.nan) will get excluded.
-    3. For every data point, the distance to its `n` nearest neighbors is calculated by applying the
-       metric `metric` at grade `p` onto the feature space. The defaults lead to the euclidian to be applied.
-       If `radius` is not None, it sets the upper bound of distance for a neighbor to be considered one of the
-       `n` nearest neighbors. Furthermore, the `freq` argument determines wich samples can be
-       included into a datapoints nearest neighbors list, by segmenting the data into chunks of specified temporal
-       extension and feeding that chunks to the kNN algorithm seperatly.
-    4. For every datapoint, the calculated nearest neighbors distances get aggregated to a score, by the function
-       passed to `func`. The default, ``sum`` obviously just sums up the distances.
-    5. The resulting timeseries of scores gets assigned to the field target.
+    Calculate (rolling) normalisation scores.
 
     Parameters
     ----------
-    data : dios.DictOfSeries
+    data
-        A dictionary of pandas.Series, holding all the data.
+        The series holding the data-to-be-scored.
-    field : list of str
-        input variable names.
-    flags : saqc.flags
-        A flags object, holding flags and additional informations related to `data`.
-    target : str, default "kNNscores"
-        A new Column name, where the result is stored.
-    n : int, default 10
-        The number of nearest neighbors to which the distance is comprised in every datapoints scoring calculation.
-    func : Callable[numpy.array, float], default np.sum
-        A function that assigns a score to every one dimensional array, containing the distances
-        to every datapoints `n` nearest neighbors.
-    freq : {np.inf, float, str}, default np.inf
-        Determines the segmentation of the data into partitions, the kNN algorithm is
-        applied onto individually.
-
-        * ``np.inf``: Apply Scoring on whole data set at once
-        * ``x`` > 0 : Apply scoring on successive data chunks of periods length ``x``
-        * Offset String : Apply scoring on successive partitions of temporal extension matching the passed offset
-          string
-
-    min_periods : int, default 2
-        The minimum number of periods that have to be present in a partition for the kNN scoring
-        to be applied. If the number of periods present is below `min_periods`, the score for the
-        datapoints in that partition will be np.nan.
-    method : {'ball_tree', 'kd_tree', 'brute', 'auto'}, default 'ball_tree'
-        The search algorithm to find each datapoints k nearest neighbors.
-        The keyword just gets passed on to the underlying sklearn method.
-        See reference [1] for more information on the algorithm.
-    metric : str, default 'minkowski'
-        The metric the distances to any datapoints neighbors is computed with. The default of `metric`
-        together with the default of `p` result in the euclidian to be applied.
-        The keyword just gets passed on to the underlying sklearn method.
-        See reference [1] for more information on the algorithm.
-    p : int, default 2
-        The grade of the metrice specified by parameter `metric`.
-        The keyword just gets passed on to the underlying sklearn method.
-        See reference [1] for more information on the algorithm.
-
-    References
-    ----------
-    [1] https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html
+    window : {str, int}, default None
+        Size of the window. Either determined via an offset string, denoting the window's temporal extension, or
+        by an integer, denoting the window's number of periods. `NaN` measurements also count as periods.
+        If `None` is passed, all data points share the same scoring window, which then equals the whole
+        data.
+    model_func
+        Function to calculate the center moment in every window.
+    norm_func
+        Function to calculate the scaling for every window.
+    center
+        Whether or not to center the target value in the scoring window. If `False`, the
+        target value is the last value in the window.
+    min_periods
+        Minimum number of valid measurements in a scoring window for the resulting score to be considered valid.
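+
+    Examples
+    --------
+    A minimal sketch of the intended call pattern (the series values are made up
+    for illustration; no output is shown):
+
+    >>> s = pd.Series([1.0, 2.0, 30.0, 2.0, 1.0])
+    >>> score, model, norm = _univarScoring(s, window=3, center=True, min_periods=1)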
     """
-    if isinstance(target, list):
-        if (len(target) > 1) or (target[0] in data.columns):
-            raise ValueError(
-                f"'target' must not exist and be of length 1. {target} was passed instead."
+    if data.empty:
+        return data, data, data
+    if min_periods is None:
+        min_periods = 0
+
+    if window is None:
+        if data.notna().sum() >= min_periods:
+            # in case of global (non-rolling) scoring, broadcast the statistics to a series for compatibility reasons
+            norm = pd.Series(norm_func(data.values), index=data.index)
+            model = pd.Series(model_func(data.values), index=data.index)
+        else:
+            norm = pd.Series(np.nan, index=data.index)
+            model = pd.Series(np.nan, index=data.index)
+    else:
+        # use the rolling built-in for the passed func if available, otherwise fall back to rolling.apply
+        roller = data.rolling(window=window, min_periods=min_periods, center=center)
+        norm = getApply(roller, norm_func)
+        model = getApply(roller, model_func)
+
+    score = (data - model) / norm
+    return score, model, norm
+
+
+class ScoresMixin:
+    @register(
+        mask=["field"],
+        demask=[],
+        squeeze=["target"],
+        multivariate=True,
+        handles_target=True,
+    )
+    def assignKNNScore(
+        self: "SaQC",
+        field: Sequence[str],
+        target: str,
+        n: int = 10,
+        func: Callable[[pd.Series], float] = np.sum,
+        freq: float | str | None = np.inf,
+        min_periods: int = 2,
+        method: Literal["ball_tree", "kd_tree", "brute", "auto"] = "ball_tree",
+        metric: str = "minkowski",
+        p: int = 2,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        TODO: docstring needs a rework
+        Score datapoints by an aggregation of the distances to their k nearest neighbors.
+
+        The function is a wrapper around the NearestNeighbors method from Python's scikit-learn library (see reference [1]).
+
+        The steps taken to calculate the scores are as follows:
+
+        1. All the timeseries, given through ``field``, are combined to one feature space by an *inner* join on their
+           date time indexes. Thus, only samples that share timestamps across all ``field`` will be included in the
+           feature space.
+        2. Any datapoint/sample where one or more of the features is invalid (=np.nan) will get excluded.
+        3. For every data point, the distance to its `n` nearest neighbors is calculated by applying the
+           metric `metric` at grade `p` onto the feature space. The defaults lead to the euclidean metric
+           being applied. The `freq` argument determines which samples can be
+           included in a datapoint's nearest neighbors list, by segmenting the data into chunks of specified temporal
+           extension and feeding those chunks to the kNN algorithm separately.
+        4. For every datapoint, the calculated nearest neighbor distances get aggregated to a score by the function
+           passed to `func`. The default, ``sum``, simply sums up the distances.
+        5. The resulting timeseries of scores gets assigned to the field ``target``.
+
+        Parameters
+        ----------
+        field : list of str
+            Input variable names.
+
+        target : str, default "kNNscores"
+            A new column name, where the result is stored.
+
+        n : int, default 10
+            The number of nearest neighbors whose distances enter every datapoint's score calculation.
+
+        func : Callable[numpy.array, float], default np.sum
+            A function that assigns a score to every one-dimensional array containing the distances
+            to a datapoint's `n` nearest neighbors.
+
+        freq : {float, str, None}, default np.inf
+            Determines the segmentation of the data into partitions, onto which the kNN algorithm is
+            applied individually.
+
+            * ``np.inf``: Apply scoring on the whole data set at once
+            * ``x`` > 0 : Apply scoring on successive data chunks of periods length ``x``
+            * Offset String : Apply scoring on successive partitions of temporal extension matching the passed offset
+              string
+
+        min_periods : int, default 2
+            The minimum number of periods that have to be present in a partition for the kNN scoring
+            to be applied. If the number of periods present is below `min_periods`, the score for the
+            datapoints in that partition will be np.nan.
+
+        method : {'ball_tree', 'kd_tree', 'brute', 'auto'}, default 'ball_tree'
+            The search algorithm to find each datapoint's k nearest neighbors.
+            The keyword just gets passed on to the underlying sklearn method.
+            See reference [1] for more information on the algorithm.
+
+        metric : str, default 'minkowski'
+            The metric the distances to any datapoint's neighbors are computed with. The default of `metric`
+            together with the default of `p` results in the euclidean metric being applied.
+            The keyword just gets passed on to the underlying sklearn method.
+            See reference [1] for more information on the algorithm.
+
+        p : int, default 2
+            The grade of the metric specified by parameter `metric`.
+            The keyword just gets passed on to the underlying sklearn method.
+            See reference [1] for more information on the algorithm.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        References
+        ----------
+        [1] https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html
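+
+        Examples
+        --------
+        A minimal call sketch. ``qc`` is assumed to be an existing ``saqc.SaQC`` object
+        and the variable names are illustrative only:
+
+        >>> qc = qc.assignKNNScore(field=["x", "y"], target="kNNscores", n=5)  # doctest: +SKIP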
+        """
+        if isinstance(target, list):
+            if (len(target) > 1) or (target[0] in self._data.columns):
+                raise ValueError(
+                    f"'target' must not exist and be of length 1. {target} was passed instead."
+                )
+            target = target[0]
+
+        fields = toSequence(field)
+        val_frame = self._data[fields].copy()
+        score_index = val_frame.index_of("shared")
+        score_ser = pd.Series(np.nan, index=score_index, name=target)
+
+        val_frame = val_frame.loc[val_frame.index_of("shared")].to_df()
+        val_frame.dropna(inplace=True)
+
+        if val_frame.empty:
+            return self
+
+        # partitioning
+        if not freq:
+            freq = val_frame.shape[0]
+
+        if isinstance(freq, str):
+            grouper = pd.Grouper(freq=freq)
+        else:
+            grouper = pd.Series(
+                data=np.arange(0, val_frame.shape[0]), index=val_frame.index
+            )
+            grouper = grouper.transform(lambda x: int(np.floor(x / freq)))
+
+        partitions = val_frame.groupby(grouper)
+
+        for _, partition in partitions:
+            if partition.empty or (partition.shape[0] < min_periods):
+                continue
+
+            sample_size = partition.shape[0]
+            nn_neighbors = min(n, max(sample_size, 2) - 1)
+            dist, *_ = ts_ops.kNN(
+                partition.values, nn_neighbors, algorithm=method, metric=metric, p=p
             )
-        target = target[0]
+            try:
+                resids = getattr(dist, func.__name__)(axis=1)
+            except AttributeError:
+                resids = np.apply_along_axis(func, 1, dist)
 
-    fields = toSequence(field)
-    val_frame = data[fields].copy()
-    score_index = val_frame.index_of("shared")
-    score_ser = pd.Series(np.nan, index=score_index, name=target)
+            score_ser[partition.index] = resids
 
-    val_frame = val_frame.loc[val_frame.index_of("shared")].to_df()
-    val_frame.dropna(inplace=True)
+        self._flags[target] = pd.Series(UNFLAGGED, index=score_ser.index, dtype=float)
+        self._data[target] = score_ser
 
-    if val_frame.empty:
-        return data, flags
+        return self
 
-    # partitioning
-    if not freq:
-        freq = val_frame.shape[0]
+    @register(mask=["field"], demask=[], squeeze=[])
+    def assignZScore(
+        self: "SaQC",
+        field: str,
+        window: str | int | None = None,
+        norm_func: Callable = np.nanstd,
+        model_func: Callable = np.nanmean,
+        center: bool = True,
+        min_periods: Optional[int] = None,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Calculate (rolling) Z-scores.
 
-    if isinstance(freq, str):
-        grouper = pd.Grouper(freq=freq)
-    else:
-        grouper = pd.Series(
-            data=np.arange(0, val_frame.shape[0]), index=val_frame.index
-        )
-        grouper = grouper.transform(lambda x: int(np.floor(x / freq)))
+        See the Notes section for a detailed overview of the calculation.
 
-    partitions = val_frame.groupby(grouper)
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column holding the data to be scored.
+        window : {str, int}, default None
+            Size of the window. Either determined via an offset string, denoting the window's temporal extension, or
+            by an integer, denoting the window's number of periods.
+            `NaN` measurements also count as periods.
+            If `None` is passed, all data points share the same scoring window, which then equals the whole
+            data.
+        model_func
+            Function to calculate the center moment in every window.
+        norm_func
+            Function to calculate the scaling for every window.
+        center
+            Whether or not to center the target value in the scoring window. If `False`, the
+            target value is the last value in the window.
+        min_periods
+            Minimum number of valid measurements in a scoring window for the resulting score to be considered valid.
 
-    for _, partition in partitions:
-        if partition.empty or (partition.shape[0] < min_periods):
-            continue
+        Returns
+        -------
+        saqc.SaQC
 
-        sample_size = partition.shape[0]
-        nn_neighbors = min(n, max(sample_size, 2) - 1)
-        dist, *_ = ts_ops.kNN(
-            partition.values, nn_neighbors, algorithm=method, metric=metric, p=p
-        )
-        try:
-            resids = getattr(dist, func.__name__)(axis=1)
-        except AttributeError:
-            resids = np.apply_along_axis(func, 1, dist)
+        Notes
+        -----
+        Steps of calculation:
+
+        1. Consider a window :math:`W = x_{1},...,x_{w}` of successive points,
+           containing the value :math:`x_{k}` that is to be scored.
+           (The position of :math:`k` inside the window depends on the parameter `center`.)
+
+        2. The "moment" :math:`M` of the window is calculated as ``M = model_func(W)``.
 
-        score_ser[partition.index] = resids
+        3. The "scaling" :math:`N` for the window gets calculated via :math:`N=` `norm_func(:math:`W`)
 
-    flags[target] = pd.Series(UNFLAGGED, index=score_ser.index, dtype=float)
-    data[target] = score_ser
+        4. The "score" :math:`S` for the point :math:`x_{k}`gets calculated via :math:`S=(x_{k} - M) / N`
+        """
 
-    return data, flags
+        if min_periods is None:
+            min_periods = 0
+
+        score, _, _ = _univarScoring(
+            self._data[field],
+            window=window,
+            norm_func=norm_func,
+            model_func=model_func,
+            center=center,
+            min_periods=min_periods,
+        )
+        self._data[field] = score
+        return self
diff --git a/saqc/funcs/tools.py b/saqc/funcs/tools.py
index f418f26afb1ee5194054af5b9d8e387ccc21da20..0967a823a62b5266f6a35dfd89aaf698a2cf0c5f 100644
--- a/saqc/funcs/tools.py
+++ b/saqc/funcs/tools.py
@@ -8,368 +8,333 @@
 from __future__ import annotations
 
 import pickle
-from typing import Optional, Tuple
+from typing import TYPE_CHECKING, Optional
 
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
 from typing_extensions import Literal
 
-from dios import DictOfSeries
 from saqc.constants import FILTER_NONE, UNFLAGGED
-from saqc.core.flags import Flags
 from saqc.core.register import processing, register
 from saqc.lib.plotting import makeFig
 from saqc.lib.tools import periodicMask
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
+
+
 _MPL_DEFAULT_BACKEND = mpl.get_backend()
 
 
-@register(mask=[], demask=[], squeeze=[], handles_target=True)
-def copyField(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    target: str,
-    overwrite: bool = False,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Copy data and flags to a new name (preserve flags history).
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to fork (copy).
-    flags : saqc.Flags
-        Container to store quality flags to data.
-    target: str
-        Target name.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        data shape may have changed relatively to the flags input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags shape may have changed relatively to the flags input.
-    """
-    if field == target:
-        return data, flags
-
-    if target in flags.columns.union(data.columns):
-        if not overwrite:
-            raise ValueError(f"{target}: already exist")
-        data, flags = dropField(data=data, flags=flags, field=target)
-
-    data[target] = data[field].copy()
-    flags.history[target] = flags.history[field].copy()
-
-    return data, flags
-
-
-@processing()
-def dropField(
-    data: DictOfSeries, field: str, flags: Flags, **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Drops field from the data and flags.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to drop.
-    flags : saqc.Flags
-        Container to store quality flags to data.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        data shape may have changed relatively to the flags input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags shape may have changed relatively to the flags input.
-    """
-    del data[field]
-    del flags[field]
-    return data, flags
-
-
-@processing()
-def renameField(
-    data: DictOfSeries, field: str, flags: Flags, new_name: str, **kwargs
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Rename field in data and flags.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the data column, you want to rename.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    new_name : str
-        String, field is to be replaced with.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    data[new_name] = data[field]
-    flags.history[new_name] = flags.history[field]
-    del data[field]
-    del flags[field]
-    return data, flags
-
-
-@register(mask=[], demask=[], squeeze=["field"])
-def selectTime(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    mode: Literal["periodic", "selection_field"],
-    selection_field: Optional[str] = None,
-    start: Optional[str] = None,
-    end: Optional[str] = None,
-    closed: bool = True,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Realizes masking within saqc.
-
-    Due to some inner saqc mechanics, it is not straight forwardly possible to exclude
-    values or datachunks from flagging routines. This function replaces flags with UNFLAGGED
-    value, wherever values are to get masked. Furthermore, the masked values get replaced by
-    np.nan, so that they dont effect calculations.
-
-    Here comes a recipe on how to apply a flagging function only on a masked chunk of the variable field:
-
-    1. dublicate "field" in the input data (`copyField`)
-    2. mask the dublicated data (this, `selectTime`)
-    3. apply the tests you only want to be applied onto the masked data chunks (a saqc function)
-    4. project the flags, calculated on the dublicated and masked data onto the original field data
-        (`concateFlags` or `flagGeneric`)
-    5. drop the dublicated data (`dropField`)
-
-    To see an implemented example, checkout flagSeasonalRange in the saqc.functions module
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-masked.
-    flags : saqc.Flags
-        Container to store flags of the data.
-    mode : {"periodic", "mask_field"}
-        The masking mode.
-        - "periodic": parameters "period_start", "end" are evaluated to generate a periodical mask
-        - "mask_var": data[mask_var] is expected to be a boolean valued timeseries and is used as mask.
-    selection_field : {None, str}, default None
-        Only effective if mode == "mask_var"
-        Fieldname of the column, holding the data that is to be used as mask. (must be boolean series)
-        Neither the series` length nor its labels have to match data[field]`s index and length. An inner join of the
-        indices will be calculated and values get masked where the values of the inner join are ``True``.
-    start : {None, str}, default None
-        Only effective if mode == "seasonal"
-        String denoting starting point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
-        Has to be of same length as `end` parameter.
-        See examples section below for some examples.
-    end : {None, str}, default None
-        Only effective if mode == "periodic"
-        String denoting starting point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
-        Has to be of same length as `end` parameter.
-        See examples section below for some examples.
-    closed : boolean
-        Wheather or not to include the mask defining bounds to the mask.
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-        Flags values may have changed relatively to the flags input.
-
-
-    Examples
-    --------
-    The `period_start` and `end` parameters provide a conveniant way to generate seasonal / date-periodic masks.
-    They have to be strings of the forms: "mm-ddTHH:MM:SS", "ddTHH:MM:SS" , "HH:MM:SS", "MM:SS" or "SS"
-    (mm=month, dd=day, HH=hour, MM=minute, SS=second)
-    Single digit specifications have to be given with leading zeros.
-    `period_start` and `seas   on_end` strings have to be of same length (refer to the same periodicity)
-    The highest date unit gives the period.
-    For example:
-
-    >>> start = "01T15:00:00"
-    >>> end = "13T17:30:00"
-
-    Will result in all values sampled between 15:00 at the first and  17:30 at the 13th of every month get masked
-
-    >>> start = "01:00"
-    >>> end = "04:00"
-
-    All the values between the first and 4th minute of every hour get masked.
-
-    >>> start = "01-01T00:00:00"
-    >>> end = "01-03T00:00:00"
-
-    Mask january and february of evcomprosed in theery year. masking is inclusive always, so in this case the mask will
-    include 00:00:00 at the first of march. To exclude this one, pass:
-
-    >>> start = "01-01T00:00:00"
-    >>> end = "02-28T23:59:59"
-
-    To mask intervals that lap over a seasons frame, like nights, or winter, exchange sequence of season start and
-    season end. For example, to mask night hours between 22:00:00 in the evening and 06:00:00 in the morning, pass:
-
-    >>> start = "22:00:00"
-    >>> end = "06:00:00"
-    """
-    datcol_idx = data[field].index
-
-    if mode == "periodic":
-        mask = periodicMask(datcol_idx, start, end, ~closed)
-    elif mode == "selection_field":
-        idx = data[selection_field].index.intersection(datcol_idx)
-        mask = data.loc[idx, selection_field]
-    else:
-        raise ValueError("Keyword passed as masking mode is unknown ({})!".format(mode))
-
-    data.aloc[mask, field] = np.nan
-    flags[mask, field] = UNFLAGGED
-    return data, flags
-
-
-@register(mask=[], demask=[], squeeze=[])
-def plot(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    path: Optional[str] = None,
-    max_gap: Optional[str] = None,
-    history: Optional[Literal["valid", "complete"] | list] = "valid",
-    xscope: Optional[slice] = None,
-    phaseplot: Optional[str] = None,
-    store_kwargs: Optional[dict] = None,
-    ax_kwargs: Optional[dict] = None,
-    dfilter: Optional[float] = FILTER_NONE,
-    **kwargs,
-):
-    """
-    Plot data and flags or store plot to file.
-
-    There are two modes, 'interactive' and 'store', which are determind through the
-    ``save_path`` keyword. In interactive mode (default) the plot is shown at runtime
-    and the program execution stops until the plot window is closed manually. In
-    store mode the generated plot is stored to disk and no manually interaction is
-    needed.
-
-    Parameters
-    ----------
-    data : {pd.DataFrame, dios.DictOfSeries}
-        data
-
-    field : str
-        Name of the variable-to-plot
-
-    flags : {pd.DataFrame, dios.DictOfSeries, saqc.flagger}
-        Flags or flagger object
-
-    path : str, default None
-        If ``None`` is passed, interactive mode is entered; plots are shown immediatly
-        and a user need to close them manually before execution continues.
-        If a filepath is passed instead, store-mode is entered and
-        the plot is stored unter the passed location.
-
-    max_gap : str, default None
-        If None, all the points in the data will be connected, resulting in long linear
-        lines, where continous chunks of data is missing. Nans in the data get dropped
-        before plotting. If an offset string is passed, only points that have a distance
-        below `max_gap` get connected via the plotting line.
-
-    history : {"valid", "complete", None, list of strings}, default "valid"
-        Discriminate the plotted flags with respect to the tests they originate from.
-
-        * "valid" - Only plot those flags, that do not get altered or "unflagged" by subsequent tests. Only list tests
-          in the legend, that actually contributed flags to the overall resault.
-        * "complete" - plot all the flags set and list all the tests ran on a variable. Suitable for debugging/tracking.
-        * None - just plot the resulting flags for one variable, without any historical meta information.
-        * list of strings - plot only flags set by those tests listed.
-
-    xscope : slice or Offset, default None
-        Parameter, that determines a chunk of the data to be plotted
-        processed. `xscope` can be anything, that is a valid argument to the ``pandas.Series.__getitem__`` method.
-
-    phaseplot : str or None, default None
-        If a string is passed, plot ``field`` in the phase space it forms together with the Variable ``phaseplot``.
-
-    store_kwargs : dict, default {}
-        Keywords to be passed on to the ``matplotlib.pyplot.savefig`` method, handling
-        the figure storing. To store an pickle object of the figure, use the option
-        ``{'pickle': True}``, but note that all other store_kwargs are ignored then.
-        Reopen with: ``pickle.load(open(savepath,'w')).show()``
-
-    ax_kwargs : dict, default {}
-        Axis keywords. Change the axis labeling defaults. Most important keywords:
-        'x_label', 'y_label', 'title', 'fontsize', 'cycleskip'.
-
-    """
-    # keep the very original, not the copy
-    orig = data, flags
-    data, flags = data.copy(), flags.copy()
-
-    interactive = path is None
-    level = kwargs.get("flag", UNFLAGGED)
-
-    if dfilter < np.inf:
-        data.loc[flags[field] >= dfilter, field] = np.nan
-
-    if store_kwargs is None:
-        store_kwargs = {}
-
-    if ax_kwargs is None:
-        ax_kwargs = {}
-
-    if interactive:
-        mpl.use(_MPL_DEFAULT_BACKEND)
-
-    else:
-        mpl.use("Agg")
-
-    fig = makeFig(
-        data=data,
-        field=field,
-        flags=flags,
-        level=level,
-        max_gap=max_gap,
-        history=history,
-        xscope=xscope,
-        phaseplot=phaseplot,
-        ax_kwargs=ax_kwargs,
-    )
-
-    if interactive:
-        plt.show()
-
-    else:
-        if store_kwargs.pop("pickle", False):
-            with open(path, "wb") as f:
-                pickle.dump(fig, f)
+class ToolsMixin:
+    @register(mask=[], demask=[], squeeze=[], handles_target=True)
+    def copyField(
+        self: "SaQC",
+        field: str,
+        target: str,
+        overwrite: bool = False,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Copy data and flags to a new name (preserve flags history).
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the data column you want to fork (copy).
+
+        target : str
+            Target name.
+
+        Returns
+        -------
+        saqc.SaQC
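+
+        Examples
+        --------
+        A minimal call sketch. ``qc`` is assumed to be an existing ``saqc.SaQC`` object
+        and the names are illustrative:
+
+        >>> qc = qc.copyField("level", target="level_copy")  # doctest: +SKIP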
+        """
+        if field == target:
+            return self
+
+        if target in self._flags.columns.union(self._data.columns):
+            if not overwrite:
+                raise ValueError(f"{target}: already exist")
+            self = self.dropField(field=target)
+
+        self._data[target] = self._data[field].copy()
+        self._flags.history[target] = self._flags.history[field].copy()
+
+        return self
+
+    @processing()
+    def dropField(self: "SaQC", field: str, **kwargs) -> "SaQC":
+        """
+        Drops field from the data and flags.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the data column you want to drop.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        del self._data[field]
+        del self._flags[field]
+        return self
+
+    @processing()
+    def renameField(self: "SaQC", field: str, new_name: str, **kwargs) -> "SaQC":
+        """
+        Rename field in data and flags.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the data column you want to rename.
+
+        new_name : str
+            The new name the field is to be renamed to.
+
+        Returns
+        -------
+        saqc.SaQC
+        """
+        self._data[new_name] = self._data[field]
+        self._flags.history[new_name] = self._flags.history[field]
+        del self._data[field]
+        del self._flags[field]
+        return self
+
+    @register(mask=[], demask=[], squeeze=["field"])
+    def selectTime(
+        self: "SaQC",
+        field: str,
+        mode: Literal["periodic", "selection_field"],
+        selection_field: Optional[str] = None,
+        start: Optional[str] = None,
+        end: Optional[str] = None,
+        closed: bool = True,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Realizes masking within saqc.
+
+        Due to some inner saqc mechanics, it is not straightforwardly possible to exclude
+        values or data chunks from flagging routines. This function replaces flags with the UNFLAGGED
+        value wherever values are to be masked. Furthermore, the masked values get replaced by
+        np.nan, so that they don't affect calculations.
+
+        Here comes a recipe on how to apply a flagging function only to a masked chunk of the variable field:
+
+        1. duplicate "field" in the input data (`copyField`)
+        2. mask the duplicated data (this function, `selectTime`)
+        3. apply the tests you only want to be applied onto the masked data chunks (a saqc function)
+        4. project the flags calculated on the duplicated and masked data onto the original field data
+           (`concateFlags` or `flagGeneric`)
+        5. drop the duplicated data (`dropField`)
+
+        To see an implemented example, check out flagSeasonalRange in the saqc.functions module.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-masked.
+
+        mode : {"periodic", "mask_field"}
+            The masking mode.
+            - "periodic": parameters "period_start", "end" are evaluated to generate a periodical mask
+            - "mask_var": data[mask_var] is expected to be a boolean valued timeseries and is used as mask.
+
+        selection_field : {None, str}, default None
+            Only effective if mode == "mask_var"
+            Fieldname of the column, holding the data that is to be used as mask. (must be boolean series)
+            Neither the series` length nor its labels have to match data[field]`s index and length. An inner join of the
+            indices will be calculated and values get masked where the values of the inner join are ``True``.
+
+        start : {None, str}, default None
+            Only effective if mode == "seasonal"
+            String denoting starting point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
+            Has to be of same length as `end` parameter.
+            See examples section below for some examples.
+
+        end : {None, str}, default None
+            Only effective if mode == "periodic"
+            String denoting the end point of every period. Formally, it has to be a truncated instance of "mm-ddTHH:MM:SS".
+            Has to be of same length as `end` parameter.
+            See examples section below for some examples.
+
+        closed : boolean
+            Whether or not to include the bounds defining the mask into the mask.
+
+        Returns
+        -------
+        saqc.SaQC
+
+        Examples
+        --------
+        The `start` and `end` parameters provide a convenient way to generate seasonal / date-periodic masks.
+        They have to be strings of the forms: "mm-ddTHH:MM:SS", "ddTHH:MM:SS" , "HH:MM:SS", "MM:SS" or "SS"
+        (mm=month, dd=day, HH=hour, MM=minute, SS=second)
+        Single digit specifications have to be given with leading zeros.
+        The `start` and `end` strings have to be of the same length (refer to the same periodicity).
+        The highest date unit gives the period.
+        For example:
+
+        >>> start = "01T15:00:00"
+        >>> end = "13T17:30:00"
+
+        Will result in all values sampled between 15:00 at the first and 17:30 at the 13th of every month getting masked.
+
+        >>> start = "01:00"
+        >>> end = "04:00"
+
+        All the values between the first and 4th minute of every hour get masked.
+
+        >>> start = "01-01T00:00:00"
+        >>> end = "01-03T00:00:00"
+
+        Mask January and February of every year. Masking is always inclusive, so in this case the mask will
+        include 00:00:00 at the first of March. To exclude this one, pass:
+
+        >>> start = "01-01T00:00:00"
+        >>> end = "02-28T23:59:59"
+
+        To mask intervals that extend over a period's boundary, like nights or winter, exchange the sequence of season start and
+        season end. For example, to mask night hours between 22:00:00 in the evening and 06:00:00 in the morning, pass:
+
+        >>> start = "22:00:00"
+        >>> end = "06:00:00"
+        """
+        datcol_idx = self._data[field].index
+
+        if mode == "periodic":
+            mask = periodicMask(datcol_idx, start, end, ~closed)
+        elif mode == "selection_field":
+            idx = self._data[selection_field].index.intersection(datcol_idx)
+            mask = self._data.loc[idx, selection_field]
+        else:
+            raise ValueError(
+                "Keyword passed as masking mode is unknown ({})!".format(mode)
+            )
+
+        self._data.aloc[mask, field] = np.nan
+        self._flags[mask, field] = UNFLAGGED
+        return self
+
+    @register(mask=[], demask=[], squeeze=[])
+    def plot(
+        self: "SaQC",
+        field: str,
+        path: Optional[str] = None,
+        max_gap: Optional[str] = None,
+        history: Optional[Literal["valid", "complete"] | list] = "valid",
+        xscope: Optional[slice] = None,
+        phaseplot: Optional[str] = None,
+        store_kwargs: Optional[dict] = None,
+        ax_kwargs: Optional[dict] = None,
+        dfilter: float = FILTER_NONE,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Plot data and flags or store plot to file.
+
+        There are two modes, 'interactive' and 'store', which are determined through the
+        ``path`` keyword. In interactive mode (default) the plot is shown at runtime
+        and the program execution stops until the plot window is closed manually. In
+        store mode the generated plot is stored to disk and no manual interaction is
+        needed.
+
+        Parameters
+        ----------
+        field : str
+            Name of the variable-to-plot
+
+        path : str, default None
+            If ``None`` is passed, interactive mode is entered; plots are shown immediately
+            and the user needs to close them manually before execution continues.
+            If a filepath is passed instead, store-mode is entered and
+            the plot is stored under the passed location.
+
+        max_gap : str, default None
+            If None, all the points in the data will be connected, resulting in long linear
+            lines where continuous chunks of data are missing. NaNs in the data get dropped
+            before plotting. If an offset string is passed, only points that have a distance
+            below `max_gap` get connected via the plotting line.
+
+        history : {"valid", "complete", None, list of strings}, default "valid"
+            Discriminate the plotted flags with respect to the tests they originate from.
+
+            * "valid" - Only plot those flags, that do not get altered or "unflagged" by subsequent tests. Only list tests
+              in the legend, that actually contributed flags to the overall resault.
+            * "complete" - plot all the flags set and list all the tests ran on a variable. Suitable for debugging/tracking.
+            * None - just plot the resulting flags for one variable, without any historical meta information.
+            * list of strings - plot only flags set by those tests listed.
+
+        xscope : slice or Offset, default None
+            Parameter that determines the chunk of the data to be plotted.
+            `xscope` can be anything that is a valid argument to the ``pandas.Series.__getitem__`` method.
+
+        phaseplot : str or None, default None
+            If a string is passed, plot ``field`` in the phase space it forms together with the Variable ``phaseplot``.
+
+        store_kwargs : dict, default {}
+            Keywords to be passed on to the ``matplotlib.pyplot.savefig`` method, handling
+            the figure storing. To store a pickled object of the figure, use the option
+            ``{'pickle': True}``, but note that all other store_kwargs are ignored then.
+            Reopen with: ``pickle.load(open(savepath, 'rb')).show()``
+
+        ax_kwargs : dict, default {}
+            Axis keywords. Change the axis labeling defaults. Most important keywords:
+            'x_label', 'y_label', 'title', 'fontsize', 'cycleskip'.
+
+
+        Returns
+        -------
+        saqc.SaQC
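+
+        Examples
+        --------
+        Minimal call sketches. ``qc`` is assumed to be an existing ``saqc.SaQC`` object;
+        the variable name and path are illustrative:
+
+        >>> qc.plot("level")  # doctest: +SKIP
+        >>> qc.plot("level", path="level.png")  # doctest: +SKIP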
+        """
+        data, flags = self._data.copy(), self._flags.copy()
+
+        interactive = path is None
+        level = kwargs.get("flag", UNFLAGGED)
+
+        if dfilter < np.inf:
+            data.loc[flags[field] >= dfilter, field] = np.nan
+
+        if store_kwargs is None:
+            store_kwargs = {}
+
+        if ax_kwargs is None:
+            ax_kwargs = {}
+
+        if interactive:
+            mpl.use(_MPL_DEFAULT_BACKEND)
+
+        else:
+            mpl.use("Agg")
+
+        fig = makeFig(
+            data=data,
+            field=field,
+            flags=flags,
+            level=level,
+            max_gap=max_gap,
+            history=history,
+            xscope=xscope,
+            phaseplot=phaseplot,
+            ax_kwargs=ax_kwargs,
+        )
+
+        if interactive:
+            plt.show()
+
         else:
-            fig.savefig(path, **store_kwargs)
+            if store_kwargs.pop("pickle", False):
+                with open(path, "wb") as f:
+                    pickle.dump(fig, f)
+            else:
+                fig.savefig(path, **store_kwargs)
 
-    return orig
+        return self
diff --git a/saqc/funcs/transformation.py b/saqc/funcs/transformation.py
index 0b3b363c6cd829f5430fade0255e1e516770ba5a..a55568ddb1b300cf438898513c07a11a73fdec3f 100644
--- a/saqc/funcs/transformation.py
+++ b/saqc/funcs/transformation.py
@@ -7,73 +7,70 @@
 # -*- coding: utf-8 -*-
 from __future__ import annotations
 
-from typing import Callable, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Callable, Optional, Union
 
 import numpy as np
 import pandas as pd
 
-from dios import DictOfSeries
-from saqc.core.flags import Flags
 from saqc.core.register import register
 
+if TYPE_CHECKING:
+    from saqc.core.core import SaQC
 
-@register(mask=["field"], demask=[], squeeze=[])
-def transform(
-    data: DictOfSeries,
-    field: str,
-    flags: Flags,
-    func: Callable[[pd.Series], pd.Series],
-    freq: Optional[Union[float, str]] = None,
-    **kwargs,
-) -> Tuple[DictOfSeries, Flags]:
-    """
-    Function to transform data columns with a transformation that maps series onto series of the same length.
-
-    Note, that flags get preserved.
-
-    Parameters
-    ----------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-    field : str
-        The fieldname of the column, holding the data-to-be-transformed.
-    flags : saqc.Flags
-        Container to store quality flags to data.
-    func : Callable[{pd.Series, np.array}, np.array]
-        Function to transform data[field] with.
-    freq : {None, float, str}, default None
-        Determines the segmentation of the data into partitions, the transformation is applied on individually
-
-        * ``np.inf``: Apply transformation on whole data set at once
-        * ``x`` > 0 : Apply transformation on successive data chunks of periods length ``x``
-        * Offset String : Apply transformation on successive partitions of temporal extension matching the passed offset
-          string
-
-    Returns
-    -------
-    data : dios.DictOfSeries
-        A dictionary of pandas.Series, holding all the data.
-        Data values may have changed relatively to the data input.
-    flags : saqc.Flags
-        The quality flags of data
-    """
-    val_ser = data[field].copy()
-    # partitioning
-    if not freq:
-        freq = val_ser.shape[0]
-
-    if isinstance(freq, str):
-        grouper = pd.Grouper(freq=freq)
-    else:
-        grouper = pd.Series(data=np.arange(0, val_ser.shape[0]), index=val_ser.index)
-        grouper = grouper.transform(lambda x: int(np.floor(x / freq)))
-
-    partitions = val_ser.groupby(grouper)
-
-    for _, partition in partitions:
-        if partition.empty:
-            continue
-        val_ser[partition.index] = func(partition)
-
-    data[field] = val_ser
-    return data, flags
+
+class TransformationMixin:
+    @register(mask=["field"], demask=[], squeeze=[])
+    def transform(
+        self: "SaQC",
+        field: str,
+        func: Callable[[pd.Series], pd.Series],
+        freq: Optional[Union[float, str]] = None,
+        **kwargs,
+    ) -> "SaQC":
+        """
+        Function to transform data columns with a transformation that maps series onto series of the same length.
+
+        Note that flags get preserved.
+
+        Parameters
+        ----------
+        field : str
+            The fieldname of the column, holding the data-to-be-transformed.
+
+        func : Callable[{pd.Series, np.array}, np.array]
+            Function to transform data[field] with.
+
+        freq : {None, float, str}, default None
+            Determines the segmentation of the data into partitions, onto which the transformation is applied individually.
+
+            * ``None`` or ``np.inf``: Apply transformation on the whole data set at once
+            * ``x`` > 0 : Apply transformation on successive data chunks of periods length ``x``
+            * Offset String : Apply transformation on successive partitions of temporal extension matching the passed offset
+              string
+
+        Returns
+        -------
+        saqc.SaQC
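+
+        Examples
+        --------
+        A minimal call sketch, applying a log transform in chunks of 1000 periods.
+        ``qc`` is assumed to be an existing ``saqc.SaQC`` object and the variable
+        name is illustrative:
+
+        >>> qc = qc.transform("level", func=np.log, freq=1000)  # doctest: +SKIP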
+        """
+        val_ser = self._data[field].copy()
+        # partitioning
+        if not freq:
+            freq = val_ser.shape[0]
+
+        if isinstance(freq, str):
+            grouper = pd.Grouper(freq=freq)
+        else:
+            grouper = pd.Series(
+                data=np.arange(0, val_ser.shape[0]), index=val_ser.index
+            )
+            grouper = grouper.transform(lambda x: int(np.floor(x / freq)))
+
+        partitions = val_ser.groupby(grouper)
+
+        for _, partition in partitions:
+            if partition.empty:
+                continue
+            val_ser[partition.index] = func(partition)
+
+        self._data[field] = val_ser
+        return self
diff --git a/saqc/lib/tools.py b/saqc/lib/tools.py
index 1c2d8b9d19b974f2e996ddf2a132c232f110d9f3..5f3fdd7a0bc4e990f7c379a0a07103e26861b78c 100644
--- a/saqc/lib/tools.py
+++ b/saqc/lib/tools.py
@@ -11,9 +11,8 @@ import collections
 import itertools
 import re
 import warnings
-from typing import Callable, Collection, Iterator, List, Sequence, TypeVar, Union
+from typing import Callable, Collection, List, Sequence, TypeVar, Union
 
-import numba as nb
 import numpy as np
 import pandas as pd
 from scipy import fft
@@ -24,6 +23,7 @@ import dios
 # keep this for external imports
 # TODO: fix the external imports
 from saqc.lib.rolling import customRoller
+from saqc.lib.types import CompT
 
 T = TypeVar("T", str, float, int)
 
@@ -171,35 +171,6 @@ def periodicMask(dtindex, season_start, season_end, include_bounds):
     return out
 
 
-@nb.jit(nopython=True, cache=True)
-def otherIndex(values: np.ndarray, start: int = 0) -> int:
-    """
-    returns the index of the first non value not equal to values[0]
-    -> values[start:i] are all identical
-    """
-    val = values[start]
-    for i in range(start, len(values)):
-        if values[i] != val:
-            return i
-    return -1
-
-
-def groupConsecutives(series: pd.Series) -> Iterator[pd.Series]:
-    """
-    group consecutive values into distinct pd.Series
-    """
-    index = series.index
-    values = series.values
-
-    start = 0
-    while True:
-        stop = otherIndex(values, start)
-        if stop == -1:
-            break
-        yield pd.Series(data=values[start:stop], index=index[start:stop])
-        start = stop
-
-
 def concatDios(data: List[dios.DictOfSeries], warn: bool = True, stacklevel: int = 2):
     # fast path for most common case
     if len(data) == 1 and data[0].columns.is_unique:
@@ -509,7 +480,7 @@ def getFreqDelta(index):
     return delta
 
 
-def getApply(in_obj, apply_obj, attr_access="__name__", attr_or="apply"):
+def getApply(in_obj, apply_obj, attr_access="__name__", attr_or="apply") -> pd.Series:
     """
     For the repeating task of applying build in (accelerated) methods/funcs (`apply_obj`),
     of rolling/resampling - like objects (`in_obj`) ,
@@ -519,20 +490,25 @@ def getApply(in_obj, apply_obj, attr_access="__name__", attr_or="apply"):
     try:
         out = getattr(in_obj, getattr(apply_obj, attr_access))()
     except AttributeError:
-        out = getattr(in_obj, attr_or)(apply_obj)
+        try:
+            # try the (faster) raw application first
+            out = getattr(in_obj, attr_or)(apply_obj, raw=True)
+        except Exception:
+            # didn't work out, fall back to the plain call
+            out = getattr(in_obj, attr_or)(apply_obj)
 
     return out
 
 
 def statPass(
     datcol: pd.Series,
-    stat: Callable[[np.array, pd.Series], float],
+    stat: Callable[[np.ndarray, pd.Series], float],
     winsz: pd.Timedelta,
     thresh: float,
-    comparator: Callable[[float, float], bool],
-    sub_winsz: pd.Timedelta = None,
-    sub_thresh: float = None,
-    min_periods: int = None,
+    comparator: Callable[[CompT, CompT], bool],
+    sub_winsz: pd.Timedelta | None = None,
+    sub_thresh: float | None = None,
+    min_periods: int | None = None,
 ) -> pd.Series:
     """
     Check `datcol`, if it contains chunks of length `window`, exceeding `thresh` with
diff --git a/saqc/lib/ts_operators.py b/saqc/lib/ts_operators.py
index 27db26955a8515a052b60080eb92914a3acf1e09..094875fcccebee0a58a15139a8f283d4d9175b3e 100644
--- a/saqc/lib/ts_operators.py
+++ b/saqc/lib/ts_operators.py
@@ -335,7 +335,9 @@ def interpolateNANs(
         data = pd.merge(gap_mask, data, how="inner", left_index=True, right_index=True)
 
         def _interpolWrapper(x, wrap_order=order, wrap_method=method):
-            if x.count() > wrap_order:
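+            # a negative interpolation order disables interpolation: the chunk is returned unchanged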
+            if wrap_order < 0:
+                return x
+            elif x.count() > wrap_order:
                 try:
                     return x.interpolate(method=wrap_method, order=int(wrap_order))
                 except (NotImplementedError, ValueError):
@@ -446,8 +448,8 @@ def shift2Freq(
     }
     direction, tolerance = methods[method](freq)
     target_ind = pd.date_range(
-        start=data.index[0].floor(freq),
-        end=data.index[-1].ceil(freq),
+        start=pd.Timestamp(data.index[0]).floor(freq),
+        end=pd.Timestamp(data.index[-1]).ceil(freq),
         freq=freq,
         name=data.index.name,
     )
@@ -457,7 +459,7 @@ def shift2Freq(
 
 
 def butterFilter(
-    x, cutoff, nyq=0.5, filter_order=2, fill_method="linear", filter_type="low"
+    x, cutoff, nyq=0.5, filter_order=2, fill_method="linear", filter_type="lowpass"
 ):
     """
     Applies butterworth filter.
@@ -479,6 +481,8 @@ def butterFilter(
         handle ''np.nan''). See documentation of pandas.Series.interpolate method for
         details on the methods associated with the different keywords.
 
+    filter_type : Literal["lowpass", "highpass", "bandpass", "bandstop"]
+        The type of filter. Default is "lowpass".
 
     Returns
     -------
@@ -489,6 +493,8 @@ def butterFilter(
     na_mask = x.isna()
     x = x.interpolate(fill_method).interpolate("ffill").interpolate("bfill")
     b, a = butter(N=filter_order, Wn=cutoff / nyq, btype=filter_type)
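+    # filtfilt pads the input by (default) 3 * max(len(a), len(b)) samples; shorter
+    # series cannot be filtered, so an all-NaN result is returned instead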
+    if x.shape[0] < 3 * max(len(a), len(b)):
+        return pd.Series(np.nan, x.index, name=x.name)
     y = pd.Series(filtfilt(b, a, x), x.index, name=x.name)
     y[na_mask] = np.nan
     return y
@@ -554,6 +560,8 @@ def polyRoller(in_slice, miss_marker, val_range, center_index, poly_deg):
     miss_mask = in_slice == miss_marker
     x_data = val_range[~miss_mask]
     y_data = in_slice[~miss_mask]
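+    # nothing to fit if the whole slice is missing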
+    if len(x_data) == 0:
+        return np.nan
     fitted = poly.polyfit(x=x_data, y=y_data, deg=poly_deg)
     return poly.polyval(center_index, fitted)
 
diff --git a/saqc/lib/types.py b/saqc/lib/types.py
index 6c0a61f352f4f40983d4813bfec7ee3ac6a14728..0e84ff16272d96cd7e206c6287e303a4cf986e62 100644
--- a/saqc/lib/types.py
+++ b/saqc/lib/types.py
@@ -5,6 +5,10 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 # -*- coding: utf-8 -*-
+from __future__ import annotations
+
+from abc import abstractmethod
+
 __all__ = [
     "T",
     "ArrayLike",
@@ -15,11 +19,12 @@ __all__ = [
     "OptionalNone",
 ]
 
+
 from typing import Any, Dict, TypeVar, Union
 
 import numpy as np
 import pandas as pd
-from typing_extensions import Literal, Protocol
+from typing_extensions import Protocol
 
 from dios import DictOfSeries
 
@@ -46,5 +51,14 @@ class GenericFunction(Protocol):
         ...  # pragma: no cover
 
 
+class Comparable(Protocol):
+    @abstractmethod
+    def __gt__(self: CompT, other: CompT) -> bool:
+        pass
+
+
+CompT = TypeVar("CompT", bound=Comparable)
+
+
 class OptionalNone:
     pass
diff --git a/saqc/version.py b/saqc/version.py
index 777c3104ada32dc027e6fa0a3a3d44c381a44138..0afc143e5e75816121a8708f858dbc28ff518e08 100644
--- a/saqc/version.py
+++ b/saqc/version.py
@@ -4,4 +4,4 @@
 #
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-__version__ = "2.1.0"
+__version__ = "2.2.0"
diff --git a/setup.py b/setup.py
index 950f9aba023e36d7b79f7d811e0276c8b1c5643d..5aedd38f50e34f65488bdf965fca22be4791e086 100644
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@ setup(
         "numpy",
         "outlier-utils",
         "pyarrow",
-        "pandas>=1.2",
+        "pandas>=1.2,<1.5",
         "scikit-learn",
         "scipy",
         "typing_extensions",
diff --git a/tests/api/test_signatures.py b/tests/api/test_signatures.py
deleted file mode 100644
index f229b52efbb377b2030936741bf97b5b812a99c3..0000000000000000000000000000000000000000
--- a/tests/api/test_signatures.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-
-import inspect
-from typing import Callable, Set
-
-import pandas as pd
-import pytest
-
-from saqc import SaQC
-from saqc.core.register import FUNC_MAP, FunctionWrapper
-
-
-def _filterSignature(func: Callable, skip: Set):
-    sig = inspect.signature(func)
-    return {k: v for k, v in sig.parameters.items() if k not in skip}
-
-
-@pytest.mark.parametrize("name,func", FUNC_MAP.items())
-def test_signatureConformance(name: str, func: FunctionWrapper):
-
-    method = getattr(SaQC, name, None)
-    # check a wrapper function is defined at all
-    assert method is not None
-
-    fsig = _filterSignature(func.func, {"data", "field", "flags"})
-    msig = _filterSignature(method, {"self", "field"})
-    assert fsig.keys() == msig.keys()
-
-    for key, fp in fsig.items():
-        mp = msig[key]
-        try:
-            assert fp == mp
-        except AssertionError:
-            assert mp.annotation == fp.annotation
-            if pd.isna(fp.default) and pd.isna(mp.default):  # both NA: OK
-                pass
-            elif isinstance(fp.default, Callable) and isinstance(
-                mp.default, Callable
-            ):  # hard to check: ignore
-                pass
-            else:
-                assert mp.default == fp.default
diff --git a/tests/cli/test_integration.py b/tests/cli/test_integration.py
index 81447f3aef7450610e82d2d9efa36a9ac3572a19..5d4aef522dd2ccda7b785c4508d94a4e236b2bd8 100644
--- a/tests/cli/test_integration.py
+++ b/tests/cli/test_integration.py
@@ -16,7 +16,7 @@ FLOAT = [
     "2016-04-01 00:05:48,3573.0,-inf,32.685,-inf,nan,nan\n",
     "2016-04-01 00:15:00,nan,nan,nan,nan,29.3157,-inf\n",
     "2016-04-01 00:20:42,3572.0,-inf,32.7428,-inf,nan,nan\n",
-    "2016-04-01 00:30:00,nan,nan,nan,nan,29.3679,-inf\n",
+    "2016-04-01 00:30:00,nan,nan,nan,nan,29.3679,255.0\n",
     "2016-04-01 00:35:37,3572.0,-inf,32.6186,-inf,nan,nan\n",
     "2016-04-01 00:45:00,nan,nan,nan,nan,29.3679,-inf\n",
 ]
@@ -29,7 +29,7 @@ SIMPLE = [
     "2016-04-01 00:05:48,3573.0,UNFLAGGED,32.685,UNFLAGGED,nan,nan\n",
     "2016-04-01 00:15:00,nan,nan,nan,nan,29.3157,UNFLAGGED\n",
     "2016-04-01 00:20:42,3572.0,UNFLAGGED,32.7428,UNFLAGGED,nan,nan\n",
-    "2016-04-01 00:30:00,nan,nan,nan,nan,29.3679,UNFLAGGED\n",
+    "2016-04-01 00:30:00,nan,nan,nan,nan,29.3679,BAD\n",
     "2016-04-01 00:35:37,3572.0,UNFLAGGED,32.6186,UNFLAGGED,nan,nan\n",
     "2016-04-01 00:45:00,nan,nan,nan,nan,29.3679,UNFLAGGED\n",
 ]
@@ -42,7 +42,7 @@ POSITIONAL = [
     "2016-04-01 00:05:48,3573.0,9,32.685,90,nan,-9999\n",
     "2016-04-01 00:15:00,nan,-9999,nan,-9999,29.3157,9000\n",
     "2016-04-01 00:20:42,3572.0,9,32.7428,90,nan,-9999\n",
-    "2016-04-01 00:30:00,nan,-9999,nan,-9999,29.3679,9000\n",
+    "2016-04-01 00:30:00,nan,-9999,nan,-9999,29.3679,9002\n",
     "2016-04-01 00:35:37,3572.0,9,32.6186,90,nan,-9999\n",
     "2016-04-01 00:45:00,nan,-9999,nan,-9999,29.3679,9000\n",
 ]
@@ -55,7 +55,7 @@ DMP = [
     "2016-04-01 00:05:48,3573.0,NIL,,,32.685,NIL,,,nan,nan,nan,nan\n",
     "2016-04-01 00:15:00,nan,nan,nan,nan,nan,nan,nan,nan,29.3157,NIL,,\n",
     "2016-04-01 00:20:42,3572.0,NIL,,,32.7428,NIL,,,nan,nan,nan,nan\n",
-    "2016-04-01 00:30:00,nan,nan,nan,nan,nan,nan,nan,nan,29.3679,NIL,,\n",
+    '2016-04-01 00:30:00,nan,nan,nan,nan,nan,nan,nan,nan,29.3679,BAD,OTHER,"{""test"": ""flagMAD"", ""comment"": """"}"\n',
     "2016-04-01 00:35:37,3572.0,NIL,,,32.6186,NIL,,,nan,nan,nan,nan\n",
     "2016-04-01 00:45:00,nan,nan,nan,nan,nan,nan,nan,nan,29.3679,NIL,,\n",
 ]
diff --git a/tests/common.py b/tests/common.py
index 54d987e8100a971e244fde1144939e137a064bb7..3a973e4c9f5d60d3ced0a5f1068588ef96ea8d0a 100644
--- a/tests/common.py
+++ b/tests/common.py
@@ -12,18 +12,10 @@ import numpy as np
 import pandas as pd
 
 import dios
-from saqc.constants import BAD
 from saqc.core import Flags
 from saqc.core.history import History, createHistoryFromData
 
 
-def flagAll(data, field, flags, **kwargs):
-    # NOTE: remember to rename flag -> flag_values
-    flags.copy()
-    flags[:, field] = BAD
-    return data, flags
-
-
 def initData(
     cols=2, start_date="2017-01-01", end_date="2017-12-31", freq=None, rows=None
 ):
diff --git a/tests/core/test_FunctionsMixin.py b/tests/core/test_FunctionsMixin.py
deleted file mode 100644
index 9f57509665c3f02a7c902f2131ad6d3921fbdaee..0000000000000000000000000000000000000000
--- a/tests/core/test_FunctionsMixin.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-import inspect
-
-import pytest
-
-from saqc.core.modules import FunctionsMixin
-
-methods = [
-    attr
-    for attr in dir(FunctionsMixin)
-    if callable(getattr(FunctionsMixin, attr)) and not attr.startswith("_")
-]
-
-
-@pytest.mark.parametrize("name", methods)
-def test_redirect_call(name):
-    fmixin = FunctionsMixin()
-    method = getattr(fmixin, name)
-    params = inspect.signature(method).parameters
-    assert "field" in params
-    assert "kwargs" in params
-    dummy_params = dict.fromkeys(params.keys())
-    dummy_params.pop("kwargs")
-
-    err_msg = "'FunctionsMixin' object has no attribute '_wrap'"
-    with pytest.raises(AttributeError, match=err_msg):
-        method(**dummy_params)
diff --git a/tests/core/test_core.py b/tests/core/test_core.py
index 62ead32ff074849cb933ae44f0ae096bb5e11cdf..82b30186491a2ec8c1540902e8f530806087e57a 100644
--- a/tests/core/test_core.py
+++ b/tests/core/test_core.py
@@ -13,15 +13,15 @@ import pandas as pd
 import pytest
 
 import saqc
-from saqc.core import SaQC, initFlagsLike, register
+from saqc.constants import BAD, FILTER_ALL, FILTER_NONE, UNFLAGGED
+from saqc.core import SaQC, initFlagsLike
 from saqc.core.flags import Flags
-from saqc.core.register import flagging, processing
-from tests.common import flagAll, initData
+from saqc.core.register import flagging, processing, register
+from saqc.lib.types import OptionalNone
+from tests.common import initData
 
 OPTIONAL = [False, True]
 
-register(mask=["field"], demask=["field"], squeeze=["field"])(flagAll)
-
 
 @pytest.fixture
 def data():
@@ -36,7 +36,7 @@ def flags(data, optional):
 
 def test_errorHandling(data):
     @processing()
-    def raisingFunc(data, field, flags, **kwargs):
+    def raisingFunc(saqc, field, **kwargs):
         raise TypeError
 
     var1 = data.columns[0]
@@ -51,6 +51,12 @@ def test_dtypes(data, flags):
     """
     Test if the categorical dtype is preserved through the core functionality
     """
+
+    @register(mask=["field"], demask=["field"], squeeze=["field"])
+    def flagAll(saqc, field, **kwargs):
+        saqc._flags[:, field] = BAD
+        return saqc
+
     flags = initFlagsLike(data)
     flags_raw = flags.toDios()
     var1, var2 = data.columns[:2]
@@ -61,6 +67,11 @@ def test_dtypes(data, flags):
         assert pflags[c].dtype == flags[c].dtype
 
 
+def test_new_call(data):
+    qc = saqc.SaQC(data)
+    qc = qc.flagRange("var1", max=5)
+
+
 def test_copy(data):
     qc = saqc.SaQC(data)
 
@@ -100,9 +111,9 @@ def test_sourceTargetCopy():
     target = "new"
 
     @register(mask=["field"], demask=["field"], squeeze=["field"], handles_target=False)
-    def flagTarget(data, field, flags, **kwargs):
+    def flagTarget(saqc, field, **kwargs):
         assert "target" not in kwargs
-        return data, flags
+        return saqc
 
     qc = SaQC(data, flags=Flags({var1: pd.Series(127.0, index=data[var1].index)}))
     qc = qc.flagTarget(field=var1, target=target)
@@ -120,18 +131,18 @@ def test_sourceTargetNoCopy():
     target = "new"
 
     @register(mask=["field"], demask=["field"], squeeze=["field"], handles_target=True)
-    def flagField(data, field, flags, **kwargs):
+    def flagField(saqc, field, **kwargs):
         assert "target" in kwargs
-        assert "target" not in data
-        assert "target" not in flags
-        return data, flags
+        assert "target" not in saqc._data
+        assert "target" not in saqc._flags
+        return saqc
 
     SaQC(data).flagField(field=var1, target=target)
 
 
 def test_sourceTargetMultivariate():
     """
-    test bypassing of the imlpicit copy machiners
+    test bypassing of the implicit copy machinery
     """
     data = initData(3)
 
@@ -142,12 +153,12 @@ def test_sourceTargetMultivariate():
         handles_target=True,
         multivariate=True,
     )
-    def flagMulti(data, field, flags, **kwargs):
+    def flagMulti(saqc, field, **kwargs):
         assert "target" in kwargs
-        assert "target" not in data
-        assert "target" not in flags
+        assert "target" not in saqc._data
+        assert "target" not in saqc._flags
         assert field == kwargs["target"]
-        return data, flags
+        return saqc
 
     SaQC(data).flagMulti(field=data.columns, target=data.columns)
 
@@ -162,20 +173,17 @@ def test_sourceTargetMulti():
         mask=["field"],
         demask=["field"],
         squeeze=["field"],
-        handles_target=False,
+        handles_target=True,
         multivariate=True,
     )
-    def flagMulti(data, field, flags, target, **kwargs):
+    def flagMulti(saqc, field, target, **kwargs):
         assert len(field) == len(target)
         for src, trg in zip(field, target):
-            assert src in data
-            assert trg in data
-            assert src in flags
-            assert trg in flags
-
-            assert (data[src] == data[trg]).all(axis=None)
-            assert (flags[src] == flags[trg]).all(axis=None)
-        return data, flags
+            assert src in saqc._data
+            assert src in saqc._flags
+            assert trg not in saqc._data
+            assert trg not in saqc._flags
+        return saqc
 
     SaQC(data, flags).flagMulti(field=fields, target=targets)
 
@@ -196,28 +204,9 @@ def test_validation(data):
     qc = SaQC(df)
 
     @flagging()
-    def flagFoo(data, field, flags, **kwargs):
-        data["spam"] = data[field]
-        return data, flags
-
-    with pytest.raises(RuntimeError):
-        qc.flagFoo("a")
-
-
-@pytest.mark.skip(reason="bug in register, see #GL 342")
-def test_validation_flags(data):
-    """Test if validation detects different columns in data and flags."""
-    df = pd.DataFrame(
-        data=np.arange(8).reshape(4, 2),
-        index=pd.date_range("2020", None, 4, "1d"),
-        columns=list("ab"),
-    )
-    qc = SaQC(df)
-
-    @flagging()
-    def flagFoo(data, field, flags, **kwargs):
-        flags["spam"] = flags[field]
-        return data, flags
+    def flagFoo(saqc, field, **kwargs):
+        saqc._data["spam"] = saqc._data[field]
+        return saqc
 
     with pytest.raises(RuntimeError):
         qc.flagFoo("a")
@@ -245,3 +234,136 @@ def test_immutability(data):
     saqc_after = saqc_before.flagDummy(field)
     for name in SaQC._attributes:
         assert getattr(saqc_before, name) is not getattr(saqc_after, name)
+
+
+@pytest.mark.parametrize(
+    "field,target",
+    [
+        (["a"], ["x", "y"]),
+        (["a", "b"], ["x"]),
+    ],
+)
+def test_fieldsTargetsExpansionFail(field, target):
+    # check that mismatched field/target lengths raise a ValueError
+    # for the different function types
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=False, handles_target=False)
+    def foo(saqc, field, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=False, handles_target=True)
+    def bar(saqc, field, target, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=True, handles_target=False)
+    def baz(saqc, field, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=False, handles_target=True)
+    def fooBar(saqc, field, **kwargs):
+        return saqc
+
+    data = pd.DataFrame({"a": [1, 2], "b": [2, 3], "c": [3, 4], "d": [4, 5]})
+    qc = SaQC(data)
+    with pytest.raises(ValueError):
+        qc.foo(field, target=target)
+    with pytest.raises(ValueError):
+        qc.bar(field, target=target)
+    with pytest.raises(ValueError):
+        qc.baz(field, target=target)
+    with pytest.raises(ValueError):
+        qc.fooBar(field, target=target)
+
+
+@pytest.mark.parametrize(
+    "field,target",
+    [
+        (["a"], ["x"]),
+        (["a", "a"], ["x", "y"]),
+    ],
+)
+def test_fieldsTargetsExpansion(field, target):
+    # check that the field/target handling works as expected for the
+    # different function types
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=False, handles_target=False)
+    def foo(saqc, field, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=False, handles_target=True)
+    def bar(saqc, field, target, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=True, handles_target=False)
+    def baz(saqc, field, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=False, handles_target=True)
+    def fooBar(saqc, field, **kwargs):
+        return saqc
+
+    data = pd.DataFrame({"a": [1, 2], "b": [2, 3], "c": [3, 4], "d": [4, 5]})
+    qc = SaQC(data)
+    qc.foo(field, target=target)
+    qc.bar(field, target=target)
+    qc.baz(field, target=target)
+    qc.fooBar(field, target=target)
+
+
+@pytest.mark.parametrize(
+    "field,target",
+    [
+        (["a"], ["x"]),
+        (["a", "a"], ["x", "y"]),
+        (["a"], ["x", "y"]),
+        (["a", "a"], ["x"]),
+    ],
+)
+def test_fieldsTargetsExpansionMultivariate(field, target):
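+    # multivariate functions that handle targets themselves should accept
+    # any field/target combination without raising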
+    @register(mask=[], demask=[], squeeze=[], multivariate=True, handles_target=True)
+    def foo(saqc, field, target, **kwargs):
+        return saqc
+
+    @register(mask=[], demask=[], squeeze=[], multivariate=True, handles_target=True)
+    def bar(saqc, field, **kwargs):
+        return saqc
+
+    data = pd.DataFrame(
+        {"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5], "d": [4, 5, 6]}
+    )
+    qc = SaQC(data)
+    qc.foo(field, target)
+    qc.bar(field, target)
+
+
+def test_columnConsistency(data):
+    @flagging()
+    def flagFoo(saqc, field, **kwargs):
+        saqc._flags["spam"] = saqc._flags[field]
+        return saqc
+
+    field = data.columns[0]
+    qc = SaQC(data)
+    with pytest.raises(RuntimeError):
+        qc.flagFoo(field)
+
+
+@pytest.mark.parametrize(
+    "user_flag,internal_flag",
+    (
+        [FILTER_ALL, FILTER_ALL],
+        [FILTER_NONE, FILTER_NONE],
+        [OptionalNone(), FILTER_ALL],
+        ["BAD", BAD],
+        ["UNFLAGGED", UNFLAGGED],
+    ),
+)
+def test_dfilterTranslation(data, user_flag, internal_flag):
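+    # the 'simple' translation scheme should map user-facing dfilter values
+    # to the corresponding internal flag constants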
+    @flagging()
+    def flagFoo(saqc, field, dfilter, **kwargs):
+        assert dfilter == internal_flag
+        return saqc
+
+    field = data.columns[0]
+    qc = SaQC(data, scheme="simple")
+    qc.flagFoo(field, dfilter=user_flag)
diff --git a/tests/core/test_reader.py b/tests/core/test_reader.py
index 7df2cf625e71e9424ef6c8570735cc77d57c3dd2..70a00d0164b14daea1014d952850a6702b446815 100644
--- a/tests/core/test_reader.py
+++ b/tests/core/test_reader.py
@@ -141,9 +141,9 @@ def test_supportedArguments(data):
     # TODO: necessary?
 
     @flagging()
-    def func(data, field, flags, kwarg, **kwargs):
-        flags[:, field] = np.nan
-        return data, flags
+    def func(saqc, field, kwarg, **kwargs):
+        saqc._flags[:, field] = np.nan
+        return saqc
 
     var1 = data.columns[0]
 
diff --git a/tests/funcs/test_constants_detection.py b/tests/funcs/test_constants_detection.py
index c3f9d36678e42b35143e5eec518625717c78b611..381d5260fafb2d3ac3382d9c0110e6342cce5922 100644
--- a/tests/funcs/test_constants_detection.py
+++ b/tests/funcs/test_constants_detection.py
@@ -10,8 +10,7 @@ import numpy as np
 import pytest
 
 from saqc.constants import BAD, UNFLAGGED
-from saqc.core import initFlagsLike
-from saqc.funcs.constants import flagByVariance, flagConstants
+from saqc.core import SaQC, initFlagsLike
 from tests.common import initData
 
 
@@ -27,10 +26,9 @@ def data():
 def test_constants_flagBasic(data):
     field, *_ = data.columns
     flags = initFlagsLike(data)
-    data, flags_result = flagConstants(
-        data, field, flags, window="15Min", thresh=0.1, flag=BAD
-    )
-    flagscol = flags_result[field]
+    qc = SaQC(data, flags)
+    qc = qc.flagConstants(field, window="15Min", thresh=0.1, flag=BAD)
+    flagscol = qc._flags[field]
     assert np.all(flagscol[5:25] == BAD)
     assert np.all(flagscol[:5] == UNFLAGGED)
     assert np.all(flagscol[25 + 1 :] == UNFLAGGED)
@@ -39,11 +37,10 @@ def test_constants_flagBasic(data):
 def test_constants_flagVarianceBased(data):
     field, *_ = data.columns
     flags = initFlagsLike(data)
-    data, flags_result1 = flagByVariance(
-        data, field, flags, window="1h", thresh=0.0005, flag=BAD
-    )
+    qc = SaQC(data, flags)
+    qc = qc.flagByVariance(field, window="1h", thresh=0.0005, flag=BAD)
 
-    flagscol = flags_result1[field]
+    flagscol = qc._flags[field]
     assert np.all(flagscol[5:25] == BAD)
     assert np.all(flagscol[:5] == UNFLAGGED)
     assert np.all(flagscol[25 + 1 :] == UNFLAGGED)
diff --git a/tests/funcs/test_functions.py b/tests/funcs/test_functions.py
index 416bd1c9f8b081bb0db590f1beea6fbfa85ab9a6..c898cb8af6658f1ca424a25564fb345e2faf5402 100644
--- a/tests/funcs/test_functions.py
+++ b/tests/funcs/test_functions.py
@@ -14,13 +14,7 @@ import dios
 import saqc
 from saqc.constants import BAD, DOUBTFUL, UNFLAGGED
 from saqc.core import initFlagsLike
-from saqc.funcs.breaks import flagIsolated
-from saqc.funcs.drift import flagDriftFromNorm, flagDriftFromReference
-from saqc.funcs.flagtools import clearFlags, flagManual, forceFlags
-from saqc.funcs.noise import flagByStatLowPass
-from saqc.funcs.outliers import flagRange
-from saqc.funcs.resampling import concatFlags
-from saqc.funcs.tools import copyField, dropField, selectTime
+from saqc.core.core import SaQC
 from tests.common import initData
 from tests.fixtures import char_dict, course_1
 
@@ -42,19 +36,20 @@ def test_statPass():
     data[200:210] = noise[:10]
     data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    data, flags = flagByStatLowPass(
-        data, "data", flags, np.std, "20D", 0.999, "5D", 0.999, 0, flag=BAD
+    qc = SaQC(data, flags).flagByStatLowPass(
+        "data", np.std, "20D", 0.999, "5D", 0.999, 0, flag=BAD
     )
-    assert (flags["data"].iloc[:100] == UNFLAGGED).all()
-    assert (flags["data"].iloc[100:120] == BAD).all()
-    assert (flags["data"].iloc[121:] == UNFLAGGED).all()
+    assert (qc.flags["data"].iloc[:100] == UNFLAGGED).all()
+    assert (qc.flags["data"].iloc[100:120] == BAD).all()
+    assert (qc.flags["data"].iloc[121:] == UNFLAGGED).all()
 
 
 def test_flagRange(data, field):
     min, max = 10, 90
     flags = initFlagsLike(data)
-    data, flags = flagRange(data, field, flags, min=min, max=max, flag=BAD)
-    flagged = flags[field] > UNFLAGGED
+    qc = SaQC(data, flags)
+    qc = qc.flagRange(field, min=min, max=max, flag=BAD)
+    flagged = qc.flags[field] > UNFLAGGED
     expected = (data[field] < min) | (data[field] > max)
     assert all(flagged == expected)
 
@@ -89,31 +84,26 @@ def test_flagSesonalRange(data, field):
         ),
     ]
 
+    flags = initFlagsLike(data)
+    qc = SaQC(data, flags)
     for test, expected in tests:
-        flags = initFlagsLike(data)
         newfield = f"{field}_masked"
         start = f"{test['startmonth']:02}-{test['startday']:02}T00:00:00"
         end = f"{test['endmonth']:02}-{test['endday']:02}T00:00:00"
 
-        data, flags = copyField(data, field, flags, field + "_masked")
-        data, flags = selectTime(
-            data,
+        qc = qc.copyField(field, field + "_masked")
+        qc = qc.selectTime(
             newfield,
-            flags,
             mode="periodic",
             start=start,
             end=end,
             closed=True,
             flag=BAD,
         )
-        data, flags = flagRange(
-            data, newfield, flags, min=test["min"], max=test["max"], flag=BAD
-        )
-        data, flags = concatFlags(
-            data, newfield, flags, method="match", target=field, flag=BAD
-        )
-        data, flags = dropField(data, newfield, flags)
-        flagged = flags[field] > UNFLAGGED
+        qc = qc.flagRange(newfield, min=test["min"], max=test["max"], flag=BAD)
+        qc = qc.concatFlags(newfield, method="match", target=field, flag=BAD)
+        qc = qc.dropField(newfield)
+        flagged = qc._flags[field] > UNFLAGGED
         assert flagged.sum() == expected
 
 
@@ -122,8 +112,9 @@ def test_clearFlags(data, field):
     flags[:, field] = BAD
     assert all(flags[field] == BAD)
 
-    _, flags = clearFlags(data, field, flags)
-    assert all(flags[field] == UNFLAGGED)
+    qc = SaQC(data, flags)
+    qc = qc.clearFlags(field)
+    assert all(qc._flags[field] == UNFLAGGED)
 
 
 def test_forceFlags(data, field):
@@ -131,13 +122,13 @@ def test_forceFlags(data, field):
     flags[:, field] = BAD
     assert all(flags[field] == BAD)
 
-    _, flags = forceFlags(data, field, flags, flag=DOUBTFUL)
-    assert all(flags[field] == DOUBTFUL)
+    qc = SaQC(data, flags).forceFlags(field, flag=DOUBTFUL)
+    assert all(qc._flags[field] == DOUBTFUL)
 
 
 def test_flagIsolated(data, field):
     flags = initFlagsLike(data)
-
+    d_len = data.shape[0][0]
     data.iloc[1:3, 0] = np.nan
     data.iloc[4:5, 0] = np.nan
     flags[data[field].index[5:6], field] = BAD
@@ -155,22 +146,22 @@ def test_flagIsolated(data, field):
     # 2016-01-08   7.0   -inf
     #         ..    ..     ..
 
-    _, flags_result = flagIsolated(
-        data, field, flags, group_window="1D", gap_window="2.1D", flag=BAD
+    qc = SaQC(data, flags).flagIsolated(
+        field, group_window="1D", gap_window="2.1D", flag=BAD
     )
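+    # only the isolated points at positions 3 and 5 may be flagged;
+    # every other row must stay UNFLAGGED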
+    assert (qc._flags[field].iloc[[3, 5]] == BAD).all()
+    neg_list = [k for k in range(d_len) if k not in [3, 5]]
+    assert (qc._flags[field].iloc[neg_list] == UNFLAGGED).all()
 
-    assert flags_result[field].iloc[[3, 5]].all()
-
-    data, flags_result = flagIsolated(
-        data,
+    qc = qc.flagIsolated(
         field,
-        flags_result,
         group_window="2D",
         gap_window="2.1D",
-        continuation_range="1.1D",
         flag=BAD,
     )
-    assert flags_result[field].iloc[[3, 5, 13, 14]].all()
+    assert (qc._flags[field].iloc[[3, 5, 13, 14]] == BAD).all()
+    neg_list = [k for k in range(d_len) if k not in [3, 5, 13, 14]]
+    assert (qc._flags[field].iloc[neg_list] == UNFLAGGED).all()
 
 
 def test_flagManual(data, field):
@@ -190,22 +181,20 @@ def test_flagManual(data, field):
     ]
 
     for kw in kwargs_list:
-        _, fl = flagManual(data.copy(), field, flags.copy(), **kw)
-        isflagged = fl[field] > UNFLAGGED
+        qc = SaQC(data, flags).flagManual(field, **kw)
+        isflagged = qc._flags[field] > UNFLAGGED
         assert isflagged[isflagged].index.equals(index_exp)
 
     # flag not exist in mdata
-    _, fl = flagManual(
-        data.copy(),
+    qc = SaQC(data, flags).flagManual(
         field,
-        flags.copy(),
         mdata=mdata,
         mflag="i do not exist",
         method="ontime",
         mformat="mflag",
         flag=BAD,
     )
-    isflagged = fl[field] > UNFLAGGED
+    isflagged = qc._flags[field] > UNFLAGGED
     assert isflagged[isflagged].index.equals(pd.DatetimeIndex([]))
 
     # check closure methods
@@ -233,17 +222,15 @@ def test_flagManual(data, field):
     ]
     bound_drops = {"right-open": [1], "left-open": [0], "closed": []}
     for method in ["right-open", "left-open", "closed"]:
-        _, fl = flagManual(
-            data.copy(),
+        qc = qc.flagManual(
             field,
-            flags.copy(),
             mdata=mdata,
             mflag=1,
             method=method,
             mformat="mflag",
             flag=BAD,
         )
-        isflagged = fl[field] > UNFLAGGED
+        isflagged = qc._flags[field] > UNFLAGGED
         for flag_i in flag_intervals:
             f_i = isflagged[slice(flag_i[0], flag_i[-1])].index
             check_i = f_i.drop(
@@ -264,15 +251,13 @@ def test_flagDriftFromNorm(dat):
     fields = ["field1", "field2", "field3"]
 
     flags = initFlagsLike(data)
-    _, flags_norm = flagDriftFromNorm(
-        data=data.copy(),
+    qc = SaQC(data, flags).flagDriftFromNorm(
         field=fields,
-        flags=flags.copy(),
         freq="200min",
         spread=5,
         flag=BAD,
     )
-    assert all(flags_norm["field3"] > UNFLAGGED)
+    assert all(qc._flags["field3"] > UNFLAGGED)
 
 
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_1")])
@@ -285,16 +270,14 @@ def test_flagDriftFromReference(dat):
 
     flags = initFlagsLike(data)
 
-    _, flags_ref = flagDriftFromReference(
-        data=data.copy(),
+    qc = SaQC(data, flags).flagDriftFromReference(
         field=fields,
-        flags=flags.copy(),
         reference="field1",
         freq="3D",
         thresh=20,
         flag=BAD,
     )
-    assert all(flags_ref["field3"] > UNFLAGGED)
+    assert all(qc._flags["field3"] > UNFLAGGED)
 
 
 def test_transferFlags():
@@ -302,9 +285,18 @@ def test_transferFlags():
     qc = saqc.SaQC(data)
     qc = qc.flagRange("a", max=1.5)
     qc = qc.transferFlags(["a", "a"], ["b", "c"])
-    assert np.all(
-        qc.flags["b"].values == np.array([saqc.constants.UNFLAGGED, saqc.constants.BAD])
+    assert np.all(qc.flags["b"].values == np.array([UNFLAGGED, BAD]))
+    assert np.all(qc.flags["c"].values == np.array([UNFLAGGED, BAD]))
+
+
+def test_flagJumps():
+    data = pd.DataFrame(
+        {"a": [1, 1, 1, 1, 1, 6, 6, 6, 6, 6]},
+        index=pd.date_range(start="2020-01-01", periods=10, freq="D"),
     )
-    assert np.all(
-        qc.flags["c"].values == np.array([saqc.constants.UNFLAGGED, saqc.constants.BAD])
+    qc = SaQC(data=data)
+    qc = qc.flagJumps(field="a", thresh=1, window="2D")
+    assert qc.flags["a"][5] == BAD
+    assert np.all(qc.flags["a"].values[:5] == UNFLAGGED) & np.all(
+        qc.flags["a"].values[6:] == UNFLAGGED
     )
diff --git a/tests/funcs/test_generic_api_functions.py b/tests/funcs/test_generic_api_functions.py
index 64ca9173effe6a1017ff4ea09fb4504e358c01a2..6f3c77691bec10bb084b3149cbb422a43d17fe91 100644
--- a/tests/funcs/test_generic_api_functions.py
+++ b/tests/funcs/test_generic_api_functions.py
@@ -35,156 +35,157 @@ def test_emptyData():
     assert saqc.flags.empty
 
 
-def test_writeTargetFlagGeneric(data):
-    params = [
+@pytest.mark.parametrize(
+    "targets, func",
+    [
         (["tmp"], lambda x, y: pd.Series(True, index=x.index.union(y.index))),
         (
             ["tmp1", "tmp2"],
             lambda x, y: [pd.Series(True, index=x.index.union(y.index))] * 2,
         ),
-    ]
-    for targets, func in params:
-        expected_meta = {
-            "func": "flagGeneric",
-            "args": (data.columns.tolist(), targets),
-            "kwargs": {
-                "func": func.__name__,
-                "flag": BAD,
-                "dfilter": FILTER_ALL,
-            },
-        }
-
-        saqc = SaQC(data=data)
-        saqc = saqc.flagGeneric(field=data.columns, target=targets, func=func, flag=BAD)
-        for target in targets:
-            assert saqc._flags.history[target].hist.iloc[0].tolist() == [BAD]
-            assert saqc._flags.history[target].hist.iloc[0].tolist() == [BAD]
-            assert saqc._flags.history[target].meta[0] == expected_meta
-
-
-def test_overwriteFieldFlagGeneric(data):
-    params = [
+    ],
+)
+def test_writeTargetFlagGeneric(data, targets, func):
+    expected_meta = {
+        "func": "flagGeneric",
+        "args": (data.columns.tolist(), targets),
+        "kwargs": {
+            "func": func.__name__,
+            "flag": BAD,
+            "dfilter": FILTER_ALL,
+        },
+    }
+
+    saqc = SaQC(data=data)
+    saqc = saqc.flagGeneric(field=data.columns, target=targets, func=func, flag=BAD)
+    for target in targets:
+        assert saqc._flags.history[target].hist.iloc[0].tolist() == [BAD]
+        assert saqc._flags.history[target].meta[0] == expected_meta
+
+
+@pytest.mark.parametrize(
+    "fields, func",
+    [
         (["var1"], lambda x: pd.Series(True, index=x.index)),
         (
             ["var1", "var2"],
             lambda x, y: [pd.Series(True, index=x.index.union(y.index))] * 2,
         ),
-    ]
+    ],
+)
+def test_overwriteFieldFlagGeneric(data, fields, func):
 
     flag = 12
 
-    for fields, func in params:
-        expected_meta = {
-            "func": "flagGeneric",
-            "args": (fields, fields),
-            "kwargs": {
-                "func": func.__name__,
-                "flag": flag,
-                "dfilter": FILTER_ALL,
-            },
-        }
-
-        saqc = SaQC(
-            data=data.copy(),
-            flags=Flags(
-                {
-                    k: pd.Series(data[k] % 2, index=data[k].index).replace(
-                        {0: UNFLAGGED, 1: 127}
-                    )
-                    for k in data.columns
-                }
-            ),
-        )
-
-        res = saqc.flagGeneric(field=fields, func=func, flag=flag)
-        for field in fields:
-            histcol1 = res._flags.history[field].hist[1]
-            assert (histcol1 == flag).all()
-            assert (data[field] == res.data[field]).all(axis=None)
-            assert res._flags.history[field].meta[0] == {}
-            assert res._flags.history[field].meta[1] == expected_meta
-
-
-def test_writeTargetProcGeneric(data):
-    fields = ["var1", "var2"]
-    params = [
+    expected_meta = {
+        "func": "flagGeneric",
+        "args": (fields, fields),
+        "kwargs": {
+            "func": func.__name__,
+            "flag": flag,
+            "dfilter": FILTER_ALL,
+        },
+    }
+
+    saqc = SaQC(
+        data=data.copy(),
+        flags=Flags(
+            {
+                k: pd.Series(data[k] % 2, index=data[k].index).replace(
+                    {0: UNFLAGGED, 1: 127}
+                )
+                for k in data.columns
+            }
+        ),
+    )
+
+    res = saqc.flagGeneric(field=fields, func=func, flag=flag)
+    for field in fields:
+        histcol1 = res._flags.history[field].hist[1]
+        assert (histcol1 == flag).all()
+        assert (data[field] == res.data[field]).all(axis=None)
+        assert res._flags.history[field].meta[0] == {}
+        assert res._flags.history[field].meta[1] == expected_meta
+
+
+@pytest.mark.parametrize(
+    "targets, func",
+    [
         (["tmp"], lambda x, y: x + y),
         (["tmp1", "tmp2"], lambda x, y: (x + y, y * 2)),
-    ]
+    ],
+)
+def test_writeTargetProcGeneric(data, targets, func):
+    fields = ["var1", "var2"]
     dfilter = 128
-    for targets, func in params:
-
-        expected_data = DictOfSeries(
-            func(*[data[f] for f in fields]), columns=toSequence(targets)
-        ).squeeze()
-
-        expected_meta = {
-            "func": "procGeneric",
-            "args": (fields, targets),
-            "kwargs": {
-                "func": func.__name__,
-                "dfilter": dfilter,
-                "label": "generic",
-            },
-        }
-        saqc = SaQC(
-            data=data,
-            flags=Flags(
-                {k: pd.Series(127.0, index=data[k].index) for k in data.columns}
-            ),
-        )
-        res = saqc.processGeneric(
-            field=fields,
-            target=targets,
-            func=func,
-            dfilter=dfilter,
-            label="generic",
-        )
-        assert (expected_data == res.data[targets].squeeze()).all(axis=None)
-        # check that new histories where created
-        for target in targets:
-            assert res._flags.history[target].hist.iloc[0].isna().all()
-            assert res._flags.history[target].meta[0] == expected_meta
-
-
-def test_overwriteFieldProcGeneric(data):
-    params = [
+
+    expected_data = DictOfSeries(
+        func(*[data[f] for f in fields]), columns=toSequence(targets)
+    ).squeeze()
+
+    expected_meta = {
+        "func": "procGeneric",
+        "args": (fields, targets),
+        "kwargs": {
+            "func": func.__name__,
+            "dfilter": dfilter,
+            "label": "generic",
+        },
+    }
+    saqc = SaQC(
+        data=data,
+        flags=Flags({k: pd.Series(127.0, index=data[k].index) for k in data.columns}),
+    )
+    res = saqc.processGeneric(
+        field=fields,
+        target=targets,
+        func=func,
+        dfilter=dfilter,
+        label="generic",
+    )
+    assert (expected_data == res.data[targets].squeeze()).all(axis=None)
+        # check that new histories were created
+    for target in targets:
+        assert res._flags.history[target].hist.iloc[0].isna().all()
+        assert res._flags.history[target].meta[0] == expected_meta
+
+
+@pytest.mark.parametrize(
+    "fields, func",
+    [
         (["var1"], lambda x: x * 2),
         (["var1", "var2"], lambda x, y: (x + y, y * 2)),
-    ]
+    ],
+)
+def test_overwriteFieldProcGeneric(data, fields, func):
     dfilter = 128
-    for fields, func in params:
-        expected_data = DictOfSeries(
-            func(*[data[f] for f in fields]), columns=fields
-        ).squeeze()
-
-        expected_meta = {
-            "func": "procGeneric",
-            "args": (fields, fields),
-            "kwargs": {
-                "func": func.__name__,
-                "dfilter": dfilter,
-                "label": "generic",
-            },
-        }
-
-        saqc = SaQC(
-            data=data,
-            flags=Flags(
-                {k: pd.Series(127.0, index=data[k].index) for k in data.columns}
-            ),
-        )
-
-        res = saqc.processGeneric(
-            field=fields, func=func, dfilter=dfilter, label="generic"
-        )
-        assert (expected_data == res.data[fields].squeeze()).all(axis=None)
-        # check that the histories got appended
-        for field in fields:
-            assert (res._flags.history[field].hist[0] == 127.0).all()
-            assert res._flags.history[field].hist[1].isna().all()
-            assert res._flags.history[field].meta[0] == {}
-            assert res._flags.history[field].meta[1] == expected_meta
+    expected_data = DictOfSeries(
+        func(*[data[f] for f in fields]), columns=fields
+    ).squeeze()
+
+    expected_meta = {
+        "func": "procGeneric",
+        "args": (fields, fields),
+        "kwargs": {
+            "func": func.__name__,
+            "dfilter": dfilter,
+            "label": "generic",
+        },
+    }
+
+    saqc = SaQC(
+        data=data,
+        flags=Flags({k: pd.Series(127.0, index=data[k].index) for k in data.columns}),
+    )
+
+    res = saqc.processGeneric(field=fields, func=func, dfilter=dfilter, label="generic")
+    assert (expected_data == res.data[fields].squeeze()).all(axis=None)
+    # check that the histories got appended
+    for field in fields:
+        assert (res._flags.history[field].hist[0] == 127.0).all()
+        assert res._flags.history[field].hist[1].isna().all()
+        assert res._flags.history[field].meta[0] == {}
+        assert res._flags.history[field].meta[1] == expected_meta
 
 
 def test_label():
@@ -202,3 +203,31 @@ def test_label():
         func=lambda x, y: isflagged(x, "out of range") | isflagged(y),
     )
     assert list((qc.flags["data2"] > 0).values) == [False, False, True, False, False]
+
+
+@pytest.mark.parametrize(
+    "kwargs, got, expected",
+    [
+        (
+            {
+                "lower": 0,
+            },
+            [-9, -2, 1, 2, 9],
+            [0, 0, 1, 2, 9],
+        ),
+        ({"upper": 3}, [-9, -2, 1, 2, 9], [-9, -2, 1, 2, 3]),
+        ({"lower": -6, "upper": 3}, [-9, -2, 1, 2, 9], [-6, -2, 1, 2, 3]),
+    ],
+)
+def test_processGenericClip(kwargs, got, expected):
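+    # processGeneric should apply the clipping callable to the column
+    # and write the clipped values back into the data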
+    field = "data"
+    got = pd.DataFrame(
+        got, columns=[field], index=pd.date_range("2020-06-30", periods=len(got))
+    )
+    expected = pd.DataFrame(
+        expected,
+        columns=[field],
+        index=pd.date_range("2020-06-30", periods=len(expected)),
+    )
+    qc = SaQC(got).processGeneric(field, func=lambda x: clip(x, **kwargs))
+    assert (qc._data[field] == expected[field]).all()
diff --git a/tests/funcs/test_generic_config_functions.py b/tests/funcs/test_generic_config_functions.py
index a9e4d09a61bf5a12c0e7b63afc074b26aba5428a..1106cb21718f19e131d8e72b7c6e653d3cb371d1 100644
--- a/tests/funcs/test_generic_config_functions.py
+++ b/tests/funcs/test_generic_config_functions.py
@@ -222,10 +222,10 @@ def test_callableArgumentsUnary(data):
     window = 5
 
     @register(mask=["field"], demask=["field"], squeeze=["field"])
-    def testFuncUnary(data, field, flags, func, **kwargs):
-        value = data[field].rolling(window=window).apply(func)
-        data[field] = value
-        return data, initFlagsLike(data)
+    def testFuncUnary(saqc, field, func, **kwargs):
+        value = saqc._data[field].rolling(window=window).apply(func)
+        saqc._data[field] = value
+        return saqc
 
     var = data.columns[0]
 
@@ -252,9 +252,9 @@ def test_callableArgumentsBinary(data):
     var1, var2 = data.columns[:2]
 
     @register(mask=["field"], demask=["field"], squeeze=["field"])
-    def testFuncBinary(data, field, flags, func, **kwargs):
-        data[field] = func(data[var1], data[var2])
-        return data, initFlagsLike(data)
+    def testFuncBinary(saqc, field, func, **kwargs):
+        saqc._data[field] = func(data[var1], data[var2])
+        return saqc
 
     config = f"""
     varname ; test
diff --git a/tests/funcs/test_harm_funcs.py b/tests/funcs/test_harm_funcs.py
index 53ff995a4fefb45d383153c2698ff5e19f9d5272..1dbed141d201de517fc0044fa790b67fdd6e7826 100644
--- a/tests/funcs/test_harm_funcs.py
+++ b/tests/funcs/test_harm_funcs.py
@@ -12,9 +12,7 @@ import pytest
 
 import dios
 from saqc.constants import BAD, UNFLAGGED
-from saqc.core import Flags, initFlagsLike
-from saqc.funcs.resampling import concatFlags, interpolate, linear, resample, shift
-from saqc.funcs.tools import copyField, dropField
+from saqc.core import SaQC, initFlagsLike
 from tests.common import checkDataFlagsInvariants
 
 
@@ -55,14 +53,13 @@ def test_wrapper(data, func, kws):
     for c in flags.columns:
         flags[:, c] = BAD
 
-    import saqc
+    qc = SaQC(data, flags)
 
-    func = getattr(saqc.funcs, func)
-    data, flags = func(data, field, flags, freq, **kws)
+    qc = getattr(qc, func)(field, freq, **kws)
 
     # check minimal requirements
-    checkDataFlagsInvariants(data, flags, field)
-    assert data[field].index.inferred_freq == freq
+    checkDataFlagsInvariants(qc._data, qc._flags, field)
+    assert qc.data[field].index.inferred_freq == freq
 
 
 _SUPPORTED_METHODS = [
@@ -105,31 +102,27 @@ def test_gridInterpolation(data, method, fill_history):
         for c in flags.columns:
             flags[::2, c] = UNFLAGGED
 
+    qc = SaQC(data, flags)
+
     # we are just testing if the interpolation gets passed to the series without
     # causing an error:
-    res = interpolate(
-        data.copy(),
+    res = qc.interpolate(
         field,
-        flags.copy(),
         freq,
         method=method,
         downcast_interpolation=True,
     )
 
     if method == "polynomial":
-        res = interpolate(
-            data.copy(),
+        res = qc.interpolate(
             field,
-            flags.copy(),
             freq,
             order=2,
             method=method,
             downcast_interpolation=True,
         )
-        res = interpolate(
-            data.copy(),
+        res = qc.interpolate(
             field,
-            flags.copy(),
             freq,
             order=10,
             method=method,
@@ -137,9 +130,8 @@ def test_gridInterpolation(data, method, fill_history):
         )
 
     # check minimal requirements
-    rdata, rflags = res
-    checkDataFlagsInvariants(rdata, rflags, field, identical=False)
-    assert rdata[field].index.inferred_freq == freq
+    checkDataFlagsInvariants(res._data, res._flags, field, identical=False)
+    assert res.data[field].index.inferred_freq == freq
 
 
 @pytest.mark.parametrize(
@@ -181,21 +173,27 @@ def test_harmSingleVarIntermediateFlagging(data, reshaper):
 
     pre_data = data.copy()
     pre_flags = flags.copy()
-    data, flags = copyField(data, field, flags, field + "_interpolated")
-    data, flags = linear(data, field + "_interpolated", flags, freq=freq)
-    checkDataFlagsInvariants(data, flags, field + "_interpolated", identical=True)
-    assert data[field + "_interpolated"].index.inferred_freq == freq
+    qc = SaQC(data, flags)
+
+    qc = qc.copyField(field, field + "_interpolated")
+    qc = qc.linear(field + "_interpolated", freq=freq)
+    checkDataFlagsInvariants(
+        qc._data, qc._flags, field + "_interpolated", identical=True
+    )
+    assert qc._data[field + "_interpolated"].index.inferred_freq == freq
 
     # flag something bad
-    flags[data[field + "_interpolated"].index[3:4], field + "_interpolated"] = BAD
-    data, flags = concatFlags(
-        data, field + "_interpolated", flags, method="inverse_" + reshaper, target=field
+    qc._flags[
+        qc._data[field + "_interpolated"].index[3:4], field + "_interpolated"
+    ] = BAD
+    qc = qc.concatFlags(
+        field + "_interpolated", method="inverse_" + reshaper, target=field
     )
-    data, flags = dropField(data, field + "_interpolated", flags)
+    qc = qc.dropField(field + "_interpolated")
 
-    assert len(data[field]) == len(flags[field])
-    assert data[field].equals(pre_data[field])
-    assert flags[field].index.equals(pre_flags[field].index)
+    assert len(qc.data[field]) == len(qc.flags[field])
+    assert qc.data[field].equals(pre_data[field])
+    assert qc.flags[field].index.equals(pre_flags[field].index)
 
     if "agg" in reshaper:
         if reshaper == "nagg":
@@ -207,9 +205,9 @@ def test_harmSingleVarIntermediateFlagging(data, reshaper):
         else:
             raise NotImplementedError("untested test case")
 
-        assert all(flags[field].iloc[start:end] > UNFLAGGED)
-        assert all(flags[field].iloc[:start] == UNFLAGGED)
-        assert all(flags[field].iloc[end:] == UNFLAGGED)
+        assert all(qc._flags[field].iloc[start:end] > UNFLAGGED)
+        assert all(qc._flags[field].iloc[:start] == UNFLAGGED)
+        assert all(qc._flags[field].iloc[end:] == UNFLAGGED)
 
     elif "shift" in reshaper:
         if reshaper == "nshift":
@@ -221,7 +219,7 @@ def test_harmSingleVarIntermediateFlagging(data, reshaper):
         else:
             raise NotImplementedError("untested test case")
 
-        flagged = flags[field] > UNFLAGGED
+        flagged = qc._flags[field] > UNFLAGGED
         assert all(flagged == exp)
 
     elif reshaper == "interpolation":
@@ -281,21 +279,20 @@ def test_harmSingleVarInterpolationAgg(data, params, expected):
     pre_flaggger = flags.copy()
     method, freq = params
 
-    data_harm, flags_harm = copyField(data, "data", flags, "data_harm")
-    data_harm, flags_harm = resample(
-        data_harm, h_field, flags_harm, freq, func=np.sum, method=method
-    )
-    checkDataFlagsInvariants(data_harm, flags_harm, h_field, identical=True)
-    assert data_harm[h_field].index.freq == pd.Timedelta(freq)
-    assert data_harm[h_field].equals(expected)
+    qc = SaQC(data, flags)
 
-    data_deharm, flags_deharm = concatFlags(
-        data_harm, h_field, flags_harm, target=field, method="inverse_" + method
-    )
-    data_deharm, flags_deharm = dropField(data_deharm, h_field, flags_deharm)
-    checkDataFlagsInvariants(data_deharm, flags_deharm, field, identical=True)
-    assert data_deharm[field].equals(pre_data[field])
-    assert flags_deharm[field].equals(pre_flaggger[field])
+    qc = qc.copyField("data", "data_harm")
+    qc = qc.resample(h_field, freq, func=np.sum, method=method)
+
+    checkDataFlagsInvariants(qc._data, qc._flags, h_field, identical=True)
+    assert qc._data[h_field].index.freq == pd.Timedelta(freq)
+    assert qc._data[h_field].equals(expected)
+
+    qc = qc.concatFlags(h_field, target=field, method="inverse_" + method)
+    qc = qc.dropField(h_field)
+    checkDataFlagsInvariants(qc._data, qc._flags, field, identical=True)
+    assert qc.data[field].equals(pre_data[field])
+    assert qc.flags[field].equals(pre_flaggger[field])
 
 
 @pytest.mark.parametrize(
@@ -365,16 +362,16 @@ def test_harmSingleVarInterpolationShift(data, params, expected):
     pre_flags = flags.copy()
     method, freq = params
 
-    data_harm, flags_harm = copyField(data, "data", flags, "data_harm")
-    data_harm, flags_harm = shift(data_harm, h_field, flags_harm, freq, method=method)
-    assert data_harm[h_field].equals(expected)
-    checkDataFlagsInvariants(data_harm, flags_harm, field, identical=True)
+    qc = SaQC(data, flags)
 
-    data_deharm, flags_deharm = concatFlags(
-        data_harm, h_field, flags_harm, target=field, method="inverse_" + method
-    )
-    checkDataFlagsInvariants(data_deharm, flags_deharm, field, identical=True)
+    qc = qc.copyField("data", "data_harm")
+    qc = qc.shift(h_field, freq, method=method)
+    assert qc.data[h_field].equals(expected)
+    checkDataFlagsInvariants(qc._data, qc._flags, field, identical=True)
+
+    qc = qc.concatFlags(h_field, target=field, method="inverse_" + method)
+    checkDataFlagsInvariants(qc._data, qc._flags, field, identical=True)
 
-    data_deharm, flags_deharm = dropField(data_deharm, h_field, flags_deharm)
-    assert data_deharm[field].equals(pre_data[field])
-    assert flags_deharm[field].equals(pre_flags[field])
+    qc = qc.dropField(h_field)
+    assert qc.data[field].equals(pre_data[field])
+    assert qc.flags[field].equals(pre_flags[field])
diff --git a/tests/funcs/test_modelling.py b/tests/funcs/test_modelling.py
deleted file mode 100644
index a768c86586bf1e9329f731a9e74ea9778f502a29..0000000000000000000000000000000000000000
--- a/tests/funcs/test_modelling.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#! /usr/bin/env python
-
-# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -*- coding: utf-8 -*-
-
-import numpy as np
-import pandas as pd
-
-# see test/functs/fixtures.py for global fixtures "course_..."
-import pytest
-
-import dios
-from saqc import BAD, UNFLAGGED
-from saqc.core import initFlagsLike
-from saqc.funcs.residuals import calculatePolynomialResiduals, calculateRollingResiduals
-from saqc.funcs.tools import selectTime
-from tests.fixtures import char_dict, course_1, course_2
-
-
-@pytest.mark.filterwarnings("ignore: The fit may be poorly conditioned")
-@pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_2")])
-def test_modelling_polyFit_forRegular(dat):
-    data, _ = dat(
-        freq="10min", periods=30, initial_level=0, final_level=100, out_val=-100
-    )
-    # add some nice sine distortion
-    data = data + 10 * np.sin(np.arange(0, len(data.indexes[0])))
-    data = dios.DictOfSeries(data)
-    flags = initFlagsLike(data)
-    result1, _ = calculatePolynomialResiduals(data, "data", flags, 11, 2, numba=False)
-    result2, _ = calculatePolynomialResiduals(data, "data", flags, 11, 2, numba=True)
-    assert (result1["data"] - result2["data"]).abs().max() < 10**-10
-    result3, _ = calculatePolynomialResiduals(
-        data, "data", flags, "110min", 2, numba=False
-    )
-    assert result3["data"].equals(result1["data"])
-    result4, _ = calculatePolynomialResiduals(
-        data, "data", flags, 11, 2, numba=True, min_periods=11
-    )
-    assert (result4["data"] - result2["data"]).abs().max() < 10**-10
-    data.iloc[13:16] = np.nan
-    result5, _ = calculatePolynomialResiduals(
-        data, "data", flags, 11, 2, numba=True, min_periods=9
-    )
-    assert result5["data"].iloc[10:19].isna().all()
-
-
-@pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_2")])
-def test_modelling_rollingMean_forRegular(dat):
-    data, _ = dat(
-        freq="10min", periods=30, initial_level=0, final_level=100, out_val=-100
-    )
-    data = dios.DictOfSeries(data)
-    flags = initFlagsLike(data)
-    calculateRollingResiduals(
-        data,
-        "data",
-        flags,
-        5,
-        func=np.mean,
-        min_periods=0,
-        center=True,
-    )
-    calculateRollingResiduals(
-        data,
-        "data",
-        flags,
-        5,
-        func=np.mean,
-        min_periods=0,
-        center=False,
-    )
-
-
-@pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_1")])
-def test_modelling_mask(dat):
-    data, _ = dat()
-    data = dios.DictOfSeries(data)
-    flags = initFlagsLike(data)
-    field = "data"
-
-    # set flags everywhere to test unflagging
-    flags[:, field] = BAD
-
-    common = dict(data=data, field=field, flags=flags, mode="periodic")
-    data_seasonal, flags_seasonal = selectTime(
-        **common, start="20:00", end="40:00", closed=False
-    )
-    flagscol = flags_seasonal[field]
-    m = (20 > flagscol.index.minute) | (flagscol.index.minute > 40)
-    assert all(flags_seasonal[field][m] == UNFLAGGED)
-    assert all(data_seasonal[field][m].isna())
-
-    data_seasonal, flags_seasonal = selectTime(
-        **common, start="15:00:00", end="02:00:00"
-    )
-    flagscol = flags_seasonal[field]
-    m = (15 <= flagscol.index.hour) & (flagscol.index.hour <= 2)
-    assert all(flags_seasonal[field][m] == UNFLAGGED)
-    assert all(data_seasonal[field][m].isna())
-
-    data_seasonal, flags_seasonal = selectTime(
-        **common, start="03T00:00:00", end="10T00:00:00"
-    )
-    flagscol = flags_seasonal[field]
-    m = (3 <= flagscol.index.hour) & (flagscol.index.hour <= 10)
-    assert all(flags_seasonal[field][m] == UNFLAGGED)
-    assert all(data_seasonal[field][m].isna())
-
-    mask_ser = pd.Series(False, index=data["data"].index)
-    mask_ser[::5] = True
-    data["mask_ser"] = mask_ser
-    flags = initFlagsLike(data)
-    data_masked, flags_masked = selectTime(
-        data, "data", flags, mode="selection_field", selection_field="mask_ser"
-    )
-    m = mask_ser
-    assert all(flags_masked[field][m] == UNFLAGGED)
-    assert all(data_masked[field][m].isna())
diff --git a/tests/funcs/test_outier_detection.py b/tests/funcs/test_outlier_detection.py
similarity index 68%
rename from tests/funcs/test_outier_detection.py
rename to tests/funcs/test_outlier_detection.py
index 21da1d6efb242b49187a2f0e32f85b8b26083bab..921a82e5300c426217669c800b5521c90ec57839 100644
--- a/tests/funcs/test_outier_detection.py
+++ b/tests/funcs/test_outlier_detection.py
@@ -13,16 +13,9 @@ import pandas as pd
 import pytest
 
 import dios
+import saqc
 from saqc.constants import BAD, UNFLAGGED
-from saqc.core import initFlagsLike
-from saqc.funcs.outliers import (
-    flagByGrubbs,
-    flagCrossStatistics,
-    flagMAD,
-    flagMVScores,
-    flagOffset,
-    flagRaise,
-)
+from saqc.core import SaQC, initFlagsLike
 from tests.fixtures import char_dict, course_1, course_2, course_3, course_4
 
 
@@ -40,8 +33,8 @@ def test_flagMad(spiky_data):
     data = spiky_data[0]
     field, *_ = data.columns
     flags = initFlagsLike(data)
-    data, flags_result = flagMAD(data, field, flags, "1H", flag=BAD)
-    flag_result = flags_result[field]
+    qc = SaQC(data, flags).flagMAD(field, "1H", flag=BAD)
+    flag_result = qc.flags[field]
     test_sum = (flag_result[spiky_data[1]] == BAD).sum()
     assert test_sum == len(spiky_data[1])
 
@@ -50,10 +43,10 @@ def test_flagSpikesBasic(spiky_data):
     data = spiky_data[0]
     field, *_ = data.columns
     flags = initFlagsLike(data)
-    data, flags_result = flagOffset(
-        data, field, flags, thresh=60, tolerance=10, window="20min", flag=BAD
+    qc = SaQC(data, flags).flagOffset(
+        field, thresh=60, tolerance=10, window="20min", flag=BAD
     )
-    flag_result = flags_result[field]
+    flag_result = qc.flags[field]
     test_sum = (flag_result[spiky_data[1]] == BAD).sum()
     assert test_sum == len(spiky_data[1])
 
@@ -73,19 +66,17 @@ def test_flagSpikesLimitRaise(dat):
     data, characteristics = dat()
     field, *_ = data.columns
     flags = initFlagsLike(data)
-    _, flags_result = flagRaise(
-        data,
+    qc = SaQC(data, flags).flagRaise(
         field,
-        flags,
         thresh=2,
         freq="10min",
         raise_window="20min",
         numba_boost=False,
         flag=BAD,
     )
-    assert np.all(flags_result[field][characteristics["raise"]] > UNFLAGGED)
-    assert not np.any(flags_result[field][characteristics["return"]] > UNFLAGGED)
-    assert not np.any(flags_result[field][characteristics["drop"]] > UNFLAGGED)
+    assert np.all(qc.flags[field][characteristics["raise"]] > UNFLAGGED)
+    assert not np.any(qc.flags[field][characteristics["return"]] > UNFLAGGED)
+    assert not np.any(qc.flags[field][characteristics["drop"]] > UNFLAGGED)
 
 
 # see test/functs/fixtures.py for the 'course_N'
@@ -110,16 +101,14 @@ def test_flagMVScores(dat):
     s2 = pd.Series(data=s2.values, index=s1.index)
     data = dios.DictOfSeries([s1, s2], columns=["field1", "field2"])
     flags = initFlagsLike(data)
-    _, flags_result = flagMVScores(
-        data=data,
+    qc = SaQC(data, flags).flagMVScores(
         field=fields,
-        flags=flags,
         trafo=np.log,
         iter_start=0.95,
         n=10,
         flag=BAD,
     )
-    _check(fields, flags_result, characteristics)
+    _check(fields, qc.flags, characteristics)
 
 
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_3")])
@@ -134,10 +123,8 @@ def test_grubbs(dat):
         out_val=-10,
     )
     flags = initFlagsLike(data)
-    data, result_flags = flagByGrubbs(
-        data, "data", flags, window=20, min_periods=15, flag=BAD
-    )
-    assert np.all(result_flags["data"][char_dict["drop"]] > UNFLAGGED)
+    qc = SaQC(data, flags).flagByGrubbs("data", window=20, min_periods=15, flag=BAD)
+    assert np.all(qc.flags["data"][char_dict["drop"]] > UNFLAGGED)
 
 
 @pytest.mark.parametrize("dat", [pytest.lazy_fixture("course_2")])
@@ -151,9 +138,39 @@ def test_flagCrossStatistics(dat):
     data = dios.DictOfSeries([s1, s2], columns=["field1", "field2"])
     flags = initFlagsLike(data)
 
-    _, flags_result = flagCrossStatistics(
-        data, fields, flags, thresh=3, method=np.mean, flag=BAD
+    qc = SaQC(data, flags).flagCrossStatistics(
+        fields, thresh=3, method=np.mean, flag=BAD
     )
     for field in fields:
-        isflagged = flags_result[field] > UNFLAGGED
+        isflagged = qc.flags[field] > UNFLAGGED
         assert isflagged[characteristics["raise"]].all()
+
+
+def test_flagZScores():
+    np.random.seed(seed=1)
+    data = pd.Series(
+        [np.random.normal() for k in range(100)],
+        index=pd.date_range("2000", freq="1D", periods=100),
+        name="data",
+    )
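+    # inject artificial outliers at positions 5, 40 and 80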
+    data.iloc[[5, 80]] = 5
+    data.iloc[[40]] = -6
+    qc = saqc.SaQC(data)
+    qc = qc.flagZScore("data", window=None)
+
+    assert (qc.flags.to_df().iloc[[5, 40, 80], 0] > 0).all()
+
+    qc = saqc.SaQC(data)
+    qc = qc.flagZScore("data", window=None, min_residuals=10)
+
+    assert (qc.flags.to_df()["data"] < 0).all()
+
+    qc = saqc.SaQC(data)
+    qc = qc.flagZScore("data", window="20D")
+
+    assert (qc.flags.to_df().iloc[[40, 80], 0] > 0).all()
+
+    qc = saqc.SaQC(data)
+    qc = qc.flagZScore("data", window=20)
+
+    assert (qc.flags.to_df().iloc[[40, 80], 0] > 0).all()
diff --git a/tests/funcs/test_pattern_rec.py b/tests/funcs/test_pattern_rec.py
index 037bba240edb8622ad173a49848c60b0b31963f6..7b9e087164e7227f0a7ac58ec741150b02d6215e 100644
--- a/tests/funcs/test_pattern_rec.py
+++ b/tests/funcs/test_pattern_rec.py
@@ -8,12 +8,10 @@
 
 import pandas as pd
 import pytest
-from pandas.testing import assert_series_equal
 
 import dios
 from saqc.constants import BAD, UNFLAGGED
-from saqc.core import initFlagsLike
-from saqc.funcs.pattern import flagPatternByDTW
+from saqc.core import SaQC, initFlagsLike
 from tests.common import initData
 
 
@@ -36,16 +34,14 @@ def test_flagPattern_dtw(plot, normalize):
 
     data = dios.DictOfSeries(dict(data=data, pattern_data=pattern))
     flags = initFlagsLike(data, name="data")
-    data, flags = flagPatternByDTW(
-        data,
+    qc = SaQC(data, flags).flagPatternByDTW(
         "data",
-        flags,
         reference="pattern_data",
         plot=plot,
         normalize=normalize,
         flag=BAD,
     )
 
-    assert all(flags["data"].iloc[10:18] == BAD)
-    assert all(flags["data"].iloc[:9] == UNFLAGGED)
-    assert all(flags["data"].iloc[18:] == UNFLAGGED)
+    assert all(qc.flags["data"].iloc[10:18] == BAD)
+    assert all(qc.flags["data"].iloc[:9] == UNFLAGGED)
+    assert all(qc.flags["data"].iloc[18:] == UNFLAGGED)
diff --git a/tests/funcs/test_proc_functions.py b/tests/funcs/test_proc_functions.py
index 056a78b42314a4badc8cf4f0de9f9a970ebabff4..b1dd896078b14ca9dafb9aec2bfc03619c6ecf8b 100644
--- a/tests/funcs/test_proc_functions.py
+++ b/tests/funcs/test_proc_functions.py
@@ -16,15 +16,7 @@ import pytest
 import dios
 import saqc
 from saqc.constants import UNFLAGGED
-from saqc.core import initFlagsLike
-from saqc.funcs.drift import correctOffset
-from saqc.funcs.interpolation import (
-    interpolateByRolling,
-    interpolateIndex,
-    interpolateInvalid,
-)
-from saqc.funcs.resampling import resample
-from saqc.funcs.transformation import transform
+from saqc.core import SaQC, initFlagsLike
 from saqc.lib.ts_operators import linearInterpolation, polynomialInterpolation
 from tests.fixtures import char_dict, course_3, course_5
 
@@ -34,28 +26,24 @@ def test_rollingInterpolateMissing(course_5):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    dataInt, *_ = interpolateByRolling(
-        data.copy(),
+    qc = SaQC(data, flags).interpolateByRolling(
         field,
-        flags.copy(),
         3,
         func=np.median,
         center=True,
         min_periods=0,
         interpol_flag=UNFLAGGED,
     )
-    assert dataInt[field][characteristics["missing"]].notna().all()
-    dataInt, *_ = interpolateByRolling(
-        data.copy(),
+    assert qc.data[field][characteristics["missing"]].notna().all()
+    qc = SaQC(data, flags).interpolateByRolling(
         field,
-        flags.copy(),
         3,
         func=np.nanmean,
         center=False,
         min_periods=3,
         interpol_flag=UNFLAGGED,
     )
-    assert dataInt[field][characteristics["missing"]].isna().all()
+    assert qc.data[field][characteristics["missing"]].isna().all()
 
 
 def test_interpolateMissing(course_5):
@@ -63,23 +51,23 @@ def test_interpolateMissing(course_5):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    dataLin, *_ = interpolateInvalid(data, field, flags, method="linear")
-    dataPoly, *_ = interpolateInvalid(data, field, flags, method="polynomial")
-    assert dataLin[field][characteristics["missing"]].notna().all()
-    assert dataPoly[field][characteristics["missing"]].notna().all()
+    qc = SaQC(data, flags)
+
+    qc_lin = qc.interpolateInvalid(field, method="linear")
+    qc_poly = qc.interpolateInvalid(field, method="polynomial")
+    assert qc_lin.data[field][characteristics["missing"]].notna().all()
+    assert qc_poly.data[field][characteristics["missing"]].notna().all()
+
     data, characteristics = course_5(periods=10, nan_slice=[5, 6, 7])
-    dataLin1, *_ = interpolateInvalid(
-        data.copy(), field, flags, method="linear", limit=2
-    )
-    dataLin2, *_ = interpolateInvalid(
-        data.copy(), field, flags, method="linear", limit=3
-    )
-    dataLin3, *_ = interpolateInvalid(
-        data.copy(), field, flags, method="linear", limit=4
-    )
-    assert dataLin1[field][characteristics["missing"]].isna().all()
-    assert dataLin2[field][characteristics["missing"]].isna().all()
-    assert dataLin3[field][characteristics["missing"]].notna().all()
+
+    qc = SaQC(data, flags)
+    qc_lin_1 = qc.interpolateInvalid(field, method="linear", limit=2)
+    qc_lin_2 = qc.interpolateInvalid(field, method="linear", limit=3)
+    qc_lin_3 = qc.interpolateInvalid(field, method="linear", limit=4)
+
+    assert qc_lin_1.data[field][characteristics["missing"]].isna().all()
+    assert qc_lin_2.data[field][characteristics["missing"]].isna().all()
+    assert qc_lin_3.data[field][characteristics["missing"]].notna().all()
 
 
 def test_transform(course_5):
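The rewritten `test_interpolateMissing` pins down the `limit` behaviour: a run of three consecutive NaNs stays untouched for `limit=2` and `limit=3` and is only filled once `limit=4`, and since every SaQC method returns a new object, the same `qc` instance can be reused for all variants. A small sketch under those semantics (the data construction is illustrative):

```python
# Sketch of the limit semantics checked above: a gap of three consecutive
# NaNs is only interpolated once `limit` exceeds the gap length.
import numpy as np
import pandas as pd
import saqc

s = pd.Series(
    np.arange(10, dtype=float),
    index=pd.date_range("2020-01-01", periods=10, freq="1D"),
    name="x",
)
s.iloc[5:8] = np.nan  # gap of three consecutive NaNs

qc = saqc.SaQC(s)
kept = qc.interpolateInvalid("x", method="linear", limit=3)
filled = qc.interpolateInvalid("x", method="linear", limit=4)

print(kept.data["x"].iloc[5:8].isna().all())     # True: gap too long for limit=3
print(filled.data["x"].iloc[5:8].notna().all())  # True: limit=4 covers the gap
```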
@@ -87,50 +75,45 @@ def test_transform(course_5):
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    data1, *_ = transform(data, field, flags, func=linearInterpolation)
-    assert data1[field][characteristics["missing"]].isna().all()
-    data1, *_ = transform(
-        data, field, flags, func=lambda x: linearInterpolation(x, inter_limit=3)
-    )
-    assert data1[field][characteristics["missing"]].notna().all()
-    data1, *_ = transform(
-        data,
+    qc = SaQC(data, flags)
+
+    result = qc.transform(field, func=linearInterpolation)
+    assert result.data[field][characteristics["missing"]].isna().all()
+
+    result = qc.transform(field, func=lambda x: linearInterpolation(x, inter_limit=3))
+    assert result.data[field][characteristics["missing"]].notna().all()
+
+    result = qc.transform(
         field,
-        flags,
         func=lambda x: polynomialInterpolation(x, inter_limit=3, inter_order=3),
     )
-    assert data1[field][characteristics["missing"]].notna().all()
+    assert result.data[field][characteristics["missing"]].notna().all()
 
 
 def test_resample(course_5):
-    data, characteristics = course_5(
-        freq="1min", periods=30, nan_slice=[1, 11, 12, 22, 24, 26]
-    )
+    data, _ = course_5(freq="1min", periods=30, nan_slice=[1, 11, 12, 22, 24, 26])
     field = data.columns[0]
     data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    data1, *_ = resample(
-        data,
+    qc = SaQC(data, flags).resample(
         field,
-        flags,
         "10min",
         np.mean,
         maxna=2,
         maxna_group=1,
     )
-    assert ~np.isnan(data1[field].iloc[0])
-    assert np.isnan(data1[field].iloc[1])
-    assert np.isnan(data1[field].iloc[2])
+    assert ~np.isnan(qc.data[field].iloc[0])
+    assert np.isnan(qc.data[field].iloc[1])
+    assert np.isnan(qc.data[field].iloc[2])
 
 
 def test_interpolateGrid(course_5, course_3):
     data, _ = course_5()
-    data_grid, characteristics = course_3()
+    data_grid, _ = course_3()
     data["grid"] = data_grid.to_df()
-    # data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    dataInt, *_ = interpolateIndex(
-        data, "data", flags, "1h", "time", grid_field="grid", limit=10
+    SaQC(data, flags).interpolateIndex(
+        "data", "1h", "time", grid_field="grid", limit=10
     )
 
 
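The resample hunk above also encodes the interplay of `maxna` and `maxna_group`: only the first 10-minute bucket survives aggregation. A sketch with the same NaN layout (the constant data values are illustrative):

```python
# Why the three assertions in test_resample hold: maxna=2 caps the total
# number of NaNs per bucket, maxna_group=1 caps consecutive NaNs.
import numpy as np
import pandas as pd
import saqc

s = pd.Series(
    1.0, index=pd.date_range("2020-01-01", periods=30, freq="1min"), name="x"
)
s.iloc[[1, 11, 12, 22, 24, 26]] = np.nan

qc = saqc.SaQC(s).resample("x", "10min", np.mean, maxna=2, maxna_group=1)
print(qc.data["x"])
# bucket 0: one isolated NaN     -> aggregated (within both limits)
# bucket 1: two consecutive NaNs -> NaN (maxna_group=1 exceeded)
# bucket 2: three NaNs           -> NaN (maxna=2 exceeded)
```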
@@ -139,13 +122,34 @@ def test_offsetCorrecture():
     data = pd.Series(0, index=pd.date_range("2000", freq="1d", periods=100), name="dat")
     data.iloc[30:40] = -100
     data.iloc[70:80] = 100
-    data = dios.DictOfSeries(data)
     flags = initFlagsLike(data)
-    data, _ = correctOffset(data, "dat", flags, 40, 20, "3d", 1)
-    assert (data == 0).all()[0]
+    qc = SaQC(data, flags).correctOffset("dat", 40, 20, "3d", 1)
+    assert (qc.data == 0).all()[0]
 
 
 # GL-333
 def test_resampleSingleEmptySeries():
     qc = saqc.SaQC(pd.DataFrame(1, columns=["a"], index=pd.DatetimeIndex([])))
     qc.resample("a", freq="1d")
+
+
+@pytest.mark.parametrize(
+    "data",
+    [
+        pd.Series(
+            [
+                np.random.normal(loc=1 + k * 0.1, scale=3 * (1 - (k * 0.001)))
+                for k in range(100)
+            ],
+            index=pd.date_range("2000", freq="1D", periods=100),
+            name="data",
+        )
+    ],
+)
+def test_assignZScore(data):
+    qc = saqc.SaQC(data)
+    qc = qc.assignZScore("data", window="20D")
+    mean_res = qc.data["data"].mean()
+    std_res = qc.data["data"].std()
+    assert -0.1 < mean_res < 0.1
+    assert 0.9 < std_res < 1.1
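The newly added `test_assignZScore` standardises a series against a rolling 20-day window and only checks that mean and standard deviation end up near 0 and 1; the parametrised input is drawn without a fixed seed, which is why the tolerances are loose. A reproducible sketch of the same call, with a seeded generator (the seed is an addition, not part of the test):

```python
# Same assignZScore call as in the new test, but on seeded data so the
# printed statistics are reproducible.
import numpy as np
import pandas as pd
import saqc

rng = np.random.default_rng(0)
raw = pd.Series(
    rng.normal(loc=1.0, scale=3.0, size=100),
    index=pd.date_range("2000", freq="1D", periods=100),
    name="data",
)

qc = saqc.SaQC(raw).assignZScore("data", window="20D")
z = qc.data["data"]
print(round(z.mean(), 3), round(z.std(), 3))  # roughly 0 and 1
```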
diff --git a/tests/funcs/test_tools.py b/tests/funcs/test_tools.py
index 0d6c634f927df5e2e0de216b17c31f48aa50df85..8ec0b5262b6b953a40536ed2f0aa29b453942cb2 100644
--- a/tests/funcs/test_tools.py
+++ b/tests/funcs/test_tools.py
@@ -8,7 +8,6 @@ import pytest
 
 import dios
 import saqc
-from saqc.lib.plotting import makeFig
 
 
 @pytest.mark.slow
diff --git a/tests/fuzzy/test_masking.py b/tests/fuzzy/test_masking.py
index 3f49df708ff34f97b8b9dd815e4e2e8e4b1b2fde..0d0a49e92e88e932227e28bb6a4f0487fc8149b7 100644
--- a/tests/fuzzy/test_masking.py
+++ b/tests/fuzzy/test_masking.py
@@ -11,7 +11,7 @@ import pytest
 from hypothesis import given, settings
 
 from saqc.constants import BAD, UNFLAGGED
-from saqc.core.register import FunctionWrapper
+from saqc.core.register import _maskData, _unmaskData
 from tests.fuzzy.lib import MAX_EXAMPLES, dataFieldFlags
 
 
@@ -23,7 +23,7 @@ def test_maskingMasksData(data_field_flags):
     test if flagged values are replaced by np.nan
     """
     data_in, field, flags = data_field_flags
-    data_masked, mask = FunctionWrapper._maskData(
+    data_masked, mask = _maskData(
         data_in, flags, columns=[field], thresh=UNFLAGGED
     )  # thresh UNFLAGGED | np.inf
     assert data_masked[field].iloc[mask[field].index].isna().all()
@@ -42,11 +42,9 @@ def test_dataMutationPreventsUnmasking(data_field_flags):
 
     data_in, field, flags = data_field_flags
 
-    data_masked, mask = FunctionWrapper._maskData(
-        data_in, flags, columns=[field], thresh=UNFLAGGED
-    )
+    data_masked, mask = _maskData(data_in, flags, columns=[field], thresh=UNFLAGGED)
     data_masked[field] = filler
-    data_out = FunctionWrapper._unmaskData(data_masked, mask)
+    data_out = _unmaskData(data_masked, mask)
     assert (data_out[field] == filler).all(axis=None)
 
 
@@ -60,11 +58,9 @@ def test_flagsMutationPreventsUnmasking(data_field_flags):
     """
     data_in, field, flags = data_field_flags
 
-    data_masked, mask = FunctionWrapper._maskData(
-        data_in, flags, columns=[field], thresh=UNFLAGGED
-    )
+    data_masked, mask = _maskData(data_in, flags, columns=[field], thresh=UNFLAGGED)
     flags[:, field] = UNFLAGGED
-    data_out = FunctionWrapper._unmaskData(data_masked, mask)
+    data_out = _unmaskData(data_masked, mask)
     assert (data_out.loc[flags[field] == BAD, field].isna()).all(axis=None)
 
 
@@ -82,9 +78,7 @@ def test_reshapingPreventsUnmasking(data_field_flags):
 
     data_in, field, flags = data_field_flags
 
-    data_masked, mask = FunctionWrapper._maskData(
-        data_in, flags, columns=[field], thresh=UNFLAGGED
-    )
+    data_masked, mask = _maskData(data_in, flags, columns=[field], thresh=UNFLAGGED)
     # mutate indexes of `data` and `flags`
     index = data_masked[field].index.to_series()
     index.iloc[-len(data_masked[field]) // 2 :] += pd.Timedelta("7.5Min")
@@ -94,7 +88,7 @@ def test_reshapingPreventsUnmasking(data_field_flags):
     flags.drop(field)
     flags[field] = pd.Series(data=fflags.values, index=index)
 
-    data_out = FunctionWrapper._unmaskData(data_masked, mask)
+    data_out = _unmaskData(data_masked, mask)
     assert (data_out[field] == filler).all(axis=None)
 
 
@@ -107,10 +101,8 @@ def test_unmaskingInvertsMasking(data_field_flags):
     """
     data_in, field, flags = data_field_flags
 
-    data_masked, mask = FunctionWrapper._maskData(
-        data_in, flags, columns=[field], thresh=UNFLAGGED
-    )
-    data_out = FunctionWrapper._unmaskData(data_masked, mask)
+    data_masked, mask = _maskData(data_in, flags, columns=[field], thresh=UNFLAGGED)
+    data_out = _unmaskData(data_masked, mask)
     assert pd.DataFrame.equals(
         data_out.to_df().astype(float), data_in.to_df().astype(float)
     )
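The fuzzy masking tests now import `_maskData` and `_unmaskData` as module-level functions from `saqc.core.register` instead of reaching through `FunctionWrapper`. A minimal round-trip sketch, mirroring the calls and the equality check used in `test_unmaskingInvertsMasking` — these are private helpers, so everything apart from the call signatures shown in the tests is illustrative:

```python
# Round trip of the masking helpers: values flagged above `thresh` are hidden
# behind NaN and restored on unmasking, provided data and flags keep their shape.
import numpy as np
import pandas as pd

import dios
from saqc.constants import BAD, UNFLAGGED
from saqc.core import initFlagsLike
from saqc.core.register import _maskData, _unmaskData

s = pd.Series(
    np.arange(8, dtype=float),
    index=pd.date_range("2021-01-01", periods=8, freq="1h"),
    name="x",
)
data = dios.DictOfSeries(dict(x=s))
flags = initFlagsLike(data)
flags[:, "x"] = BAD  # flag the whole column

masked, mask = _maskData(data, flags, columns=["x"], thresh=UNFLAGGED)
assert masked["x"].isna().all()  # flagged values are masked

restored = _unmaskData(masked, mask)
assert restored.to_df().astype(float).equals(data.to_df().astype(float))
```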
diff --git a/tests/requirements.txt b/tests/requirements.txt
index a18d98d1bc61ff30cb421bc37afddd2208659061..539d74b9d070974a0bda140d91dc58ff498aac84 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-pytest==7.1.2
+pytest==7.1.3
 pytest-lazy-fixture==0.6.3
 Markdown==3.3.7
 beautifulsoup4==4.11.1