diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index a919acfa6eec14030b1b74644b6e66c007050ce3..a14efd71e18eb7d1bcdb312f590547ade43b232b 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -29,7 +29,7 @@ jobs:
       fail-fast: false
       matrix:
         os: ["windows-latest", "ubuntu-latest", "macos-latest"]
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
     defaults:
       run:
         # somehow this also works for windows O.o ??
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 78bc10f9eebfb2a6ea73c9214f6e1aaa81ba9e0b..d66374875a7bbb0334e3529d19251d9ab33e53fd 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -30,7 +30,7 @@ stages:
   - deploy
 
 default:
-  image: python:3.10
+  image: python:3.11
   before_script:
     - pip install --upgrade pip
     - pip install -r requirements.txt
@@ -133,8 +133,23 @@ python311:
     reports:
       junit: report.xml
 
+python312:
+  stage: test
+  image: python:3.12
+  script:
+    - export DISPLAY=:99
+    - Xvfb :99 &
+    - pytest tests -Werror --junitxml=report.xml
+    - python -m saqc --config docs/resources/data/config.csv --data docs/resources/data/data.csv --outfile /tmp/test.csv
+  artifacts:
+    when: always
+    reports:
+      junit: report.xml
+
 doctest:
   stage: test
+  variables:
+    COLUMNS: 200
   script:
     - cd docs
     - pip install -r requirements.txt
@@ -180,6 +195,16 @@ wheel311:
     - pip install .
     - python -c 'import saqc; print(f"{saqc.__version__=}")'
 
+wheel312:
+  stage: build
+  image: python:3.12
+  variables:
+    PYPI_PKG_NAME: "saqc-dev"
+  script:
+    - pip install wheel
+    - pip wheel .
+    - pip install .
+    - python -c 'import saqc; print(f"{saqc.__version__=}")'
 
 # ===========================================================
 # Extra Pipeline (run with a successful run of all other jobs on develop)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 264d806a5214f3e5aa17da70ce45c4535880e7d6..5f83a8da16918b1de013fbce889b0b6d413aa6d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,38 +6,52 @@ SPDX-License-Identifier: GPL-3.0-or-later
 
 # Changelog
 ## Unreleased
-[List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.5.0...develop)
+[List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.6.0...develop)
+### Added
+### Changed
+### Removed
+### Fixed
+### Deprecated
+
+## [2.6.0](https://git.ufz.de/rdm-software/saqc/-/tags/v2.6.0) - 2024-04-15
+[List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.5.0...v2.6.0)
 ### Added
 - `reindex`: base reindexer function
 - `flagGeneric`, `processGeneric`: target broadcasting and numpy array support
 - `SaQC`: automatic translation of incoming flags
 - Option to change the flagging scheme after initialization
 - `flagByClick`: manually assign flags using a graphical user interface
-- `SaQC`: support for selection, slicing and setting of items by use of subscription on SaQC objects (e.g. `qc[key]` and `qc[key] = value`).
-   Selection works with single keys, collections of keys and string slices (e.g. `qc["a":"f"]`).  Values can be SaQC objects, pd.Series,
-   Iterable of Series and dict-like with series values.
+- `SaQC`: support for selection, slicing and setting of items by subscription on `SaQC` objects
 - `transferFlags` is a multivariate function
 - `plot`: added `yscope` keyword
 - `setFlags`: function to replace `flagManual`
-- `flagUniLOF`: added defaultly applied correction to mitigate phenomenon of overflagging at relatively steep data value slopes. (parameter `slope_correct`).
+- `flagUniLOF`: added parameter `slope_correct` to correct for overflagging at relatively steep data value slopes
 - `History`: added option to change aggregation behavior
 - "horizontal" axis / multivariate mode for `rolling`
+- Translation scheme `AnnotatedFloatScheme`
 ### Changed
-- `flagPattern` uses *fastdtw* package now to compute timeseries distances
+- `SaQC.flags` always returns a `DictOfSeries`
 ### Removed
+- `SaQC` methods deprecated in version 2.4: `interpolate`, `interpolateIndex`, `interpolateInvalid`, `roll`, `linear`, `shift`, `flagCrossStatistics`
+- Method `Flags.toDios` deprecated in version 2.4
+- Method `DictOfSeries.index_of` deprecated in version 2.4
+- Option `"complete"` for parameter `history` of method `plot`
+- Option `"cycleskip"` for parameter `ax_kwargs` of method `plot`
+- Parameter `phaseplot` from method `plot`
 ### Fixed
 - `flagConstants`: fixed flagging of rolling ramps
 - `Flags`: add meta entry to imported flags
 - group operations were overwriting existing flags
-- `SaQC._construct` : was not working for inherit classes (used hardcoded `SaQC` to construct a new instance).
+- `SaQC._construct`: was not working for inherited classes
 - `processgeneric`: improved numpy function compatability
 ### Deprecated
 - `flagManual` in favor of `setFlags`
-- `inverse_` + methodstring options for `concatFlags` parameter `method` deprecated in favor of `invert=True` setting
+- `inverse_**` options for `concatFlags` parameter `method` in favor of `invert=True`
 - `flagRaise` with delegation to better replacements `flagZScore`, `flagUniLOF`, `flagJumps` or `flagOffset`
 - `flagByGrubbs` with delegation to better replacements `flagZScore`, `flagUniLOF`s
 - `flagMVScore` with delegation to manual application of the steps
-## [2.5.0](https://git.ufz.de/rdm-software/saqc/-/tags/v2.4.1) - 2023-06-22
+
+## [2.5.0](https://git.ufz.de/rdm-software/saqc/-/tags/v2.5.0) - 2023-09-05
 [List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.4.1...v2.5.0)
 ### Added
 - WMO standard mean aggregations
diff --git a/README.md b/README.md
index 6fb0bf122ecbade708efee04053209f54b94a00f..22c8533330fb53bbfac50df3e0b9dcc44fb621e9 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ could look like [this](https://git.ufz.de/rdm-software/saqc/raw/develop/docs/res
 ```
 varname    ; test
 #----------; ---------------------------------------------------------------------
-SM2        ; shift(freq="15Min")
+SM2        ; align(freq="15Min")
 'SM(1|2)+' ; flagMissing()
 SM1        ; flagRange(min=10, max=60)
 SM2        ; flagRange(min=10, max=40)
@@ -103,7 +103,7 @@ data = pd.read_csv(
 
 qc = SaQC(data=data)
 qc = (qc
-      .shift("SM2", freq="15Min")
+      .align("SM2", freq="15Min")
       .flagMissing("SM(1|2)+", regex=True)
       .flagRange("SM1", min=10, max=60)
       .flagRange("SM2", min=10, max=40)
diff --git a/docs/Makefile b/docs/Makefile
index 304194f644e35bf811546526ecb2077f2f4034e8..00dbcd98ed7a2a0fa7dc1a9ad4ec8735c681c73c 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -30,7 +30,7 @@ clean:
 # make documentation
 doc:
 	# generate environment table from dictionary
-	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+	@ $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
 
 # run tests
 test:
diff --git a/docs/cookbooks/DataRegularisation.rst b/docs/cookbooks/DataRegularisation.rst
index a63a77c5ce36e55aa816716e0bd4e0fac4c2c97e..984ba3c2b639518271a1828d32c4c541f2226cfc 100644
--- a/docs/cookbooks/DataRegularisation.rst
+++ b/docs/cookbooks/DataRegularisation.rst
@@ -315,10 +315,10 @@ Aggregation
 If we want to comprise several values by aggregation and assign the result to the new regular timestamp, instead of
 selecting a single one, we can do this, with the :py:meth:`~saqc.SaQC.resample` method.
 Lets resample the *SoilMoisture* data to have a *20* minutes sample rate by aggregating every *20* minutes intervals
-content with the arithmetic mean (which is provided by the ``numpy.mean`` function for example).
+content with the arithmetic mean.
 
    >>> import numpy as np
-   >>> qc = qc.resample('SoilMoisture', target='SoilMoisture_mean', freq='20min', method='bagg', func=np.mean)
+   >>> qc = qc.resample('SoilMoisture', target='SoilMoisture_mean', freq='20min', method='bagg', func="mean")
    >>> qc.data # doctest: +SKIP
                        SoilMoisture |                     SoilMoisture_mean |
    ================================ | ===================================== |
diff --git a/docs/cookbooks/DriftDetection.rst b/docs/cookbooks/DriftDetection.rst
index d0f693ee823fc9440a550ed259ffd2152469199e..8f3870ad18fc32ff18c268e9a9e44b62a089ff30 100644
--- a/docs/cookbooks/DriftDetection.rst
+++ b/docs/cookbooks/DriftDetection.rst
@@ -140,7 +140,7 @@ Looking at the example data set more closely, we see that 2 of the 5 variables s
     qc.plot(variables, xscope=slice('2017-05', '2017-11'))
 
 Lets try to detect those drifts via saqc. The changes we observe in the data seem to develop significantly only in temporal spans over a month,
-so we go for ``"1M"`` as value for the
+so we go for ``"1ME"`` as value for the
 ``window`` parameter. We identified the majority group as a group containing three variables, whereby two variables
 seem to be scattered away, so that we can leave the ``frac`` value at its default ``.5`` level.
 The majority group seems on average not to be spread out more than 3 or 4 degrees. So, for the ``spread`` value
@@ -152,7 +152,7 @@ average in a month from any member of the majority group.
 .. doctest:: flagDriftFromNorm
 
    >>> variables = ['temp1 [degC]', 'temp2 [degC]', 'temp3 [degC]', 'temp4 [degC]', 'temp5 [degC]']
-   >>> qc = qc.flagDriftFromNorm(variables, window='1M', spread=3)
+   >>> qc = qc.flagDriftFromNorm(variables, window='1ME', spread=3)
 
 .. plot::
    :context: close-figs
@@ -160,7 +160,7 @@ average in a month from any member of the majority group.
    :class: center
 
    >>> variables = ['temp1 [degC]', 'temp2 [degC]', 'temp3 [degC]', 'temp4 [degC]', 'temp5 [degC]']
-   >>> qc = qc.flagDriftFromNorm(variables, window='1M', spread=3)
+   >>> qc = qc.flagDriftFromNorm(variables, window='1ME', spread=3)
 
 Lets check the results:
 
diff --git a/docs/cookbooks/ResidualOutlierDetection.rst b/docs/cookbooks/ResidualOutlierDetection.rst
index 834b5b003b8e7ce31ef84577465d6da7546de7c9..289d1287381818c57767b28c5fd8f731ce3b406a 100644
--- a/docs/cookbooks/ResidualOutlierDetection.rst
+++ b/docs/cookbooks/ResidualOutlierDetection.rst
@@ -147,19 +147,19 @@ Rolling Mean
 ^^^^^^^^^^^^
 
 Easiest thing to do, would be, to apply some rolling mean
-model via the method :py:meth:`saqc.SaQC.roll`.
+model via the method :py:meth:`saqc.SaQC.rolling`.
 
 .. doctest:: exampleOD
 
    >>> import numpy as np
-   >>> qc = qc.roll(field='incidents', target='incidents_mean', func=np.mean, window='13D')
+   >>> qc = qc.rolling(field='incidents', target='incidents_mean', func=np.mean, window='13D')
 
 .. plot::
    :context:
    :include-source: False
 
    import numpy as np
-   qc = qc.roll(field='incidents', target='incidents_mean', func=np.mean, window='13D')
+   qc = qc.rolling(field='incidents', target='incidents_mean', func=np.mean, window='13D')
 
 The ``field`` parameter is passed the variable name, we want to calculate the rolling mean of.
 The ``target`` parameter holds the name, we want to store the results of the calculation to.
@@ -174,13 +174,13 @@ under the name ``np.median``. We just calculate another model curve for the ``"i
 
 .. doctest:: exampleOD
 
-   >>> qc = qc.roll(field='incidents', target='incidents_median', func=np.median, window='13D')
+   >>> qc = qc.rolling(field='incidents', target='incidents_median', func=np.median, window='13D')
 
 .. plot::
    :context:
    :include-source: False
 
-   qc = qc.roll(field='incidents', target='incidents_median', func=np.median, window='13D')
+   qc = qc.rolling(field='incidents', target='incidents_median', func=np.median, window='13D')
 
 We chose another :py:attr:`target` value for the rolling *median* calculation, in order to not override our results from
 the previous rolling *mean* calculation.
@@ -318,18 +318,18 @@ for the point lying in the center of every window, we would define our function
 
    z_score = lambda D: abs((D[14] - np.mean(D)) / np.std(D))
 
-And subsequently, use the :py:meth:`~saqc.SaQC.roll` method to make a rolling window application with the scoring
+And subsequently, use the :py:meth:`~saqc.SaQC.rolling` method to make a rolling window application with the scoring
 function:
 
 .. doctest:: exampleOD
 
-   >>> qc = qc.roll(field='incidents_residuals', target='incidents_scores', func=z_score, window='27D', min_periods=27)
+   >>> qc = qc.rolling(field='incidents_residuals', target='incidents_scores', func=z_score, window='27D', min_periods=27)
 
 .. plot::
    :context: close-figs
    :include-source: False
 
-   qc = qc.roll(field='incidents_residuals', target='incidents_scores', func=z_score, window='27D', min_periods=27)
+   qc = qc.rolling(field='incidents_residuals', target='incidents_scores', func=z_score, window='27D', min_periods=27)
 
 Optimization by Decomposition
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -347,13 +347,13 @@ So the attempt works fine, only because our data set is small and strictly regul
 Meaning that it has constant temporal distances between subsequent meassurements.
 
 In order to tweak our calculations and make them much more stable, it might be useful to decompose the scoring
-into seperate calls to the :py:meth:`~saqc.SaQC.roll` function, by calculating the series of the
+into separate calls to the :py:meth:`~saqc.SaQC.rolling` function, by calculating the series of the
 residuals *mean* and *standard deviation* seperately:
 
 .. doctest:: exampleOD
 
-   >>> qc = qc.roll(field='incidents_residuals', target='residuals_mean', window='27D', func=np.mean)
-   >>> qc = qc.roll(field='incidents_residuals', target='residuals_std', window='27D', func=np.std)
+   >>> qc = qc.rolling(field='incidents_residuals', target='residuals_mean', window='27D', func=np.mean)
+   >>> qc = qc.rolling(field='incidents_residuals', target='residuals_std', window='27D', func=np.std)
    >>> qc = qc.processGeneric(field=['incidents_scores', "residuals_mean", "residuals_std"], target="residuals_norm",
    ... func=lambda this, mean, std: (this - mean) / std)
 
@@ -362,15 +362,15 @@ residuals *mean* and *standard deviation* seperately:
    :context: close-figs
    :include-source: False
 
-   qc = qc.roll(field='incidents_residuals', target='residuals_mean', window='27D', func=np.mean)
-   qc = qc.roll(field='incidents_residuals', target='residuals_std', window='27D', func=np.std)
+   qc = qc.rolling(field='incidents_residuals', target='residuals_mean', window='27D', func=np.mean)
+   qc = qc.rolling(field='incidents_residuals', target='residuals_std', window='27D', func=np.std)
    qc = qc.processGeneric(field=['incidents_scores', "residuals_mean", "residuals_std"], target="residuals_norm", func=lambda this, mean, std: (this - mean) / std)
 
 
 With huge datasets, this will be noticably faster, compared to the method presented :ref:`initially <cookbooks/ResidualOutlierDetection:Scores>`\ ,
 because ``saqc`` dispatches the rolling with the basic numpy statistic methods to an optimized pandas built-in.
 
-Also, as a result of the :py:meth:`~saqc.SaQC.roll` assigning its results to the center of every window,
+Also, as a result of the :py:meth:`~saqc.SaQC.rolling` assigning its results to the center of every window,
 all the values are centered and we dont have to care about window center indices when we are generating
 the *Z*\ -Scores from the two series.
 
diff --git a/docs/funcs/filling.rst b/docs/funcs/filling.rst
index d158f8ff1c38a6947b7d8ad46ae32ce222eff569..38882094267fe80ce4a75d0a09a0b755c1e818d9 100644
--- a/docs/funcs/filling.rst
+++ b/docs/funcs/filling.rst
@@ -11,4 +11,3 @@ Gap filling
    :nosignatures:
 
    ~SaQC.interpolateByRolling
-   ~SaQC.interpolate
diff --git a/docs/funcs/flagTools.rst b/docs/funcs/flagTools.rst
index 91c1ff7ce383be0229d992d48980a5a1c82c7403..19e08a8ed836214327c526ada9d357c654287bad 100644
--- a/docs/funcs/flagTools.rst
+++ b/docs/funcs/flagTools.rst
@@ -15,3 +15,5 @@ Flagtools
    ~SaQC.flagManual
    ~SaQC.flagDummy
    ~SaQC.transferFlags
+   ~SaQC.andGroup
+   ~SaQC.orGroup
diff --git a/docs/funcs/genericWrapper.rst b/docs/funcs/genericWrapper.rst
index 5e487b1d7ce8ea1968288bb6c91069231ff4118b..6705ac0fb16137be5258af49c07caacfea94f4d7 100644
--- a/docs/funcs/genericWrapper.rst
+++ b/docs/funcs/genericWrapper.rst
@@ -13,6 +13,6 @@ Generic Functions
 
    ~SaQC.processGeneric
    ~SaQC.flagGeneric
-   ~SaQC.roll
-   ~SaQC.transform
-   ~SaQC.resample
+   ~SaQC.andGroup
+   ~SaQC.orGroup
+
diff --git a/docs/funcs/multivariateAnalysis.rst b/docs/funcs/multivariateAnalysis.rst
index 38fe8f5043afc352546837e59755dce96be3d74d..159d37619bf93051fe2dfb1218d230d37b30cf69 100644
--- a/docs/funcs/multivariateAnalysis.rst
+++ b/docs/funcs/multivariateAnalysis.rst
@@ -12,7 +12,6 @@ Multivariate outlier detection.
 .. autosummary::
    :nosignatures:
 
-   ~SaQC.flagCrossStatistics
    ~SaQC.flagLOF
    ~SaQC.flagZScore
 
diff --git a/docs/funcs/samplingAlignment.rst b/docs/funcs/samplingAlignment.rst
index 05b8762495e9fba7f8bac6416d7d50c8936ee75a..660bd1844dec872072465faa3df17d3df96b1931 100644
--- a/docs/funcs/samplingAlignment.rst
+++ b/docs/funcs/samplingAlignment.rst
@@ -10,10 +10,7 @@ Sampling Alignment
 .. autosummary::
    :nosignatures:
 
-   ~SaQC.linear
-   ~SaQC.shift
    ~SaQC.align
    ~SaQC.concatFlags
-   ~SaQC.interpolateIndex
    ~SaQC.resample
    ~SaQC.reindex
diff --git a/docs/funcs/tools.rst b/docs/funcs/tools.rst
index 261bcca0f4cdd99b6ec61f8203ddb047d036fdd4..38da126003b135868cb9f564a4b5498cbc570d82 100644
--- a/docs/funcs/tools.rst
+++ b/docs/funcs/tools.rst
@@ -15,3 +15,4 @@ Tools
    ~SaQC.renameField
    ~SaQC.selectTime
    ~SaQC.plot
+
diff --git a/docs/gettingstarted/TutorialAPI.rst b/docs/gettingstarted/TutorialAPI.rst
index 787056458b4786073c9396ec92a3651a0c20b982..1b322d6feb4b66ddd22a69b8ea7990929b7c04b5 100644
--- a/docs/gettingstarted/TutorialAPI.rst
+++ b/docs/gettingstarted/TutorialAPI.rst
@@ -50,7 +50,7 @@ with something more elaborate, is in fact a one line change. So let's start with
    from saqc import SaQC
 
    # we need some dummy data
-   values = np.array([12, 24, 36, 33, 89, 87, 45, 31, 18, 99])
+   values = np.array([12, 24, 36, 33, 89, 87, 45, 31, 18, 99], dtype="float")
    dates = pd.date_range(start="2020-01-01", periods=len(values), freq="D")
    data = pd.DataFrame({"a": values}, index=dates)
    # let's insert some constant values ...
@@ -103,32 +103,32 @@ number of different attributes, of which you likely might want to use the follow
 .. doctest:: python
 
    >>> qc.data  #doctest:+NORMALIZE_WHITESPACE
-                   a | 
-   ================= | 
-   2020-01-01   12.0 | 
-   2020-01-02   24.0 | 
-   2020-01-03   36.0 | 
-   2020-01-04   47.4 | 
-   2020-01-05   47.4 | 
-   2020-01-06   47.4 | 
-   2020-01-07   45.0 | 
-   2020-01-08   31.0 | 
-   2020-01-09  175.0 | 
-   2020-01-10   99.0 | 
+                   a |
+   ================= |
+   2020-01-01   12.0 |
+   2020-01-02   24.0 |
+   2020-01-03   36.0 |
+   2020-01-04   47.4 |
+   2020-01-05   47.4 |
+   2020-01-06   47.4 |
+   2020-01-07   45.0 |
+   2020-01-08   31.0 |
+   2020-01-09  175.0 |
+   2020-01-10   99.0 |
 
    >>> qc.flags  #doctest:+NORMALIZE_WHITESPACE
-                       a | 
-   ===================== | 
-   2020-01-01        BAD | 
-   2020-01-02  UNFLAGGED | 
-   2020-01-03  UNFLAGGED | 
-   2020-01-04  UNFLAGGED | 
-   2020-01-05  UNFLAGGED | 
-   2020-01-06  UNFLAGGED | 
-   2020-01-07  UNFLAGGED | 
-   2020-01-08  UNFLAGGED | 
-   2020-01-09        BAD | 
-   2020-01-10        BAD | 
+                       a |
+   ===================== |
+   2020-01-01        BAD |
+   2020-01-02  UNFLAGGED |
+   2020-01-03  UNFLAGGED |
+   2020-01-04  UNFLAGGED |
+   2020-01-05  UNFLAGGED |
+   2020-01-06  UNFLAGGED |
+   2020-01-07  UNFLAGGED |
+   2020-01-08  UNFLAGGED |
+   2020-01-09        BAD |
+   2020-01-10        BAD |
 
 
 Putting it together - The complete workflow
@@ -142,7 +142,7 @@ The snippet below provides you with a compete example from the things we have se
    from saqc import SaQC
 
    # we need some dummy data
-   values = np.random.randint(low=0, high=100, size=100)
+   values = np.random.randint(low=0, high=100, size=100).astype(float)
    dates = pd.date_range(start="2020-01-01", periods=len(values), freq="D")
    data = pd.DataFrame({"a": values}, index=dates)
    # let's insert some constant values ...
diff --git a/requirements.txt b/requirements.txt
index 7781576937d220e0069156b56207ff3913f327d7..027096da2a9a1ac17c0bb7b2718ef478728c37fa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,6 +4,7 @@
 
 Click==8.1.7
 docstring_parser==0.16
+fancy-collections==0.3.0
 fastdtw==0.3.4
 matplotlib==3.8.3
 numpy==1.26.4
@@ -13,4 +14,3 @@ pandas==2.2.1
 scikit-learn==1.4.1.post1
 scipy==1.12.0
 typing_extensions==4.10.0
-fancy-collections==0.2.1
diff --git a/saqc/__init__.py b/saqc/__init__.py
index 29e0b55c57f390c1c426c31e6da2d7a8c79c8de5..a801471cfe57053267271de325717b35e8a0233f 100644
--- a/saqc/__init__.py
+++ b/saqc/__init__.py
@@ -10,7 +10,13 @@
 from saqc.constants import BAD, DOUBTFUL, FILTER_ALL, FILTER_NONE, GOOD, UNFLAGGED
 from saqc.exceptions import ParsingError
 from saqc.core import Flags, DictOfSeries, SaQC
-from saqc.core.translation import DmpScheme, FloatScheme, PositionalScheme, SimpleScheme
+from saqc.core.translation import (
+    DmpScheme,
+    FloatScheme,
+    PositionalScheme,
+    SimpleScheme,
+    AnnotatedFloatScheme,
+)
 from saqc.parsing.reader import fromConfig
 from saqc.version import __version__
 
diff --git a/saqc/__main__.py b/saqc/__main__.py
index 9802e046eeca3412ddc611339473a8409ce1eac9..d5da0036369e3b1561e06b2cd38df59e4e5f9071 100644
--- a/saqc/__main__.py
+++ b/saqc/__main__.py
@@ -8,7 +8,6 @@
 
 from __future__ import annotations
 
-import json
 import logging
 from functools import partial
 from pathlib import Path
@@ -146,27 +145,27 @@ def main(
 
     saqc = cr.run()
 
-    data_result = saqc.data.to_pandas()
+    data_result = saqc.data
     flags_result = saqc.flags
-    if isinstance(flags_result, DictOfSeries):
-        flags_result = flags_result.to_pandas()
 
     if outfile:
-        data_result.columns = pd.MultiIndex.from_product(
-            [data_result.columns.tolist(), ["data"]]
-        )
-
-        if not isinstance(flags_result.columns, pd.MultiIndex):
-            flags_result.columns = pd.MultiIndex.from_product(
-                [flags_result.columns.tolist(), ["flags"]]
-            )
 
-        out = pd.concat([data_result, flags_result], axis=1).sort_index(
-            axis=1, level=0, sort_remaining=False
+        out = DictOfSeries()
+        for k in data_result.keys():
+            flagscol = flags_result[k]
+            if isinstance(flagscol, pd.Series):
+                flagscol = flagscol.rename("flags")
+            out[k] = pd.concat([data_result[k].rename("data"), flagscol], axis=1)
+
+        writeData(
+            writer,
+            out.to_pandas(
+                fill_value=-9999 if scheme == "positional" else np.nan,
+                multiindex=True,
+            ),
+            outfile,
         )
 
-        writeData(writer, out, outfile)
-
 
 if __name__ == "__main__":
     main()
diff --git a/saqc/core/core.py b/saqc/core/core.py
index 43448cd8ef05f2c8817f28c862750f2417715431..c669da5672c4a2f8e4db344605bcb5fd5f24193c 100644
--- a/saqc/core/core.py
+++ b/saqc/core/core.py
@@ -22,6 +22,7 @@ from saqc.core.frame import DictOfSeries
 from saqc.core.history import History
 from saqc.core.register import FUNC_MAP
 from saqc.core.translation import (
+    AnnotatedFloatScheme,
     DmpScheme,
     FloatScheme,
     PositionalScheme,
@@ -41,6 +42,7 @@ TRANSLATION_SCHEMES = {
     "float": FloatScheme,
     "dmp": DmpScheme,
     "positional": PositionalScheme,
+    "annotated-float": AnnotatedFloatScheme,
 }
 
 
@@ -118,13 +120,13 @@ class SaQC(FunctionsMixin):
         self._attrs = dict(value)
 
     @property
-    def data(self) -> MutableMapping[str, pd.Series]:
+    def data(self) -> DictOfSeries:
         data = self._data
         data.attrs = self._attrs.copy()
         return data
 
     @property
-    def flags(self) -> MutableMapping[str, pd.Series]:
+    def flags(self) -> DictOfSeries:
         flags = self._scheme.toExternal(self._flags, attrs=self._attrs)
         flags.attrs = self._attrs.copy()
         return flags
diff --git a/saqc/core/flags.py b/saqc/core/flags.py
index 1009f540d15fa8f1975d18978c4464ae0f79b1c4..e597e0ee184483abf593c815cd375897a29e0fe4 100644
--- a/saqc/core/flags.py
+++ b/saqc/core/flags.py
@@ -474,24 +474,6 @@ class Flags:
 
     # ----------------------------------------------------------------------
     # transformation and representation
-
-    def toDios(self) -> DictOfSeries:
-        """
-        Transform the flags container to a ``DictOfSeries``.
-
-        .. deprecated:: 2.4
-           use `saqc.DictOfSeries(obj)` instead.
-
-        Returns
-        -------
-        DictOfSeries
-        """
-        warnings.warn(
-            "toDios is deprecated, use `saqc.DictOfSeries(obj)` instead.",
-            category=DeprecationWarning,
-        )
-        return DictOfSeries(self).copy()
-
     def toFrame(self) -> pd.DataFrame:
         """
         Transform the flags container to a ``pd.DataFrame``.
diff --git a/saqc/core/frame.py b/saqc/core/frame.py
index acfe28ad3063f55ce5719f68cc6498ee40546a65..cd0346bd292b90f385b892720c14f5753522fe65 100644
--- a/saqc/core/frame.py
+++ b/saqc/core/frame.py
@@ -11,8 +11,8 @@ from fancy_collections import DictOfPandas
 
 
 class DictOfSeries(DictOfPandas):
-    _key_types = (str, int, float)
-    _value_types = (pd.Series,)
+    _key_types = (str, int, float, tuple)
+    _value_types = (pd.Series, pd.DataFrame)
 
     def __init__(self, *args, **kwargs):
         # data is needed to prevent an
@@ -35,42 +35,6 @@ class DictOfSeries(DictOfPandas):
     def attrs(self, value: Mapping[Hashable, Any]) -> None:
         self._attrs = dict(value)
 
-    def flatten(self, promote_index: bool = False) -> DictOfSeries:
-        """
-        Return a copy.
-        DictOfPandas compatibility
-        """
-        return self.copy()
-
-    def index_of(self, method="union") -> pd.Index:
-        """Return an index with indices from all columns.
-
-        .. deprecated:: 2.4
-           use `DictOfSeries.union_index()` and `DictOfSeries.shared_index()` instead.
-
-        Parameters
-        ----------
-        method : string, default 'all'
-            * 'union' : return the union of all indices from all columns
-            * 'shared' : return only indices that are present in every column
-            * 'all' : alias for 'union'
-            * 'intersection' : alias for 'shared'
-
-        See also
-        --------
-        DictOfSeries.to_pandas: convert a DictOfSeries to a pandas.DataFrame
-
-        Returns
-        -------
-        index: pd.Index
-            A duplicate-free index
-        """
-        if method in ["union", "all"]:
-            return self.union_index()
-        elif method in ["intersection", "shared"]:
-            return self.shared_index()
-        raise ValueError("method must be one of 'shared' or 'union'.")
-
     def astype(self, dtype: str | type) -> DictOfSeries:
         """
         Cast a DictOfSeries object to the specified ``dtype``
@@ -183,6 +147,6 @@ Missing data locations are filled with NaN's
 or is dropped if `how='inner'`
 
 >>> di.to_pandas(how='inner')   # doctest: +NORMALIZE_WHITESPACE
-      a     b     c
-1  11.0  22.0  33.0
+    a   b   c
+1  11  22  33
 """
diff --git a/saqc/core/translation/__init__.py b/saqc/core/translation/__init__.py
index fe2d85790a1f5d516c832527931c10ece45464a0..7549e43c599a7f838f710b03be734d8229707dda 100644
--- a/saqc/core/translation/__init__.py
+++ b/saqc/core/translation/__init__.py
@@ -5,11 +5,8 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 # -*- coding: utf-8 -*-
-from saqc.core.translation.basescheme import (
-    FloatScheme,
-    MappingScheme,
-    TranslationScheme,
-)
+from saqc.core.translation.basescheme import MappingScheme, TranslationScheme
 from saqc.core.translation.dmpscheme import DmpScheme
+from saqc.core.translation.floatscheme import AnnotatedFloatScheme, FloatScheme
 from saqc.core.translation.positionalscheme import PositionalScheme
 from saqc.core.translation.simplescheme import SimpleScheme
diff --git a/saqc/core/translation/basescheme.py b/saqc/core/translation/basescheme.py
index 66f9cb8db159faa7f4eccd46e78510d9e7a6a1ea..56bfa4cb286276791382f121684cb9ca43be158d 100644
--- a/saqc/core/translation/basescheme.py
+++ b/saqc/core/translation/basescheme.py
@@ -215,31 +215,3 @@ class MappingScheme(TranslationScheme):
         out = self._translate(flags, self._backward)
         out.attrs = attrs or {}
         return out
-
-
-class FloatScheme(TranslationScheme):
-    """
-    Acts as the default Translator, provides a changeable subset of the
-    internal float flags
-    """
-
-    DFILTER_DEFAULT: float = FILTER_ALL
-
-    def __call__(self, flag: float | int) -> float:
-        try:
-            return float(flag)
-        except (TypeError, ValueError, OverflowError):
-            raise ValueError(f"invalid flag, expected a numerical value, got: {flag}")
-
-    def toInternal(self, flags: pd.DataFrame | DictOfSeries) -> Flags:
-        try:
-            return Flags(flags.astype(float))
-        except (TypeError, ValueError, OverflowError):
-            raise ValueError(
-                f"invalid flag(s), expected a collection of numerical values, got: {flags}"
-            )
-
-    def toExternal(self, flags: Flags, attrs: dict | None = None) -> DictOfSeries:
-        out = DictOfSeries(flags)
-        out.attrs = attrs or {}
-        return out
diff --git a/saqc/core/translation/dmpscheme.py b/saqc/core/translation/dmpscheme.py
index 6ecd324d017847fb13cc6add3d2c41a7264d7849..fb68713d525520bda5369a497139fe8bb13d2a39 100644
--- a/saqc/core/translation/dmpscheme.py
+++ b/saqc/core/translation/dmpscheme.py
@@ -16,6 +16,7 @@ import pandas as pd
 
 from saqc import BAD, DOUBTFUL, GOOD, UNFLAGGED
 from saqc.core import Flags, History
+from saqc.core.frame import DictOfSeries
 from saqc.core.translation.basescheme import BackwardMap, ForwardMap, MappingScheme
 from saqc.lib.tools import getUnionIndex
 
@@ -68,29 +69,32 @@ class DmpScheme(MappingScheme):
     def __init__(self):
         super().__init__(forward=self._FORWARD, backward=self._BACKWARD)
 
-    def toHistory(self, field_flags: pd.DataFrame):
+    def toHistory(self, flags: pd.DataFrame):
         """
         Translate a single field of external ``Flags`` to a ``History``
         """
-        field_history = History(field_flags.index)
+        history = History(flags.index)
+
+        for (flag, cause, comment), values in flags.groupby(_QUALITY_LABELS):
+            if cause == "" and comment == "":
+                continue
 
-        for (flag, cause, comment), values in field_flags.groupby(_QUALITY_LABELS):
             try:
                 comment = json.loads(comment)
             except json.decoder.JSONDecodeError:
                 comment = {"test": "unknown", "comment": ""}
 
-            histcol = pd.Series(np.nan, index=field_flags.index)
-            histcol.loc[values.index] = self(flag)
+            column = pd.Series(np.nan, index=flags.index)
+            column.loc[values.index] = self(flag)
 
             meta = {
                 "func": comment["test"],
                 "kwargs": {"comment": comment["comment"], "cause": cause},
             }
-            field_history.append(histcol, meta=meta)
-        return field_history
+            history.append(column, meta=meta)
+        return history
 
-    def toInternal(self, df: pd.DataFrame) -> Flags:
+    def toInternal(self, flags: pd.DataFrame | DictOfSeries) -> Flags:
         """
         Translate from 'external flags' to 'internal flags'
 
@@ -104,18 +108,26 @@ class DmpScheme(MappingScheme):
         Flags object
         """
 
-        self.validityCheck(df)
+        if isinstance(flags, pd.DataFrame):
+            flags = DictOfSeries(flags)
+
+        self.validityCheck(flags)
 
         data = {}
 
-        for field in df.columns.get_level_values(0).drop_duplicates():
-            data[str(field)] = self.toHistory(df[field])
+        if isinstance(flags, pd.DataFrame):
+            fields = flags.columns.get_level_values(0).drop_duplicates()
+        else:
+            fields = flags.keys()
+
+        for field in fields:
+            data[str(field)] = self.toHistory(flags[field])
 
         return Flags(data)
 
     def toExternal(
         self, flags: Flags, attrs: dict | None = None, **kwargs
-    ) -> pd.DataFrame:
+    ) -> DictOfSeries:
         """
         Translate from 'internal flags' to 'external flags'
 
@@ -132,10 +144,7 @@ class DmpScheme(MappingScheme):
         """
         tflags = super().toExternal(flags, attrs=attrs)
 
-        out = pd.DataFrame(
-            index=getUnionIndex(tflags),
-            columns=pd.MultiIndex.from_product([flags.columns, _QUALITY_LABELS]),
-        )
+        out = DictOfSeries()
 
         for field in tflags.columns:
             df = pd.DataFrame(
@@ -163,13 +172,13 @@ class DmpScheme(MappingScheme):
                 df.loc[valid, "quality_comment"] = comment
                 df.loc[valid, "quality_cause"] = cause
 
-            out[field] = df.reindex(out.index)
+            out[field] = df
 
         self.validityCheck(out)
         return out
 
     @classmethod
-    def validityCheck(cls, df: pd.DataFrame) -> None:
+    def validityCheck(cls, flags: DictOfSeries) -> None:
         """
         Check wether the given causes and comments are valid.
 
@@ -177,22 +186,16 @@ class DmpScheme(MappingScheme):
         ----------
         df : external flags
         """
+        for df in flags.values():
 
-        cols = df.columns
-        if not isinstance(cols, pd.MultiIndex):
-            raise TypeError("DMP-Flags need multi-index columns")
-
-        if not cols.get_level_values(1).isin(_QUALITY_LABELS).all(axis=None):
-            raise TypeError(
-                f"DMP-Flags expect the labels {list(_QUALITY_LABELS)} in the secondary level"
-            )
+            if not df.columns.isin(_QUALITY_LABELS).all(axis=None):
+                raise TypeError(
+                    f"DMP-Flags expect the labels {list(_QUALITY_LABELS)} in the secondary level"
+                )
 
-        for field in df.columns.get_level_values(0):
-            # we might have NaN injected by DictOfSeries -> DataFrame conversions
-            field_df = df[field].dropna(how="all", axis="index")
-            flags = field_df["quality_flag"]
-            causes = field_df["quality_cause"]
-            comments = field_df["quality_comment"]
+            flags = df["quality_flag"]
+            causes = df["quality_cause"]
+            comments = df["quality_comment"]
 
             if not flags.isin(cls._FORWARD.keys()).all(axis=None):
                 raise ValueError(
diff --git a/saqc/core/translation/floatscheme.py b/saqc/core/translation/floatscheme.py
new file mode 100644
index 0000000000000000000000000000000000000000..55b19b6a038d198b936aead8cd72298a7a5abd30
--- /dev/null
+++ b/saqc/core/translation/floatscheme.py
@@ -0,0 +1,86 @@
+#! /usr/bin/env python
+
+# SPDX-FileCopyrightText: 2021 Helmholtz-Zentrum für Umweltforschung GmbH - UFZ
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+import numpy as np
+import pandas as pd
+
+from saqc.constants import FILTER_ALL, UNFLAGGED
+from saqc.core.flags import Flags
+from saqc.core.frame import DictOfSeries
+from saqc.core.history import History
+from saqc.core.translation.basescheme import TranslationScheme
+
+
+class FloatScheme(TranslationScheme):
+    """
+    Acts as the default Translator, provides a changeable subset of the
+    internal float flags
+    """
+
+    DFILTER_DEFAULT: float = FILTER_ALL
+
+    def __call__(self, flag: float | int) -> float:
+        try:
+            return float(flag)
+        except (TypeError, ValueError, OverflowError):
+            raise ValueError(f"invalid flag, expected a numerical value, got: {flag}")
+
+    def toInternal(self, flags: pd.DataFrame | DictOfSeries) -> Flags:
+        try:
+            return Flags(flags.astype(float))
+        except (TypeError, ValueError, OverflowError):
+            raise ValueError(
+                f"invalid flag(s), expected a collection of numerical values, got: {flags}"
+            )
+
+    def toExternal(self, flags: Flags, attrs: dict | None = None) -> DictOfSeries:
+        out = DictOfSeries(flags)
+        out.attrs = attrs or {}
+        return out
+
+
+class AnnotatedFloatScheme(FloatScheme):
+    def toExternal(self, flags: Flags, attrs: dict | None = None) -> DictOfSeries:
+
+        tflags = super().toExternal(flags, attrs=attrs)
+
+        out = DictOfSeries()
+        for field in tflags.columns:
+            df = pd.DataFrame(
+                {
+                    "flag": tflags[field],
+                    "func": "",
+                    "parameters": "",
+                }
+            )
+
+            history = flags.history[field]
+
+            for col in history.columns:
+                valid = (history.hist[col] != UNFLAGGED) & history.hist[col].notna()
+                meta = history.meta[col]
+                df.loc[valid, "func"] = meta["func"]
+                df.loc[valid, "parameters"] = str(meta["kwargs"])
+                out[field] = df
+
+        return out
+
+    def toInternal(self, flags: DictOfSeries) -> Flags:
+        data = {}
+        for key, frame in flags.items():
+            history = History(index=frame.index)
+            for (flag, func, kwargs), values in frame.groupby(
+                ["flag", "func", "parameters"]
+            ):
+                column = pd.Series(np.nan, index=frame.index)
+                column.loc[values.index] = self(flag)
+                history.append(column, meta={"func": func, "kwargs": kwargs})
+            data[key] = history
+        return Flags(data)
diff --git a/saqc/core/translation/positionalscheme.py b/saqc/core/translation/positionalscheme.py
index 6e4bbe4833663d14dd3b50927d0ed2ab6c00d821..2d12adfa565b9b2c3449e688bf5e225688dbbcb6 100644
--- a/saqc/core/translation/positionalscheme.py
+++ b/saqc/core/translation/positionalscheme.py
@@ -12,6 +12,7 @@ import pandas as pd
 
 from saqc.constants import BAD, DOUBTFUL, GOOD, UNFLAGGED
 from saqc.core import Flags, History
+from saqc.core.frame import DictOfSeries
 from saqc.core.translation.basescheme import BackwardMap, ForwardMap, MappingScheme
 
 
@@ -68,12 +69,11 @@ class PositionalScheme(MappingScheme):
             fflags = super()._translate(df, self._FORWARD)
             field_history = History(field_flags.index)
             for _, s in fflags.items():
-                field_history.append(s)
+                field_history.append(s.replace(UNFLAGGED, np.nan))
             data[str(field)] = field_history
-
         return Flags(data)
 
-    def toExternal(self, flags: Flags, **kwargs) -> pd.DataFrame:
+    def toExternal(self, flags: Flags, **kwargs) -> DictOfSeries:
         """
         Translate from 'internal flags' to 'external flags'
 
@@ -84,9 +84,9 @@ class PositionalScheme(MappingScheme):
 
         Returns
         -------
-        pd.DataFrame
+        DictOfSeries
         """
-        out = {}
+        out = DictOfSeries()
         for field in flags.columns:
             thist = flags.history[field].hist.replace(self._BACKWARD).astype(float)
             # concatenate the single flag values
@@ -95,6 +95,6 @@ class PositionalScheme(MappingScheme):
             bases = 10 ** np.arange(ncols - 1, -1, -1)
 
             tflags = init + (thist * bases).sum(axis=1)
-            out[field] = tflags
+            out[field] = tflags.fillna(-9999).astype(int)
 
-        return pd.DataFrame(out).fillna(-9999).astype(int)
+        return out
diff --git a/saqc/funcs/drift.py b/saqc/funcs/drift.py
index 4ebb6580e2659c37739c218488ba2245395aace1..9f9765c6684cbb36dac6efc477e5345677f18ffd 100644
--- a/saqc/funcs/drift.py
+++ b/saqc/funcs/drift.py
@@ -362,7 +362,7 @@ class DriftMixin:
         for k, group in drift_grouper:
             data_series = group[field]
             data_fit, data_shiftTarget = _driftFit(
-                data_series, shift_targets.loc[k, :][0], cal_range, model
+                data_series, shift_targets.loc[k, :].iloc[0], cal_range, model
             )
             data_fit = pd.Series(data_fit, index=group.index)
             data_shiftTarget = pd.Series(data_shiftTarget, index=group.index)
diff --git a/saqc/funcs/flagtools.py b/saqc/funcs/flagtools.py
index 19862377fd47e4e8e11ebd534450bd3b39413717..d7ea5015a993c589355ae2c520a69ba4af2943fb 100644
--- a/saqc/funcs/flagtools.py
+++ b/saqc/funcs/flagtools.py
@@ -299,7 +299,7 @@ class FlagtoolsMixin:
            dtype: bool
         """
         warnings.warn(
-            "`flagManual` is deprecated and will be removed in version 2.9 of saqc. "
+            "`flagManual` is deprecated and will be removed in version 2.8 of saqc. "
             "Please use `setFlags` for similar functionality.",
             DeprecationWarning,
         )
@@ -368,7 +368,7 @@ class FlagtoolsMixin:
             raise ValueError(method)
 
         mask = mdata == mflag
-        mask = mask.reindex(dat.index).fillna(False)
+        mask = mask.reindex(dat.index, fill_value=False)  # .fillna(False)
 
         self._flags[mask, field] = flag
         return self
@@ -631,7 +631,12 @@ class FlagtoolsMixin:
         **kwargs,
     ) -> "SaQC":
         """
-        Flag all values, if all the given ``field`` values are already flagged.
+        Logical AND operation for Flags.
+
+        Flag the variable(s) `field` at every period, at which `field` in all of the saqc objects in
+        `group` is flagged.
+
+        See Examples section for examples.
 
         Parameters
         ----------
@@ -639,6 +644,64 @@ class FlagtoolsMixin:
             A collection of ``SaQC`` objects. Flag checks are performed on all ``SaQC`` objects
             based on the variables specified in ``field``. Whenever all monitored variables
             are flagged, the associated timestamps will receive a flag.
+
+        Examples
+        --------
+        Flag data, if the values are above a certain threshold (determined by :py:meth:`~saqc.SaQC.flagRange`) AND if the values are
+        constant for 3 periods (determined by :py:meth:`~saqc.SaQC.flagConstants`)
+
+        .. doctest:: andGroupExample
+
+           >>> dat = pd.Series([1,0,0,0,1,2,3,4,5,5,5,4], name='data', index=pd.date_range('2000', freq='10min', periods=12))
+           >>> qc = saqc.SaQC(dat)
+           >>> qc = qc.andGroup('data', group=[qc.flagRange('data', max=4), qc.flagConstants('data', thresh=0, window=3)])
+           >>> qc.flags['data']
+           2000-01-01 00:00:00     -inf
+           2000-01-01 00:10:00     -inf
+           2000-01-01 00:20:00     -inf
+           2000-01-01 00:30:00     -inf
+           2000-01-01 00:40:00     -inf
+           2000-01-01 00:50:00     -inf
+           2000-01-01 01:00:00     -inf
+           2000-01-01 01:10:00     -inf
+           2000-01-01 01:20:00    255.0
+           2000-01-01 01:30:00    255.0
+           2000-01-01 01:40:00    255.0
+           2000-01-01 01:50:00     -inf
+           Freq: 10min, dtype: float64
+
+        Masking data, so that a test result only gets assigned during daytime (between 6 and 18 o'clock for example).
+        The daytime condition is generated via :py:meth:`~saqc.SaQC.flagGeneric`:
+
+        .. doctest:: andGroupExample
+
+           >>> from saqc.lib.tools import periodicMask
+           >>> mask_func = lambda x: ~periodicMask(x.index, '06:00:00', '18:00:00', True)
+           >>> dat = pd.Series(range(100), name='data', index=pd.date_range('2000', freq='4h', periods=100))
+           >>> qc = saqc.SaQC(dat)
+           >>> qc = qc.andGroup('data', group=[qc.flagRange('data', max=5), qc.flagGeneric('data', func=mask_func)])
+           >>> qc.flags['data'].head(20)
+           2000-01-01 00:00:00     -inf
+           2000-01-01 04:00:00     -inf
+           2000-01-01 08:00:00     -inf
+           2000-01-01 12:00:00     -inf
+           2000-01-01 16:00:00     -inf
+           2000-01-01 20:00:00     -inf
+           2000-01-02 00:00:00     -inf
+           2000-01-02 04:00:00     -inf
+           2000-01-02 08:00:00    255.0
+           2000-01-02 12:00:00    255.0
+           2000-01-02 16:00:00    255.0
+           2000-01-02 20:00:00     -inf
+           2000-01-03 00:00:00     -inf
+           2000-01-03 04:00:00     -inf
+           2000-01-03 08:00:00    255.0
+           2000-01-03 12:00:00    255.0
+           2000-01-03 16:00:00    255.0
+           2000-01-03 20:00:00     -inf
+           2000-01-04 00:00:00     -inf
+           2000-01-04 04:00:00     -inf
+           Freq: 4h, dtype: float64
         """
         return _groupOperation(
             saqc=self,
@@ -666,7 +729,12 @@ class FlagtoolsMixin:
         **kwargs,
     ) -> "SaQC":
         """
-        Flag all values, if at least one of the given ``field`` values is already flagged.
+        Logical OR operation for Flags.
+
+        Flag the variable(s) `field` at every period, at which `field` is flagged in at least one of the saqc objects
+        in `group`.
+
+        See Examples section for examples.
 
         Parameters
         ----------
@@ -674,6 +742,32 @@ class FlagtoolsMixin:
             A collection of ``SaQC`` objects. Flag checks are performed on all ``SaQC`` objects
             based on the variables specified in :py:attr:`field`. Whenever any of monitored variables
             is flagged, the associated timestamps will receive a flag.
+
+        Examples
+        --------
+        Flag data, if the values are above a certain threshold (determined by :py:meth:`~saqc.SaQC.flagRange`) OR if the values are
+        constant for 3 periods (determined by :py:meth:`~saqc.SaQC.flagConstants`)
+
+        .. doctest:: orGroupExample
+
+           >>> dat = pd.Series([1,0,0,0,0,2,3,4,5,5,7,8], name='data', index=pd.date_range('2000', freq='10min', periods=12))
+           >>> qc = saqc.SaQC(dat)
+           >>> qc = qc.orGroup('data', group=[qc.flagRange('data', max=5), qc.flagConstants('data', thresh=0, window=3)])
+           >>> qc.flags['data']
+           2000-01-01 00:00:00     -inf
+           2000-01-01 00:10:00    255.0
+           2000-01-01 00:20:00    255.0
+           2000-01-01 00:30:00    255.0
+           2000-01-01 00:40:00    255.0
+           2000-01-01 00:50:00     -inf
+           2000-01-01 01:00:00     -inf
+           2000-01-01 01:10:00     -inf
+           2000-01-01 01:20:00     -inf
+           2000-01-01 01:30:00     -inf
+           2000-01-01 01:40:00    255.0
+           2000-01-01 01:50:00    255.0
+           Freq: 10min, dtype: float64
+
         """
         return _groupOperation(
             saqc=self,
diff --git a/saqc/funcs/interpolation.py b/saqc/funcs/interpolation.py
index 1e7ed3add6b85f234fb3ceed5ef5a8b569a5b373..481c87d725d55acad95ca63b91413453953fc31a 100644
--- a/saqc/funcs/interpolation.py
+++ b/saqc/funcs/interpolation.py
@@ -34,25 +34,6 @@ if TYPE_CHECKING:
     from saqc import SaQC
 
 
-# TODO: remove, when `interpolateIndex` and `interpolateInvalid are removed`
-INTERPOLATION_METHODS = Literal[
-    "linear",
-    "time",
-    "nearest",
-    "zero",
-    "slinear",
-    "quadratic",
-    "cubic",
-    "spline",
-    "barycentric",
-    "polynomial",
-    "krogh",
-    "piecewise_polynomial",
-    "spline",
-    "pchip",
-    "akima",
-]
-
 DATA_REINDEXER = {"fshift": "last", "bshift": "first", "nshift": "first"}
 
 
@@ -176,184 +157,6 @@ class InterpolationMixin:
 
         return self
 
-    @register(
-        mask=["field"],
-        demask=[],
-        squeeze=[],  # func handles history by itself
-    )
-    def interpolate(
-        self: "SaQC",
-        field: str,
-        method: INTERPOLATION_METHODS = "time",
-        order: int = 2,
-        limit: int | str | None = None,
-        extrapolate: Literal["forward", "backward", "both"] | None = None,
-        flag: float = UNFLAGGED,
-        **kwargs,
-    ) -> "SaQC":
-        """
-        Fill NaN and flagged values using an interpolation method.
-
-        .. deprecated:: 2.4.0
-           Use :py:meth:`~saqc.SaQC.align` instead.
-
-        Parameters
-        ----------
-        method :
-            Interpolation technique to use. One of:
-
-            * ‘linear’: Ignore the index and treat the values as equally spaced.
-            * ‘time’: Works on daily and higher resolution data to interpolate given length of interval.
-            * ‘index’, ‘values’: Use the actual numerical values of the index.
-            * ‘pad’: Fill in NaNs using existing values.
-            * ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘spline’, ‘barycentric’, ‘polynomial’:
-              Passed to scipy.interpolate.interp1d. These methods use the numerical values of the index.
-              Both ‘polynomial’ and ‘spline’ require that you also specify an order (int), e.g.
-              ``qc.interpolate(method='polynomial', order=5)``.
-            * ‘krogh’, ‘spline’, ‘pchip’, ‘akima’, ‘cubicspline’:
-              Wrappers around the SciPy interpolation methods of similar names.
-            * ‘from_derivatives’: Refers to scipy.interpolate.BPoly.from_derivatives
-
-        order :
-            Order of the interpolation method, ignored if not supported
-            by the chosen ``method``
-
-        limit :
-            Maximum number of missing values to interpolate. Only gaps
-            smaller than ``limit`` will be filled. The gap size can be
-            given as a number of values (integer) or a temporal extensions
-            (offset string). With ``None``, all missing values will be
-            interpolated.
-
-        extrapolate :
-            Use parameter to perform extrapolation instead of interpolation
-            onto the trailing and/or leading chunks of NaN values in data series.
-
-            * 'None' (default) - perform interpolation
-            * 'forward'/'backward' - perform forward/backward extrapolation
-            * 'both' - perform forward and backward extrapolation
-
-        Examples
-        --------
-        See some examples of the keyword interplay below:
-
-        Lets generate some dummy data:
-
-        .. doctest:: interpolate
-
-           >>> data = pd.DataFrame({'data':np.array([np.nan, 0, np.nan, np.nan, np.nan, 4, 5, np.nan, np.nan, 8, 9, np.nan, np.nan])}, index=pd.date_range('2000',freq='1H', periods=13))
-           >>> data
-                                data
-           2000-01-01 00:00:00   NaN
-           2000-01-01 01:00:00   0.0
-           2000-01-01 02:00:00   NaN
-           2000-01-01 03:00:00   NaN
-           2000-01-01 04:00:00   NaN
-           2000-01-01 05:00:00   4.0
-           2000-01-01 06:00:00   5.0
-           2000-01-01 07:00:00   NaN
-           2000-01-01 08:00:00   NaN
-           2000-01-01 09:00:00   8.0
-           2000-01-01 10:00:00   9.0
-           2000-01-01 11:00:00   NaN
-           2000-01-01 12:00:00   NaN
-
-        Use :py:meth:`~saqc.SaQC.interpolate` to do linear interpolation
-        of up to 2 consecutive missing values:
-
-        .. doctest:: interpolate
-
-           >>> qc = saqc.SaQC(data)
-           >>> qc = qc.interpolate("data", limit=3, method='time')
-           >>> qc.data # doctest:+NORMALIZE_WHITESPACE
-                               data |
-           ======================== |
-           2000-01-01 00:00:00  NaN |
-           2000-01-01 01:00:00  0.0 |
-           2000-01-01 02:00:00  NaN |
-           2000-01-01 03:00:00  NaN |
-           2000-01-01 04:00:00  NaN |
-           2000-01-01 05:00:00  4.0 |
-           2000-01-01 06:00:00  5.0 |
-           2000-01-01 07:00:00  6.0 |
-           2000-01-01 08:00:00  7.0 |
-           2000-01-01 09:00:00  8.0 |
-           2000-01-01 10:00:00  9.0 |
-           2000-01-01 11:00:00  NaN |
-           2000-01-01 12:00:00  NaN |
-           <BLANKLINE>
-
-
-        Use :py:meth:`~saqc.SaQC.interpolate` to do linear extrapolaiton
-        of up to 1 consecutive missing values:
-
-        .. doctest:: interpolate
-
-           >>> qc = saqc.SaQC(data)
-           >>> qc = qc.interpolate("data", limit=2, method='time', extrapolate='both')
-           >>> qc.data # doctest:+NORMALIZE_WHITESPACE
-                               data |
-           ======================== |
-           2000-01-01 00:00:00  0.0 |
-           2000-01-01 01:00:00  0.0 |
-           2000-01-01 02:00:00  NaN |
-           2000-01-01 03:00:00  NaN |
-           2000-01-01 04:00:00  NaN |
-           2000-01-01 05:00:00  4.0 |
-           2000-01-01 06:00:00  5.0 |
-           2000-01-01 07:00:00  NaN |
-           2000-01-01 08:00:00  NaN |
-           2000-01-01 09:00:00  8.0 |
-           2000-01-01 10:00:00  9.0 |
-           2000-01-01 11:00:00  NaN |
-           2000-01-01 12:00:00  NaN |
-           <BLANKLINE>
-        """
-        if limit is not None:
-            validateWindow(limit, "limit")
-
-        validateValueBounds(order, "order", left=0, strict_int=True)
-        validateChoice(
-            extrapolate, "extrapolate", ["forward", "backward", "both", None]
-        )
-
-        if "freq" in kwargs:
-            # the old interpolate version
-            warnings.warn(
-                f"The method `interpolate` is deprecated and will be removed "
-                f"in version 2.7 of saqc. To achieve the same behaviour "
-                f"please use: `qc.align(field={field}, freq={kwargs['freq']}, "
-                f"method={method}, order={order}, flag={flag})`",
-                DeprecationWarning,
-            )
-            return self.align(
-                field=field,
-                freq=kwargs.pop("freq", method),
-                method=method,
-                order=order,
-                flag=flag,
-                **kwargs,
-            )
-
-        inter_data = interpolateNANs(
-            self._data[field],
-            method,
-            order=order,
-            gap_limit=limit,
-            extrapolate=extrapolate,
-        )
-
-        interpolated = self._data[field].isna() & inter_data.notna()
-        self._data[field] = inter_data
-        new_col = pd.Series(np.nan, index=self._flags[field].index)
-        new_col.loc[interpolated] = np.nan if flag is None else flag
-
-        # todo kwargs must have all passed args except data,field,flags
-        self._flags.history[field].append(
-            new_col, {"func": "interpolateInvalid", "args": (), "kwargs": kwargs}
-        )
-        return self
-
     @register(mask=["field"], demask=[], squeeze=[])
     def align(
         self: "SaQC",
@@ -436,131 +239,6 @@ class InterpolationMixin:
         )
         return self
 
-    # ============================================================
-    ### Deprecated functions
-    # ============================================================
-
-    @register(mask=["field"], demask=[], squeeze=[])
-    def interpolateIndex(
-        self: "SaQC",
-        field: str,
-        freq: str,
-        method: INTERPOLATION_METHODS,
-        order: int = 2,
-        limit: int | None = 2,
-        extrapolate: Literal["forward", "backward", "both"] = None,
-        **kwargs,
-    ) -> "SaQC":
-        """
-        Function to interpolate the data at regular (equidistant)
-        timestamps also known as or grid points.
-
-            .. deprecated:: 2.4.0
-               Use :py:meth:`~saqc.SaQC.align` instead.
-
-        Parameters
-        ----------
-        freq :
-            An Offset String, interpreted as the frequency of
-            the grid you want to interpolate your data to.
-
-        method :
-            The interpolation method you want to apply.
-
-        order :
-            If your selected interpolation method can be performed at
-            different 'orders' - here you pass the desired order.
-
-        limit :
-            Upper limit of missing index values (with respect to ``freq``)
-            to fill. The limit can either be expressed as the number of
-            consecutive missing values (integer) or temporal extension
-            of the gaps to be filled (Offset String). If ``None`` is passed,
-            no limit is set.
-
-        extrapolate :
-            Use parameter to perform extrapolation instead of interpolation
-            onto the trailing and/or leading chunks of NaN values in data
-            series.
-
-            * ``None`` (default) - perform interpolation
-            * ``'forward'``/``'backward'`` - perform forward/backward extrapolation
-            * ``'both'`` - perform forward and backward extrapolation
-        """
-        call = (
-            f'qc.align(field="{field}", freq="{freq}", method="{method}", '
-            f'order={order}, extrapolate="{extrapolate}")'
-        )
-        if limit != 2:
-            call = (
-                f'qc.interpolate(field="{field}", method="{method}", '
-                f'order="{order}", limit="{limit}", extrapolate="{extrapolate}")'
-            )
-        warnings.warn(
-            f"The method interpolateIndex is deprectated and will be removed with SaQC==3.0. Use `{call}` instead",
-            DeprecationWarning,
-        )
-
-        # HINT: checking is delegated to called functions
-
-        out = self.align(
-            field=field,
-            freq=freq,
-            method=method,
-            order=order,
-            extrapolate=extrapolate,
-            **kwargs,
-        )
-        if limit != 2:
-            out = out.interpolate(
-                field=field,
-                freq=freq,
-                method=method,
-                order=order,
-                limit=limit,
-                extrapolate=extrapolate,
-                **kwargs,
-            )
-        return out
-
-    @register(
-        mask=["field"],
-        demask=["field"],
-        squeeze=[],  # func handles history by itself
-    )
-    def interpolateInvalid(
-        self: "SaQC",
-        field: str,
-        method: INTERPOLATION_METHODS,
-        order: int = 2,
-        limit: int | None = None,
-        extrapolate: Literal["forward", "backward", "both"] | None = None,
-        flag: float = UNFLAGGED,
-        **kwargs,
-    ) -> "SaQC":
-        """
-        .. deprecated:: 2.4.0
-           Use :py:meth:`~saqc.SaQC.interpolate` instead.
-        """
-        warnings.warn(
-            "The method `intepolateInvalid` is deprecated and will be removed "
-            "with version 2.7 of saqc. To achieve the same behavior, please "
-            f"use `qc.interpolate(field={field}, method={method}, order={order}, "
-            f"limit={limit}, extrapolate={extrapolate}, flag={flag})`",
-            DeprecationWarning,
-        )
-
-        # HINT: checking is delegated to called function
-        return self.interpolate(
-            field=field,
-            method=method,
-            order=order,
-            limit=limit,
-            extrapolate=extrapolate,
-            flag=flag,
-            **kwargs,
-        )
-
 
 def _shift(
     saqc: "SaQC",
diff --git a/saqc/funcs/noise.py b/saqc/funcs/noise.py
index 4afc99f338b9705a75a8f5b27f5969d4b1e8d234..7e18add8f6477fb6e57f4f3614331a402ead144d 100644
--- a/saqc/funcs/noise.py
+++ b/saqc/funcs/noise.py
@@ -54,6 +54,9 @@ class NoiseMixin:
         2. all (maybe overlapping) sub-chunks of the data chunks with length ``sub_window``,
            exceed ``sub_thresh`` with regard to ``func``
 
+            .. deprecated:: 2.5.0
+               Deprecated Function. See :py:meth:`~saqc.SaQC.flagByScatterLowpass`.
+
         Parameters
         ----------
         func :
diff --git a/saqc/funcs/outliers.py b/saqc/funcs/outliers.py
index 0ff5a8c52c21004f90313d42e17bdc4db12c47ad..5ea2e2ddaebd91074d58b427a05b762538825ef8 100644
--- a/saqc/funcs/outliers.py
+++ b/saqc/funcs/outliers.py
@@ -579,6 +579,9 @@ class OutliersMixin:
         hydrological data. See the notes section for an overview over the algorithms
         basic steps.
 
+            .. deprecated:: 2.6.0
+               Deprecated Function. Please refer to :py:meth:`~saqc.SaQC.flagByStray`.
+
         Parameters
         ----------
         trafo :
@@ -725,9 +728,9 @@ class OutliersMixin:
 
         warnings.warn(
             """
-                FlagMVScores is deprecated and will be removed with Version 2.8.
-                To replicate the function, transform the different fields involved 
-                via explicit applications of some transformations, than calculate the 
+                flagMVScores is deprecated and will be removed with Version 2.8.
+                To replicate the function, transform the different fields involved
+                via explicit applications of some transformations, then calculate the
                 kNN scores via `saqc.SaQC.assignkNScores` and finally assign the STRAY
                 algorithm via `saqc.SaQC.flagByStray`.
                 """,
@@ -861,10 +864,10 @@ class OutliersMixin:
 
         warnings.warn(
             "The function flagRaise is deprecated with no 100% exact replacement function."
-            "When looking for changes in the value course, the use of flagraise can be replicated and more easily aimed "
-            "for, via the method flagJump.\n"
+            "When looking for changes in the value course, the use of flagRaise can be replicated and more "
+            "easily aimed for, via the method flagJump.\n"
             "When looking for raises to outliers or plateaus, use one of: "
-            "flagZScore(outliers), flagUniLOF (outliers and small plateaus) or flagOffset(Plateaus)",
+            "flagZScore (outliers), flagUniLOF (outliers and small plateaus) or flagOffset (plateaus)",
             DeprecationWarning,
         )
 
@@ -971,6 +974,10 @@ class OutliersMixin:
 
         See references [1] for more details on the algorithm.
 
+            .. deprecated:: 2.6.0
+               Deprecated Function. Please refer to :py:meth:`~saqc.SaQC.flagZScore`.
+
+
         Note
         ----
         Data needs to be sampled at a regular equidistant time grid.
@@ -1082,7 +1089,7 @@ class OutliersMixin:
            import matplotlib
            import saqc
            import pandas as pd
-           data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1H', periods=12))
+           data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1h', periods=12))
 
 
         Lets generate a simple, regularly sampled timeseries with an hourly sampling rate and generate an
@@ -1091,7 +1098,7 @@ class OutliersMixin:
         .. doctest:: flagOffsetExample
 
            >>> import saqc
-           >>> data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1H', periods=12))
+           >>> data = pd.DataFrame({'data':np.array([5,5,8,16,17,7,4,4,4,1,1,4])}, index=pd.date_range('2000',freq='1h', periods=12))
            >>> data
                                 data
            2000-01-01 00:00:00     5
@@ -1115,7 +1122,7 @@ class OutliersMixin:
 
         .. doctest:: flagOffsetExample
 
-           >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6H')
+           >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6h')
            >>> qc.plot('data')  # doctest: +SKIP
 
         .. plot::
@@ -1123,7 +1130,7 @@ class OutliersMixin:
            :include-source: False
 
            >>> qc = saqc.SaQC(data)
-           >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6H')
+           >>> qc = qc.flagOffset("data", thresh=2, tolerance=1.5, window='6h')
            >>> qc.plot('data')  # doctest: +SKIP
 
         Note, that both, negative and positive jumps are considered starting points of negative or positive
@@ -1132,7 +1139,7 @@ class OutliersMixin:
 
         .. doctest:: flagOffsetExample
 
-           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6H')
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6h')
            >>> qc.plot('data') # doctest:+SKIP
 
         .. plot::
@@ -1140,7 +1147,7 @@ class OutliersMixin:
            :include-source: False
 
            >>> qc = saqc.SaQC(data)
-           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6H')
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=.9, tolerance=1.5, window='6h')
            >>> qc.plot('data')  # doctest: +SKIP
 
         Now, only positive jumps, that exceed a value gain of +90%* are considered starting points of offsets.
@@ -1151,7 +1158,7 @@ class OutliersMixin:
 
         .. doctest:: flagOffsetExample
 
-           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6H')
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6h')
            >>> qc.plot('data') # doctest:+SKIP
 
         .. plot::
@@ -1159,7 +1166,7 @@ class OutliersMixin:
            :include-source: False
 
            >>> qc = saqc.SaQC(data)
-           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6H')
+           >>> qc = qc.flagOffset("data", thresh=2, thresh_relative=-.5, tolerance=1.5, window='6h')
            >>> qc.plot('data')  # doctest: +SKIP
         """
         validateWindow(window)
@@ -1262,8 +1269,8 @@ class OutliersMixin:
         """
 
         warnings.warn(
-            "The function flagGrubbs is deprecated due to its inferior performance, with no 100% exact replacement function."
-            "When looking for outliers use one of: "
+            "The function flagByGrubbs is deprecated due to its inferior performance, with "
+            "no 100% exact replacement function. When looking for outliers use one of: "
             "flagZScore, flagUniLOF",
             DeprecationWarning,
         )
@@ -1325,85 +1332,6 @@ class OutliersMixin:
         self._flags[to_flag, field] = flag
         return self
 
-    @register(
-        mask=["field"],
-        demask=["field"],
-        squeeze=["field"],
-        multivariate=True,
-        handles_target=False,
-        docstring={"field": DOC_TEMPLATES["field"]},
-    )
-    def flagCrossStatistics(
-        self: "SaQC",
-        field: Sequence[str],
-        thresh: float,
-        method: Literal["modZscore", "Zscore"] = "modZscore",
-        flag: float = BAD,
-        **kwargs,
-    ) -> "SaQC":
-        """
-        Function checks for outliers relatively to the "horizontal" input data axis.
-
-        Notes
-        -----
-        The input variables dont necessarily have to be aligned. If the variables are unaligned, scoring
-        and flagging will only be performed on the subset of indices shared among all input variables.
-
-        For :py:attr:`field` :math:`=[f_1,f_2,...,f_N]` and timestamps :math:`[t_1,t_2,...,t_K]`,
-        the following steps are taken for outlier detection:
-
-        1. All timestamps :math:`t_i`, where there is one :math:`f_k`, with :math:`data[f_K]` having no
-           entry at :math:`t_i`, are excluded from the following process (inner join of the :math:`f_i` fields.)
-        2. for every :math:`0 <= i <= K`, the value
-           :math:`m_j = median(\\{data[f_1][t_i], data[f_2][t_i], ..., data[f_N][t_i]\\})` is calculated
-        3. for every :math:`0 <= i <= K`, the set
-           :math:`\\{data[f_1][t_i] - m_j, data[f_2][t_i] - m_j, ..., data[f_N][t_i] - m_j\\}` is tested for
-           outliers with the specified algorithm (:py:attr:`method` parameter).
-
-        Parameters
-        ----------
-        thresh :
-            Threshold which the outlier score of an value must exceed, for being flagged an outlier.
-
-        method :
-            Method used for calculating the outlier scores.
-
-            * ``'modZscore'``: Median based "sigma"-ish approach. See References [1].
-            * ``'Zscore'``: Score values by how many times the standard deviation they differ from the
-              median. See References [1].
-
-
-        References
-        ----------
-        [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm
-        """
-        new_method_string = {
-            "modZscore": "modified",
-            "Zscore": "standard",
-            np.mean: "standard",
-            np.median: "modified",
-        }
-        call = (
-            f"qc.flagZScore(field={field}, window=1, "
-            f"method={new_method_string[method]}, "
-            f"thresh={thresh}, axis=1)"
-        )
-        warnings.warn(
-            f"The method `flagCrossStatistics` is deprecated and will "
-            f"be removed in verion 2.7 of saqc. To achieve the same behavior "
-            f"use:`{call}`",
-            DeprecationWarning,
-        )
-
-        return self.flagZScore(
-            field=field,
-            window=1,
-            method=new_method_string[method],
-            thresh=thresh,
-            axis=1,
-            flag=flag,
-        )
-
     @register(
         mask=["field"],
         demask=["field"],
diff --git a/saqc/funcs/resampling.py b/saqc/funcs/resampling.py
index cb2840f767dc42e8d8e274efacf6913b840a861e..168d16fc0f41b803c281974e2b9d26b38185cb2d 100644
--- a/saqc/funcs/resampling.py
+++ b/saqc/funcs/resampling.py
@@ -321,90 +321,6 @@ class ResamplingMixin:
                     )
         return idx, idx_source, datcol
 
-    @register(mask=["field"], demask=[], squeeze=[])
-    def linear(
-        self: "SaQC",
-        field: str,
-        freq: str,
-        **kwargs,
-    ) -> "SaQC":
-        """
-        A method to "regularize" data by interpolating linearly the data
-        at regular timestamp.
-
-            .. deprecated:: 2.4.0
-               Use :py:meth:`~saqc.SaQC.align` with ``method="linear"``
-               instead.
-
-        A series of data is considered "regular", if it is sampled regularly
-        (= having uniform sampling rate). Interpolated values will get
-        assigned the worst flag within freq-range. Note, that the data
-        only gets interpolated at those (regular) timestamps, that have
-        a valid (existing and not-na) datapoint preceeding them and one
-        succeeding them within freq range. Regular timestamp that do
-        not suffice this condition get nan assigned AND The associated
-        flag will be of value ``UNFLAGGED``.
-
-        Parameters
-        ----------
-        freq :
-            An offset string. The frequency of the grid you want to interpolate
-            your data at.
-        """
-        warnings.warn(
-            f"""
-            The method `shift` is deprecated and will be removed with version 2.6 of saqc.
-            To achieve the same behavior please use:
-            `qc.align(field={field}, freq={freq}. method="linear")`
-            """,
-            DeprecationWarning,
-        )
-        reserved = ["method", "order", "limit", "downgrade"]
-        kwargs = filterKwargs(kwargs, reserved)
-        return self.interpolateIndex(field, freq, "time", **kwargs)
-
-    @register(mask=["field"], demask=[], squeeze=[])
-    def shift(
-        self: "SaQC",
-        field: str,
-        freq: str,
-        method: Literal["fshift", "bshift", "nshift"] = "nshift",
-        **kwargs,
-    ) -> "SaQC":
-        """
-        Shift data points and flags to a regular frequency grid.
-
-            .. deprecated:: 2.4.0
-               Use :py:meth:`~saqc.SaQC.align` instead.
-
-        Parameters
-        ----------
-        freq :
-            Offset string. Sampling rate of the target frequency.
-
-        method :
-            Method to propagate values:
-
-            * 'nshift' : shift grid points to the nearest time stamp in the range = +/- 0.5 * ``freq``
-            * 'bshift' : shift grid points to the first succeeding time stamp (if any)
-            * 'fshift' : shift grid points to the last preceeding time stamp (if any)
-        """
-        warnings.warn(
-            f"""
-            The method `shift` is deprecated and will be removed with version 2.6 of saqc.
-            To achieve the same behavior please use: `qc.align(field={field}, freq={freq}. method={method})`
-            """,
-            DeprecationWarning,
-        )
-        validateChoice(method, "method", ["fshift", "bshift", "nshift"])
-        return self.reindex(
-            field=field,
-            index=freq,
-            method=method,
-            data_aggregation=DATA_REINDEXER[method],
-            **kwargs,
-        )
-
     @register(mask=["field"], demask=[], squeeze=[])
     def resample(
         self: "SaQC",
@@ -984,7 +900,7 @@ class ResamplingMixin:
         if method.split("_")[0] == "inverse":
             warnings.warn(
                 f""" Referring to a method that would invert a method 'A` via 'inverse_A' is deprecated and will
-                be removed in a future release. Please use method={method.split('_')[-1]} together 
+                be removed in version 2.7. Please use method={method.split('_')[-1]} together
                 with invert=True.
                 """,
                 DeprecationWarning,
@@ -995,7 +911,7 @@ class ResamplingMixin:
         if method == "match":
             warnings.warn(
                 f"The method 'match' is deprecated and will be removed "
-                f"in version 2.8 of SaQC. Please use `SaQC.transferFlags(field={field}, "
+                f"in version 2.7 of SaQC. Please use `SaQC.transferFlags(field={field}, "
                 f"target={target}, squeeze={squeeze}, overwrite={override})` instead",
                 DeprecationWarning,
             )
diff --git a/saqc/funcs/rolling.py b/saqc/funcs/rolling.py
index de872d506ed8046ae356e00a52805eb026af9ddf..dfcdfd23cd747f59b6ccece0a89ffd2e847d2fd3 100644
--- a/saqc/funcs/rolling.py
+++ b/saqc/funcs/rolling.py
@@ -116,69 +116,6 @@ class RollingMixin:
                 )
         return self
 
-    @register(mask=["field"], demask=[], squeeze=[])
-    def roll(
-        self: "SaQC",
-        field: str,
-        window: Union[str, int],
-        func: Callable[[pd.Series], np.ndarray] = np.mean,
-        min_periods: int = 0,
-        center: bool = True,
-        **kwargs,
-    ) -> "SaQC":
-        """
-        Calculate a rolling-window function on the data.
-
-        .. deprecated:: 2.4.0
-           Use :py:meth:`~saqc.SaQC.rolling` instead.
-
-        Note, that the data gets assigned the worst flag present in the original data.
-
-        Parameters
-        ----------
-        window :
-            The size of the window you want to roll with. If an integer is passed, the size
-            refers to the number of periods for every fitting window. If an offset string
-            is passed, the size refers to the total temporal extension. For regularly
-            sampled timeseries, the period number will be casted down to an odd number if
-            ``center=True``.
-
-        func : default mean
-            Function to roll with.
-
-        min_periods :
-            The minimum number of periods to get a valid value
-
-        center :
-            If True, center the rolling window.
-
-        """
-        import warnings
-
-        warnings.warn(
-            """The function `roll` was renamed to `rolling` and will be removed with version 3.0 of saqc
-            Please use `SaQC.rolling` with the same arguments, instead
-            """,
-            DeprecationWarning,
-        )
-
-        validateFuncSelection(func, allow_operator_str=True)
-        validateWindow(window)
-        validateMinPeriods(min_periods)
-
-        # HINT: checking in  _roll
-        self._data, self._flags = _roll(
-            data=self._data,
-            field=field,
-            flags=self._flags,
-            window=window,
-            func=func,
-            min_periods=min_periods,
-            center=center,
-            **kwargs,
-        )
-        return self
-
 
 def _hroll(
     data: DictOfSeries,
diff --git a/saqc/funcs/scores.py b/saqc/funcs/scores.py
index f9e42f59a675f4f69dddace69dbe0d3a9ade4195..f72e2362f822d4d65d49f6d7332e5d83dd2cb483 100644
--- a/saqc/funcs/scores.py
+++ b/saqc/funcs/scores.py
@@ -521,9 +521,9 @@ class ScoresMixin:
         na_idx = na_bool_ser.index[na_bool_ser.values]
         # notna_bool = vals.notna()
         val_no = (~na_bool_ser).sum()
-        if 1 < val_no < n:
-            n = val_no
-        elif val_no <= 1:
+        if 2 < val_no <= n:
+            n = val_no - 2
+        elif val_no <= 2:
             return self
 
         d_var = d_var.drop(na_idx, axis=0).values
diff --git a/saqc/funcs/tools.py b/saqc/funcs/tools.py
index 54cff17af101e8b99eec64c232a16b3107e9441e..453d28d882350363bbae52dece9085d981f82f08 100644
--- a/saqc/funcs/tools.py
+++ b/saqc/funcs/tools.py
@@ -10,7 +10,7 @@ from __future__ import annotations
 import pickle
 import tkinter as tk
 import warnings
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING
 
 import matplotlib as mpl
 import matplotlib.pyplot as plt
@@ -24,7 +24,7 @@ from saqc.lib.checking import validateChoice
 from saqc.lib.docs import DOC_TEMPLATES
 from saqc.lib.plotting import makeFig
 from saqc.lib.selectionGUI import MplScroller, SelectionOverlay
-from saqc.lib.tools import periodicMask, toSequence
+from saqc.lib.tools import periodicMask
 
 if TYPE_CHECKING:
     from saqc import SaQC
@@ -138,7 +138,7 @@ class ToolsMixin:
             if not _TEST_MODE:
                 root.destroy()
         else:  # show figure if only overlay is used
-            plt.show(block=~_TEST_MODE)
+            plt.show(block=not _TEST_MODE)
             plt.rcParams["toolbar"] = "toolbar2"
 
         # disconnect mouse events when GUI is closed
@@ -329,7 +329,7 @@ class ToolsMixin:
         datcol_idx = self._data[field].index
 
         if mode == "periodic":
-            mask = periodicMask(datcol_idx, start, end, ~closed)
+            mask = periodicMask(datcol_idx, start, end, closed)
         elif mode == "selection_field":
             idx = self._data[selection_field].index.intersection(datcol_idx)
             mask = self._data[selection_field].loc[idx]
@@ -357,7 +357,7 @@ class ToolsMixin:
         mode: Literal["subplots", "oneplot"] | str = "oneplot",
         history: Literal["valid", "complete"] | list[str] | None = "valid",
         xscope: slice | str | None = None,
-        yscope: tuple | list[tuple] | dict = None,
+        yscope: tuple | list[tuple] | dict | None = None,
         store_kwargs: dict | None = None,
         ax: mpl.axes.Axes | None = None,
         ax_kwargs: dict | None = None,
@@ -475,36 +475,6 @@ class ToolsMixin:
         * Check/modify the module parameter `saqc.lib.plotting.SCATTER_KWARGS` to see/modify global marker defaults
         * Check/modify the module parameter `saqc.lib.plotting.PLOT_KWARGS` to see/modify global plot line defaults
         """
-        if history == "complete":
-            warnings.warn(
-                "Plotting with history='complete' is deprecated and will be removed in a future release (2.5)."
-                "To get access to an saqc variables complete flagging history and analyze or plot it in detail, use flags"
-                "history acces via `qc._flags.history[variable_name].hist` and a plotting library, such as pyplot.\n"
-                "Minimal Pseudo example, having a saqc.SaQC instance `qc`, holding a variable `'data1'`, "
-                "and having matplotlib.pyplot imported as `plt`:\n\n"
-                "plt.plot(data)\n"
-                "for f in qc._flags.history['data1'].hist \n"
-                "    markers = qc._flags.history['data1'].hist[f] > level \n"
-                "    markers=data[markers] \n"
-                "    plt.scatter(markers.index, markers.values) \n",
-                DeprecationWarning,
-            )
-
-        if "phaseplot" in kwargs:
-            warnings.warn(
-                'Parameter "phaseplot" is deprecated and will be removed in a future release (2.5). Assign to parameter "mode" instead. (plot(field, mode=phaseplot))',
-                DeprecationWarning,
-            )
-            mode = kwargs["phaseplot"]
-
-        if "cycleskip" in (ax_kwargs or {}):
-            warnings.warn(
-                'Passing "cycleskip" option with the "ax_kwargs" parameter is deprecated and will be removed in a future release (2.5). '
-                'The option now has to be passed with the "marker_kwargs" parameter',
-                DeprecationWarning,
-            )
-            marker_kwargs["cycleskip"] = ax_kwargs.pop("cycleskip")
-
         data, flags = self._data.copy(), self._flags.copy()
 
         level = kwargs.get("flag", UNFLAGGED)
diff --git a/saqc/parsing/visitor.py b/saqc/parsing/visitor.py
index 7224e85a0cd0a582df1c5ea0dc5575910d7b2f52..82c8abec2dd491a3c5ae41feecabd011922c4322 100644
--- a/saqc/parsing/visitor.py
+++ b/saqc/parsing/visitor.py
@@ -9,8 +9,6 @@
 import ast
 import importlib
 
-import numpy as np
-
 from saqc.core.register import FUNC_MAP
 from saqc.parsing.environ import ENVIRONMENT
 
@@ -28,13 +26,12 @@ class ConfigExpressionParser(ast.NodeVisitor):
     """
 
     SUPPORTED = (
-        ast.Str,
+        ast.Constant,
         ast.Expression,
         ast.UnaryOp,
         ast.BinOp,
         ast.BitOr,
         ast.BitAnd,
-        ast.Num,
         ast.Compare,
         ast.Add,
         ast.Sub,
@@ -86,10 +83,8 @@ class ConfigExpressionParser(ast.NodeVisitor):
 class ConfigFunctionParser(ast.NodeVisitor):
     SUPPORTED_NODES = (
         ast.Call,
-        ast.Num,
-        ast.Str,
+        ast.Constant,
         ast.keyword,
-        ast.NameConstant,
         ast.UnaryOp,
         ast.Name,
         ast.Load,
diff --git a/setup.py b/setup.py
index 92ad10d4874fcdc73d1be600a228c630d429215a..adffaf7168ace1a074c6eb990438f0425bda23f8 100644
--- a/setup.py
+++ b/setup.py
@@ -28,19 +28,12 @@ if v["dirty"]:
         f"The repository you build is dirty. Please commit changes first {v}."
     )
 
-if "dev" in v["version"] and name == "saqc":
-    raise ValueError(
-        f"An saqc release must have version in the format X.Y.Z, "
-        f"which requires a git tag on the same commit. Please set "
-        f"a tag, then build again. {v}"
-    )
-
 
 setup(
     name=name,
     version=versioneer.get_version(),  # keep this line as it is
     cmdclass=versioneer.get_cmdclass(),  # keep this line as it is
-    author="Bert Palm, David Schaefer, Florian Gransee, Peter Luenenschloss",
+    author="David Schaefer, Bert Palm, Peter Luenenschloss",
     author_email="david.schaefer@ufz.de",
     description="A timeseries data quality control and processing tool/framework",
     long_description=long_description,
@@ -50,9 +43,9 @@ setup(
     python_requires=">=3.9",
     install_requires=[
         "Click",
-        "dtw",
         "docstring_parser",
         "fancy-collections",
+        "fastdtw",
         "matplotlib>=3.4",
         "numpy",
         "outlier-utils",
diff --git a/tests/cli/test_integration.py b/tests/cli/test_integration.py
index b660fad88db06e6840bee0508bc4d86c0496c7a5..5f9d2b25bd395e769dc2a358d3698cc3a787a1eb 100644
--- a/tests/cli/test_integration.py
+++ b/tests/cli/test_integration.py
@@ -35,13 +35,13 @@ SIMPLE = [
 POSITIONAL = [
     ",Battery,Battery,SM1,SM1,SM2,SM2\n",
     ",data,flags,data,flags,data,flags\n",
-    "2016-04-01 00:00:00,nan,-9999,nan,-9999,29.3157,90000\n",
-    "2016-04-01 00:05:48,3573.0,9,32.685,90,nan,-9999\n",
-    "2016-04-01 00:15:00,nan,-9999,nan,-9999,29.3157,90000\n",
-    "2016-04-01 00:20:42,3572.0,9,32.7428,90,nan,-9999\n",
-    "2016-04-01 00:30:00,nan,-9999,nan,-9999,29.3679,90002\n",
-    "2016-04-01 00:35:37,3572.0,9,32.6186,90,nan,-9999\n",
-    "2016-04-01 00:45:00,nan,-9999,nan,-9999,29.3679,90000\n",
+    "2016-04-01 00:00:00,-9999,-9999,-9999.0,-9999,29.3157,90000\n",
+    "2016-04-01 00:05:48,3573,9,32.685,90,-9999.0,-9999\n",
+    "2016-04-01 00:15:00,-9999,-9999,-9999.0,-9999,29.3157,90000\n",
+    "2016-04-01 00:20:42,3572,9,32.7428,90,-9999.0,-9999\n",
+    "2016-04-01 00:30:00,-9999,-9999,-9999.0,-9999,29.3679,90002\n",
+    "2016-04-01 00:35:37,3572,9,32.6186,90,-9999.0,-9999\n",
+    "2016-04-01 00:45:00,-9999,-9999,-9999.0,-9999,29.3679,90000\n",
 ]
 
 DMP = [
diff --git a/tests/core/test_flags.py b/tests/core/test_flags.py
index c9628d49cd7f333e9fc7527dd05db48ddaf0cffc..9e67651880f5981349d70444049b0164085a1e77 100644
--- a/tests/core/test_flags.py
+++ b/tests/core/test_flags.py
@@ -295,15 +295,6 @@ def _validate_flags_equals_frame(flags, df):
         assert df[c].equals(flags[c])  # respects nan's
 
 
-@pytest.mark.parametrize("data", testdata)
-def test_to_dios(data: Union[pd.DataFrame, DictOfSeries, Dict[str, pd.Series]]):
-    flags = Flags(data)
-    with pytest.deprecated_call():
-        result = flags.toDios()
-        assert isinstance(result, DictOfSeries)
-        _validate_flags_equals_frame(flags, result)
-
-
 @pytest.mark.parametrize("data", testdata)
 def test_toFrame(data: Union[pd.DataFrame, DictOfSeries, Dict[str, pd.Series]]):
     flags = Flags(data)
diff --git a/tests/core/test_translator.py b/tests/core/test_translator.py
index f07e42f6e4d469a81c673f308a219b921496eab0..3d4ca37846a9c9b85225908a05456d2131ec9e33 100644
--- a/tests/core/test_translator.py
+++ b/tests/core/test_translator.py
@@ -13,9 +13,10 @@ import numpy as np
 import pandas as pd
 import pytest
 
-from saqc import BAD, DOUBTFUL, FILTER_NONE, UNFLAGGED, SaQC
-from saqc.core import Flags
+from saqc.constants import BAD, DOUBTFUL, FILTER_NONE, UNFLAGGED
+from saqc.core import Flags, SaQC
 from saqc.core.translation import DmpScheme, MappingScheme, PositionalScheme
+from saqc.core.translation.floatscheme import AnnotatedFloatScheme
 from tests.common import initData
 
 
@@ -93,38 +94,37 @@ def test_dmpTranslator():
 
     tflags = scheme.toExternal(flags)
 
-    assert set(tflags.columns.get_level_values(1)) == {
-        "quality_flag",
-        "quality_comment",
-        "quality_cause",
-    }
+    for df in tflags.values():
+        assert set(df.columns) == {
+            "quality_flag",
+            "quality_comment",
+            "quality_cause",
+        }
 
-    assert (tflags.loc[:, ("var1", "quality_flag")] == "DOUBTFUL").all(axis=None)
+    assert (tflags["var1"]["quality_flag"] == "DOUBTFUL").all(axis=None)
     assert (
-        tflags.loc[:, ("var1", "quality_comment")]
+        tflags["var1"]["quality_comment"]
         == '{"test": "flagBar", "comment": "I did it"}'
     ).all(axis=None)
 
-    assert (tflags.loc[:, ("var1", "quality_cause")] == "OTHER").all(axis=None)
+    assert (tflags["var1"]["quality_cause"] == "OTHER").all(axis=None)
 
-    assert (tflags.loc[:, ("var2", "quality_flag")] == "BAD").all(axis=None)
+    assert (tflags["var2"]["quality_flag"] == "BAD").all(axis=None)
     assert (
-        tflags.loc[:, ("var2", "quality_comment")]
-        == '{"test": "flagFoo", "comment": ""}'
+        tflags["var2"]["quality_comment"] == '{"test": "flagFoo", "comment": ""}'
     ).all(axis=None)
-    assert (tflags.loc[:, ("var2", "quality_cause")] == "BELOW_OR_ABOVE_MIN_MAX").all(
-        axis=None
-    )
+    assert (tflags["var2"]["quality_cause"] == "BELOW_OR_ABOVE_MIN_MAX").all(axis=None)
 
     assert (
-        tflags.loc[flags["var3"] == BAD, ("var3", "quality_comment")]
+        tflags["var3"].loc[flags["var3"] == BAD, "quality_comment"]
         == '{"test": "unknown", "comment": ""}'
     ).all(axis=None)
-    assert (tflags.loc[flags["var3"] == BAD, ("var3", "quality_cause")] == "OTHER").all(
+    assert (tflags["var3"].loc[flags["var3"] == BAD, "quality_cause"] == "OTHER").all(
+        axis=None
+    )
+    assert (tflags["var3"].loc[flags["var3"] == UNFLAGGED, "quality_cause"] == "").all(
         axis=None
     )
-    mask = flags["var3"] == UNFLAGGED
-    assert (tflags.loc[mask, ("var3", "quality_cause")] == "").all(axis=None)
 
 
 def test_positionalTranslator():
@@ -154,9 +154,10 @@ def test_positionalTranslatorIntegration():
 
     round_trip = scheme.toExternal(scheme.toInternal(flags))
 
-    assert (flags.values == round_trip.values).all()
-    assert (flags.index == round_trip.index).all()
     assert (flags.columns == round_trip.columns).all()
+    for col in flags.columns:
+        assert (flags[col] == round_trip[col]).all()
+        assert (flags[col].index == round_trip[col].index).all()
 
 
 def test_dmpTranslatorIntegration():
@@ -168,26 +169,28 @@ def test_dmpTranslatorIntegration():
     saqc = saqc.flagMissing(col).flagRange(col, min=3, max=10)
     flags = saqc.flags
 
-    qflags = flags.xs("quality_flag", axis="columns", level=1)
-    qfunc = flags.xs("quality_comment", axis="columns", level=1).map(
-        lambda v: json.loads(v)["test"] if v else ""
-    )
-    qcause = flags.xs("quality_cause", axis="columns", level=1)
+    qflags = pd.DataFrame({k: v["quality_flag"] for k, v in flags.items()})
+    qfunc = pd.DataFrame({k: v["quality_comment"] for k, v in flags.items()})
+    qcause = pd.DataFrame({k: v["quality_cause"] for k, v in flags.items()})
 
     assert qflags.isin(scheme._forward.keys()).all(axis=None)
-    assert qfunc.isin({"", "flagMissing", "flagRange"}).all(axis=None)
+    assert (
+        qfunc.map(lambda v: json.loads(v)["test"] if v else "")
+        .isin({"", "flagMissing", "flagRange"})
+        .all(axis=None)
+    )
     assert (qcause[qflags[col] == "BAD"] == "OTHER").all(axis=None)
 
     round_trip = scheme.toExternal(scheme.toInternal(flags))
 
-    assert round_trip.xs("quality_flag", axis="columns", level=1).equals(qflags)
-
-    assert round_trip.xs("quality_comment", axis="columns", level=1).equals(
-        flags.xs("quality_comment", axis="columns", level=1)
+    assert pd.DataFrame({k: v["quality_flag"] for k, v in round_trip.items()}).equals(
+        qflags
     )
-
-    assert round_trip.xs("quality_cause", axis="columns", level=1).equals(
-        flags.xs("quality_cause", axis="columns", level=1)
+    assert pd.DataFrame(
+        {k: v["quality_comment"] for k, v in round_trip.items()}
+    ).equals(qfunc)
+    assert pd.DataFrame({k: v["quality_cause"] for k, v in round_trip.items()}).equals(
+        qcause
     )
 
 
@@ -275,3 +278,23 @@ def test_positionalMulitcallsPreserveState():
         expected = tflags1[k].str.slice(start=1) * 2
         got = tflags2[k].str.slice(start=1)
         assert expected.equals(got)
+
+
+def test_annotatedFloatScheme():
+    data = initData(1)
+    col = data.columns[0]
+
+    scheme = AnnotatedFloatScheme()
+    saqc = SaQC(data=data, scheme=scheme)
+    saqc = saqc.setFlags(col, data=data[col].index[::4], flag=DOUBTFUL).flagRange(
+        col, min=3, max=10, flag=BAD
+    )
+    flags = saqc.flags
+
+    assert flags[col]["flag"].isin({DOUBTFUL, BAD, UNFLAGGED}).all(axis=None)
+    assert flags[col]["func"].isin({"", "setFlags", "flagRange"}).all(axis=None)
+
+    round_trip = scheme.toExternal(scheme.toInternal(flags))
+    assert tuple(round_trip.keys()) == tuple(flags.keys())
+    for key in flags.keys():
+        assert round_trip[key].equals(flags[key])
diff --git a/tests/funcs/test_proc_functions.py b/tests/funcs/test_proc_functions.py
index e83f77c6aa2e52da5c01664992585ace5ac876d4..df960969b062e7a76e2a53e938f0200843d62ebf 100644
--- a/tests/funcs/test_proc_functions.py
+++ b/tests/funcs/test_proc_functions.py
@@ -66,30 +66,6 @@ def test_rollingInterpolateMissing(course_5):
     assert qc.data[field][characteristics["missing"]].isna().all()
 
 
-def test_interpolate(course_5):
-    data, characteristics = course_5(periods=10, nan_slice=[5])
-    field = data.columns[0]
-    data = DictOfSeries(data)
-    flags = initFlagsLike(data)
-    qc = SaQC(data, flags)
-
-    qc_lin = qc.interpolate(field, method="linear")
-    qc_poly = qc.interpolate(field, method="polynomial")
-    assert qc_lin.data[field][characteristics["missing"]].notna().all()
-    assert qc_poly.data[field][characteristics["missing"]].notna().all()
-
-    data, characteristics = course_5(periods=10, nan_slice=[5, 6, 7])
-
-    qc = SaQC(data, flags)
-    qc_lin_1 = qc.interpolate(field, method="linear", limit=2)
-    qc_lin_2 = qc.interpolate(field, method="linear", limit=3)
-    qc_lin_3 = qc.interpolate(field, method="linear", limit=4)
-
-    assert qc_lin_1.data[field][characteristics["missing"]].isna().all()
-    assert qc_lin_2.data[field][characteristics["missing"]].isna().all()
-    assert qc_lin_3.data[field][characteristics["missing"]].notna().all()
-
-
 def test_transform(course_5):
     data, characteristics = course_5(periods=10, nan_slice=[5, 6])
     field = data.columns[0]
diff --git a/tests/funcs/test_tools.py b/tests/funcs/test_tools.py
index 231ab5faef59e1b9aa1111bd8bce4803acc3430d..78340fe0c3055be2b2da8d30ea989947945e3b31 100644
--- a/tests/funcs/test_tools.py
+++ b/tests/funcs/test_tools.py
@@ -35,8 +35,6 @@ def test_makeFig(tmp_path):
     d_saqc = d_saqc.plot(
         field="data", path=outfile, history="valid", yscope=[(-50, 1000)]
     )
-    with pytest.deprecated_call():
-        d_saqc = d_saqc.plot(field="data", path=outfile, history="complete")
 
     d_saqc = d_saqc.plot(
         field="data",