diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9296a9dd5c6de68d61665315ce9b758b8bbaf068..8cc3c307f46b6425fd937344d7091bbf781d2f95 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,12 +13,13 @@ SPDX-License-Identifier: GPL-3.0-or-later
 - Option to change the flagging scheme after initialization
 - `flagByClick`: manually assign flags using a graphical user interface
 - `SaQC`: support for selection, slicing and setting of items by use of subscription on SaQC objects (e.g. `qc[key]` and `qc[key] = value`).
-   Selection works with single keys, collections of keys and string slices (e.g. `qc["a":"f"]`).  Values can be SaQC objects, pd.Series, 
+   Selection works with single keys, collections of keys and string slices (e.g. `qc["a":"f"]`).  Values can be SaQC objects, pd.Series,
    Iterable of Series and dict-like with series values.
 - `transferFlags` is a multivariate function
 - `plot`: added `yscope` keyword
 - `setFlags`: function to replace `flagManual`
-- `flagUniLOF`: added defaultly applied correction to mitigate phenomenon of overflagging at relatively steep data value slopes. (parameter `slope_correct`). 
+- `flagUniLOF`: added defaultly applied correction to mitigate phenomenon of overflagging at relatively steep data value slopes. (parameter `slope_correct`).
+- `History`: added option to change aggregation behavior
 ### Changed
 ### Removed
 ### Fixed
@@ -28,7 +29,8 @@ SPDX-License-Identifier: GPL-3.0-or-later
 - `SaQC._construct` : was not working for inherit classes (used hardcoded `SaQC` to construct a new instance).
 ### Deprecated
 - `flagManual` in favor of `setFlags`
-
+- `flagRaise` with delegation to better replacements `flagZScore`, `flagUniLOF`, `flagJumps` or `flagOffset`
+- `flagByGrubbs` with delegation to better replacements `flagZScore`, `flagUniLOF`
 ## [2.5.0](https://git.ufz.de/rdm-software/saqc/-/tags/v2.4.1) - 2023-06-22
 [List of commits](https://git.ufz.de/rdm-software/saqc/-/compare/v2.4.1...v2.5.0)
 ### Added
diff --git a/docs/funcs/outlierDetection.rst b/docs/funcs/outlierDetection.rst
index d158e5236bf017a9ecda6767c2c4342bef8809e7..4598810f6631cf2d85cec04545279f78a66e27ce 100644
--- a/docs/funcs/outlierDetection.rst
+++ b/docs/funcs/outlierDetection.rst
@@ -16,7 +16,6 @@ Univariate Outlier Detection
    ~SaQC.flagByStray
    ~SaQC.flagMAD
    ~SaQC.flagOffset
-   ~SaQC.flagByGrubbs
    ~SaQC.flagRange
    ~SaQC.flagLOF
    ~SaQC.flagZScore
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 8c277e95aa5b807e6b859d04b6c5b40dd86dd4b1..17efb2028390e1bc655dd91ea1b553bf32d447e0 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -8,7 +8,7 @@ sphinx-automodapi==0.17.0
 sphinxcontrib-fulltoc==1.2.0
 sphinx-markdown-tables==0.0.17
 jupyter-sphinx==0.5.3
-sphinx_autodoc_typehints==2.0.0
+sphinx_autodoc_typehints==1.23.0
 sphinx-tabs==3.4.5
 sphinx-design==0.5.0
 pydata-sphinx-theme==0.15.2
diff --git a/requirements.txt b/requirements.txt
index c9553ab49916a6ba411ae6533e51b90a48285995..5d136f0697b436d4f5ba65d3053c951ea5102dbe 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,14 +3,14 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 Click==8.1.7
-docstring_parser==0.15
+docstring_parser==0.16
 dtw==1.4.0
 matplotlib==3.8.3
 numpy==1.26.4
 outlier-utils==0.0.5
-pyarrow==15.0.0
-pandas==2.1.4
+pyarrow==15.0.2
+pandas==2.2.1
 scikit-learn==1.4.1.post1
 scipy==1.12.0
-typing_extensions==4.5.0
+typing_extensions==4.6.1
 fancy-collections==0.2.1
diff --git a/saqc/core/flags.py b/saqc/core/flags.py
index c90d05b6e2d58c66871dab2389c0d61b2a472162..1009f540d15fa8f1975d18978c4464ae0f79b1c4 100644
--- a/saqc/core/flags.py
+++ b/saqc/core/flags.py
@@ -66,7 +66,7 @@ class Flags:
         once was set in this object, but in the most cases this is irrelevant. For simplicity one can safely assume,
         that this class works just stores the flag-series one sets.
 
-    See Also
+    See also
     --------
     initFlagsLike : create a Flags instance, with same dimensions as a reference object.
     History : class that actually store the flags
@@ -433,7 +433,7 @@ class Flags:
         history : History
             Accessor for the flags history
 
-        See Also
+        See also
         --------
         saqc.core.History : History storage class.
         """
diff --git a/saqc/core/history.py b/saqc/core/history.py
index 3391697c9b85326204623b82200702367647e01a..18b97c75daddb382ae0f63081999d3189eff1d76 100644
--- a/saqc/core/history.py
+++ b/saqc/core/history.py
@@ -15,6 +15,13 @@ from pandas.api.types import is_float_dtype
 
 from saqc import UNFLAGGED
 
+AGGREGATIONS = {
+    "last": lambda x: x.ffill(axis=1).iloc[:, -1],
+    "max": lambda x: x.max(axis=1),
+    "min": lambda x: x.min(axis=1),
+}
+AGGREGATION = "last"
+
 
 class History:
     """
@@ -38,7 +45,7 @@ class History:
     index: pd.Index
         A index that fit the flags to be insert.
 
-    See Also
+    See also
     --------
     createHistoryFromData: function to create History from existing data
     """
@@ -306,7 +313,7 @@ class History:
         if hist.empty:
             result = pd.Series(data=np.nan, index=self._hist.index, dtype=float)
         else:
-            result = hist.ffill(axis=1).iloc[:, -1]
+            result = AGGREGATIONS[AGGREGATION](hist)
         if not raw:
             result = result.fillna(UNFLAGGED)
         result.name = None
diff --git a/saqc/core/register.py b/saqc/core/register.py
index 5a7c12bc7f3cbf0690ba84002ddd771a97a30210..0ef647f277617482bd97b50f27510cf685b3b019 100644
--- a/saqc/core/register.py
+++ b/saqc/core/register.py
@@ -450,7 +450,7 @@ def flagging(**kwargs):
     For full control over masking, demasking and squeezing or to implement
     a multivariate function (multiple in- or outputs) use the `@register` decorator.
 
-    See Also
+    See also
     --------
         resister: generalization of of this function
     """
@@ -472,7 +472,7 @@ def processing(**kwargs):
     For full control over masking, demasking and squeezing or to implement
     a multivariate function (multiple in- or outputs) use the `@register` decorator.
 
-    See Also
+    See also
     --------
         resister: generalization of of this function
     """
diff --git a/saqc/core/translation/basescheme.py b/saqc/core/translation/basescheme.py
index c072907c8ddb477ccd8e112d23822d4414212192..66f9cb8db159faa7f4eccd46e78510d9e7a6a1ea 100644
--- a/saqc/core/translation/basescheme.py
+++ b/saqc/core/translation/basescheme.py
@@ -148,7 +148,8 @@ class MappingScheme(TranslationScheme):
         out = DictOfSeries()
         expected = pd.Index(trans_map.values())
         for field in flags.columns:
-            out[field] = flags[field].replace(trans_map)
+            with pd.option_context("future.no_silent_downcasting", True):
+                out[field] = flags[field].replace(trans_map).infer_objects()
             diff = pd.Index(out[field]).difference(expected)
             if not diff.empty:
                 raise ValueError(
diff --git a/saqc/funcs/breaks.py b/saqc/funcs/breaks.py
index b9e6ae22aa481155ec431327105dfb63c9096f3c..1b24f8d615873632137d94ab8d535ab50b22a52b 100644
--- a/saqc/funcs/breaks.py
+++ b/saqc/funcs/breaks.py
@@ -93,10 +93,9 @@ class BreaksMixin:
         timestamps :math:`t_k,t_{k+1},...,t_{k+n}`, is considered to be isolated, if:
 
         1. :math:`t_{k+1} - t_n <` `group_window`
-        2. None of the :math:`x_j` with :math:`0 < t_k - t_j <` `gap_window`,
-            is valid (preceeding gap).
-        3. None of the :math:`x_j` with :math:`0 < t_j - t_(k+n) <` `gap_window`,
-            is valid (succeding gap).
+        2. None of the :math:`x_j` with :math:`0 < t_k - t_j <` `gap_window`, is valid (preceding gap).
+        3. None of the :math:`x_j` with :math:`0 < t_j - t_(k+n) <` `gap_window`, is valid (succeeding gap).
+
         """
         validateWindow(gap_window, name="gap_window", allow_int=False)
         validateWindow(group_window, name="group_window", allow_int=False)
@@ -146,7 +145,7 @@ class BreaksMixin:
         Flag data where the mean of its values significantly changes (where the data "jumps" from one
         value level to another).
         Value changes are detected by comparing the mean for two adjacent rolling windows. Whenever
-        the difference between the mean in the two windows exceeds py:attr:`thresh`, the value between
+        the difference between the mean in the two windows exceeds :py:attr:`thresh` , the value between
         the windows is flagged.
 
         Parameters
@@ -160,10 +159,10 @@ class BreaksMixin:
             samples for a reliable mean calculation, but it should also not be arbitrarily big, since
             it also limits the density of jumps that can be detected.
             More precisely: Jumps that are not distanced to each other by more than three fourth (3/4)
-            of the selected py:attr:`window` size, will not be detected reliably.
+            of the selected :py:attr:`window` size, will not be detected reliably.
 
         min_periods :
-            The minimum number of observations in py:attr:`window` required to calculate a valid mean value.
+            The minimum number of observations in :py:attr:`window` required to calculate a valid mean value.
 
         Examples
         --------
diff --git a/saqc/funcs/drift.py b/saqc/funcs/drift.py
index 317bb6e380cfa8b5b82a16e2d3d12dfcc0327396..4ebb6580e2659c37739c218488ba2245395aace1 100644
--- a/saqc/funcs/drift.py
+++ b/saqc/funcs/drift.py
@@ -11,13 +11,12 @@ from __future__ import annotations
 import functools
 import inspect
 import warnings
-from typing import TYPE_CHECKING, Callable, Optional, Sequence, Tuple
+from typing import TYPE_CHECKING, Callable, Literal, Optional, Sequence, Tuple
 
 import numpy as np
 import pandas as pd
 from scipy.optimize import curve_fit
 from scipy.spatial.distance import pdist
-from typing_extensions import Literal
 
 from saqc import BAD
 from saqc.core import DictOfSeries, Flags, flagging, register
@@ -310,13 +309,16 @@ class DriftMixin:
 
         Linear drift modell (no free parameters).
 
+        .. doctest::
 
-        >>> Model = lambda t, origin, target: origin + t*target
+            >>> Model = lambda t, origin, target: origin + t*target
 
         exponential drift model (exponential raise!)
 
-        >>> expFunc = lambda t, a, b, c: a + b * (np.exp(c * x) - 1)
-        >>> Model = lambda t, p, origin, target: expFunc(t, (target - origin) / (np.exp(abs(c)) - 1), abs(c))
+        .. doctest::
+
+            >>> expFunc = lambda t, a, b, c: a + b * (np.exp(c * x) - 1)
+            >>> Model = lambda t, p, origin, target: expFunc(t, (target - origin) / (np.exp(abs(c)) - 1), abs(c))
 
         Exponential and linear driftmodels are part of the ``ts_operators`` library, under the names
         ``expDriftModel`` and ``linearDriftModel``.
diff --git a/saqc/funcs/flagtools.py b/saqc/funcs/flagtools.py
index 029665a3f624beea6614fb2f577d4e8d1046175a..57f69e71fd9abe784b70116fbf1b2e758d0541c7 100644
--- a/saqc/funcs/flagtools.py
+++ b/saqc/funcs/flagtools.py
@@ -37,6 +37,10 @@ class FlagtoolsMixin:
     def flagDummy(self: "SaQC", field: str, **kwargs) -> "SaQC":
         """
         Function does nothing but returning data and flags.
+
+        Parameters
+        ----------
+
         """
         return self
 
@@ -45,10 +49,14 @@ class FlagtoolsMixin:
         """
         Set whole column to a flag value.
 
-        See Also
+        Parameters
+        ----------
+
+        See also
         --------
         clearFlags : set whole column to UNFLAGGED
         flagUnflagged : set flag value at all unflagged positions
+
         """
         self._flags[:, field] = flag
         return self
@@ -56,7 +64,7 @@ class FlagtoolsMixin:
     @register(mask=[], demask=[], squeeze=["field"])
     def clearFlags(self: "SaQC", field: str, **kwargs) -> "SaQC":
         """
-        Set whole column to UNFLAGGED.
+        Assign UNFLAGGED value to all periods in field.
 
         Notes
         -----
@@ -65,7 +73,7 @@ class FlagtoolsMixin:
         A warning is triggered if the ``flag`` keyword is given, because
         the flags are always set to `UNFLAGGED`.
 
-        See Also
+        See also
         --------
         forceFlags : set whole column to a flag value
         flagUnflagged : set flag value at all unflagged positions
@@ -83,16 +91,21 @@ class FlagtoolsMixin:
         """
         Function sets a flag at all unflagged positions.
 
-        See Also
-        --------
-        clearFlags : set whole column to UNFLAGGED
-        forceFlags : set whole column to a flag value
+        Parameters
+        ----------
 
         Notes
         -----
         This function ignores the ``dfilter`` keyword, because the
         data is not relevant for processing.
+
+        See also
+        --------
+        clearFlags : set whole column to UNFLAGGED
+        forceFlags : set whole column to a flag value
+
         """
+
         unflagged = self._flags[field].isna() | (self._flags[field] == UNFLAGGED)
         self._flags[unflagged, field] = flag
         return self
@@ -376,16 +389,15 @@ class FlagtoolsMixin:
         """
         Transfer Flags of one variable to another.
 
+        Parameters
+        ----------
+
         squeeze :
             Squeeze the history into a single column if ``True``, function specific flag information is lost.
 
         overwrite :
             Overwrite existing flags if ``True``.
 
-        See Also
-        --------
-        * :py:meth:`saqc.SaQC.flagGeneric`
-        * :py:meth:`saqc.SaQC.concatFlags`
 
         Examples
         --------
@@ -422,6 +434,12 @@ class FlagtoolsMixin:
                   a      b      c
            0   -inf   -inf   -inf
            1  255.0  255.0  255.0
+
+        See also
+        --------
+        * :py:meth:`saqc.SaQC.flagGeneric`
+        * :py:meth:`saqc.SaQC.concatFlags`
+
         """
 
         fields, targets, broadcasting = multivariateParameters(field, target)
@@ -617,7 +635,7 @@ class FlagtoolsMixin:
         ----------
         group:
             A collection of ``SaQC`` objects. Flag checks are performed on all ``SaQC`` objects
-            based on the variables specified in :py:attr:`field`. Whenever all monitored variables
+            based on the variables specified in ``field``. Whenever all monitored variables
             are flagged, the associated timestamps will receive a flag.
         """
         return _groupOperation(
diff --git a/saqc/funcs/interpolation.py b/saqc/funcs/interpolation.py
index b3d71b883fc056ef5d5864a7a50ea919e7a13fed..5c45bdbc097c33275fc140a5443dfaabf3ef3d41 100644
--- a/saqc/funcs/interpolation.py
+++ b/saqc/funcs/interpolation.py
@@ -205,11 +205,11 @@ class InterpolationMixin:
             * ‘index’, ‘values’: Use the actual numerical values of the index.
             * ‘pad’: Fill in NaNs using existing values.
             * ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘spline’, ‘barycentric’, ‘polynomial’:
-                 Passed to scipy.interpolate.interp1d. These methods use the numerical values of the index.
-                 Both ‘polynomial’ and ‘spline’ require that you also specify an order (int), e.g.
-                 ``qc.interpolate(method='polynomial', order=5)``.
+              Passed to scipy.interpolate.interp1d. These methods use the numerical values of the index.
+              Both ‘polynomial’ and ‘spline’ require that you also specify an order (int), e.g.
+              ``qc.interpolate(method='polynomial', order=5)``.
             * ‘krogh’, ‘spline’, ‘pchip’, ‘akima’, ‘cubicspline’:
-                 Wrappers around the SciPy interpolation methods of similar names.
+              Wrappers around the SciPy interpolation methods of similar names.
             * ‘from_derivatives’: Refers to scipy.interpolate.BPoly.from_derivatives
 
         order :
@@ -375,32 +375,32 @@ class InterpolationMixin:
         method :
             Interpolation technique to use. One of:
 
-            * ``'nshift'``: shift grid points to the nearest time stamp
-                in the range = +/- 0.5 * ``freq``
-            * ``'bshift'``: shift grid points to the first succeeding
-                time stamp (if any)
-            * ``'fshift'``: shift grid points to the last preceeding time
-                stamp (if any)
+            * ``'nshift'``: Shift grid points to the nearest time stamp
+              in the range = +/- 0.5 * ``freq``.
+            * ``'bshift'``: Shift grid points to the first succeeding
+              time stamp (if any).
+            * ``'fshift'``: Shift grid points to the last preceeding time
+              stamp (if any).
             * ``'linear'``: Ignore the index and treat the values as equally
-                spaced.
-            * ``'time'``, ``'index'``, 'values': Use the actual numerical
-                values of the index.
+              spaced.
+            * ``'time'``, ``'index'``, ``'values'``: Use the actual numerical
+              values of the index.
             * ``'pad'``: Fill in NaNs using existing values.
             * ``'spline'``, ``'polynomial'``:
-                Passed to ``scipy.interpolate.interp1d``. These methods
-                use the numerical values of the index.  An ``order`` must be
-                specified, e.g. ``qc.interpolate(method='polynomial', order=5)``.
+              Passed to ``scipy.interpolate.interp1d``. These methods
+              use the numerical values of the index.  An ``order`` must be
+              specified, e.g. ``qc.interpolate(method='polynomial', order=5)``.
             * ``'nearest'``, ``'zero'``, ``'slinear'``, ``'quadratic'``, ``'cubic'``, ``'barycentric'``:
-                Passed to ``scipy.interpolate.interp1d``. These methods use
-                the numerical values of the index.
+              Passed to ``scipy.interpolate.interp1d``. These methods use
+              the numerical values of the index.
             * ``'krogh'``, ``'spline'``, ``'pchip'``, ``'akima'``, ``'cubicspline'``:
-                Wrappers around the SciPy interpolation methods of similar
-                names.
-            * ``'from_derivatives'``: Refers to ``scipy.interpolate.BPoly.from_derivatives``
+              Wrappers around the SciPy interpolation methods of similar
+              names.
+            * ``'from_derivatives'``: Refers to ``scipy.interpolate.BPoly.from_derivatives``.
 
         order :
             Order of the interpolation method, ignored if not supported
-            by the chosen ``method``
+            by the chosen ``method``.
 
         extrapolate :
             Use parameter to perform extrapolation instead of interpolation
@@ -411,7 +411,7 @@ class InterpolationMixin:
             * ``'both'`` - perform forward and backward extrapolation
 
         overwrite :
-           If set to True, existing flags will be cleared
+           If set to `True`, existing flags will be cleared.
         """
 
         # TODO:
diff --git a/saqc/funcs/noise.py b/saqc/funcs/noise.py
index 18edfd7b1b3c16aef4dd970e6cab9aac3c706b89..4afc99f338b9705a75a8f5b27f5969d4b1e8d234 100644
--- a/saqc/funcs/noise.py
+++ b/saqc/funcs/noise.py
@@ -58,9 +58,11 @@ class NoiseMixin:
         ----------
         func :
             Either a String value, determining the aggregation function applied on every chunk.
+
             * 'std': standard deviation
             * 'var': variance
             * 'mad': median absolute deviation
+
             Or a Callable function mapping 1 dimensional arraylikes onto scalars.
 
         window :
@@ -124,10 +126,12 @@ class NoiseMixin:
         Parameters
         ----------
         func :
-            Either a string, determining the aggregation function applied on every chunk
+            Either a string, determining the aggregation function applied on every chunk:
+
             * 'std': standard deviation
             * 'var': variance
             * 'mad': median absolute deviation
+
             Or a Callable, mapping 1 dimensional array likes onto scalars.
 
         window :
diff --git a/saqc/funcs/outliers.py b/saqc/funcs/outliers.py
index dfb1a84ef8a51ddba8a2f7a43b31ec60e8549499..c75a340f670cd4d084be238c699ffa5ff8b29d7c 100644
--- a/saqc/funcs/outliers.py
+++ b/saqc/funcs/outliers.py
@@ -98,9 +98,7 @@ class OutliersMixin:
             * The "automatic" threshing introduced with the publication
               of the algorithm defaults to ``1.5``.
             * In this implementation, :py:attr:`thresh` defaults (``'auto'``)
-              to flagging the scores with a modified 3-sigma rule, resulting
-              in a :py:attr:`thresh` `` > 1.5`` which usually mitigates
-              over-flagging compared to the literature recommendation.
+              to flagging the scores with a modified 3-sigma rule.
 
         algorithm :
             Algorithm used for calculating the :py:attr:`n`-nearest neighbors.
@@ -234,6 +232,7 @@ class OutliersMixin:
         p :
             Degree of the metric ("Minkowski"), according to which distance
             to neighbors is determined. Most important values are:
+
             * ``1`` - Manhatten Metric
             * ``2`` - Euclidian Metric
 
@@ -258,11 +257,6 @@ class OutliersMixin:
             by sufficiently large value "jumps". Defaults to estimating the sufficient value jumps from
             the median over the absolute step sizes between data points.
 
-        See Also
-        --------
-        :ref:`introduction to outlier detection with
-            saqc <cookbooks/OutlierDetection:Outlier Detection>`
-
         Notes
         -----
 
@@ -356,6 +350,9 @@ class OutliersMixin:
            qc = qc.flagUniLOF('sac254_raw')
            qc.plot('sac254_raw')
 
+        See also
+        --------
+        :ref:`introduction to outlier detection with saqc <cookbooks/OutlierDetection:Outlier Detection>`
         """
         self._validateLOF(algorithm, n, p, density)
         if thresh != "auto" and not isFloatLike(thresh):
@@ -791,36 +788,10 @@ class OutliersMixin:
         **kwargs,
     ) -> "SaQC":
         """
-        The function flags raises and drops in value courses, that exceed a certain
-        threshold within a certain timespan.
-
-        The parameter variety of the function is owned to the intriguing case of
-        values, that "return" from outlierish or anomalious value levels and thus
-        exceed the threshold, while actually being usual values.
-
-        Notes
-        -----
-        The dataset is NOT supposed to be harmonized to a time series with an
-        equidistant requency grid.
-
-        The value :math:`x_{k}` of a time series :math:`x` with associated
-        timestamps :math:`t_i`, is flagged a raise, if:
-
-        1. There is any value :math:`x_{s}`, preceeding :math:`x_{k}` within
-           :py:attr:`raise_window` range, so that
-           :math:`M = |x_k - x_s | >`  :py:attr:`thresh` :math:`> 0`
+        The function flags raises and drops in value courses that exceed a certain threshold within a certain timespan.
 
-        2. The weighted average :math:`\\mu^{*}` of the values, preceding
-           :math:`x_{k}` within :py:attr:`average_window` range indicates,
-           that :math:`x_{k}` does not return from an "outlierish" value
-           course, meaning that
-           :math:`x_k > \\mu^* + ( M` / :py:attr:`raise_factor` :math:`)`
-
-        3. Additionally, if :py:attr:`slope` is not ``None``, :math:`x_{k}`
-           is checked or being sufficiently divergent from its very predecessor
-           :math:`x_{k-1}`, meaning that, it is additionally checked if:
-           * :math:`x_k - x_{k-1} >` :py:attr:`slope`
-           * :math:`t_k - t_{k-1} >` :py:attr:`weight` :math:`\\times` :py:attr:`freq`
+        .. deprecated:: 2.6.0
+           Function is deprecated because it is hard to parameterize and better alternatives are available. Depending on use case, use: :py:meth:`~saqc.SaQC.flagUniLOF`, :py:meth:`~saqc.SaQC.flagZScore`, :py:meth:`~saqc.SaQC.flagJumps` instead.
 
         Parameters
         ----------
@@ -851,7 +822,41 @@ class OutliersMixin:
 
         weight :
             See condition (3).
+
+        Notes
+        -----
+        The dataset is NOT supposed to be harmonized to a time series with an
+        equidistant requency grid.
+
+        The value :math:`x_{k}` of a time series :math:`x` with associated
+        timestamps :math:`t_i`, is flagged a raise, if:
+
+        1. There is any value :math:`x_{s}`, preceeding :math:`x_{k}` within
+           :py:attr:`raise_window` range, so that
+           :math:`M = |x_k - x_s | >`  :py:attr:`thresh` :math:`> 0`
+
+        2. The weighted average :math:`\\mu^{*}` of the values, preceding
+           :math:`x_{k}` within :py:attr:`average_window` range indicates,
+           that :math:`x_{k}` does not return from an "outlierish" value
+           course, meaning that
+           :math:`x_k > \\mu^* + ( M` / :py:attr:`raise_factor` :math:`)`
+
+        3. Additionally, if :py:attr:`slope` is not ``None``, :math:`x_{k}`
+           is checked or being sufficiently divergent from its very predecessor
+           :math:`x_{k-1}`, meaning that, it is additionally checked if:
+           * :math:`x_k - x_{k-1} >` :py:attr:`slope`
+           * :math:`t_k - t_{k-1} >` :py:attr:`weight` :math:`\\times` :py:attr:`freq`
         """
+
+        warnings.warn(
+            "The function flagRaise is deprecated with no 100% exact replacement function. "
+            "When looking for changes in the value course, the use of flagRaise can be replicated and more easily aimed "
+            "for, via the method flagJumps.\n"
+            "When looking for raises to outliers or plateaus, use one of: "
+            "flagZScore (outliers), flagUniLOF (outliers and small plateaus) or flagOffset (plateaus)",
+            DeprecationWarning,
+        )
+
         validateWindow(raise_window, "raise_window", allow_int=False)
         validateWindow(freq, "freq", allow_int=False)
         validateWindow(average_window, "average_window", allow_int=False, optional=True)
@@ -1022,6 +1027,7 @@ class OutliersMixin:
 
         Values :math:`x_n, x_{n+1}, .... , x_{n+k}` of a timeseries :math:`x` with
         associated timestamps :math:`t_n, t_{n+1}, .... , t_{n+k}` are considered spikes, if:
+
         1. :math:`|x_{n-1} - x_{n + s}| >` :py:attr:`thresh`, for all :math:`s \\in [0,1,2,...,k]`
         2. if :py:attr:`thresh_relative` > 0, :math:`x_{n + s} > x_{n - 1}*(1+` :py:attr:`thresh_relative` :math:`)`
         3. if :py:attr:`thresh_relative` < 0, :math:`x_{n + s} < x_{n - 1}*(1+` :py:attr:`thresh_relative` :math:`)`
@@ -1215,21 +1221,11 @@ class OutliersMixin:
         """
         Flag outliers using the Grubbs algorithm.
 
-        See [1] for more information on the grubbs tests definition.
-
-        The (two-sided) test gets applied to data chunks of size :py:attr:`window`. The
-        tests will be iterated chunkwise until no more outliers are detected.
-
-        Note
-        ----
-        * The data is expected to be normally distributed!
-        * The test performs poorly for small data chunks, resulting in considerable
-          overflagging. Select :py:attr:`window` such that every data chunk contains at
-          least 8 values and also adjust the :py:attr:`min_periods` values accordingly.
+        .. deprecated:: 2.6.0
+           Use :py:meth:`~saqc.SaQC.flagUniLOF` or :py:meth:`~saqc.SaQC.flagZScore` instead.
 
         Parameters
         ----------
-
         window :
             Size of the testing window.
             If an integer, the fixed number of observations used for each window.
@@ -1253,6 +1249,14 @@ class OutliersMixin:
 
         [1] https://en.wikipedia.org/wiki/Grubbs%27s_test_for_outliers
         """
+
+        warnings.warn(
+            "The function flagByGrubbs is deprecated due to its inferior performance, with no 100% exact replacement function. "
+            "When looking for outliers use one of: "
+            "flagZScore, flagUniLOF",
+            DeprecationWarning,
+        )
+
         validateWindow(window)
         validateFraction(alpha, "alpha")
         validateMinPeriods(min_periods, optional=False)
diff --git a/saqc/funcs/pattern.py b/saqc/funcs/pattern.py
index fe910aa35408416c5f1d94069cf12b2c17f1c0be..1615e5cb3932ea30bc97746f6535c0648612fe07 100644
--- a/saqc/funcs/pattern.py
+++ b/saqc/funcs/pattern.py
@@ -27,7 +27,7 @@ def calculateDistanceByDTW(
     The size of the rolling window is determined by the timespan defined
     by the first and last timestamp of the reference data's datetime index.
 
-    For details see the linked functions in the `See Also` section.
+    For details see the linked functions in the `See also` section.
 
     Parameters
     ----------
@@ -59,7 +59,7 @@ def calculateDistanceByDTW(
     The data must be regularly sampled, otherwise a ValueError is raised.
     NaNs in the data will be dropped before dtw distance calculation.
 
-    See Also
+    See also
     --------
     flagPatternByDTW : flag data by DTW
     """
diff --git a/saqc/funcs/resampling.py b/saqc/funcs/resampling.py
index 450cdc81e1e54f1da2a7eb455e0ac571961e7c35..ec6a3d73dac4dc78bc2bea46fc377d8f88bfbd9a 100644
--- a/saqc/funcs/resampling.py
+++ b/saqc/funcs/resampling.py
@@ -209,11 +209,11 @@ class ResamplingMixin:
         ``method``. The following methods are available:
 
         * ``'nagg'``: all values in the range (+/- `freq`/2) of a grid point get
-            aggregated with func and assigned to it.
+          aggregated with func and assigned to it.
         * ``'bagg'``: all values in a sampling interval get aggregated with func and
-            the result gets assigned to the last grid point.
+          the result gets assigned to the last grid point.
         * ``'fagg'``: all values in a sampling interval get aggregated with func and
-            the result gets assigned to the next grid point.
+          the result gets assigned to the next grid point.
 
         Note
         ----
@@ -236,8 +236,8 @@ class ResamplingMixin:
             succeeding or "surrounding" interval). See description above for more details.
 
         maxna :
-            Maximum number of allowed ``NaN``s in a resampling interval. If exceeded, the
-            entire interval is filled with ``NaN``.
+            Maximum number of allowed NaN values in a resampling interval. If exceeded, the
+            entire interval is filled with NaN.
 
         maxna_group :
             Same as `maxna` but for consecutive NaNs.
diff --git a/saqc/funcs/residuals.py b/saqc/funcs/residuals.py
index 72d02982081f9ff80d4cfe0bde8d403d1f0b9571..5ef4a6a409168bb8b05cee3075921de0f1c48ee5 100644
--- a/saqc/funcs/residuals.py
+++ b/saqc/funcs/residuals.py
@@ -36,7 +36,7 @@ class ResidualsMixin:
         The residual  is calculated by fitting a polynomial of degree `order` to a data
         slice of size `window`, that has x at its center.
 
-        Note, that calculating the residuals tends to be quite costy, because a function
+        Note, that calculating the residuals tends to be quite costly, because a function
         fitting is performed for every sample. To improve performance, consider the
         following possibilities:
 
diff --git a/saqc/funcs/scores.py b/saqc/funcs/scores.py
index 9998c153477390add9805a11f1eb2c27c21ca09d..f9e42f59a675f4f69dddace69dbe0d3a9ade4195 100644
--- a/saqc/funcs/scores.py
+++ b/saqc/funcs/scores.py
@@ -180,7 +180,7 @@ class ScoresMixin:
         **kwargs,
     ) -> "SaQC":
         """
-        Score datapoints by an aggregation of the dictances to their k nearest neighbors.
+        Score datapoints by an aggregation of the distances to their `k` nearest neighbors.
 
         The function is a wrapper around the NearestNeighbors method from pythons sklearn library (See reference [1]).
 
@@ -300,10 +300,10 @@ class ScoresMixin:
         Parameters
         ----------
         window :
-            Size of the window. Either determined via an Offset String, denoting the windows temporal extension or
-            by an integer, denoting the windows number of periods.
-            `NaN` measurements also count as periods.
-            If `None` is passed, All data points share the same scoring window, which than equals the whole
+            Size of the window. Can be determined as:
+            * Offset String, denoting the window's temporal extension
+            * Integer, denoting the window's number of periods.
+            * `None` (default), all data points share the same scoring window, which then equals the whole
             data.
         model_func : default std
             Function to calculate the center moment in every window.
@@ -323,9 +323,9 @@ class ScoresMixin:
         containing the value :math:`y_{K}` wich is to be checked.
         (The index of :math:`K` depends on the selection of the parameter `center`.)
 
-        2. The "moment" :math:`M` for the window gets calculated via :math:`M=` `model_func(:math:`W`)
+        2. The "moment" :math:`M` for the window gets calculated via :math:`M=` model_func(:math:`W`)
 
-        3. The "scaling" :math:`N` for the window gets calculated via :math:`N=` `norm_func(:math:`W`)
+        3. The "scaling" :math:`N` for the window gets calculated via :math:`N=` norm_func(:math:`W`)
 
         4. The "score" :math:`S` for the point :math:`x_{k}`gets calculated via :math:`S=(x_{k} - M) / N`
         """
@@ -371,13 +371,6 @@ class ScoresMixin:
         n :
             Number of periods to be included into the LOF calculation. Defaults to `20`, which is a value found to be
             suitable in the literature.
-
-            * `n` determines the "locality" of an observation (its `n` nearest neighbors) and sets the upper limit of
-              values of an outlier clusters (i.e. consecutive outliers). Outlier clusters of size greater than `n/2`
-              may not be detected reliably.
-            * The larger `n`, the lesser the algorithm's sensitivity to local outliers and small or singleton outliers
-              points. Higher values greatly increase numerical costs.
-
         freq :
             Determines the segmentation of the data into partitions, the kNN algorithm is
             applied onto individually.
@@ -386,8 +379,18 @@ class ScoresMixin:
         p :
             Degree of the metric ("Minkowski"), according to wich distance to neighbors is determined.
             Most important values are:
+
             * `1` - Manhatten Metric
             * `2` - Euclidian Metric
+
+        Notes
+        -----
+
+        * `n` determines the "locality" of an observation (its `n` nearest neighbors) and sets the upper limit on
+          the size of outlier clusters (i.e. runs of consecutive outliers). Outlier clusters of size greater than `n/2`
+          may not be detected reliably.
+        * The larger `n`, the lesser the algorithm's sensitivity to local outliers and small or singleton outlier
+          points. Higher values greatly increase numerical costs.
         """
         from saqc.funcs.outliers import OutliersMixin
 
@@ -464,6 +467,7 @@ class ScoresMixin:
         p :
             Degree of the metric ("Minkowski"), according to wich distance to neighbors is determined.
             Most important values are:
+
             * `1` - Manhatten Metric
             * `2` - Euclidian Metric
 
@@ -481,7 +485,7 @@ class ScoresMixin:
         -----
         Algorithm steps for uniLOF flagging of variable `x`:
 
-        1. The temporal density `dt(x)` is calculated according o the `density` parameter.
+        1. The temporal density `dt(x)` is calculated according to the `density` parameter.
         2. LOF scores `LOF(x)` are calculated for the concatenation [`x`, `dt(x)`]
         3. `x` is flagged where `LOF(x)` exceeds the threshold determined by the parameter `thresh`.
 
diff --git a/saqc/funcs/tools.py b/saqc/funcs/tools.py
index e0f53fabbc32a8b6a89c2d0af1a68267f1dfc565..54cff17af101e8b99eec64c232a16b3107e9441e 100644
--- a/saqc/funcs/tools.py
+++ b/saqc/funcs/tools.py
@@ -171,7 +171,13 @@ class ToolsMixin:
         **kwargs,
     ) -> "SaQC":
         """
-        Copy data and flags to a new name (preserve flags history).
+        Make a copy of the data and flags of `field`.
+
+        Parameters
+        ----------
+        overwrite :
+            Overwrite the target, if it already exists.
+
         """
         if field == target:
             return self
@@ -235,8 +241,7 @@ class ToolsMixin:
         1. dublicate "field" in the input data (`copyField`)
         2. mask the dublicated data (this, `selectTime`)
         3. apply the tests you only want to be applied onto the masked data chunks (a saqc function)
-        4. project the flags, calculated on the dublicated and masked data onto the original field data
-            (`concateFlags` or `flagGeneric`)
+        4. project the flags, calculated on the dublicated and masked data onto the original field data (`concateFlags` or `flagGeneric`)
         5. drop the dublicated data (`dropField`)
 
         To see an implemented example, checkout flagSeasonalRange in the saqc.functions module
@@ -272,37 +277,52 @@ class ToolsMixin:
         Examples
         --------
         The `period_start` and `end` parameters provide a conveniant way to generate seasonal / date-periodic masks.
-        They have to be strings of the forms: "mm-ddTHH:MM:SS", "ddTHH:MM:SS" , "HH:MM:SS", "MM:SS" or "SS"
+        They have to be strings of the forms:
+
+        * "mm-ddTHH:MM:SS"
+        * "ddTHH:MM:SS"
+        * "HH:MM:SS"
+        * "MM:SS" or "SS"
+
         (mm=month, dd=day, HH=hour, MM=minute, SS=second)
         Single digit specifications have to be given with leading zeros.
         `period_start` and `seas   on_end` strings have to be of same length (refer to the same periodicity)
         The highest date unit gives the period.
         For example:
 
-        >>> start = "01T15:00:00"
-        >>> end = "13T17:30:00"
+        .. doctest::
+
+           >>> start = "01T15:00:00"
+           >>> end = "13T17:30:00"
 
         Will result in all values sampled between 15:00 at the first and  17:30 at the 13th of every month get masked
 
-        >>> start = "01:00"
-        >>> end = "04:00"
+        .. doctest::
+
+           >>> start = "01:00"
+           >>> end = "04:00"
 
         All the values between the first and 4th minute of every hour get masked.
 
-        >>> start = "01-01T00:00:00"
-        >>> end = "01-03T00:00:00"
+        .. doctest::
+
+           >>> start = "01-01T00:00:00"
+           >>> end = "01-03T00:00:00"
 
         Mask january and february of evcomprosed in theery year. masking is inclusive always, so in this case the mask will
         include 00:00:00 at the first of march. To exclude this one, pass:
 
-        >>> start = "01-01T00:00:00"
-        >>> end = "02-28T23:59:59"
+        .. doctest::
+
+           >>> start = "01-01T00:00:00"
+           >>> end = "02-28T23:59:59"
 
         To mask intervals that lap over a seasons frame, like nights, or winter, exchange sequence of season start and
         season end. For example, to mask night hours between 22:00:00 in the evening and 06:00:00 in the morning, pass:
 
-        >>> start = "22:00:00"
-        >>> end = "06:00:00"
+        >> start = "22:00:00"
+        >> end = "06:00:00"
+
         """
         validateChoice(mode, "mode", ["periodic", "selection_field"])
 
@@ -371,6 +391,7 @@ class ToolsMixin:
 
         mode :
            How to process multiple variables to be plotted:
+
            * `"oneplot"` : plot all variables with their flags in one axis (default)
            * `"subplots"` : generate subplot grid where each axis contains one variable plot with associated flags
            * `"biplot"` : plotting first and second variable in field against each other in a scatter plot  (point cloud).
diff --git a/saqc/lib/docs.py b/saqc/lib/docs.py
index 99f194e35cc0ffc78773642b2b9874dae6e6b862..99f7848ace086de39a2062447d1765a47f5eb5b6 100644
--- a/saqc/lib/docs.py
+++ b/saqc/lib/docs.py
@@ -4,9 +4,15 @@
 
 from __future__ import annotations
 
-from typing import Any, TypedDict
+from typing import TypedDict
 
-from docstring_parser import DocstringParam, DocstringReturns, compose, parse
+from docstring_parser import (
+    DocstringParam,
+    DocstringReturns,
+    DocstringStyle,
+    compose,
+    parse,
+)
 
 
 class ParamDict(TypedDict):
@@ -50,18 +56,17 @@ COMMON = {
 }
 
 
-class FunctionParam(DocstringParam):
-    def __init__(
-        self, name: str, typehint: str, description: str, optional: bool = False
-    ):
-        super().__init__(
-            args=["param", name],
-            description=description,
-            arg_name=name,
-            type_name=typehint,
-            is_optional=optional,
-            default=None,
-        )
+def toParameter(
+    name: str, typehint: str, description: str, optional: bool = False
+) -> DocstringParam:
+    return DocstringParam(
+        args=["param", name],
+        description=description,
+        arg_name=name,
+        type_name=typehint,
+        is_optional=optional,
+        default=None,
+    )
 
 
 def docurator(func, defaults: dict[str, ParamDict] | None = None):
@@ -76,7 +81,7 @@ def docurator(func, defaults: dict[str, ParamDict] | None = None):
         return_name="SaQC",
     )
 
-    tree = parse(func.__doc__)
+    tree = parse(func.__doc__, style=DocstringStyle.NUMPYDOC)
 
     if tree.returns:
         raise ValueError(
@@ -84,7 +89,7 @@ def docurator(func, defaults: dict[str, ParamDict] | None = None):
         )
 
     # rewrite parameters
-    meta = [FunctionParam(**{**COMMON["field"], **defaults.get("field", {})})]
+    meta = [toParameter(**{**COMMON["field"], **defaults.get("field", {})})]
     for p in tree.params:
         if p.arg_name in COMMON:
             raise ValueError(
@@ -94,7 +99,7 @@ def docurator(func, defaults: dict[str, ParamDict] | None = None):
 
     # additional parameters
     for p in ("target", "dfilter", "flag"):
-        meta.append(FunctionParam(**{**COMMON[p], **defaults.get(p, {})}))
+        meta.append(toParameter(**{**COMMON[p], **defaults.get(p, {})}))
 
     # return sections
     meta.append(docstring_return)
@@ -107,4 +112,5 @@ def docurator(func, defaults: dict[str, ParamDict] | None = None):
     tree.meta = meta
 
     func.__doc__ = compose(tree)
+
     return func
diff --git a/saqc/lib/tools.py b/saqc/lib/tools.py
index 4dbcf710813b7668b60a716609fb0183ea443c83..feb33af8e8c5e637cea386e5ec8a58b33e472a52 100644
--- a/saqc/lib/tools.py
+++ b/saqc/lib/tools.py
@@ -177,7 +177,24 @@ def periodicMask(
             x[e:e] = True
             return x
 
-    freq = "1" + "mmmhhhdddMMMYYY"[len(season_start)]
+    freq = (
+        "1",
+        "m",
+        "m",
+        "m",
+        "h",
+        "h",
+        "h",
+        "d",
+        "d",
+        "d",
+        "M",
+        "M",
+        "M",
+        "YE",
+        "YE",
+        "YE",
+    )[len(season_start)]
     out = mask.groupby(pd.Grouper(freq=freq)).transform(_selector)
     if invert:
         out = ~out
diff --git a/tests/core/test_history.py b/tests/core/test_history.py
index cb3412c370f90046bf14a062176932ed0bee9984..bbd9ed24001761b29cb09d3598708b2d891b1112 100644
--- a/tests/core/test_history.py
+++ b/tests/core/test_history.py
@@ -9,7 +9,7 @@ import pandas as pd
 import pytest
 from pandas.api.types import is_categorical_dtype, is_float_dtype
 
-from saqc.core.history import History, createHistoryFromData
+from saqc.core.history import AGGREGATION, History, createHistoryFromData
 from tests.common import dummyHistory
 
 # see #GH143 combined backtrack
@@ -240,3 +240,25 @@ def test_append_force(__hist, s, max_val):
     hist.append(s)
     check_invariants(hist)
     assert all(hist.squeeze() == max_val)
+
+
+@pytest.mark.parametrize(
+    "col, expected",
+    [
+        (pd.Series(0, index=range(6), dtype=float), {"last": 0, "min": 0, "max": 0}),
+        (pd.Series(1, index=range(6), dtype=float), {"last": 1, "min": 0, "max": 1}),
+        (pd.Series(6, index=range(6), dtype=float), {"last": 6, "min": 0, "max": 6}),
+        (pd.Series(4, index=range(6), dtype=float), {"last": 4, "min": 0, "max": 6}),
+    ],
+)
+def test_aggregations(col, expected, hist=History(index=pd.Index(range(6)))):
+    import saqc.core.history
+
+    hist.append(col)
+    check_invariants(hist)
+    for aggregation in ["last", "min", "max"]:
+        saqc.core.history.AGGREGATION = aggregation
+        assert (hist.squeeze() == expected[aggregation]).all()
+
+    # reset to not disturb the other tests...
+    saqc.core.history.AGGREGATION = "last"
diff --git a/tests/funcs/test_functions.py b/tests/funcs/test_functions.py
index e567835680c85db4f5b1cedc6abbc227fa32ede6..d956e39b1a1fb6532b328bd93672966c8cc41c55 100644
--- a/tests/funcs/test_functions.py
+++ b/tests/funcs/test_functions.py
@@ -52,7 +52,7 @@ def test_flagRange(data, field):
     assert all(flagged == expected)
 
 
-def test_flagSeasonalRange(data, field):
+def test_selectTime(data, field):
     data[field].iloc[::2] = 0
     data[field].iloc[1::2] = 50
     nyears = len(data[field].index.year.unique())
diff --git a/tests/funcs/test_outlier_detection.py b/tests/funcs/test_outlier_detection.py
index d23ef30b5d5ef0983cc49a043c4d06cfec9e26f6..21487d6c132a302f795ebdc122a0273900df698f 100644
--- a/tests/funcs/test_outlier_detection.py
+++ b/tests/funcs/test_outlier_detection.py
@@ -33,7 +33,7 @@ def test_flagMad(spiky_data):
     field, *_ = data.columns
     flags = initFlagsLike(data)
     qc = SaQC(data, flags).flagZScore(
-        field, window="1H", method="modified", thresh=3.5, flag=BAD
+        field, window="1h", method="modified", thresh=3.5, flag=BAD
     )
     flag_result = qc.flags[field]
     test_sum = (flag_result.iloc[spiky_data[1]] == BAD).sum()
@@ -52,6 +52,7 @@ def test_flagSpikesBasic(spiky_data):
     assert test_sum == len(spiky_data[1])
 
 
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
 @pytest.mark.slow
 @pytest.mark.parametrize("dat", ["course_1", "course_2", "course_3", "course_4"])
 def test_flagSpikesLimitRaise(dat, request):
@@ -113,7 +114,7 @@ def test_grubbs(course_3):
         out_val=-10,
     )
     flags = initFlagsLike(data)
-    qc = SaQC(data, flags).flagByGrubbs("data", window=20, min_periods=15, flag=BAD)
+    qc = SaQC(data, flags).flagUniLOF("data", density=0.4)
     assert np.all(qc.flags["data"][char_dict["drop"]] > UNFLAGGED)
 
 
diff --git a/tests/fuzzy/lib.py b/tests/fuzzy/lib.py
index 3604bd472b9b4435ff3fb830e583c1d4aeedf4aa..a70a159f3ad1fede24e2c295ce74d6a672a8ffb4 100644
--- a/tests/fuzzy/lib.py
+++ b/tests/fuzzy/lib.py
@@ -104,13 +104,13 @@ def daterangeIndexes(draw, min_size=0, max_size=100):
     max_date = pd.Timestamp("2099-12-31").to_pydatetime()
     start = draw(datetimes(min_value=min_date, max_value=max_date))
     periods = draw(integers(min_value=min_size, max_value=max_size))
-    freq = draw(sampled_from(["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]))
+    freq = draw(sampled_from(["D", "h", "min", "s", "ms", "us", "ns"]))
     return pd.date_range(start, periods=periods, freq=freq)
 
 
 @composite
 def frequencyStrings(draw, _):
-    freq = draw(sampled_from(["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]))
+    freq = draw(sampled_from(["D", "h", "min", "s", "ms", "us", "ns"]))
     mult = draw(integers(min_value=1, max_value=10))
     value = f"{mult}{freq}"
     return value
diff --git a/tests/requirements.txt b/tests/requirements.txt
index af4cf289db422bccb9129207a9640cd64cce6ac4..77dadce52ee36006cc1b82c6f92cdfbf6486bf3f 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 beautifulsoup4==4.12.3
-hypothesis==6.98.15
-Markdown==3.5.2
-pytest==8.0.2
+Markdown==3.6
+hypothesis==6.100.0
+pytest==8.1.1
 requests==2.31.0