diff --git a/dios/dios/base.py b/dios/dios/base.py
index 29c0601bc044c5f3e4ff4a66670ff9973da9a994..ad9a3698511d13c8d912e6e7a16ac0ddab03ff8c 100644
--- a/dios/dios/base.py
+++ b/dios/dios/base.py
@@ -47,7 +47,6 @@ class _DiosBase:
         cast_policy="save",
         fastpath=False,
     ):
-
         self._attrs = {}
         self.cast_policy = cast_policy  # set via property
 
@@ -62,7 +61,6 @@ class _DiosBase:
                 self._data = pd.Series(dtype="O", index=columns)
 
         else:
-
             if index is not None and not isinstance(index, pd.Index):
                 index = pd.Index(index)
 
@@ -295,7 +293,6 @@ class _DiosBase:
                     self._data.at[c][series.index] = series
 
     def _setitem_listlike(self, data, value):
-
         value = value.values if isinstance(value, pd.Series) else value
 
         if len(value) != len(data.columns):
diff --git a/dios/dios/indexer.py b/dios/dios/indexer.py
index f045a4f51d3d212f4a6cf4157c51903bc354caae..cbf0ffa5631f74048e8994875532c49627263d3e 100644
--- a/dios/dios/indexer.py
+++ b/dios/dios/indexer.py
@@ -15,7 +15,6 @@ class _Indexer:
         self._data = obj._data
 
     def _unpack_key(self, key):
-
         key = list(key) if pdextra.is_iterator(key) else key
 
         if isinstance(key, tuple):
@@ -89,7 +88,6 @@ class _LocIndexer(_Indexer):
         super().__init__(*args, **kwargs)
 
     def __getitem__(self, key):
-
         rowkey, colkey = self._unpack_key(key)
         if _is_dios_like(rowkey) or _is_dios_like(colkey):
             raise ValueError("Could not index with multidimensional key")
@@ -109,7 +107,6 @@ class _LocIndexer(_Indexer):
         else:
             k = "?"
             try:
-
                 for k in data.index:
                     data.at[k] = data.at[k].loc[rowkey]
 
@@ -128,14 +125,12 @@ class _LocIndexer(_Indexer):
         return new
 
     def __setitem__(self, key, value):
-
         rowkey, colkey = self._unpack_key(key)
         if _is_dios_like(rowkey) or _is_dios_like(colkey):
             raise ValueError("Cannot index with multi-dimensional key")
 
         # .loc[any, scalar] - set on single column
         if pdextra.is_hashable(colkey):
-
             # .loc[dont-care, new-scalar] = val
             if colkey not in self.obj.columns:
                 self.obj._insert(colkey, value)
@@ -180,7 +175,6 @@ class _iLocIndexer(_Indexer):
         else:
             k = "?"
             try:
-
                 for k in data.index:
                     data.at[k] = data.at[k].iloc[rowkey]
 
@@ -248,7 +242,6 @@ class _aLocIndexer(_Indexer):
 
         c = "?"
         try:
-
             for i, c in enumerate(data.index):
                 data.at[c] = self._data.at[c].loc[rowkeys[i]]
 
@@ -268,7 +261,6 @@ class _aLocIndexer(_Indexer):
         def iter_self(colkeys, position=False):
             c = "?"
             try:
-
                 for i, c in enumerate(colkeys):
                     dat = self._data.at[c]
                     rk = rowkeys[i]
@@ -347,7 +339,6 @@ class _aLocIndexer(_Indexer):
 
         # .aloc[dios]
         if _is_dios_like(rowkey):
-
             if not pdextra.is_null_slice(colkey):
                 raise ValueError(
                     f"Could not index with a dios-like indexer as rowkey,"
@@ -385,7 +376,6 @@ class _aLocIndexer(_Indexer):
         return rowkey, colkey, lowdim
 
     def _get_rowkey(self, rowkey, colkey, depth=0):
-
         if pdextra.is_nested_list_like(rowkey) and depth == 0:
             rowkey = rowkey.values if isinstance(rowkey, pd.Series) else rowkey
             if len(rowkey) != len(colkey):
diff --git a/dios/profiling/generate_testsets.py b/dios/profiling/generate_testsets.py
index 6c121c805066608b15d137ab4de37c0c2f00bdd2..42c24c97e7921aa7f7504a01af6862c0f7ae543a 100644
--- a/dios/profiling/generate_testsets.py
+++ b/dios/profiling/generate_testsets.py
@@ -25,7 +25,6 @@ def _gen_testset(rowsz, colsz, freq="1min", disalign=True, randstart=True):
     freqv = int(freq[: -len(frequ)])
 
     for i in range(colsz):
-
         if randstart:
             # generate random startpoint for each series
             r = str(np.random.randint(int(rowsz * 0.05), int(rowsz * 0.6) + 2)) + frequ
diff --git a/dios/profiling/memory.py b/dios/profiling/memory.py
index 7341c3ab27bc14f10b2e162cb685e2a4b3e12c4a..3078c654936182266f1dd4be76300cb30eb661bd 100644
--- a/dios/profiling/memory.py
+++ b/dios/profiling/memory.py
@@ -40,7 +40,6 @@ def rows_by_time(nsec, mdays):
 
 
 if __name__ == "__main__":
-
     # dios      - linear in rows and columns, same size for r=10,c=100 or r=100,c=10
     do_real_check = True
     cols = 10
diff --git a/dios/test/test_dflike.py b/dios/test/test_dflike.py
index 550fd0e2e019372a691a4b1a3edb09e6dd9c982c..445b4e0785adb32cf3289c8c4d2441e0a4ff2f1a 100644
--- a/dios/test/test_dflike.py
+++ b/dios/test/test_dflike.py
@@ -33,7 +33,6 @@ TESTDATA = [
 @pytest.mark.parametrize("data", TESTDATA)
 @pytest.mark.parametrize("with_column_param", [False, True])
 def test_dios_create(data, with_column_param):
-
     data_copy0 = deepcopy(data)
     data_copy1 = deepcopy(data)
 
diff --git a/dios/test/test_dflike__setget__.py b/dios/test/test_dflike__setget__.py
index 9ce2aa2e3ed6129873ae97a74bc50f6297a31f70..7388d58e0dee1a5cd7f3861f371304b320d3311d 100644
--- a/dios/test/test_dflike__setget__.py
+++ b/dios/test/test_dflike__setget__.py
@@ -8,7 +8,6 @@ from .test_setup import *
 
 
 def _test(res, exp):
-
     if isinstance(exp, pd.DataFrame):
         eq, msg = dios_eq_df(res, exp, with_msg=True)
         assert eq, msg
diff --git a/saqc/__main__.py b/saqc/__main__.py
index 58d83731a2b675b4386db604b650b027beec9a1b..24ba5b8b09ac4ce7ee049da2b177728580b72c3d 100644
--- a/saqc/__main__.py
+++ b/saqc/__main__.py
@@ -117,7 +117,6 @@ def main(config, data, scheme, outfile, nodata, log_level):
         flags_result = flags_result.to_df()
 
     if outfile:
-
         data_result.columns = pd.MultiIndex.from_product(
             [data_result.columns.tolist(), ["data"]]
         )
diff --git a/saqc/constants.py b/saqc/constants.py
index 839795e0e02e1d8cc1b3f3a3cefb7119e9e01eda..985bcbdc12348c96def01dd41def071559dc28d4 100644
--- a/saqc/constants.py
+++ b/saqc/constants.py
@@ -56,6 +56,7 @@ BAD = 255.0
 FILTER_ALL = -np.inf
 FILTER_NONE = np.inf
 
+
 # ----------------------------------------------------------------------
 # other
 # ----------------------------------------------------------------------
diff --git a/saqc/core/core.py b/saqc/core/core.py
index 021f22f08d0823710f63d131534c9bff0edc66d6..ebce7c049c1ed9ded44bbc04202106e46b6f84f4 100644
--- a/saqc/core/core.py
+++ b/saqc/core/core.py
@@ -152,7 +152,6 @@ class SaQC(FunctionsMixin):
         )
 
     def _initData(self, data) -> DictOfSeries:
-
         if data is None:
             return DictOfSeries()
 
diff --git a/saqc/core/flags.py b/saqc/core/flags.py
index a1602507599b3d62bf4a800146202c83add8fe4f..34042c8a9c2b505c79d5cae097b9ac6b824bd18b 100644
--- a/saqc/core/flags.py
+++ b/saqc/core/flags.py
@@ -192,7 +192,6 @@ class Flags:
     """
 
     def __init__(self, raw_data: DictLike | Flags | None = None, copy: bool = False):
-
         self._data: dict[str, History]
 
         if raw_data is None:
@@ -215,7 +214,6 @@ class Flags:
         result = {}
 
         for k, item in data.items():
-
             if not isinstance(k, str):
                 raise ValueError("column names must be string")
             if k in result:
@@ -524,7 +522,6 @@ def initFlagsLike(
         reference = reference.to_frame(name=name)
 
     for k, item in reference.items():
-
         if not isinstance(k, str):
             raise TypeError(
                 f"cannot use '{k}' as a column name, currently only string keys are allowed"
diff --git a/saqc/core/history.py b/saqc/core/history.py
index e9cf472bfc6a186393bc650d18012f70a286de0c..480c9593a56fa0bfcdea15db7375ef8711c2ca6b 100644
--- a/saqc/core/history.py
+++ b/saqc/core/history.py
@@ -45,7 +45,6 @@ class History:
     """
 
     def __init__(self, index: pd.Index | None):
-
         self._hist = pd.DataFrame(index=index)
         self._meta = []
 
@@ -451,7 +450,6 @@ class History:
         return len(self._hist.columns)
 
     def __repr__(self):
-
         if self.empty:
             return str(self._hist).replace("DataFrame", "History")
 
diff --git a/saqc/core/reader.py b/saqc/core/reader.py
index 781190cc85e761c056e6371a67fea01ef67c37d0..b5ff437875f2bb225a05f5a2ae082b7a8ee1db3e 100644
--- a/saqc/core/reader.py
+++ b/saqc/core/reader.py
@@ -44,7 +44,6 @@ def _closeFile(fobj):
 
 
 def readFile(fname) -> pd.DataFrame:
-
     fobj = _openFile(fname)
 
     out = []
@@ -81,7 +80,6 @@ def fromConfig(fname, *args, **func_kwargs):
     config = readFile(fname)
 
     for _, field, expr in config.itertuples():
-
         regex = False
         if isQuoted(field):
             fld = field[1:-1]
diff --git a/saqc/core/register.py b/saqc/core/register.py
index cdbcc48a36fec9b474833799cab6b41a674dc476..7ac2c33ec8bae3386878d1fbd3c6d56b3d59e1d5 100644
--- a/saqc/core/register.py
+++ b/saqc/core/register.py
@@ -136,7 +136,6 @@ def _squeezeFlags(old_flags, new_flags: Flags, columns: pd.Index, meta) -> Flags
     for col in columns.union(
         new_flags.columns.difference(old_flags.columns)
     ):  # account for newly added columns
-
         if col not in out:  # ensure existence
             out.history[col] = History(index=new_flags.history[col].index)
 
@@ -202,7 +201,6 @@ def _unmaskData(
     columns = mask.columns.intersection(columns)
 
     for c in columns:
-
         # ignore
         if data[c].empty or mask[c].empty:
             continue
@@ -315,7 +313,6 @@ def register(
     """
 
     def outer(func: Callable[P, SaQC]) -> Callable[P, SaQC]:
-
         func_signature = inspect.signature(func)
         _checkDecoratorKeywords(
             func_signature, func.__name__, mask, demask, squeeze, handles_target
@@ -330,7 +327,6 @@ def register(
             flag: ExternalFlag | OptionalNone = OptionalNone(),
             **kwargs,
         ) -> "SaQC":
-
             # args -> kwargs
             paramnames = tuple(func_signature.parameters.keys())[
                 2:
diff --git a/saqc/core/translation/basescheme.py b/saqc/core/translation/basescheme.py
index c9b2b38643bf824ee7c81819930a258939a8198d..b7a3d67f4d8df76d996aa9dcee77144fbc77e957 100644
--- a/saqc/core/translation/basescheme.py
+++ b/saqc/core/translation/basescheme.py
@@ -200,7 +200,6 @@ class FloatScheme(TranslationScheme):
     DFILTER_DEFAULT: float = FILTER_ALL
 
     def __call__(self, flag: float | int) -> float:
-
         try:
             return float(flag)
         except (TypeError, ValueError, OverflowError):
diff --git a/saqc/core/translation/dmpscheme.py b/saqc/core/translation/dmpscheme.py
index c8598a8fa254ea1d958647aa5fe6f1fed4cdf04f..17f958484643941cc1641d0cebb1ad69a4e93d67 100644
--- a/saqc/core/translation/dmpscheme.py
+++ b/saqc/core/translation/dmpscheme.py
@@ -139,7 +139,6 @@ class DmpScheme(MappingScheme):
         )
 
         for field in tflags.columns:
-
             df = pd.DataFrame(
                 {
                     "quality_flag": tflags[field],
@@ -150,7 +149,6 @@ class DmpScheme(MappingScheme):
 
             history = flags.history[field]
             for col in history.columns:
-
                 valid = (history.hist[col] != UNFLAGGED) & history.hist[col].notna()
 
                 # extract from meta
@@ -191,7 +189,6 @@ class DmpScheme(MappingScheme):
             )
 
         for field in df.columns.get_level_values(0):
-
             # we might have NaN injected by DictOfSeries -> DataFrame conversions
             field_df = df[field].dropna(how="all", axis="index")
             flags = field_df["quality_flag"]
diff --git a/saqc/core/translation/positionalscheme.py b/saqc/core/translation/positionalscheme.py
index e4dea64b380fb5be7a26b1a8debf137ee808d0cd..23b724293b121a15ba0ba3988ea60e250e1b085a 100644
--- a/saqc/core/translation/positionalscheme.py
+++ b/saqc/core/translation/positionalscheme.py
@@ -59,7 +59,6 @@ class PositionalScheme(MappingScheme):
 
         data = {}
         for field, field_flags in flags.items():
-
             # explode the flags into separate columns and drop the leading `9`
             df = pd.DataFrame(
                 field_flags.astype(str).str.slice(start=1).apply(tuple).tolist(),
diff --git a/saqc/core/visitor.py b/saqc/core/visitor.py
index b5a7b33b902d36e749f2723b2b5c57dc6c4aa481..294a5a812807d91326ce8ea635e1f4b353c84f4c 100644
--- a/saqc/core/visitor.py
+++ b/saqc/core/visitor.py
@@ -81,7 +81,6 @@ class ConfigExpressionParser(ast.NodeVisitor):
 
 
 class ConfigFunctionParser(ast.NodeVisitor):
-
     SUPPORTED_NODES = (
         ast.Call,
         ast.Num,
@@ -107,7 +106,6 @@ class ConfigFunctionParser(ast.NodeVisitor):
         return func, self.kwargs
 
     def visit_Call(self, node):
-
         if not isinstance(node, ast.Call):
             raise TypeError("expected function call")
 
@@ -126,7 +124,6 @@ class ConfigFunctionParser(ast.NodeVisitor):
         return func_name
 
     def visit_keyword(self, node):
-
         key, value = node.arg, node.value
         check_tree = True
 
diff --git a/saqc/funcs/curvefit.py b/saqc/funcs/curvefit.py
index bed9088ae7accd2b0163dbd2d4a3d4d0d6d630a5..0444d7983ddf695d1458757f1b506ea5b756c292 100644
--- a/saqc/funcs/curvefit.py
+++ b/saqc/funcs/curvefit.py
@@ -169,7 +169,6 @@ def _fitPolynomial(
     min_periods: int = 0,
     **kwargs,
 ) -> Tuple[DictOfSeries, Flags]:
-
     # TODO: some (rather large) parts are functional similar to saqc.funcs.rolling.roll
     if data[field].empty:
         return data, flags
diff --git a/saqc/funcs/drift.py b/saqc/funcs/drift.py
index aaf9e410e6975da065a71786a608d77d270cdce0..4560c2058237589ea9ab545f519b8ab437b1adc6 100644
--- a/saqc/funcs/drift.py
+++ b/saqc/funcs/drift.py
@@ -144,7 +144,6 @@ class DriftMixin:
 
         segments = data_to_flag.groupby(pd.Grouper(freq=freq))
         for segment in segments:
-
             if segment[1].shape[0] <= 1:
                 continue
 
@@ -229,7 +228,6 @@ class DriftMixin:
 
         segments = data_to_flag.groupby(pd.Grouper(freq=freq))
         for segment in segments:
-
             if segment[1].shape[0] <= 1:
                 continue
 
diff --git a/saqc/funcs/generic.py b/saqc/funcs/generic.py
index 67237dd5666dafaa1268b46cf0ea4b3f9e105f73..556f6bfb29f6d47ff1e75159bb40849fcbb756ef 100644
--- a/saqc/funcs/generic.py
+++ b/saqc/funcs/generic.py
@@ -63,7 +63,6 @@ def _execGeneric(
     func: GenericFunction,
     dfilter: float = FILTER_ALL,
 ) -> DictOfSeries:
-
     globs = {
         "isflagged": lambda data, label=None: _isflagged(
             _flagSelect(data.name, flags, label), thresh=dfilter
@@ -171,7 +170,6 @@ class GenericMixin:
 
         # update data & flags
         for i, col in enumerate(targets):
-
             datacol = result.iloc[:, i]
             self._data[col] = datacol
 
@@ -291,7 +289,6 @@ class GenericMixin:
 
         # update flags & data
         for i, col in enumerate(targets):
-
             maskcol = result.iloc[:, i]
 
             # make sure the column exists
diff --git a/saqc/funcs/outliers.py b/saqc/funcs/outliers.py
index 1fc519f035502bbfa0bbed2dc623138260c85de3..05ee5fe85f59b1bba4b8f0f5b61471d02d0a26e7 100644
--- a/saqc/funcs/outliers.py
+++ b/saqc/funcs/outliers.py
@@ -149,7 +149,6 @@ class OutliersMixin:
 
         # calculate flags for every partition
         for _, partition in partitions:
-
             if partition.empty | (partition.shape[0] < min_periods):
                 continue
 
@@ -1042,7 +1041,6 @@ class OutliersMixin:
         df = self._data[fields].loc[self._data[fields].index_of("shared")].to_df()
 
         if isinstance(method, str):
-
             if method == "modZscore":
                 MAD_series = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
                 diff_scores = (
@@ -1062,7 +1060,6 @@ class OutliersMixin:
                 raise ValueError(method)
 
         else:
-
             try:
                 stat = getattr(df, method.__name__)(axis=1)
             except AttributeError:
@@ -1248,7 +1245,6 @@ def _evalStrayLabels(
 
     for var in target:
         for index in enumerate(to_flag_frame.index):
-
             index_slice = slice(
                 index[1] - pd.Timedelta(reduction_range),
                 index[1] + pd.Timedelta(reduction_range),
diff --git a/saqc/funcs/pattern.py b/saqc/funcs/pattern.py
index 43e4f5f97d26b27391d4d67a3754b9f8776b9019..c3d02be0a55ad27ed4a3551858a8334de9f34155 100644
--- a/saqc/funcs/pattern.py
+++ b/saqc/funcs/pattern.py
@@ -88,7 +88,6 @@ def calculateDistanceByDTW(
 
 
 class PatternMixin:
-
     # todo should we mask `reference` even if the func fail if reference has NaNs
     @flagging()
     def flagPatternByDTW(
diff --git a/saqc/lib/rolling.py b/saqc/lib/rolling.py
index aa2faee30cf283df0dec4593c1ba18e065139d4b..eb15343e04914c1901bfb7a4b546584af522d2b2 100644
--- a/saqc/lib/rolling.py
+++ b/saqc/lib/rolling.py
@@ -30,12 +30,10 @@ class ForwardMixin:
         center: bool | None = None,
         closed: str | None = None,
     ) -> tuple[np.ndarray, np.ndarray]:
-
         if closed is None:
             closed = "right"
 
         if self.forward:
-
             # this is only set with variable window indexer
             if self.index_array is not None:  # noqa
                 self.index_array = self.index_array[::-1]  # noqa
diff --git a/saqc/lib/tools.py b/saqc/lib/tools.py
index 5f3fdd7a0bc4e990f7c379a0a07103e26861b78c..41a3cd96172b0c25bd06eb706c5e1b7425a140ec 100644
--- a/saqc/lib/tools.py
+++ b/saqc/lib/tools.py
@@ -245,7 +245,6 @@ def estimateFrequency(
     max_freqs=10,
     bins=None,
 ):
-
     """
     Function to estimate the sampling rate of an index.
 
diff --git a/saqc/lib/types.py b/saqc/lib/types.py
index 0e84ff16272d96cd7e206c6287e303a4cf986e62..2c11b3c7768342192a431d0b7ce94027892b6ab2 100644
--- a/saqc/lib/types.py
+++ b/saqc/lib/types.py
@@ -43,7 +43,6 @@ class CurveFitter(Protocol):
 
 
 class GenericFunction(Protocol):
-
     __name__: str
     __globals__: Dict[str, Any]
 
diff --git a/tests/core/test_reader.py b/tests/core/test_reader.py
index 70a00d0164b14daea1014d952850a6702b446815..f6b55f31b240a396e662663acc7533c8e2724bfe 100644
--- a/tests/core/test_reader.py
+++ b/tests/core/test_reader.py
@@ -31,7 +31,6 @@ def getTestedVariables(flags: Flags, test: str):
 
 
 def test_variableRegex(data):
-
     header = f"varname;test"
     function = "flagDummy"
     tests = [
@@ -92,7 +91,6 @@ def test_configReaderLineNumbers():
 
 @pytest.mark.filterwarnings("ignore::RuntimeWarning")
 def test_configFile(data):
-
     # check that the reader accepts different whitespace patterns
 
     config = f"""
@@ -111,7 +109,6 @@ def test_configFile(data):
 
 
 def test_configChecks(data):
-
     var1, _, var3, *_ = data.columns
 
     @flagging()
@@ -134,7 +131,6 @@ def test_configChecks(data):
 
 
 def test_supportedArguments(data):
-
     # test if the following function arguments
     # are supported (i.e. parsing does not fail)
 
diff --git a/tests/core/test_translator.py b/tests/core/test_translator.py
index 9d4a6c706847f6de253d616354e1797271e82f9d..cfacdbd12135c363c6145e458b0694524878da00 100644
--- a/tests/core/test_translator.py
+++ b/tests/core/test_translator.py
@@ -32,7 +32,6 @@ def _genTranslators():
 
 
 def _genFlags(data: Dict[str, Union[Sequence, pd.Series]]) -> Flags:
-
     flags = Flags()
     for k, v in data.items():
         if not isinstance(v, pd.Series):
@@ -76,7 +75,6 @@ def test_backwardTranslationFail():
 
 
 def test_dmpTranslator():
-
     scheme = DmpScheme()
     # generate a bunch of dummy flags
     keys = np.array(tuple(scheme._backward.keys()) * 50)
@@ -144,7 +142,6 @@ def test_positionalTranslator():
 
 
 def test_positionalTranslatorIntegration():
-
     data = initData(3)
     col: str = data.columns[0]
 
@@ -164,7 +161,6 @@ def test_positionalTranslatorIntegration():
 
 
 def test_dmpTranslatorIntegration():
-
     data = initData(1)
     col = data.columns[0]
 
@@ -211,7 +207,6 @@ def test_dmpValidCombinations():
 
 
 def _buildupSaQCObjects():
-
     """
     return two evaluated saqc objects calling the same functions,
     whereas the flags from the evaluation of the first object are
@@ -233,7 +228,6 @@ def _buildupSaQCObjects():
 
 
 def test_translationPreservesFlags():
-
     saqc1, saqc2 = _buildupSaQCObjects()
     flags1 = saqc1._flags
     flags2 = saqc2._flags
@@ -270,7 +264,6 @@ def test_multicallsPreserveHistory():
 
 
 def test_positionalMulitcallsPreserveState():
-
     saqc1, saqc2 = _buildupSaQCObjects()
 
     scheme = PositionalScheme()
diff --git a/tests/fixtures.py b/tests/fixtures.py
index f1c356590bd4f3a87c954ce123ae40eb546f24f2..ea11559296a8b59b4cfbd8d3ae18ab5cd8ccc010 100644
--- a/tests/fixtures.py
+++ b/tests/fixtures.py
@@ -69,6 +69,7 @@ def course_2(char_dict):
     one "anomalous" or "outlierish" value of magnitude "out_val" at position "periods/2"
     number of periods better be even!
     """
+
     # SINGLE_SPIKE
     def fix_funk(
         freq="10min",
@@ -149,7 +150,6 @@ def course_3(char_dict):
         crowd_size=5,
         crowd_spacing=1,
     ):
-
         t_index = pd.date_range(initial_index, freq=freq, periods=periods)
         data = np.linspace(initial_level, final_level, int(np.floor(len(t_index))))
         data = pd.Series(data=data, index=t_index)
diff --git a/tests/funcs/test_flagtools.py b/tests/funcs/test_flagtools.py
index f885c91e1a106d44ac69f2fe22784d4fcb6ff47d..6286a2145425ab1856f490704bc9ab0a6b2a1166 100644
--- a/tests/funcs/test_flagtools.py
+++ b/tests/funcs/test_flagtools.py
@@ -110,7 +110,6 @@ def test_propagateFlagsIrregularIndex(got, expected, kwargs):
     ],
 )
 def test_andGroup(left, right, expected):
-
     data = pd.DataFrame({"data": [1, 2, 3, 4]})
 
     base = SaQC(data=data)
@@ -130,7 +129,6 @@ def test_andGroup(left, right, expected):
     ],
 )
 def test_orGroup(left, right, expected):
-
     data = pd.DataFrame({"data": [1, 2, 3, 4]})
 
     base = SaQC(data=data)
@@ -150,7 +148,6 @@ def test_orGroup(left, right, expected):
     ],
 )
 def test__groupOperation(left, right, expected):
-
     data = pd.DataFrame(
         {"x": [0, 1, 2, 3], "y": [0, 11, 22, 33], "z": [0, 111, 222, 333]}
     )
diff --git a/tests/funcs/test_generic_api_functions.py b/tests/funcs/test_generic_api_functions.py
index 6f3c77691bec10bb084b3149cbb422a43d17fe91..dd1c7f4aea68e8cea28cd8fdd6172707bea1ab21 100644
--- a/tests/funcs/test_generic_api_functions.py
+++ b/tests/funcs/test_generic_api_functions.py
@@ -74,7 +74,6 @@ def test_writeTargetFlagGeneric(data, targets, func):
     ],
 )
 def test_overwriteFieldFlagGeneric(data, fields, func):
-
     flag = 12
 
     expected_meta = {
diff --git a/tests/funcs/test_generic_config_functions.py b/tests/funcs/test_generic_config_functions.py
index 1106cb21718f19e131d8e72b7c6e653d3cb371d1..3f61e775647c705512cb21ed572bfcf2c9237a9c 100644
--- a/tests/funcs/test_generic_config_functions.py
+++ b/tests/funcs/test_generic_config_functions.py
@@ -95,7 +95,6 @@ def test_comparisonOperators(data):
 
 
 def test_arithmeticOperators(data):
-
     var1, *_ = data.columns
 
     data = data[var1]
@@ -150,7 +149,6 @@ def test_bitOps(data):
 
 
 def test_variableAssignments(data):
-
     config = f"""
     varname ; test
     dummy1  ; processGeneric(field=["var1", "var2"], func=x + y)
@@ -218,7 +216,6 @@ def test_flagTargetExistingFail(data_diff):
 
 @pytest.mark.slow
 def test_callableArgumentsUnary(data):
-
     window = 5
 
     @register(mask=["field"], demask=["field"], squeeze=["field"])
@@ -239,7 +236,7 @@ def test_callableArgumentsUnary(data):
         ("std(exp(x))", lambda x: np.std(np.exp(x))),
     ]
 
-    for (name, func) in tests:
+    for name, func in tests:
         fobj = writeIO(config.format(name))
         result_config = fromConfig(fobj, data).data
         result_api = SaQC(data).testFuncUnary(var, func=func).data
@@ -266,7 +263,7 @@ def test_callableArgumentsBinary(data):
         ("y - (x * 2)", lambda y, x: y - (x * 2)),
     ]
 
-    for (name, func) in tests:
+    for name, func in tests:
         fobj = writeIO(config.format(name))
         result_config = fromConfig(fobj, data).data
         result_api = SaQC(data).testFuncBinary(var1, func=func).data
@@ -276,7 +273,6 @@ def test_callableArgumentsBinary(data):
 
 
 def test_isflagged(data):
-
     var1, var2, *_ = data.columns
     flags = initFlagsLike(data)
     flags[data[var1].index[::2], var1] = BAD
diff --git a/tests/fuzzy/lib.py b/tests/fuzzy/lib.py
index b8f4435e32341f43efde4cc81fc910a9e4d341b4..6210389fb18e13333da604ea6f82c207af2ac672 100644
--- a/tests/fuzzy/lib.py
+++ b/tests/fuzzy/lib.py
@@ -134,7 +134,6 @@ def functionCalls(draw, module: str = None):
 
 @contextmanager
 def applyStrategies(strategies: dict):
-
     for dtype, strategy in strategies.items():
         register_type_strategy(dtype, strategy)