diff --git a/test/core/test_core.py b/test/core/test_core.py
index c8d1d2d06e41eea8b034f9cdfcb674c6df3416d4..a7d34c8b622e31ae401063306fdf012cdbf1f2ca 100644
--- a/test/core/test_core.py
+++ b/test/core/test_core.py
@@ -114,12 +114,11 @@ def test_missingConfig(data, flagger, flags):
 
 
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
-def test_missingVariable(flagger):
+def test_missingVariable(data, flagger):
     """
     Test if variables available in the config but not dataset
     are handled correctly, i.e. are ignored
     """
-    data = initData(1)
     var, *_ = data.columns
 
     metadict = [
@@ -131,6 +130,30 @@ def test_missingVariable(flagger):
     runner(metafobj, flagger, data)
 
 
+@pytest.mark.parametrize("flagger", TESTFLAGGER)
+def test_errorHandling(data, flagger):
+
+    @register("raisingFunc")
+    def _raisingFunc(data, field, flagger, **kwargs):
+        raise TypeError
+
+    var1, *_ = data.columns
+
+    metadict = [
+        {F.VARNAME: var1, F.TESTS: "raisingFunc()"},
+    ]
+
+    tests = [
+        "ignore",
+        "warn"
+    ]
+
+    for policy in tests:
+        # NOTE: should not fail, that's all we are testing here
+        metafobj, _ = initMetaDict(metadict, data)
+        runner(metafobj, flagger, data, error_policy=policy)
+
+
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_duplicatedVariable(flagger):
     data = initData(1)
@@ -152,7 +175,6 @@ def test_duplicatedVariable(flagger):
     assert (pflags.columns == [var1]).all()
 
 
-
 @pytest.mark.parametrize("flagger", TESTFLAGGER)
 def test_assignVariable(flagger):
     """