Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
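For readers reproducing this view locally: the same semantics correspond, roughly, to a three-dot diff against the merge base. The branch names in the sketch below are placeholders, not refs from this project.

import subprocess

# Placeholder refs: a three-dot diff shows the changes from
# merge-base(target, source) up to source, i.e. what merging
# source into target would introduce.
subprocess.run(["git", "diff", "target-branch...source-branch"], check=True)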

@@ -169,7 +169,6 @@ def _fitPolynomial(
min_periods: int = 0,
**kwargs,
) -> Tuple[DictOfSeries, Flags]:
# TODO: some (rather large) parts are functionally similar to saqc.funcs.rolling.roll
if data[field].empty:
return data, flags
......
@@ -144,7 +144,6 @@ class DriftMixin:
segments = data_to_flag.groupby(pd.Grouper(freq=freq))
for segment in segments:
if segment[1].shape[0] <= 1:
continue
@@ -229,7 +228,6 @@ class DriftMixin:
segments = data_to_flag.groupby(pd.Grouper(freq=freq))
for segment in segments:
if segment[1].shape[0] <= 1:
continue
......
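The two DriftMixin hunks above walk over calendar segments produced by pd.Grouper and skip segments with at most one sample. A self-contained sketch of that pattern on toy data (illustration only, not SaQC code):

import pandas as pd

s = pd.Series(range(10), index=pd.date_range("2021-01-01", periods=10, freq="6h"))
for label, segment in s.groupby(pd.Grouper(freq="1D")):
    if segment.shape[0] <= 1:
        continue  # a single sample carries no information about drift
    print(label, segment.mean())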
@@ -63,7 +63,6 @@ def _execGeneric(
func: GenericFunction,
dfilter: float = FILTER_ALL,
) -> DictOfSeries:
globs = {
"isflagged": lambda data, label=None: _isflagged(
_flagSelect(data.name, flags, label), thresh=dfilter
@@ -171,7 +170,6 @@ class GenericMixin:
# update data & flags
for i, col in enumerate(targets):
datacol = result.iloc[:, i]
self._data[col] = datacol
@@ -291,7 +289,6 @@ class GenericMixin:
# update flags & data
for i, col in enumerate(targets):
maskcol = result.iloc[:, i]
# make sure the column exists
......
@@ -149,7 +149,6 @@ class OutliersMixin:
# calculate flags for every partition
for _, partition in partitions:
if partition.empty | (partition.shape[0] < min_periods):
continue
@@ -1042,7 +1041,6 @@ class OutliersMixin:
df = self._data[fields].loc[self._data[fields].index_of("shared")].to_df()
if isinstance(method, str):
if method == "modZscore":
MAD_series = df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
diff_scores = (
@@ -1062,7 +1060,6 @@ class OutliersMixin:
raise ValueError(method)
else:
try:
stat = getattr(df, method.__name__)(axis=1)
except AttributeError:
@@ -1248,7 +1245,6 @@ def _evalStrayLabels(
for var in target:
for index in enumerate(to_flag_frame.index):
index_slice = slice(
index[1] - pd.Timedelta(reduction_range),
index[1] + pd.Timedelta(reduction_range),
......
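For context on the modZscore branch above: the cross-variable MAD it computes is the core of the textbook modified z-score (Iglewicz & Hoaglin). The sketch below shows that textbook form; the 0.6745 scaling is the textbook constant and an assumption here, not necessarily what the truncated diff_scores expression does.

import pandas as pd

def modified_zscore(df: pd.DataFrame) -> pd.DataFrame:
    # Score each value against the row-wise (cross-variable) median and MAD.
    med = df.median(axis=1)
    mad = df.sub(med, axis=0).abs().median(axis=1)
    return 0.6745 * df.sub(med, axis=0).div(mad, axis=0)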
@@ -88,7 +88,6 @@ def calculateDistanceByDTW(
class PatternMixin:
# TODO: should we mask `reference` even if the func fails when `reference` has NaNs?
@flagging()
def flagPatternByDTW(
......
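flagPatternByDTW above builds on a dynamic-time-warping distance. As a reference point only, here is the classic O(n*m) DTW recursion with an absolute-difference cost; the library's own calculateDistanceByDTW is not reproduced here.

import numpy as np

def dtw_distance(a, b):
    # acc[i, j] holds the cheapest warp path aligning a[:i] with b[:j].
    acc = np.full((len(a) + 1, len(b) + 1), np.inf)
    acc[0, 0] = 0.0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            cost = abs(a[i - 1] - b[j - 1])
            acc[i, j] = cost + min(acc[i - 1, j], acc[i, j - 1], acc[i - 1, j - 1])
    return acc[-1, -1]

print(dtw_distance([0, 1, 2, 3], [0, 0, 1, 2, 2, 3]))  # 0.0: identical shape, warped in time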
@@ -30,12 +30,10 @@ class ForwardMixin:
center: bool | None = None,
closed: str | None = None,
) -> tuple[np.ndarray, np.ndarray]:
if closed is None:
closed = "right"
if self.forward:
# this is only set with variable window indexer
if self.index_array is not None: # noqa
self.index_array = self.index_array[::-1] # noqa
......
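The ForwardMixin hunk above turns a variable-size window indexer into a forward-facing one by reversing its index_array. For fixed-size windows, pandas already ships a forward indexer; a short usage sketch of that standard API, separate from the variable-window code above:

import pandas as pd
from pandas.api.indexers import FixedForwardWindowIndexer

s = pd.Series([1, 2, 3, 4, 5, 6])
# Each window starts at the current row and covers the following rows (3 in total here).
indexer = FixedForwardWindowIndexer(window_size=3)
print(s.rolling(indexer, min_periods=1).sum())  # 6, 9, 12, 15, 11, 6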
@@ -245,7 +245,6 @@ def estimateFrequency(
max_freqs=10,
bins=None,
):
"""
Function to estimate the sampling rate of an index.
......
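estimateFrequency above accepts max_freqs and bins, which hints at a spectral estimate; its body is not part of this diff. For intuition only, a much cruder way to estimate a sampling rate is the median spacing of the index (an illustration, not SaQC's algorithm):

import pandas as pd

def naive_sampling_rate(index: pd.DatetimeIndex) -> pd.Timedelta:
    # Median gap between consecutive timestamps; tolerant of a few missing points.
    return index.to_series().diff().dropna().median()

idx = pd.date_range("2021-01-01", periods=100, freq="10min").delete([3, 57])
print(naive_sampling_rate(idx))  # Timedelta('0 days 00:10:00')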
@@ -43,7 +43,6 @@ class CurveFitter(Protocol):
class GenericFunction(Protocol):
__name__: str
__globals__: Dict[str, Any]
......
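GenericFunction above is a typing.Protocol that describes user-supplied functions by the attributes they expose. A minimal, self-contained illustration of that structural-typing idea (all names below are made up for the example):

from typing import Any, Protocol

class NamedCallable(Protocol):
    __name__: str
    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...

def describe(func: NamedCallable) -> str:
    # Plain functions match structurally: they are callable and carry __name__.
    return f"using {func.__name__}"

def scaled(x: float, factor: float = 2.0) -> float:
    return x * factor

print(describe(scaled))  # using scaled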
@@ -31,7 +31,6 @@ def getTestedVariables(flags: Flags, test: str):
def test_variableRegex(data):
header = f"varname;test"
function = "flagDummy"
tests = [
@@ -92,7 +91,6 @@ def test_configReaderLineNumbers():
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_configFile(data):
# check that the reader accepts different whitespace patterns
config = f"""
@@ -111,7 +109,6 @@ def test_configFile(data):
def test_configChecks(data):
var1, _, var3, *_ = data.columns
@flagging()
@@ -134,7 +131,6 @@ def test_configChecks(data):
def test_supportedArguments(data):
# test if the following function arguments
# are supported (i.e. parsing does not fail)
......
@@ -32,7 +32,6 @@ def _genTranslators():
def _genFlags(data: Dict[str, Union[Sequence, pd.Series]]) -> Flags:
flags = Flags()
for k, v in data.items():
if not isinstance(v, pd.Series):
@@ -76,7 +75,6 @@ def test_backwardTranslationFail():
def test_dmpTranslator():
scheme = DmpScheme()
# generate a bunch of dummy flags
keys = np.array(tuple(scheme._backward.keys()) * 50)
@@ -144,7 +142,6 @@ def test_positionalTranslator():
def test_positionalTranslatorIntegration():
data = initData(3)
col: str = data.columns[0]
@@ -164,7 +161,6 @@ def test_positionalTranslatorIntegration():
def test_dmpTranslatorIntegration():
data = initData(1)
col = data.columns[0]
@@ -211,7 +207,6 @@ def test_dmpValidCombinations():
def _buildupSaQCObjects():
"""
return two evaluated saqc objects calling the same functions,
whereas the flags from the evaluation of the first object are
@@ -233,7 +228,6 @@ def _buildupSaQCObjects():
def test_translationPreservesFlags():
saqc1, saqc2 = _buildupSaQCObjects()
flags1 = saqc1._flags
flags2 = saqc2._flags
@@ -270,7 +264,6 @@ def test_multicallsPreserveHistory():
def test_positionalMulitcallsPreserveState():
saqc1, saqc2 = _buildupSaQCObjects()
scheme = PositionalScheme()
......
@@ -69,6 +69,7 @@ def course_2(char_dict):
one "anomalous" or "outlierish" value of magnitude "out_val" at position "periods/2"
the number of periods should be even!
"""
# SINGLE_SPIKE
def fix_funk(
freq="10min",
@@ -149,7 +150,6 @@ def course_3(char_dict):
crowd_size=5,
crowd_spacing=1,
):
t_index = pd.date_range(initial_index, freq=freq, periods=periods)
data = np.linspace(initial_level, final_level, int(np.floor(len(t_index))))
data = pd.Series(data=data, index=t_index)
......
@@ -110,7 +110,6 @@ def test_propagateFlagsIrregularIndex(got, expected, kwargs):
],
)
def test_andGroup(left, right, expected):
data = pd.DataFrame({"data": [1, 2, 3, 4]})
base = SaQC(data=data)
@@ -130,7 +129,6 @@ def test_andGroup(left, right, expected):
],
)
def test_orGroup(left, right, expected):
data = pd.DataFrame({"data": [1, 2, 3, 4]})
base = SaQC(data=data)
@@ -150,7 +148,6 @@ def test_orGroup(left, right, expected):
],
)
def test__groupOperation(left, right, expected):
data = pd.DataFrame(
{"x": [0, 1, 2, 3], "y": [0, 11, 22, 33], "z": [0, 111, 222, 333]}
)
......
@@ -74,7 +74,6 @@ def test_writeTargetFlagGeneric(data, targets, func):
],
)
def test_overwriteFieldFlagGeneric(data, fields, func):
flag = 12
expected_meta = {
......
@@ -95,7 +95,6 @@ def test_comparisonOperators(data):
def test_arithmeticOperators(data):
var1, *_ = data.columns
data = data[var1]
@@ -150,7 +149,6 @@ def test_bitOps(data):
def test_variableAssignments(data):
config = f"""
varname ; test
dummy1 ; processGeneric(field=["var1", "var2"], func=x + y)
@@ -218,7 +216,6 @@ def test_flagTargetExistingFail(data_diff):
@pytest.mark.slow
def test_callableArgumentsUnary(data):
window = 5
@register(mask=["field"], demask=["field"], squeeze=["field"])
@@ -239,7 +236,7 @@ def test_callableArgumentsUnary(data):
("std(exp(x))", lambda x: np.std(np.exp(x))),
]
-for (name, func) in tests:
+for name, func in tests:
fobj = writeIO(config.format(name))
result_config = fromConfig(fobj, data).data
result_api = SaQC(data).testFuncUnary(var, func=func).data
@@ -266,7 +263,7 @@ def test_callableArgumentsBinary(data):
("y - (x * 2)", lambda y, x: y - (x * 2)),
]
-for (name, func) in tests:
+for name, func in tests:
fobj = writeIO(config.format(name))
result_config = fromConfig(fobj, data).data
result_api = SaQC(data).testFuncBinary(var1, func=func).data
@@ -276,7 +273,6 @@ def test_isflagged(data):
def test_isflagged(data):
var1, var2, *_ = data.columns
flags = initFlagsLike(data)
flags[data[var1].index[::2], var1] = BAD
......
@@ -134,7 +134,6 @@ def functionCalls(draw, module: str = None):
@contextmanager
def applyStrategies(strategies: dict):
for dtype, strategy in strategies.items():
register_type_strategy(dtype, strategy)
......
@@ -2,9 +2,9 @@
#
# SPDX-License-Identifier: GPL-3.0-or-later
-beautifulsoup4==4.11.1
-hypothesis==6.61.0
+beautifulsoup4==4.11.2
+hypothesis==6.65.2
Markdown==3.3.7
-pytest==7.1.3
+pytest==7.2.1
pytest-lazy-fixture==0.6.3
-requests==2.27.1
+requests==2.28.2