diff --git a/environment.yml b/environment.yml
index 3206e33..eda7bdb 100644
--- a/environment.yml
+++ b/environment.yml
@@ -18,6 +18,9 @@ dependencies:
   - pyresample
   - matplotlib>=1.2.0
   - pip
+  # optional, for documentation and testing:
+  # - nb_conda
+  # ----------------------------------------
   - pip:
     - pynetcf>=0.4.0
     - pygeogrids
diff --git a/tests/test_validation_framework/__init__.py b/tests/test_validation_framework/__init__.py
index e69de29..40a96af 100644
--- a/tests/test_validation_framework/__init__.py
+++ b/tests/test_validation_framework/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/tests/test_validation_framework/test_metric_calculators.py b/tests/test_validation_framework/test_metric_calculators.py
index f2bc349..3d7164b 100644
--- a/tests/test_validation_framework/test_metric_calculators.py
+++ b/tests/test_validation_framework/test_metric_calculators.py
@@ -798,68 +798,6 @@ def make_testdata_random():
     return make_datasets(df), expected
 
 
-@pytest.mark.parametrize(
-    "testdata_generator", [make_testdata_known_results, make_testdata_random]
-)
-@pytest.mark.parametrize("metrics_adapter", [MonthsMetricsAdapter])  # todo
-def test_PairwiseIntercomparisonMetrics_adapted(testdata_generator,
-                                                metrics_adapter):
-    datasets, expected = testdata_generator()
-    metrics_calculator = PairwiseIntercomparisonMetrics(
-        calc_spearman=True, analytical_cis=False
-    )
-    metrics_calculator = metrics_adapter(metrics_calculator)
-
-    val = Validation(
-        datasets,
-        "reference_name",
-        scaling="mean_std",  # doesn't work with the constant test data
-        temporal_matcher=make_combined_temporal_matcher(pd.Timedelta(6, "h")),
-        metrics_calculators={(4, 2): (metrics_calculator.calc_metrics)},
-    )
-    results_pw = val.calc(
-        [0], [1], [1], rename_cols=False, only_with_reference=True
-    )
-
-    expected_metrics = [
-        "R",
-        "p_R",
-        "BIAS",
-        "RMSD",
-        "mse",
-        "RSS",
-        "mse_corr",
-        "mse_bias",
-        "urmsd",
-        "mse_var",
-        "n_obs",
-        "gpi",
-        "lat",
-        "lon",
-        "rho",
-        "p_rho",
-        "tau",
-        "p_tau",
-    ]
-
-    seasons = ["ALL", "DJF", "MAM", "JJA", "SON"]
-
-    metrics = []
-    for seas in seasons:
-        metrics += list(map(lambda x: (seas, x), expected_metrics))
-
-    for key in results_pw:
-        assert isinstance(key, tuple)
-        assert len(key) == 2
-        assert all(map(lambda x: isinstance(x, tuple), key))
-        assert isinstance(results_pw[key], dict)
-        res_metrics = list(results_pw[key].keys())
-        assert all([v in res_metrics for v in ["lon", "lat", "gpi"]])
-        for m in metrics:
-            if m in expected[key]:
-                assert_equal(results_pw[key][m], expected[key][m])
-
-
 @pytest.mark.parametrize(
     "testdata_generator", [make_testdata_known_results, make_testdata_random]
 )
@@ -1341,14 +1279,3 @@ def test_TripleCollocationMetrics_failure():
     # results = val.calc(1, 1, 1)
     # assert 0
 
-
-if __name__ == '__main__':
-    test_PairwiseIntercomparisonMetrics(testdata_known_results)
-
-    #
-    # from pytesmo.df_metrics import mse_var
-    # data, results = testdata_known_results()
-    # ref = data['reference_name']['class'].read(0)
-    # can = data['plus2_name']['class'].read(0)
-    # mv = mse_var(pd.DataFrame(data={'one': ref.iloc[:, 0].values,
-    #                                 'two': can.iloc[:, 0].values}))