-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_ipsos_raw_input.py
More file actions
172 lines (139 loc) · 5.5 KB
/
test_ipsos_raw_input.py
File metadata and controls
172 lines (139 loc) · 5.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
"""Data tests for SDD raw input from IPSOS
"""
import json
from pathlib import Path
from typing import Dict, List
import numpy as np
import pandas as pd
import pytest
from sdd_code.utilities.data_import import import_sav_values
from sdd_code.utilities.metadata import convert_sdd_dtypes
from sdd_code.utilities.parameters import DROP_COLUMNS, PUPIL_DATA_PATH, META_DIR
# Set any test constants
NUM_ROWS = 13664
# sdd_file is set via command line arguments, defaults to None
# session scoping to cache dataframes, enable fast testing
@pytest.fixture(scope="session")
def sdd_all(sdd_file: str) -> pd.DataFrame:
    """Load the main SDD pupil table once per session for all tests.

    Uses the ``sdd_file`` path passed on the command line when given,
    otherwise falls back to the default ``PUPIL_DATA_PATH``.
    """
    source = sdd_file if sdd_file else PUPIL_DATA_PATH
    return convert_sdd_dtypes(import_sav_values(source, DROP_COLUMNS))
# sdd_metadata is set via command line
@pytest.fixture(scope="session")
def sdd_dtypes(sdd_metadata: str) -> Dict[str, str]:
    """Load the expected column -> dtype mapping from metadata JSON.

    ``sdd_metadata`` is an optional command-line override of the
    metadata directory; defaults to ``META_DIR``.
    """
    meta_dir = Path(sdd_metadata) if sdd_metadata else META_DIR
    with open(meta_dir / "sdd_dtype_map.json", "r") as f:
        return json.load(f)
@pytest.fixture(scope="session")
def sdd_allowed_values(sdd_metadata: str) -> Dict[str, List[float]]:
    """Load the column -> allowed-values-list mapping from metadata JSON.

    ``sdd_metadata`` is an optional command-line override of the
    metadata directory; defaults to ``META_DIR``.
    """
    meta_dir = Path(sdd_metadata) if sdd_metadata else META_DIR
    with open(meta_dir / "sdd_allowed_values_map.json", "r") as f:
        return json.load(f)
@pytest.fixture(scope="session")
def sdd_columns(sdd_metadata: str) -> Dict[str, List[str]]:
    """Load the column lists (grouped by type) used across the tests.

    The grouping comes from dtypes recorded in the metadata; the exact
    structure may evolve alongside the metadata format.

    ``sdd_metadata`` is an optional command-line override of the
    metadata directory; defaults to ``META_DIR``.
    """
    meta_dir = Path(sdd_metadata) if sdd_metadata else META_DIR
    with open(meta_dir / "sdd_column_types.json", "r") as f:
        return json.load(f)
def test_input_attributes(sdd_all: pd.DataFrame, sdd_dtypes: Dict[str, str]):
    """Check the table's basic shape: column names, row count, dtypes."""
    # Metadata stores dtypes as strings; convert to numpy.dtype so the
    # comparison against DataFrame.dtypes is apples-to-apples.
    expected_dtypes = {name: np.dtype(spec) for name, spec in sdd_dtypes.items()}
    assert list(sdd_dtypes.keys()) == list(sdd_all.columns), "Expected columns not found"
    # NUM_ROWS is the known pupil count for this survey wave.
    assert sdd_all.shape[0] == NUM_ROWS, "Not expected number of rows"
    assert sdd_all.dtypes.to_dict() == expected_dtypes, "Not expected data types"
# Currently cant pass fixtures as the list for parameterise
# TODO: Investigate pytest-lazy plugins plus similar
@pytest.mark.slow
# Parametrize loops over all columns
@pytest.mark.parametrize("column_index", list(range(0, 700)))
def test_discrete_values(
    sdd_all: pd.DataFrame,
    sdd_columns: Dict[str, List[str]],
    sdd_allowed_values: Dict[str, List[float]],
    column_index: int,
):
    """Do all discrete values fall within expected sets"""
    discrete_cols = sdd_columns["discrete"]
    # parametrize cannot consume a fixture, so indices are over-generated;
    # any index beyond the real column count is a silent no-op.
    if column_index >= len(discrete_cols):
        return
    column = discrete_cols[column_index]
    # Discrete values are integer response codes that must each appear in
    # the metadata's allowed-value list for this column.
    is_allowed = sdd_all[column].isin(sdd_allowed_values[column])
    unexpected = sdd_all.loc[~is_allowed, column]
    assert is_allowed.all(), f"Unexpected value in {column}: {dict(unexpected)}"
@pytest.mark.slow
@pytest.mark.parametrize("column_index", list(range(0, 700)))
def test_continuous_values(
    sdd_all: pd.DataFrame,
    sdd_columns: Dict[str, List[str]],
    sdd_allowed_values: Dict[str, List[float]],
    column_index: int,
):
    """Do all continuous col values fall within expected range"""
    continuous_cols = sdd_columns["continuous"]
    # parametrize cannot consume a fixture, so indices are over-generated;
    # any index beyond the real column count is a silent no-op.
    if column_index >= len(continuous_cols):
        return
    column = continuous_cols[column_index]
    # Derive the [min, max] range from the column's non-negative allowed
    # values; a single value acts as an upper bound only, and no values
    # at all falls back to a broad default of [0, 500].
    non_negative = [v for v in sdd_allowed_values[column] if v >= 0]
    if non_negative:
        max_val = max(non_negative)
        min_val = min(non_negative)
        if min_val == max_val:
            min_val = 0
    else:
        min_val, max_val = 0, 500
    # Continuous values are float responses (i.e. units drank) that must
    # sit in range, or be a negative missing/unknown/other code.
    in_range = sdd_all[column].between(min_val, max_val)
    is_missing_code = sdd_all[column].isin([-9, -8, -1])
    acceptable = in_range | is_missing_code
    unexpected = sdd_all.loc[~acceptable, column]
    assert acceptable.all(), f"Unexpected values in {column}, {dict(unexpected)}"
def test_pupilwt(sdd_all: pd.DataFrame):
    """Do all weights lie in expected ranges and sum to appropriate values"""
    weights = sdd_all["pupilwt"]
    # Per-pupil weights should be modest multipliers, never zero/negative.
    in_range = weights.between(0.01, 10).all()
    assert in_range, "Pupil weighting variable is not in range"
    # Weights are normalised, so their total should equal the pupil count.
    total_matches = weights.sum() == pytest.approx(NUM_ROWS)
    assert total_matches, "Pupil weighting does not sum to pupil count"
def test_null_vals(sdd_all: pd.DataFrame):
assert any(sdd_all.isnull()), "Null values found in dataframe"
def test_unique_keys(sdd_all: pd.DataFrame):
assert sdd_all["archsn"].is_unique, "ID column is not unique"