@@ -71,12 +71,17 @@ def test_rvdss_repiratory_detections(self, mock_sql):
 TEST_DIR = Path(__file__).parent.parent.parent.parent
 detection_data = pd.read_csv(str(TEST_DIR) + "/testdata/acquisition/rvdss/RVD_CurrentWeekTable_Formatted.csv")
 detection_data['time_type'] = "week"
-detection_data = detection_data.replace({np.nan: None})
-#detection_data=detection_data.replace({float('nan'): None})
 
-pdb.set_trace()
+# get the index of the subset of data we want to use
+subset_index = detection_data[(detection_data['geo_value'].isin(['nl', 'nb'])) &
+                              (detection_data['time_value'].isin([20240831, 20240907]))].index
+
+
+# change the issue so the data contains more than one issue
+detection_data.loc[subset_index, "issue"] = 20250227
+
 # take a small subset just for testing insertion
-detection_subset = detection_data[(detection_data['geo_value'].isin(['nl', 'nb'])) & (detection_data['time_value'].isin([20240831, 20240907]))]
+detection_subset = detection_data.loc[subset_index]
 
 # get the expected response when calling the API
 # the dataframe needs to add the missing columns and replace nan with None
@@ -90,7 +95,7 @@ def test_rvdss_repiratory_detections(self, mock_sql):
9095 "message" : "success" ,
9196 }
9297
93- # get the rest of the data not in the subset to test more calling options
+# get a second, disjoint subset of the data to test more calling options
 detection_subset2 = detection_data[(detection_data['geo_value'].isin(['nu', 'nt'])) & (detection_data['time_value'].isin([20240831, 20240907]))]
 
 df2 = detection_subset2.reindex(rvdss_cols, axis=1)
@@ -102,12 +107,20 @@ def test_rvdss_repiratory_detections(self, mock_sql):
102107 "message" : "success" ,
103108 }
104109
105- # after two aquisitions
106- df_full = pd .concat ([detection_subset , detection_subset2 ], ignore_index = True ).reindex (rvdss_cols ,axis = 1 )
107- df_full = df_full .replace ({np .nan : None }).sort_values (by = ["epiweek" ,"geo_value" ])
108- df_full = df_full .to_dict (orient = "records" )
+# get another subset of the data for a single geo_value with multiple issues
+subset_index2 = detection_data[(detection_data['geo_value'].isin(['ouest du québec'])) &
+                               (detection_data['time_value'].isin([20240831, 20240907]))].index
 
-expected_response_full = {"epidata": df_full,
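+# give both rows the same epiweek and time_value so they differ only by issue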
+detection_data.loc[subset_index2, "issue"] = [20250220, 20250227]
+detection_data.loc[subset_index2, "epiweek"] = [202435, 202435]
+detection_data.loc[subset_index2, "time_value"] = [20240831, 20240831]
+
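+# build the expected API response for this multi-issue subset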
+detection_subset3 = detection_data.loc[subset_index2]
+df3 = detection_subset3.reindex(rvdss_cols, axis=1)
+df3 = df3.replace({np.nan: None}).sort_values(by=["epiweek", "geo_value"])
+df3 = df3.to_dict(orient="records")
+
+expected_response3 = {"epidata": df3,
 "result": 1,
 "message": "success",
 }
@@ -153,7 +166,7 @@ def test_rvdss_repiratory_detections(self, mock_sql):
 with self.assertRaises(mysql.connector.errors.IntegrityError):
     update(detection_subset, self.logger)
 
-# TODO: test with exact column order
+# acquire data with columns in the exact rvdss_cols order
 with self.subTest(name='exact column order'):
     rvdss_cols_subset = [col for col in detection_subset2.columns if col in rvdss_cols]
     ordered_cols = [col for col in rvdss_cols if col in rvdss_cols_subset]
@@ -163,9 +176,7 @@ def test_rvdss_repiratory_detections(self, mock_sql):
     connection_mock.commit = self.cnx.commit
     mock_sql.return_value = connection_mock
 
-    pdb.set_trace()
     update(ordered_df, self.logger)
-    pdb.set_trace()
 
     response = Epidata.rvdss(geo_type='province',
                              time_values=[202435, 202436],
@@ -174,14 +185,37 @@ def test_rvdss_repiratory_detections(self, mock_sql):
     self.assertEqual(response, expected_response2)
 
 
-# TODO: check requesting by issue
-# with self.subTest(name='issue request'):
-#     response = Epidata.rvdss(geo_type='province',
-#                              time_values=[202435, 202436],
-#                              geo_value=['nl', 'nb'],
-#                              issues=[])
+# request by issue
+with self.subTest(name='issue request'):
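+    # a single issue value should return only the rows acquired with issue 20250227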
+    response = Epidata.rvdss(geo_type='province',
+                             time_values=[202435, 202436],
+                             geo_value=['nl', 'nb'],
+                             issues=20250227)
+
+    self.assertEqual(response, expected_response)
 
 
-# # TODO: check requesting individual lists
-# with self.subTest(name='duplicate aquisition'):
+# check requesting lists vs single values
+with self.subTest(name='duplicate acquisition'):
+    # exercise "*" and single geo_value, and single vs list time_values and issues
+    connection_mock.cursor.return_value = self.cnx.cursor()
+    connection_mock.commit = self.cnx.commit
+    mock_sql.return_value = connection_mock
+
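+    # acquire the third subset, which has two issues for the same epiweek and geo_value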
+    update(detection_subset3, self.logger)
+
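+    # wildcard geo_value with a list of time_values and a single issue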
+    response = Epidata.rvdss(geo_type='province',
+                             time_values=[202435, 202436],
+                             geo_value="*",
+                             issues=20250227)
+
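+    # single time_value and geo_value with a list of issues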
+    response2 = Epidata.rvdss(geo_type='lab',
+                              time_values=202435,
+                              geo_value='ouest du québec',
+                              issues=[20250220, 20250227])
+
+    self.assertEqual(response, expected_response)
+    self.assertEqual(response2, expected_response3)
+
+
 