@@ -46,11 +46,11 @@ let's decouple the data loading into a separate function.
 > > The new function `load_catchment_data()` that reads all the data into the format needed
 > > for the analysis should look something like:
 > > ```python
-> > def load_inflammation_data(dir_path):
+> > def load_catchment_data(dir_path):
 > >     data_file_paths = glob.glob(os.path.join(dir_path, 'rain_data_2015*.csv'))
 > >     if len(data_file_paths) == 0:
 > >         raise ValueError('No CSV files found in the data directory')
-> >     data = map(models.load_csv, data_file_paths)
+> >     data = map(models.read_variable_from_csv, data_file_paths)
 > >     return list(data)
 > > ```
 > > This function can now be used in the analysis as follows:
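The hunk ends before showing that usage. As a rough sketch only (not the lesson's actual code), the decoupled loader might be called from the analysis function like this; `analyse_data` is the name used in the later hunks, and the statistics step is an assumption:

```python
# Sketch only: how analyse_data() might call the decoupled loader.
# load_catchment_data() is the function from the hunk above; the
# downstream statistics are an assumption, not shown in the diff.
def analyse_data(data_dir):
    """Load rainfall data for every site in data_dir and analyse it."""
    data = load_catchment_data(data_dir)
    # `data` is a list with one entry per CSV file; per-day statistics
    # (for example a standard deviation across sites) would be computed here.
    ...
```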
@@ -219,7 +219,7 @@ In addition, implementation of the method `get_area()` is hidden too (abstractio
 > > ...
 > > def test_compute_data():
 > >     from catchment.compute_data import analyse_data, CSVDataSource
-> >     path = Path.cwd() / "../data"
+> >     path = Path.cwd() / "data"
 > >     data_source = CSVDataSource(path)
 > >     result = analyse_data(data_source)
 > >     expected_output = [[0., 0.18801829],
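For context, here is a sketch of the `CSVDataSource` class that this test constructs, pieced together from the surrounding hunks; the method name `load_catchment_data` and the `from catchment import models` import path are assumptions:

```python
import glob
import os

from catchment import models  # assumed import path for the models module


class CSVDataSource:
    """Loads all rainfall data from CSV files within a specified directory."""

    def __init__(self, dir_path):
        self.dir_path = dir_path

    def load_catchment_data(self):
        # Same CSV-loading logic as the first hunk, wrapped in a class so that
        # analyse_data() does not need to know where its data comes from.
        data_file_paths = glob.glob(os.path.join(self.dir_path, 'rain_data_2015*.csv'))
        if len(data_file_paths) == 0:
            raise ValueError('No CSV files found in the data directory')
        data = map(models.read_variable_from_csv, data_file_paths)
        return list(data)
```

Because `analyse_data()` only ever talks to this small interface, the `JSONDataSource` shown in the next hunk can be dropped in without touching the analysis code.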
@@ -353,7 +353,7 @@ data sources with no extra work.
 > >         data_file_paths = glob.glob(os.path.join(self.dir_path, 'rain_data_2015*.json'))
 > >         if len(data_file_paths) == 0:
 > >             raise ValueError('No JSON files found in the data directory')
-> >         data = map(models.load_json, data_file_paths)
+> >         data = map(models.read_variable_from_json, data_file_paths)
 > >         return list(data)
 > > ```
 > > Additionally, in the controller we will need to select the appropriate DataSource to
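The hunk cuts off before the controller code itself. A sketch of the selection it describes might look like the following; `select_data_source` and the `infiles` list of input file paths are hypothetical names introduced here for illustration:

```python
import os


def select_data_source(infiles):
    """Pick a data source based on the first input file's extension (hypothetical helper)."""
    _, extension = os.path.splitext(infiles[0])
    if extension == '.csv':
        return CSVDataSource(os.path.dirname(infiles[0]))
    elif extension == '.json':
        return JSONDataSource(os.path.dirname(infiles[0]))
    raise ValueError(f'Unsupported data file format: {extension}')


# The controller can then run the same analysis regardless of format:
# analyse_data(select_data_source(infiles))
```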