Add IFS uncompressed data #26
Merged
Changes from all commits · 4 commits
src/climatebenchpress/data_loader/datasets/ifs_uncompressed.py — 172 additions, 0 deletions
__all__ = ["IFSUncompressedDataset"]

import argparse
from pathlib import Path

import earthkit.regrid
import numpy as np
import requests
import xarray as xr

from .. import (
    monitor,
    open_downloaded_canonicalized_dataset,
    open_downloaded_tiny_canonicalized_dataset,
)
from .abc import Dataset

BASE_URL = "https://object-store.os-api.cci1.ecmwf.int/esiwacebucket"


class IFSUncompressedDataset(Dataset):
    """Dataset for IFS uncompressed data.

    Contains data from the [hplp](https://apps.ecmwf.int/ifs-experiments/rd/hplp/)
    experiment from the Integrated Forecasting System (IFS) model. Crucially,
    this dataset contains uncompressed 64-bit floating point data.
    """

    name = "ifs-uncompressed"

    @staticmethod
    def download(download_path: Path, progress: bool = True):
        ds = load_hplp_data(leveltype="sfc", gridtype="reduced_gg")
        ds = ds[["msl", "10u", "10v"]]
        ds_regridded = regrid_to_regular(
            ds,
            in_grid={"grid": "O400"},
            out_grid={"grid": [0.25, 0.25]},
        )
        downloadfile = download_path / "ifs_uncompressed.zarr"
        with monitor.progress_bar(progress):
            ds_regridded.to_zarr(
                downloadfile, mode="w", encoding=dict(), compute=False
            ).compute()

    @staticmethod
    def open(download_path: Path) -> xr.Dataset:
        ds = xr.open_dataset(download_path / "ifs_uncompressed.zarr")

        # Needed to make the dataset CF-compliant.
        ds.longitude.attrs["axis"] = "X"
        ds.latitude.attrs["axis"] = "Y"
        ds.time.attrs["standard_name"] = "time"
        return ds


def load_hplp_data(leveltype=None, gridtype=None, step=None, remap=False):
    """Function taken from: https://github.com/climet-eu/compression-lab-notebooks/blob/d297ee98be916359fde16ab36f0f9e0681662df8/04-example-datasets/01-hplp.ipynb."""
    if leveltype not in {"pl", "ml", "sfc", "wave"}:
        raise ValueError(
            f"Invalid leveltype: '{leveltype}'. Available leveltypes: pl, ml, sfc, wave"
        )

    if leveltype in {"ml", "pl"} and not gridtype:
        raise ValueError(
            f"Gridtype is required for leveltype '{leveltype}'. Available: reduced_gg, sh"
        )

    if remap and gridtype != "sh":
        raise ValueError("Only 'sh' fields can be remapped.")

    if leveltype == "wave" and gridtype != "reduced_ll":
        print("Warning: Wave model data are stored on a reduced_ll grid.")

    if leveltype == "sfc" and gridtype != "reduced_gg":
        print("Warning: Surface level data are stored on a reduced_gg grid.")

    if step and not (leveltype == "ml" and gridtype == "reduced_gg"):
        print(
            "Warning: Specifying 'step' is unnecessary for this configuration and will be ignored."
        )

    if leveltype in {"sfc", "wave"}:
        url = f"{BASE_URL}/hplp/hplp_{leveltype}.grib"
    elif leveltype == "ml" and gridtype == "reduced_gg":
        if step is None:
            raise ValueError(
                "The ml reduced_gg data are split into two parts:\n"
                " - Steps: 0, 12, 24, 36, 48, 60, 72, 84, 96, 108, 120 (2020-07-21T00:00:00 to 2020-07-26T00:00:00)\n"
                " - Steps: 132, 144, 156, 168, 180, 192, 204, 216, 228, 240 (2020-07-26T12:00:00 to 2020-07-31T00:00:00)\n"
                "Specify a step smaller than 120 for accessing the first part, \n"
                "and a step greater or equal to 132 for accessing the second part."
            )
        if step <= 120:
            url = f"{BASE_URL}/hplp/hplp_{leveltype}_{gridtype}_levels_0_120.grib"
        else:
            url = f"{BASE_URL}/hplp/hplp_{leveltype}_{gridtype}_levels_132_240.grib"
    else:
        url = f"{BASE_URL}/hplp/hplp_{leveltype}_{gridtype}" + (
            "_O400.grib" if remap else ".grib"
        )
    ref = requests.get(f"{url}.ref").json()

    print(f"Loading dataset {url}")

    return xr.open_dataset(
        "reference://",
        engine="zarr",
        backend_kwargs=dict(storage_options=dict(fo=ref, asynchronous=False)),
        consolidated=False,
    )


def regrid_to_regular(ds, in_grid, out_grid):
    """Regrid dataset to a regular lat-lon grid.

    Parameters
    ----------
    ds : xr.Dataset
        The input dataset to regrid
    in_grid : dict
        The input grid specification for earthkit.regrid.interpolate
    out_grid : dict
        The output grid specification for earthkit.regrid.interpolate. Is assumed to be
        a regular lat-lon grid with equal spacing in latitude and longitude, e.g. {"grid": [0.25, 0.25]}.
    """
    out_data = {var: [] for var in ds.data_vars}
    for var in ds.data_vars:
        for time in ds.time:
            r = earthkit.regrid.interpolate(
                ds[var].sel(time=time).values,
                in_grid=in_grid,
                out_grid=out_grid,
                method="linear",
            )
            out_data[var].append(r)

    dx = out_grid["grid"][0]
    assert (
        out_grid["grid"][0] == out_grid["grid"][1]
    ), "Only grids with equal latitude and longitude spacing are supported."
    lats = np.linspace(90, -90, int(180 / dx) + 1)
    lons = np.linspace(0, 360 - dx, int(360 / dx))
    coords = {
        "time": ds.time,
        "latitude": lats,
        "longitude": lons,
    }
    out_ds = xr.Dataset(
        {
            var: (("time", "latitude", "longitude"), out_data[var])
            for var in ds.data_vars
        },
        coords=coords,
    )
    return out_ds


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--basepath", type=Path, default=Path())
    args = parser.parse_args()

    ds = open_downloaded_canonicalized_dataset(
        IFSUncompressedDataset, basepath=args.basepath
    )
    open_downloaded_tiny_canonicalized_dataset(
        IFSUncompressedDataset, basepath=args.basepath
    )

    for v, da in ds.items():
        print(f"- {v}: {da.dims}")
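For reviewers who want to try the new dataset class directly, here is a minimal usage sketch. It is not part of the PR: the import path is inferred from the file location above, and the target directory is a hypothetical example; only the `download`/`open` signatures come from the diff itself.

```python
from pathlib import Path

# Import path assumed from src/climatebenchpress/data_loader/datasets/ifs_uncompressed.py;
# the package may also re-export the class elsewhere.
from climatebenchpress.data_loader.datasets.ifs_uncompressed import (
    IFSUncompressedDataset,
)

download_path = Path("data/ifs-uncompressed")  # hypothetical target directory
download_path.mkdir(parents=True, exist_ok=True)

# Fetch the hplp surface fields (msl, 10u, 10v), regrid them from the O400
# reduced Gaussian grid to a regular 0.25° lat-lon grid, and write them to Zarr.
IFSUncompressedDataset.download(download_path, progress=True)

# Reopen the stored Zarr with the CF-compliance attribute fixes applied.
ds = IFSUncompressedDataset.open(download_path)
print(ds)
```

With the 0.25° output grid used in `download`, the regridded fields should come back on a 721 × 1440 (latitude × longitude) grid, since `regrid_to_regular` builds the coordinates as `np.linspace(90, -90, 721)` and `np.linspace(0, 359.75, 1440)`.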