Due to a shift in policy, from 09:00 GMT on Wednesday 14th July 2021, we will be disabling SSH access to the server for external users. External users who wish to continue to access code repositories on the server will need to switch to using HTTPS. This can be accomplished in the following way: 1) On the repository page in GitLab, open the clone dialogue and select ‘Clone with HTTPS’ to get the address of the repository; 2) From within the checkout of your repository, run: $ git remote set-url origin HTTPS_ADDRESS. Here, replace HTTPS_ADDRESS with the address you have just copied from GitLab. Pulls and pushes will now require you to enter a username and password rather than using an SSH key. If you would prefer not to enter a password each time, you might consider caching your login credentials.

Commit e2552248 authored by Pierre Cazenave's avatar Pierre Cazenave

Add support for the native OPeNDAP capabilities in the netCDF tools in newer...

Add support for the native OPeNDAP capabilities in the netCDF tools in newer versions of MATLAB. Also fix some of the issues with regard to the precipitation and evaporation variables (notably setting the evaporation to P_E instead of subtracting it from the precipitation rate and setting that to P_E).
parent 81ffa3d1
......@@ -16,8 +16,7 @@ function data = get_NCEP_forcing(Mobj, modelTime)
%
% OUTPUT:
% data - struct of the data necessary to force FVCOM. These can be
% interpolated onto an unstructured grid in Mobj using
% grid2fvcom_U10V10.m.
% interpolated onto an unstructured grid in Mobj using grid2fvcom.m.
%
% The parameters which can be obtained from the NCEP data are:
% - u wind component (uwnd)
......@@ -27,6 +26,7 @@ function data = get_NCEP_forcing(Mobj, modelTime)
% - Air temperature (air)
% - Relative humidity (rhum)
% - Precipitation rate (prate)
% - Evaporation rate (pevpr)
% - Sea level pressure (slp)
% - Latent heat flux (lhtfl)
% - Surface heat flux (shtfl)
......@@ -43,13 +43,19 @@ function data = get_NCEP_forcing(Mobj, modelTime)
% The OPeNDAP toolbox:
% http://www.opendap.org/pub/contributed/source/ml-toolbox/
%
%
% Author(s)
% Pierre Cazenave (Plymouth Marine Laboratory)
% Ricardo Torres (Plymouth Marine Laboratory)
%
% Revision history:
% 2012-10-31 First version based on get_NCEP_L4.m.
% 2013-02-14 Add support for the native OPeNDAP functions in the MATLAB
% netcdf tools. Also fix the value of data.P_E.data to be only the
% evaporation. The evaporation available from NCEP in prate is in units
% of W m^{-2} whereas FVCOM expects ms^{-1}. Rather than convert from W
% m^{-2} to ms^{-1}, use the latent heat flux at the surface with the
% density of water and the latent heat of vaporisation to estimate
% evaporation rate in ms^{-1}.
%
%==========================================================================
......@@ -57,8 +63,8 @@ subname = 'get_NCEP_forcing';
global ftbverbose;
if(ftbverbose);
fprintf('\n')
fprintf(['begin : ' subname '\n'])
fprintf('\n')
fprintf(['begin : ' subname '\n'])
end
% Get the extent of the model domain (in spherical)
......@@ -96,6 +102,14 @@ ncep.lhtfl = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanaly
ncep.shtfl = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/surface_gauss/shtfl.sfc.gauss.',num2str(year),'.nc'];
ncep.pevpr = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/surface_gauss/pevpr.sfc.gauss.',num2str(year),'.nc'];
% The fields below can be used to create the net shortwave and longwave
% fluxes if the data you're using don't include net fluxes. Subtract the
% downward from upward fluxes to get net fluxes.
% ncep.dswrf = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/surface_gauss/dswrf.sfc.gauss.',num2str(year),'.nc'];
% ncep.uswrf = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/surface_gauss/uswrf.sfc.gauss.',num2str(year),'.nc'];
% ncep.dlwrf = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/surface_gauss/dlwrf.sfc.gauss.',num2str(year),'.nc'];
% ncep.ulwrf = ['http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/surface_gauss/ulwrf.sfc.gauss.',num2str(year),'.nc'];
fields = fieldnames(ncep);
for aa=1:length(fields)
......@@ -105,10 +119,46 @@ for aa=1:length(fields)
data.(fields{aa}).lon = [];
data_attributes.(fields{aa}) = [];
% Depending on the MATLAB version, either use the native netcdf
% libraries to load the OPeNDAP data, otherwise we need the relevant
% third-party toolbox.
out = ver('MATLAB');
if str2double(out.Version) > 7.13
%ncid_info = ncinfo(ncep.(fields{aa}));
ncid = netcdf.open(ncep.(fields{aa}));
% If you don't know what it contains, start by using the
% 'netcdf.inq' operation:
%[numdims,numvars,numglobalatts,unlimdimid] = netcdf.inq(ncid);
varid = netcdf.inqVarID(ncid, 'time');
data_time.time = netcdf.getVar(ncid, varid, 'double');
varid = netcdf.inqVarID(ncid, (fields{aa}));
data_attributes.(fields{aa}).(fields{aa}).scale_factor = ...
netcdf.getAtt(ncid,varid,'scale_factor','double');
data_attributes.(fields{aa}).(fields{aa}).add_offset = ...
netcdf.getAtt(ncid,varid,'add_offset','double');
varid = netcdf.inqVarID(ncid,'lon');
data_lon.lon = netcdf.getVar(ncid,varid,'double');
varid = netcdf.inqVarID(ncid,'lat');
data_lat.lat = netcdf.getVar(ncid,varid,'double');
timevec = datevec((data_time.time)/24+365);
else
% We'll use the third-party OPeNDAP toolbox.
data_time = loaddap([ncep.(fields{aa}),'?time']);
data_attributes.(fields{aa}) = loaddap('-A',[ncep.(fields{aa})]);
timevec = datevec((data_time.time)/24+365);
% Clip the data to the model domain
data_lon = loaddap([ncep.(fields{aa}),'?lon']);
% If the extents are negative in longitude, we need to extract the NCEP
data_lat = loaddap([ncep.(fields{aa}),'?lat']);
end
% Get the data time and convert to Modified Julian Day.
data_time = loaddap([ncep.(fields{aa}),'?time']);
data_attributes.(fields{aa}) = loaddap('-A',[ncep.(fields{aa})]);
timevec = datevec((data_time.time)/24+365);
data.time = greg2mjulian(timevec(:,1), timevec(:,2), timevec(:,3), ...
timevec(:,4), timevec(:,5), timevec(:,6));
% Clip the time to the given range
......@@ -120,13 +170,9 @@ for aa=1:length(fields)
% Check the times
%[yyyy,mm,dd,hh,MM,ss] = mjulian2greg(data.time(1))
%[yyyy,mm,dd,hh,MM,ss] = mjulian2greg(data.time(end))
% Clip the data to the model domain
data_lon = loaddap([ncep.(fields{aa}),'?lon']);
% If the extents are negative in longitude, we need to extract the NCEP
% data in two goes, once for the end of the grid (west of Greenwich),
% once for the beginning (east of Greenwich), and then stick the two
% bits together.
% Get the data in two goes, once for the end of the grid (west of
% Greenwich), once for the beginning (east of Greenwich), and then
% stick the two bits together.
clear index_lon index_lat
if extents(1) < 0 && extents(2) < 0
% This is OK, we can just shunt the values by 360.
......@@ -146,77 +192,115 @@ for aa=1:length(fields)
end
% Latitude is much more straightforward
data_lat = loaddap([ncep.(fields{aa}),'?lat']);
index_lat = find(data_lat.lat > extents(3) & data_lat.lat < extents(4));
data.(fields{aa}).lat = data_lat.lat(index_lat);
% Get the data
if iscell(index_lon)
% We need to do each half and merge them
eval(['data1_west.(fields{aa}) = loaddap(''', ncep.(fields{aa}),'?',...
fields{aa},'[', num2str(min(data_time_idx)-1),':',...
num2str(max(data_time_idx)-1), '][',...
num2str(min(index_lat)-1), ':', num2str(max(index_lat)-1),...
'][', num2str(min(index_lon{1})-1), ':',...
num2str(length(data_lon.lon)-1), ']'');']);
eval(['data1_east.(fields{aa}) = loaddap(''', ncep.(fields{aa}),'?',...
fields{aa}, '[', num2str(min(data_time_idx)-1),':',...
num2str(max(data_time_idx)-1), '][',...
num2str(min(index_lat)-1), ':', num2str(max(index_lat)-1),...
'][', '0', ':', num2str(max(index_lon{2})-1), ']'');']);
% Merge the two sets of data together
structfields = fieldnames(data1_west.(fields{aa}).(fields{aa}));
for ii=1:length(structfields)
switch structfields{ii}
case 'lon'
% Only the longitude and the actual data need sticking
% together, but each must be done along a different
% axis (lon is a vector, the data is an array).
data1.(fields{aa}).(fields{aa}).(structfields{ii}) = ...
[data1_west.(fields{aa}).(fields{aa}).(structfields{ii});data1_east.(fields{aa}).(fields{aa}).(structfields{ii})];
case fields{aa}
% This is the actual data
data1.(fields{aa}).(fields{aa}).(structfields{ii}) = ...
[data1_west.(fields{aa}).(fields{aa}).(structfields{ii}),data1_east.(fields{aa}).(fields{aa}).(structfields{ii})];
otherwise
% Assume the data are the same in both arrays. A simple
% check of the range of values in the difference
% between the two arrays should show whether they're
% the same or not. If they are, use the western values,
% otherwise, warn about the differences. It might be
% the data are relatively unimportant anyway (i.e. not
% used later on).
try
tdata = data1_west.(fields{aa}).(fields{aa}).(structfields{ii}) - data1_east.(fields{aa}).(fields{aa}).(structfields{ii});
if range(tdata(:)) == 0
% They're the same data
data1.(fields{aa}).(fields{aa}).(structfields{ii}) = ...
data1_west.(fields{aa}).(fields{aa}).(structfields{ii});
else
data.(fields{aa}).lon = data_lon.lon(cat(1,index_lon{:}));
if str2double(out.Version) > 7.13
% varidlon = netcdf.inqVarID(ncid,'lon');
% varidtime = netcdf.inqVarID(ncid,'time');
% varidlat = netcdf.inqVarID(ncid,'lat');
varid = netcdf.inqVarID(ncid,(fields{aa}));
% [varname,xtype,dimids,natts] = netcdf.inqVar(ncid,varid);
% [~,length1] = netcdf.inqDim(ncid,dimids(1))
% [~,length2] = netcdf.inqDim(ncid,dimids(2))
% [~,length3] = netcdf.inqDim(ncid,dimids(3))
start=[min(index_lon{1})-1,min(index_lat)-1,min(data_time_idx)-1];
count=[length(index_lon{1}),length(index_lat),length(data_time_idx)];
data1_west.(fields{aa}).(fields{aa}) = netcdf.getVar(ncid,varid,start,count,'double');
start=[min(index_lon{2})-1,min(index_lat)-1,min(data_time_idx)-1];
count=[length(index_lon{2}),length(index_lat),length(data_time_idx)];
data1_east.(fields{aa}).(fields{aa}) = netcdf.getVar(ncid,varid,start,count,'double');
data1.(fields{aa}).(fields{aa}).(fields{aa})=cat(1,data1_west.(fields{aa}).(fields{aa}), data1_east.(fields{aa}).(fields{aa}));
else
% We need to do each half and merge them
eval(['data1_west.(fields{aa}) = loaddap(''', ncep.(fields{aa}),'?',...
fields{aa},'[', num2str(min(data_time_idx)-1),':',...
num2str(max(data_time_idx)-1), '][',...
num2str(min(index_lat)-1), ':', num2str(max(index_lat)-1),...
'][', num2str(min(index_lon{1})-1), ':',...
num2str(length(data_lon.lon)-1), ']'');']);
eval(['data1_east.(fields{aa}) = loaddap(''', ncep.(fields{aa}),'?',...
fields{aa}, '[', num2str(min(data_time_idx)-1),':',...
num2str(max(data_time_idx)-1), '][',...
num2str(min(index_lat)-1), ':', num2str(max(index_lat)-1),...
'][', '0', ':', num2str(max(index_lon{2})-1), ']'');']);
% Merge the two sets of data together
structfields = fieldnames(data1_west.(fields{aa}).(fields{aa}));
for ii=1:length(structfields)
switch structfields{ii}
case 'lon'
% Only the longitude and the actual data need
% sticking together, but each must be done along a
% different axis (lon is a vector, the data is an
% array).
data1.(fields{aa}).(fields{aa}).(structfields{ii}) = ...
[data1_west.(fields{aa}).(fields{aa}).(structfields{ii});data1_east.(fields{aa}).(fields{aa}).(structfields{ii})];
case fields{aa}
% This is the actual data
data1.(fields{aa}).(fields{aa}).(structfields{ii}) = ...
[data1_west.(fields{aa}).(fields{aa}).(structfields{ii}),data1_east.(fields{aa}).(fields{aa}).(structfields{ii})];
otherwise
% Assume the data are the same in both arrays. A
% simple check of the range of values in the
% difference between the two arrays should show
% whether they're the same or not. If they are, use
% the western values, otherwise, warn about the
% differences. It might be the data are relatively
% unimportant anyway (i.e. not used later on).
try
tdata = data1_west.(fields{aa}).(fields{aa}).(structfields{ii}) - data1_east.(fields{aa}).(fields{aa}).(structfields{ii});
if range(tdata(:)) == 0
% They're the same data
data1.(fields{aa}).(fields{aa}).(structfields{ii}) = ...
data1_west.(fields{aa}).(fields{aa}).(structfields{ii});
else
warning('Unexpected data field and the west and east halves don''t match. Skipping.')
end
catch
warning('Unexpected data field and the west and east halves don''t match. Skipping.')
end
catch
warning('Unexpected data field and the west and east halves don''t match. Skipping.')
end
clear tdata
clear tdata
end
end
end
else
% We have a straightforward data extraction
eval(['data1.(fields{aa}) = loaddap(''', ncep.(fields{aa}),'?',...
fields{aa}, '[', num2str(min(data_time_idx)-1),':',...
num2str(max(data_time_idx)-1), '][',...
num2str(min(index_lat)-1), ':', num2str(max(index_lat)-1),...
'][', num2str(min(index_lon)-1), ':',...
num2str(max(index_lon)-1), ']'');']);
data.(fields{aa}).lon = data_lon.lon(index_lon);
if str2double(out.Version) > 7.13
varid = netcdf.inqVarID(ncid,(fields{aa}));
% [varname,xtype,dimids,natts] = netcdf.inqVar(ncid,varid);
% [~,length1] = netcdf.inqDim(ncid,dimids(1))
% [~,length2] = netcdf.inqDim(ncid,dimids(2))
% [~,length3] = netcdf.inqDim(ncid,dimids(3))
start=[min(index_lon)-1,min(index_lat)-1,min(data_time_idx)-1];
count=[length(index_lon),length(index_lat),length(data_time_idx)];
data1.(fields{aa}).(fields{aa}).(fields{aa}) = netcdf.getVar(ncid,varid,start,count,'double');
else
eval(['data1.(fields{aa}) = loaddap(''', ncep.(fields{aa}),'?',...
fields{aa}, '[', num2str(min(data_time_idx)-1),':',...
num2str(max(data_time_idx)-1), '][',...
num2str(min(index_lat)-1), ':', num2str(max(index_lat)-1),...
'][', num2str(min(index_lon)-1), ':',...
num2str(max(index_lon)-1), ']'');']);
end
end
datatmp = squeeze(data1.(fields{aa}).(fields{aa}).(fields{aa}));
datatmp = (datatmp * data_attributes.(fields{aa}).(fields{aa}).scale_factor) + data_attributes.(fields{aa}).(fields{aa}).add_offset;
data.(fields{aa}).data = cat(1, data.(fields{aa}).data, datatmp);
data.(fields{aa}).time = cat(1, data.(fields{aa}).time, squeeze(data1.(fields{aa}).(fields{aa}).time));
data.(fields{aa}).lat = squeeze(data1.(fields{aa}).(fields{aa}).lat);
data.(fields{aa}).lon = squeeze(data1.(fields{aa}).(fields{aa}).lon);
data.(fields{aa}).data = datatmp;
data.(fields{aa}).time =data.time;
% data.(fields{aa}).time = cat(1, data.(fields{aa}).time, squeeze(data1.(fields{aa}).(fields{aa}).time));
% data.(fields{aa}).lat = squeeze(data1.(fields{aa}).(fields{aa}).lat);
% data.(fields{aa}).lon = squeeze(data1.(fields{aa}).(fields{aa}).lon);
end
% Now we have some data, we need to create some additional parameters
......@@ -239,7 +323,7 @@ data.prate.data = data.prate.data/1000;
Llv = 2.5*10^6;
rho = 1025; % using a typical value for seawater.
Et = data.lhtfl.data/Llv/rho;
data.P_E.data = data.prate.data-Et;
data.P_E.data = Et;
% Calculate the momentum flux
WW = data.uwnd.data + data.vwnd.data * 1i;
......@@ -249,9 +333,11 @@ data.tx.data=reshape(data.tx.data*0.1, size(data.uwnd.data)); % dyn/cm^2 to N/m^
data.ty.data=reshape(data.ty.data*0.1, size(data.uwnd.data)); % dyn/cm^2 to N/m^2
% Get the fields we need for the subsequent interpolation
data.lon = data.uwnd.lon;
data.lon = data.(fields{1}).lon;
data.lon(data.lon > 180) = data.lon(data.lon > 180) - 360;
data.lat = data.uwnd.lat;
data.lat = data.(fields{1}).lat;
return
% Have a look at some data.
% [X, Y] = meshgrid(data.lon, data.lat);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment