
Commit 8ce03cca authored by Pierre Cazenave


Fix the way the years and months are handled so they are in sync. The nested loop approach does not work well across year boundaries: you end up with a December-to-January transition on the inner loop without incrementing the year in between.
parent a0b319d3
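
To illustrate the fix, here is a minimal, hypothetical sketch (the variable names serial, years, months and idx are taken from the diff below; the date range is invented) of how a single loop over pre-synchronised month/year pairs crosses a December-to-January boundary correctly:

serial = datenum(2013, 11, 15):datenum(2014, 2, 15);
[years, months, ~, ~, ~, ~] = datevec(serial);
% Keep the first occurrence of each year/month pair, in order, so that
% years(t) and months(t) stay in sync within a single loop.
[~, idx] = unique(years * 100 + months, 'stable');
months = months(idx);
years = years(idx);
for t = 1:length(months)
    fprintf('%04d/%02d\n', years(t), months(t)) % 2013/11, 2013/12, 2014/01, 2014/02
end

With nested year and month loops, the inner loop would step from December to January without the outer loop advancing the year; indexing paired years/months vectors with one index avoids that.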
@@ -102,384 +102,431 @@ serial = dates(1):dates(2);
years = years(idx);
nt = length(months);
for t = 1:nt
    month = months(t);
    year = years(t);
    if ftbverbose
        fprintf('Downloading for %04d/%02d\n', year, month)
    end
    % Set up a struct of the remote locations in which we're
    % interested.
    url = 'http://nomads.ncdc.noaa.gov/thredds/dodsC/cfsr1hr/';
    % Get the forcing data.
    ncep.dlwsfc = [url, sprintf('%04d%02d/dlwsfc.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.dswsfc = [url, sprintf('%04d%02d/dswsfc.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.lhtfl = [url, sprintf('%04d%02d/lhtfl.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.prate = [url, sprintf('%04d%02d/prate.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.pressfc = [url, sprintf('%04d%02d/pressfc.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.q2m = [url, sprintf('%04d%02d/q2m.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.tmp2m = [url, sprintf('%04d%02d/tmp2m.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.uswsfc = [url, sprintf('%04d%02d/uswsfc.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.uwnd = [url, sprintf('%04d%02d/wnd10m.gdas.%04d%02d.grb2', year, month, year, month)];
    ncep.vwnd = [url, sprintf('%04d%02d/wnd10m.gdas.%04d%02d.grb2', year, month, year, month)];
    % We need variable names too since we can't store them as the keys in
    % ncep due to characters which MATLAB won't allow in fields (mainly -).
    names.dlwsfc = 'Downward_Long-Wave_Rad_Flux';
    names.dswsfc = 'Downward_Short-Wave_Rad_Flux';
    names.lhtfl = 'Latent_heat_net_flux';
    names.prate = 'Precipitation_rate';
    names.pressfc = 'Pressure';
    names.q2m = 'Specific_humidity';
    names.tmp2m = 'Temperature';
    names.uswsfc = 'Upward_Short-Wave_Rad_Flux';
    names.uwnd = 'U-component_of_wind';
    names.vwnd = 'V-component_of_wind';
    fields = fieldnames(ncep);
    for aa = 1:length(fields)
        % We've been given a list of variables to do, so skip those that
        % aren't in the list.
        if ~isempty(varlist) && max(strcmp(fields{aa}, varlist)) ~= 1
            continue
        end
        if ftbverbose
            fprintf('getting ''%s'' data... ', fields{aa})
        end
        data.(fields{aa}).data = [];
        data.(fields{aa}).time = [];
        data.(fields{aa}).lat = [];
        data.(fields{aa}).lon = [];
        %ncid_info = ncinfo(ncep.(fields{aa}));
        ncid = netcdf.open(ncep.(fields{aa}));
        % If you don't know what it contains, start by using the
        % 'netcdf.inq' operation:
        %[numdims, numvars, numglobalatts, unlimdimid] = netcdf.inq(ncid);
        % Time is in hours since the start of the month. We want
        % sensible times, so we'll have to offset at some point.
        varid = netcdf.inqVarID(ncid, 'time');
        data_time = netcdf.getVar(ncid, varid, 'double');
        if max(data_time) == 6
            % Precipitation data has times as 0-6 repeated for n days.
            % We need a sensible set of hours since the start of the
            % month for subsequent subsampling in time. Make the
            % sensible time array here.
            data_time = 0:length(data_time) - 1;
        end
        varid = netcdf.inqVarID(ncid, 'lon');
        data_lon.lon = netcdf.getVar(ncid, varid, 'double');
        varid = netcdf.inqVarID(ncid, 'lat');
        data_lat.lat = netcdf.getVar(ncid, varid, 'double');
        % Some of the NCEP Reanalysis 2 data are 4D, but with a single
        % vertical level (e.g. uwnd, vwnd, air, rhum).
        data_level_idx = [];
        try % not all data have a 'level', so fail gracefully here.
            varid = netcdf.inqVarID(ncid, 'level');
            data_level.level = netcdf.getVar(ncid, varid, 'double');
            if length(data_level.level) > 1
                % Assume we've got rhum and we want humidity at the sea
                % surface (1013 millibars (or hPa)). As such, ZQQ must be
                % 0.0 in the FVCOM model namelist. Find the closest level
                % to pressure at 1 standard atmosphere.
                [~, data_level_idx] = min(abs(data_level.level - 1013));
            end
        catch
            true;
        end
        if isempty(data_level_idx) % default to the first
            data_level_idx = 1;
        end
        % Time is in hours relative to the start of the month for CFSv2.
        timevec = datevec((data_time / 24) + datenum(year, month, 1, 0, 0, 0));
        % Get the data time and convert to Modified Julian Day.
        data.time = greg2mjulian(...
            timevec(:, 1), ...
            timevec(:, 2), ...
            timevec(:, 3), ...
            timevec(:, 4), ...
            timevec(:, 5), ...
            timevec(:, 6));
        % Clip the time to the given range.
        data_time_mask = data.time >= modelTime(1) & data.time <= modelTime(end);
        data_time_idx = 1:size(data.time, 1);
        data_time_idx = data_time_idx(data_time_mask);
        if ~isempty(data_time_idx) % for the prate data mainly
            data.time = data.time(data_time_mask);
        else
            % Reset the index to its original size. This is for data
            % with only a single time stamp which falls outside the
            % model time (as is the case with the precipitation data,
            % for some reason). Only reset it when the length of the
            % input time is equal to 1.
            if length(data.time) == 1
                data_time_idx = 1:size(data.time, 1);
            end
        end
        % Check the times
        %[yyyy, mm, dd, hh, MM, ss] = mjulian2greg(data.time(1))
        %[yyyy, mm, dd, hh, MM, ss] = mjulian2greg(data.time(end))
        % Get the data in two goes, once for the end of the grid (west of
        % Greenwich), once for the beginning (east of Greenwich), and then
        % stick the two bits together.
        clear index_lon index_lat
        if extents(1) < 0 && extents(2) < 0
            % This is OK, we can just shunt the values by 360.
            extents(1) = extents(1) + 360;
            extents(2) = extents(2) + 360;
            index_lon = find(data_lon.lon > extents(1) & data_lon.lon < extents(2));
        elseif extents(1) < 0 && extents(2) > 0
            % This is the tricky one. We'll do two passes to extract the
            % western chunk first (extents(1) + 360 to 360), then the
            % eastern chunk (0 - extents(2)).
            index_lon{1} = find(data_lon.lon >= extents(1) + 360);
            index_lon{2} = find(data_lon.lon <= extents(2));
        else
            % Dead easy, we're in the eastern hemisphere, so nothing too
            % strenuous here.
            index_lon = find(data_lon.lon > extents(1) & data_lon.lon < extents(2));
        end
        % Latitude is much more straightforward
        index_lat = find(data_lat.lat > extents(3) & data_lat.lat < extents(4));
        data.(fields{aa}).lat = data_lat.lat(index_lat);
        % Get the data
        if iscell(index_lon)
            data.(fields{aa}).lon = data_lon.lon(cat(1, index_lon{:}));
            varid = netcdf.inqVarID(ncid, names.(fields{aa}));
            [~, ~, dimids, ~] = netcdf.inqVar(ncid, varid);
            if length(dimids) == 4
                start = [...
                    min(index_lon{1}), ...
                    min(index_lat), ...
                    data_level_idx, ...
                    min(data_time_idx)] - 1;
                count = [...
                    length(index_lon{1}), ...
                    length(index_lat), ...
                    length(data_level_idx), ...
                    length(data_time_idx)];
            elseif length(dimids) == 3
                start = [...
                    min(index_lon{1}), ...
                    min(index_lat), ...
                    min(data_time_idx)] - 1;
                count = [...
                    length(index_lon{1}), ...
                    length(index_lat), ...
                    length(data_time_idx)];
            end
            data_west.(fields{aa}).(fields{aa}) = netcdf.getVar(ncid, varid, start, count, 'double');
            if length(dimids) == 4
                start = [...
                    min(index_lon{2}), ...
                    min(index_lat), ...
                    data_level_idx, ...
                    min(data_time_idx)] - 1;
                count = [...
                    length(index_lon{2}), ...
                    length(index_lat), ...
                    length(data_level_idx), ...
                    length(data_time_idx)];
            elseif length(dimids) == 3
                start = [...
                    min(index_lon{2}), ...
                    min(index_lat), ...
                    min(data_time_idx)] - 1;
                count = [...
                    length(index_lon{2}), ...
                    length(index_lat), ...
                    length(data_time_idx)];
            end
            data_east.(fields{aa}).(fields{aa}) = ...
                netcdf.getVar(ncid, varid, start, count, 'double');
            scratch.(fields{aa}).(fields{aa}).(fields{aa}) = ...
                cat(1, ...
                data_west.(fields{aa}).(fields{aa}), ...
                data_east.(fields{aa}).(fields{aa}));
            % Merge the two sets of data together
            structfields = fieldnames(data_west.(fields{aa}));
            for ii = 1:length(structfields)
                switch structfields{ii}
                    case 'lon'
                        % Only the longitude and the actual data need
                        % sticking together, but each must be done
                        % along a different axis (lon is a vector, the
                        % data is an array).
                        scratch.(fields{aa}).(structfields{ii}) = ...
                            [data_west.(fields{aa}).(structfields{ii}); ...
                            data_east.(fields{aa}).(structfields{ii})];
                    case fields{aa}
                        % This is the actual data.
                        scratch.(fields{aa}).(structfields{ii}) = ...
                            [rot90(data_west.(fields{aa}).(structfields{ii})), ...
                            rot90(data_east.(fields{aa}).(structfields{ii}))];
                    otherwise
                        % Assume the data are the same in both arrays.
                        % A simple check of the range of values in the
                        % difference between the two arrays should show
                        % whether they're the same or not. If they are,
                        % use the western values, otherwise, warn about
                        % the differences. It might be the data are
                        % relatively unimportant anyway (i.e. not used
                        % later on).
                        try
                            tdata = ...
                                data_west.(fields{aa}).(structfields{ii}) - ...
                                data_east.(fields{aa}).(structfields{ii});
                            if range(tdata(:)) == 0
                                % They're the same data
                                scratch.(fields{aa}).(structfields{ii}) = ...
                                    data_west.(fields{aa}).(structfields{ii});
                            else
                                warning('Unexpected data field and the west and east halves don''t match. Skipping.')
                            end
                        catch
                            warning('Unexpected data field and the west and east halves don''t match. Skipping.')
                        end
                        clearvars tdata
                end
            end
            clearvars data_west data_east
        else
            % We have a straightforward data extraction
            data.(fields{aa}).lon = data_lon.lon(index_lon);
            varid = netcdf.inqVarID(ncid, (fields{aa}));
            % [varname, xtype, dimids, natts] = netcdf.inqVar(ncid, varid);
            % [~, length1] = netcdf.inqDim(ncid, dimids(1))
            % [~, length2] = netcdf.inqDim(ncid, dimids(2))
            % [~, length3] = netcdf.inqDim(ncid, dimids(3))
            start = [...
                min(index_lon), ...
                min(index_lat), ...
                min(data_time_idx)] - 1;
            count = [...
                length(index_lon), ...
                length(index_lat), ...
                length(data_time_idx)];
            % The air data (NCEP version of this script) was failing
            % with a three long start and count array, so try first
            % without (to retain original behaviour for other
            % potentially unaffected variables) but fall back to
            % getting the data_level_idx one instead (should be the first
            % level).
            try
                scratch.(fields{aa}).(fields{aa}).(fields{aa}) = ...
                    netcdf.getVar(ncid, varid, start, count, 'double');
            catch
                start = [...
                    min(index_lon), ...
                    min(index_lat), ...
                    data_level_idx, ...
                    min(data_time_idx)] - 1;
                count = [...
                    length(index_lon), ...
                    length(index_lat), ...
                    1, ...
                    length(data_time_idx)];
                scratch.(fields{aa}).(fields{aa}) = netcdf.getVar(ncid, varid, start, count, 'double');
            end
        end
        clearvars data_time* data_level_idx
        datatmp = squeeze(scratch.(fields{aa}).(fields{aa}));
        % Fix the longitude ranges for all data.
        data.(fields{aa}).lon(data.(fields{aa}).lon > 180) = ...
            data.(fields{aa}).lon(data.(fields{aa}).lon > 180) - 360;
        % data.(fields{aa}).data = datatmp;
        data.(fields{aa}).data = cat(3, data.(fields{aa}).data, datatmp);
        % data.(fields{aa}).time = data.time;
        data.(fields{aa}).time = cat(1, data.(fields{aa}).time, data.time);
        % data.(fields{aa}).time = cat(1, data.(fields{aa}).time, squeeze(scratch.(fields{aa}).(fields{aa}).time));
        % data.(fields{aa}).lat = squeeze(scratch.(fields{aa}).(fields{aa}).lat);
        % data.(fields{aa}).lon = squeeze(scratch.(fields{aa}).(fields{aa}).lon);
        if ftbverbose
            if isfield(data, fields{aa})
                fprintf('done.\n')
            else
                fprintf('error!\n')
            end
        end
    end
end
% Calculate the net long and shortwave radiation fluxes.
if isfield(data, 'uswsfc') && isfield(data, 'dswsfc')
    data.nswsfc.data = data.uswsfc.data - data.dswsfc.data;
    data.nswsfc.time = data.uswsfc.time;
    data.nswsfc.lon = data.uswsfc.lon;
    data.nswsfc.lat = data.uswsfc.lat;
end
% Now we have some data, we need to create some additional parameters
% required by FVCOM.
% Convert precipitation from kg/m^2/s to m/s (required by FVCOM) by
% dividing by freshwater density (kg/m^3).
if isfield(data, 'prate')
    data.prate.data = data.prate.data / 1000;
end
% Evaporation can be approximated by:
%
% E(m/s) = lhtfl/Llv/rho
%
% where:
%
% lhtfl = "Mean daily latent heat net flux at the surface"
% Llv = Latent heat of vaporization (approx. to 2.5*10^6 J kg^-1)
% rho = 1025 kg/m^3
%
if isfield(data, 'prate') && isfield(data, 'lhtfl')
    Llv = 2.5 * 10^6;
    rho = 1025; % using a typical value for seawater.
    Et = data.lhtfl.data / Llv / rho;
    data.P_E.data = data.prate.data - Et;
    % Evaporation and precipitation need to have the same sign for
    % FVCOM (ocean losing water is negative in both instances). So,
    % flip the evaporation here.
    data.Et.data = -Et;
end
% Get the fields we need for the subsequent interpolation. Find the
% position of a sensibly sized array (i.e. not 'topo', 'rhum' or
% 'pres').
for vv = 1:length(fields)
    if ~isempty(varlist) && max(strcmp(fields{vv}, varlist)) ~= 1
        continue