grid2fvcom.m 8.18 KB
Newer Older
1
function fvcom = grid2fvcom(Mobj,vars,data)
% Interpolate regularly gridded surface forcing data onto a given FVCOM
% grid.
%
% grid2fvcom(Mobj,vars,data)
%
% DESCRIPTION:
%   Takes a given NCEP reanalysis grid file and interpolates the U10 and
%   V10 values onto the specified FVCOM grid file.
%
% INPUT:
%   Mobj - MATLAB mesh object with the following fields:
%       x, y, lon, lat - cartesian and spherical node coordinates. These
%       are transferred to the NetCDF file only and are not used in the
%       interpolation at all.
%       nVerts - number of vertices (nodes) in the unstructured grid.
%       nElems - number of elements in the unstructured grid.
%   vars - a cell array of the variable names to be interpolated on the
%       FVCOM grid in Mobj (e.g. uwnd, U10, vwnd, V10 etc.).
%   data - a struct which contains the following arrays:
%       x - x data (probably best in cartesian for the interpolation)
%       y - y data (probably best in cartesian for the interpolation)
%       The struct must also contain all the variables defined in vars.
%       time - time vector (in Modified Julian Days). If you're using some
%       of the NCEP surface products (e.g. relative humidity, sea level
%       pressure), you need to supply x and y coordinates for their grids
%       as .xalt and .yalt).
%
% OUTPUT:
%   fvcom - struct of the interpolated data values at the model nodes and
%       element centres. Also includes any variables which were in the
%       input struct but which have not been interpolated (e.g. time).
%
% EXAMPLE USAGE:
%   interpfields = {'uwnd', 'vwnd', 'slp', 'nshf', 'nlwrs', 'nswrs', ...
%       'P_E', 'Et', 'time', 'lon', 'lat', 'x', 'y'};
%   forcing_interp = grid2fvcom(Mobj, interpfields, forcing);
%
% NOTE:
%   The shape of the returned arrays for rhum and slp (via
%   get_NCEP_forcing.m) have sometimes differed from the other variables
%   (they appear to be projected onto a different grid). Unless you
%   desperately need them, I would suggest omitting them from the
%   interpolation here as this assumes the arrays are all the same size.
%   Alternatively, give data.xalt and data.yalt to specify the alternative
%   grid.
%
% Author(s):
%   Pierre Cazenave (Plymouth Marine Laboratory)
%
% Revision history:
%   2012-10-15 First version based on ncep2fvcom_U10V10.m in the
%   fvcom-toolbox.
%   2012-10-16 Removed the code to read the NCEP file. Instead, farmed that
%   out to a new function (read_NCEP_wind) so that the relevant section can
%   be more readily extracted (rather than using the entire globe's data:
%   it's easier to subsample and provide the subsampled data here).
%   2012-10-17 Add outputs to the function for use in visualisation.
%   2012-10-19 Add wind struct as input rather than separate u, v, time and
%   lat/long arrays. Makes invocation a bit cleaner.
%   2012-11-01 Farmed out the creation of the NetCDF file to
%   write_FVCOM_forcing.m and made this purely an interpolation script.
%   2013-02-14 Add support for interpolating data on two different grids
%   through the .xalt and .yalt fields in the input data structure. This is
%   handy if you've got data from NCEP from both the Surface and Gaussian
%   products, each of which are on different grids.
%   2013-05-16 Add parallel for loops if the Parallel Computing Toolbox is
%   available (MATLAB parfor loops fail gracefully to for loops if it is
%   not available, in which case no harm, no foul).
%
%==========================================================================

if nargin ~= 3
    error('Incorrect number of arguments')
end

subname = 'grid2fvcom';

global ftbverbose;
if ftbverbose
    fprintf('\nbegin : %s \n', subname)
end
% Take a local copy of the verbosity flag: globals are not propagated to
% parfor workers, so the parallel loop below must capture it by value.
verbose = false;
if ftbverbose
    verbose = true;
end

% Run jobs on multiple workers if we have that functionality. Not sure if
% it's necessary, but check we have the Parallel Toolbox first.
wasOpened = false;
if license('test', 'Distrib_Computing_Toolbox')
    % We have the Parallel Computing Toolbox, so launch a bunch of workers.
    if matlabpool('size') == 0
        % Force pool to be local in case we have remote pools available.
        matlabpool open local
        wasOpened = true;
    end
end

%--------------------------------------------------------------------------
% Get the relevant bits from the FVCOM mesh object
%--------------------------------------------------------------------------
x = Mobj.x;
y = Mobj.y;
nVerts = Mobj.nVerts;
nElems = Mobj.nElems;
if ftbverbose
    fprintf('info for FVCOM domain\n');
    fprintf('number of nodes: %d\n', nVerts);
    fprintf('number of elems: %d\n', nElems);
end

% Element centre positions for the element-centred interpolation.
xc = nodes2elems(x, Mobj);
yc = nodes2elems(y, Mobj);

% The time vector lives either at the top level of the struct or inside
% each variable's sub-struct, depending on where the data came from.
try
    ntimes = numel(data.time);
catch
    ntimes = numel(data.(vars{1}).time);
end

% Interpolate supplied regularly gridded data to FVCOM mesh. Use
% TriScatteredInterp to do the interpolation instead of griddata (should be
% faster).
for vv = 1:length(vars)
    if strcmpi(vars{vv}, 'time')
        % Time is not spatial: copy it across unmodified.
        if ftbverbose
            fprintf('transferring variable %s as is\n', vars{vv})
        end
        fvcom.(vars{vv}) = data.(vars{vv});
        continue
    elseif any(strcmpi(vars{vv}, {'lat', 'lon', 'x', 'y'}))
        % Coordinates come from the unstructured grid, not the input data.
        if ftbverbose
            fprintf('reassigning variable %s from unstructured grid\n', vars{vv})
        end
        fvcom.(vars{vv}) = Mobj.(vars{vv});
    else
        % Preallocate the output arrays.
        % Serial version:
        % fvcom.(vars{vv}).data = zeros(nElems, ntimes);
        % fvcom.(vars{vv}).node = zeros(nVerts, ntimes);
        % Also create temporary arrays for the inner loop to be
        % parallelisable (is that a word?):
        tmp_fvcom_data = zeros(nElems, ntimes);
        tmp_fvcom_node = zeros(nVerts, ntimes);
        tmp_data_data = data.(vars{vv}).data; % input to the interpolation

        % Use a parallel loop for the number of time steps we're
        % interpolating (should be quicker, but will use more memory...).
        parfor i = 1:ntimes
            if verbose
                fprintf('interpolating %s, frame %d of %d\n', vars{vv}, i, ntimes);
            end

            % Serial version:
            % currvar = data.(vars{vv}).data(:, :, i);
            % Parallel version:
            currvar = tmp_data_data(:, :, i);

            % griddata way (cubic interpolation)
            %fvcom.(vars{vv}).node(:,i) = griddata(wind.x,wind.y,currvar,x,y,'cubic');
            %fvcom.(vars{vv}).data(:,i) = griddata(wind.x,wind.y,currvar,xc,yc,'cubic');

            % TriScatteredInterp way (with natural neighbour interpolation)
            % Use a try/catch to account for the different grids over which
            % the humidity and sealevel pressure data are sampled.
            try
                ftsin = TriScatteredInterp(data.x(:), data.y(:), currvar(:), 'natural');
            catch err
                warning([err.identifier, ': Some NCEP data are projected' ...
                    ' onto a different grid. Check you have specified' ...
                    ' data.xalt and data.yalt arrays which are on the' ...
                    ' same grid as the data to be interpolated.'])
                ftsin = TriScatteredInterp(data.xalt(:), data.yalt(:), currvar(:), 'natural');
            end
            % Serial version:
            % fvcom.(vars{vv}).node(:,i) = ftsin(x,y);
            % fvcom.(vars{vv}).data(:,i) = ftsin(xc,yc);
            % nnans(1) = sum(isnan(fvcom.(vars{vv}).node(:,i)));
            % nnans(2) = sum(isnan(fvcom.(vars{vv}).data(:,i)));
            % Parallel version:
            tmp_fvcom_node(:, i) = ftsin(x,y);
            tmp_fvcom_data(:, i) = ftsin(xc,yc);
            nnans1 = sum(isnan(tmp_fvcom_node(:, i)));
            nnans2 = sum(isnan(tmp_fvcom_data(:, i)));
            if nnans1 > 0
                warning('%i NaNs in the interpolated node data. This won''t work with FVCOM.', nnans1)
            end
            if nnans2 > 0
                warning('%i NaNs in the interpolated element data. This won''t work with FVCOM.', nnans2)
            end
        end
        % Transfer the temporary arrays back to the relevant struct and
        % clear out the temporary arrays.
        fvcom.(vars{vv}).node = tmp_fvcom_node;
        fvcom.(vars{vv}).data = tmp_fvcom_data;
        clear nnans* tmp_*

        if ftbverbose
            fprintf('interpolation of %s complete\n', vars{vv});
        end
    end
end

% Only close the pool if we were the ones who opened it.
if wasOpened
    matlabpool close
end

if ftbverbose
    fprintf('end   : %s \n', subname)
end