Commit 3b177f2c authored by Mike Bedington's avatar Mike Bedington

Initial commit of existing rose setup

parents
[command]
default = dst=${ARCHIVE_DIR};
src=${ROSE_DATAC}/wrfout_*
ssh ceto6 -t "rsync -aph --no-o --no-g $src $dst"
[command]
default = cp /pml${COMMON_FILES_PATH:1}/* ${ROSE_DATA};
import numpy as np
import netCDF4 as nc
import datetime as dt
import glob as gb
import sys
import PyFVCOM as pf
# ---------------------------------------------------------------------------
# Build an FVCOM nested-boundary forcing file from CMEMS NWS model output
# plus FVCOM harmonic tide predictions.
#
# Configuration: the block between the '#"""' markers is the hard-coded test
# setup; swap the quoting to use the command-line (sys.argv) block instead.
# ---------------------------------------------------------------------------
#"""
cmems_data_dir = '/data/sthenno1/backup/mbe/Data/CMEMS'
start_date = dt.datetime(2018, 9, 19)
end_date = dt.datetime(2018, 9, 20)
grid = 'tamar_v2_grd.dat'
sigma_file = 'sigma_gen.dat'
native_coordinates = 'cartesian'
fvcom_harmonics = '/data/sthenno1/backup/mbe/Data/harmonics_files/tamar_2006_harmonics.nc'
interval = 1 / 24  # output sampling interval in days (hourly)
#"""
"""
cmems_data_dir = sys.argv[1]
start_date = dt.datetime.strptime(sys.argv[2], '%Y-%m-%d')
end_date = dt.datetime.strptime(sys.argv[3], '%Y-%m-%d')
grid = sys.argv[4]
sigma_file = sys.argv[5]
native_coordinates = sys.argv[6]
fvcom_harmonics = sys.argv[7]
interval = 1/float(sys.argv[8])
"""
constituents = ['M2', 'S2']
# BUGFIX: this was 'boundary_nest.nc'.format(grid) -- the .format() call was
# a no-op because the string has no '{}' placeholder.  The downstream rose
# task copies the literal name 'boundary_nest.nc', so keep it as-is.
output_file = 'boundary_nest.nc'
cmems_time_res = 'hi'  # CMEMS time-resolution tag used in directory names

##############################################################################################
# Setup preproc Model object
aqua_prep = pf.preproc.Model(start_date, end_date, grid, native_coordinates,
                             zone='30N', sampling=interval)
aqua_prep.add_sigma_coordinates(sigma_file)

# Make the nested boundary object (4 levels of nodes/elements).
aqua_prep.add_nests(4)
aqua_prep.add_nests_harmonics(fvcom_harmonics,
                              harmonics_vars=['u', 'v', 'ua', 'va', 'zeta'],
                              constituents=constituents, pool_size=20)

# Make the regular readers for the CMEMS data.
# Maps FVCOM variable name -> [CMEMS product tag, CMEMS variable name].
fvcom_cmems_names = {'salinity': ['SAL', 'vosaline'], 'temp': ['TEM', 'votemper'],
                     'v': ['CUR', 'vomecrty'], 'u': ['CUR', 'vozocrtx'],
                     'zeta': ['SSH', 'sossheig']}

# One day either side of the run period so interpolation has full coverage.
dt_list = [start_date + dt.timedelta(days=int(i))
           for i in np.arange(-1, (end_date - start_date).days + 2)]
datestr_list = [this_date.strftime('%Y%m%d') for this_date in dt_list]

for this_fvcom, this_var in fvcom_cmems_names.items():
    cmems_file_list = []
    for this_date in datestr_list:
        # SSH is only available at hourly ('hi') resolution regardless of
        # the configured time resolution for the other variables.
        time_res = 'hi' if this_var[0] == 'SSH' else cmems_time_res
        poss_files = gb.glob('{}/*{}*{}*/*{}.nc'.format(
            cmems_data_dir, time_res, this_var[0], this_date))
        # Handle that sometimes there's multiple files for one day from
        # different forecast runs: the lexicographically greatest filename
        # is the most recent forecast.  Days with no file are skipped.
        if poss_files:
            cmems_file_list.append(max(poss_files))

    # SSH is a surface-only (2D) field; everything else is 3D.
    if this_var[0] == 'SSH':
        reg_reader = pf.preproc.Regular2DReader
    else:
        reg_reader = pf.preproc.RegularReader

    this_data_reader = reg_reader(cmems_file_list[0], [this_var[1]])
    for this_file in cmems_file_list[1:]:
        this_data_reader += reg_reader(this_file, [this_var[1]])
    aqua_prep.add_nests_regular(this_fvcom, this_data_reader, this_var[1],
                                constrain_coordinates=False)

# Depth avg the velocities
aqua_prep.avg_nest_force_vel()

# Write the forcing file, adjusting the interpolated data with the tides.
aqua_prep.write_nested_forcing(output_file, adjust_tides=['zeta', 'u', 'v', 'ua', 'va'])
NUMBER OF SIGMA LEVELS = 25
SIGMA COORDINATE TYPE = GENERALIZED
DU = 35.0
DL = 15.0
MIN CONSTANT DEPTH = 120.0
KU = 7
KL = 3
ZKU = 5.0 5.0 5.0 5.0 5.0 5.0 5.0
ZKL = 5.0 5.0 5.0
This source diff could not be displayed because it is too large. You can view the blob instead.
OBC Node Number = 45
1 1 1
2 2 1
3 3 1
4 4 1
5 5 1
6 6 1
7 7 1
8 8 1
9 9 1
10 10 1
11 11 1
12 12 1
13 13 1
14 14 1
15 15 1
16 16 1
17 17 1
18 18 1
19 19 1
20 20 1
21 21 1
22 22 1
23 23 1
24 24 1
25 25 1
26 26 1
27 27 1
28 28 1
29 29 1
30 30 1
31 31 1
32 32 1
33 33 1
34 34 1
35 35 1
36 36 1
37 37 1
38 38 1
39 39 1
40 40 1
41 41 1
42 42 1
43 43 1
44 44 1
45 45 1
[command]
default = python3 make_nest_bound.py ${CMEMS_DATA_DIR} ${START_DAY} ${END_DAY} ${ROSE_DATAC}/${GRID_NAME}_grd.dat ${ROSE_DATAC}/sigma_gen.dat ${COORDINATES} ${HARMONICS_FILE_PATH} ${NEST_INTERVAL}; cp boundary_nest.nc ${ROSE_DATAC}/${GRID_NAME}_nest_0001.nc
import numpy as np
import pickle as pk
import netCDF4 as nc
import sys
import datetime as dt
import PyFVCOM as pf
import fvcom_river as fr
grid_name = sys.argv[1]
common_dir = sys.argv[2]
# Start two days before the FVCOM run so the river series fully covers it.
start_date = dt.datetime.strptime(sys.argv[3], '%Y-%m-%d_%H:%M:%S') - dt.timedelta(days=2)
end_date = dt.datetime.strptime(sys.argv[4], '%Y-%m-%d_%H:%M:%S')
native_coordinates = sys.argv[5]
wrf_nc_file_str = sys.argv[6]

# Load the river model (a pickled dict of fvcom_river river objects).
with open('river_model.pk1', 'rb') as f:
    river_dict = pk.load(f)

river_list = []
for this_obj in river_dict.values():
    # Mouth coordinates may have been stored as strings; force them numeric.
    this_obj.mouth_lon = float(this_obj.mouth_lon)
    this_obj.mouth_lat = float(this_obj.mouth_lat)
    this_obj.salinity = 0  # rivers are fresh water
    river_list.append(this_obj)

# Add the new WRF data (accumulated rainfall and 2 m temperature).
forecast_nc = nc.Dataset(wrf_nc_file_str, 'r')
wrf_vars = ['RAINNC', 'T2', 'Times']
forecast_data = {}
for this_var in wrf_vars:
    forecast_data[this_var] = forecast_nc.variables[this_var][:]
# 'Times' is a char array; join each row into a bytes date string.
date_str_raw = [b''.join(this_date_raw) for this_date_raw in forecast_data['Times']]
forecast_data['times'] = np.asarray(
    [dt.datetime.strptime(this_date_str.decode('utf-8'), '%Y-%m-%d_%H:%M:%S')
     for this_date_str in date_str_raw])

for this_river in river_list:
    # Catchment-weighted rainfall total and temperature average per timestep.
    this_rain = np.sum(np.sum(forecast_data['RAINNC'] * this_river.wrf_catchment_factors, axis=2), axis=1)
    this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'])
    this_temp = np.zeros(len(forecast_data['times']))
    for i in range(0, len(forecast_data['times'])):
        this_temp[i] = np.average(forecast_data['T2'][i, :, :], weights=this_river.wrf_catchment_factors)
    this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)

# Get and write out the forecast predictions.
grid = common_dir + '/' + grid_name + '_grd.dat'
# BUGFIX: native_coordinates was unconditionally reset to 'cartesian' here,
# silently discarding the ${COORDINATES} value passed as sys.argv[5] by the
# rose task.  The command-line value is now honoured.
obc_file = common_dir + '/' + grid_name + '_obc.dat'
output_file = grid_name + '_riv.nc'
output_file_nml = grid_name + '_riv.nml'

positions, names, times, flux_array, temperature, salinity, ersem_dict = fr.get_pyfvcom_prep(
    river_list, start_date, end_date, ersem=False, noisy=True)
salinity = np.ones(salinity.shape)  # nominal uniform salinity for rivers
flux_array[flux_array < 0] = 0  # no negative discharge

aqua_prep = pf.preproc.Model(start_date, end_date, grid, native_coordinates, zone='30N')
aqua_prep.add_open_boundaries(obc_file)
aqua_prep.add_rivers(positions, names, times, flux_array, temperature, salinity,
                     threshold=np.inf, history='', info='')
aqua_prep.check_rivers(max_discharge=400, min_depth=None, open_boundary_proximity=None, noisy=False)
aqua_prep.write_river_forcing(output_file, ersem=False)
aqua_prep.write_river_namelist(output_file_nml, output_file, vertical_distribution='uniform')
[command]
default = cp -r ${RIVER_MODEL_PATH}/* .; python3 river_make.py ${GRID_NAME} ${ROSE_DATAC} ${START_DATE_STR} ${END_DATE_STR} ${COORDINATES} ${WRF_ARCHIVE_DIR}/today/wrfout_d03*; mv ${GRID_NAME}_riv* /${REMOTE_TRANSFER_DIR}
from datetime import datetime
import PyFVCOM as pf
import sys
# Generate the static FVCOM input files (grid, depth, coriolis, sigma) and an
# elevation tide forcing file for the given casename and date range.
casename = sys.argv[1]
start_str = sys.argv[2]
end_str = sys.argv[3]
# Define a start, end and sampling interval for the tidal data
start = datetime.strptime(start_str, '%Y-%m-%d')
end = datetime.strptime(end_str, '%Y-%m-%d')
interval = 1 / 24 # 1 hourly in units of days
# NOTE(review): zone is '30U' here but the other preprocessing scripts in
# this suite pass zone='30N' -- confirm which designation PyFVCOM expects.
model = pf.preproc.Model(start, end, 'tamar_v2_grd.dat', sampling=interval,
                         native_coordinates='cartesian', zone='30U', noisy=True)
# Define everything we need for the open boundaries.
# We need the TPXO data to predict tides at the boundary. Get that from here:
# ftp://ftp.oce.orst.edu/dist/tides/Global/tpxo9_netcdf.tar.gz
# and extract its contents in the PyFVCOM/examples directory.
fvcom_harmonics = 'tamar_2006_harmonics.nc'
constituents = ['M2', 'S2']
for boundary in model.open_boundaries:
    # Create a 5km sponge layer for all open boundaries.
    #boundary.add_sponge_layer(5000, 0.001)
    # Set the type of open boundary we've got.
    boundary.add_type(1) # prescribed surface elevation
    # And add some tidal data.
    boundary.add_fvcom_tides(fvcom_harmonics, predict='zeta', constituents=constituents, interval=interval, serial=True)
# Make a vertical grid with uniform levels.
# NOTE(review): the original comment said 21 levels, but the code sets 24.
model.sigma.type = 'uniform'
model.dims.levels = 24
# Write out the files for FVCOM.
model.write_grid('{}_grd.dat'.format(casename), depth_file='{}_dep.dat'.format(casename))
#model.write_sponge('operational_spg.dat')
model.write_coriolis('{}_cor.dat'.format(casename))
model.write_sigma('{}_sigma.dat'.format(casename))
model.write_tides('{}_elevtide.nc'.format(casename))
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
OBC Node Number = 45
1 1 1
2 2 1
3 3 1
4 4 1
5 5 1
6 6 1
7 7 1
8 8 1
9 9 1
10 10 1
11 11 1
12 12 1
13 13 1
14 14 1
15 15 1
16 16 1
17 17 1
18 18 1
19 19 1
20 20 1
21 21 1
22 22 1
23 23 1
24 24 1
25 25 1
26 26 1
27 27 1
28 28 1
29 29 1
30 30 1
31 31 1
32 32 1
33 33 1
34 34 1
35 35 1
36 36 1
37 37 1
38 38 1
39 39 1
40 40 1
41 41 1
42 42 1
43 43 1
44 44 1
45 45 1
[command]
default = python3 fvcom_preproc.py ${GRID_NAME} ${START_DAY} ${END_DAY}; cp ${GRID_NAME}_* ${ROSE_DATAC}
#!/bin/bash --login
set -eu

# Get today's forecast data from CMEMS for the NW Shelf domain. Delete yesterday's whilst we're at it.
# CMEMS FTP username and password are stored in ~/.netrc to make this more secure.
#
# Usage: get_nws_forecast.sh <forecast_days>

forecast_days=$1
today=${today:-$(date +%Y%m%d)}

# Fetch from two days back through to the end of the forecast window.
for day in $(seq -2 "$forecast_days"); do
    end=$(date +%Y%m%d -d "$today + $day days")
    echo -n "Getting forecast $today-$end "
    for var in CUR SAL SSH TEM; do
        dir=MetO-NWS-PHYS-hi-${var}
        mkdir -p "$dir"
        file=metoffice_foam1_amm7_NWS_${var}_b${today}_hi${end}.nc
        # Don't fail if we didn't get the file. This might just mean we're doing a hindcast download.
        # BUGFIX: '|| true' restored -- without it 'set -e' aborts the whole
        # script on a missing remote file, defeating the intent stated above
        # and making the empty-file cleanup below unreachable.
        wget -c "ftp://nrt.cmems-du.eu/Core/NORTHWESTSHELF_ANALYSIS_FORECAST_PHYS_004_001_b/$dir/$file" -O "$dir/$file" || true
        # If we're doing a hindcast download we might end up with an empty file, so nuke it here.
        if [ ! -s "$dir/$file" ]; then
            rm "$dir/$file"
        fi
    done
    echo "done."
done

# Clear out the old forecast data.
yesterday=$(date +%Y%m%d -d "$today - 1 day")
for day in $(seq -1 "$forecast_days"); do
    end=$(date +%Y%m%d -d "$yesterday + $day days")
    echo -n "Clearing old forecast $yesterday-$end "
    for var in CUR SAL SSH TEM; do
        dir=MetO-NWS-PHYS-hi-${var}
        file=metoffice_foam1_amm7_NWS_${var}_b${yesterday}_hi${end}.nc
        if [ -f "$dir/$file" ]; then
            rm "$dir/$file"
        fi
    done
    echo "done."
done

# Create a residual of the currents and sea surface height for the files we've just downloaded.
#module load mpi/mpich-x86_64
#cd ~/Models/FVCOM/fvcom-projects/stemm-ccs/python/tides/
#for day in $(seq -1 $forecast_days); do
#    day=$(date +%Y%m%d -d "$today + $day days")
#    mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} SSH sossheig
#    #mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} CUR vozocrtx
#    #mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} CUR vomecrty
#done
##python3 make_residual.py ${today:0:4} ${today:4:2} SSH sossheig
##python3 make_residual.py ${today:0:4} ${today:4:2} CUR vozocrtx
# NOTE(review): the line below duplicates vozocrtx; presumably one of the two
# was meant to be vomecrty -- confirm before re-enabling.
##python3 make_residual.py ${today:0:4} ${today:4:2} CUR vozocrtx
[command]
default = cd ${CMEMS_DATA_DIR}; bash get_nws_forecast.sh 5
[command]
default = ln -s ${WRF_FORECAST_FILE_DIR}/today_wrf.nc ${ROSE_DATAC}/tamar_v2_wnd.nc
[command]
default = ln -s ${RIVER_MODEL_PATH}/${GRID_NAME}_riv.nc ${ROSE_DATAC}/${GRID_NAME}_riv.nc; ln -s ${RIVER_MODEL_PATH}/${GRID_NAME}_riv.nml ${ROSE_DATAC}/${GRID_NAME}_riv.nml
[command]
default = ln -s ${WRF_FORECAST_FILE_DIR}/today_wrf.nc ${ROSE_DATAC}/tamar_v2_wnd.nc
[command]
default = cp /pml${REMOTE_TRANSFER_DIR}/${GRID_NAME}_riv* ${ROSE_DATAC};
[command]
default = cp ${ROSE_DATAC}/${GRID_NAME}_run.nml . ; set -eu; ulimit -s unlimited; module load intel; module load intel-mpi;
module load hdf5-intelmpi; module load netcdf-intelmpi; export WORKDIR=$(readlink -f $(pwd));
export OMP_NUM_THREADS=1; export I_MPI_PIN_PROCS=0-19; export I_MPI_EXTRA_FILESYSTEM=on; export I_MPI_EXTRA_FILESYSTEM_LIST=gpfs;
export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi.so; np=$SLURM_NTASKS;
srun -K -n $np fvcom --casename=${GRID_NAME} --dbg=0
[command]
default = echo "Run WRF convert"
[command]
default = ln -s ${COMMON_FILES_PATH}/* ${ROSE_DATAC};
[command]
default = ln -s ${ROSE_DATA}/* ${ROSE_DATAC};
[command]
default = mkdir -p /gpfs1/users/modellers/mbe/rose_run_output/${START_DAY}; mv ${ROSE_DATAC}/output/${GRID_NAME}_0001.nc /gpfs1/users/modellers/mbe/rose_run_output/${START_DAY}; cp ${ROSE_DATAC}/output/${GRID_NAME}_restart_0001.nc /gpfs1/users/modellers/mbe/rose_run_output/${START_DAY}
#!/usr/bin/env bash
date_list=
matching_files=
for i in {1..5}; do echo $(date -I -d "2014-06-15 +$i days"); done
ncrcat -v T2,RAINNC,Times wrfout_d03_${THIS_YEAR}-${THIS_MONTH}*18_00_00 -O ${THIS_YEAR}_${THIS_MONTH}_data.nc
import numpy as np
import datetime as dt
import glob as gb
import netCDF4 as nc
import pickle as pk
import sys
import fvcom_river as fr
# Bring every river's catchment precipitation/temperature series up to date
# from archived WRF forecast output, then re-pickle the river model.
wrf_forecast_out_dir = sys.argv[1]
end_date = dt.datetime.strptime(sys.argv[2], '%Y-%m-%d')


def _apply_wrf_day(this_date, lookup_date):
    """Fold one day of WRF data into every river's catchment series.

    Opens the most recent 'wrfout_d03*' file in the forecast directory whose
    name matches *lookup_date*, extracts the records whose timestamps fall on
    *this_date*, and updates each river's catchment precipitation and
    temperature series (override=True).

    Raises on any failure (no matching file, unreadable file, missing
    variables) -- callers treat an exception as "this date is still missing".
    """
    date_str = lookup_date.strftime('%Y%m%d')
    potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, date_str))
    # The last file in the glob is taken as the most recent forecast run;
    # raises IndexError (caught by the caller) when there are no files.
    this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
    try:
        # 'Times' is a char array; join each row into a bytes date string.
        wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
        wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
        wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'), '%Y-%m-%d_%H:%M:%S')
                             for this_str in wrf_date_str])
        wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
        date_match = wrf_dt_date == this_date.date()
        forecast_data = {'times': wrf_dt[date_match],
                         'RAINNC': this_wrf_nc.variables['RAINNC'][date_match, :, :],
                         'T2': this_wrf_nc.variables['T2'][date_match, :, :]}
    finally:
        this_wrf_nc.close()
    for this_river in river_dict.values():
        # Catchment-weighted rainfall total and temperature average.
        this_rain = np.sum(np.sum(forecast_data['RAINNC'] * this_river.wrf_catchment_factors, axis=2), axis=1)
        this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
        this_temp = np.zeros(len(forecast_data['times']))
        for i in range(0, len(forecast_data['times'])):
            this_temp[i] = np.average(forecast_data['T2'][i, :, :], weights=this_river.wrf_catchment_factors)
        this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)


# Load the river model
with open('river_model.pk1', 'rb') as f:
    river_dict = pk.load(f)

# Find the earliest date any river's precipitation series reaches, so we
# only (re)process days that at least one river is missing.
start_date = end_date
for this_river in river_dict.values():
    this_river_update = np.max(this_river.catchment_precipitation[0])
    if this_river_update < start_date:
        start_date = this_river_update

if start_date == end_date:
    print('Already up to date')
else:
    missing_dates = [start_date + dt.timedelta(days=int(this_ind))
                     for this_ind in np.arange(0, (end_date - start_date).days + 1)]
    # For each missing day, first try the forecast launched that day, then
    # fall back to progressively older forecast runs that still cover it.
    # REFACTOR: the original script repeated the same try/except block five
    # times; the day offsets are preserved exactly (including the repeated 3,
    # which looks like it may have been intended to be 2 -- confirm).
    for day_offset in [0, 1, 3, 3, 4]:
        still_missing = []
        for this_date in missing_dates:
            if day_offset == 0:
                print(this_date)
            else:
                print('Trying to fill for {}'.format(this_date))
            try:
                _apply_wrf_day(this_date, this_date - dt.timedelta(days=day_offset))
            except Exception:
                # Any failure means the date is still missing; retry with the
                # next (older) forecast offset.
                still_missing.append(this_date)
        missing_dates = still_missing
    for this_date in missing_dates:
        print('Giving up on {}'.format(this_date))

# Persist whatever we managed to update.
with open('river_model.pk1', 'wb') as f:
    pk.dump(river_dict, f, pk.HIGHEST_PROTOCOL)
[command]
default = cp update_river_data.py ${RIVER_MODEL_PATH}; cd ${RIVER_MODEL_PATH}; python3 update_river_data.py ${WRF_ARCHIVE_DIR_PICA} ${START_DAY}; rm update_river_data.py
!========================================================= Hernan G. Arango ===
! Copyright (c) 2002-2014 The ROMS/TOMS Group !
! Licensed under a MIT/X style license !
! See License_ROMS.txt !
!==============================================================================
! !
! Input parameters can be entered in ANY order, provided that the parameter !
! KEYWORD (usually, upper case) is typed correctly followed by "=" or "==" !
! symbols. Any comment lines are allowed and must begin with an exclamation !
! mark (!) in column one. Comments may appear to the right of a parameter !
! specification to improve documentation. Comments will be ignored during !
! reading. Blank lines are also allowed and ignored. Continuation lines in !
! a parameter specification are allowed and must be preceded by a backslash !
! (\). In some instances, more than one value is required for a parameter. !
! If fewer values are provided, the last value is assigned for the entire !
! parameter array. The multiplication symbol (*), without blank spaces in !
! between, is allowed for a parameter specification. For example, in a two !
! grids nested application: !
! !
! AKT_BAK = 2*1.0d-6 2*5.0d-6 ! m2/s !
! !
! indicates that the first two entries of array AKT_BAK, in fortran column- !
! major order, will have the same value of "1.0d-6" for grid 1, whereas the !
! next two entries will have the same value of "5.0d-6" for grid 2. !
! !
! In multiple levels of nesting and/or multiple connected domains step-ups, !
! "Ngrids" entries are expected for some of these parameters. In such case, !
! the order of the entries for a parameter is extremely important. It must !
! follow the same order (1:Ngrids) as in the state variable declaration. The !
! USER may follow the above guidelines for specifying his/her values. These !
! parameters are marked by "=" plural symbol after the KEYWORD. !
! !
!==============================================================================
! !
!-----------------------------------------------------------------------------!
! Sediment Module Dynamics Parameters
! NCS : Number of cohesive (mud) sediment tracers
! NNS : Number of non-cohesive (sand) sediment tracers
! BEDLOAD : True if bedload is to be considered
! SUSLOAD : True if suspended load is to be considered
! COHESIVE_BED : use to activate cohesive bed model
! MIXED_BED : use to activate mixed bed behavior
! NONCOHESIVE_BED2 : use modified bed model of Sherwood et al
! SED_MORPH : use to allow bottom model elevation to evolve
! SED_FL