Commit 3b177f2c authored by Mike Bedington

Initial commit of existing rose setup

[command]
default = dst=${ARCHIVE_DIR};
src=${ROSE_DATAC}/wrfout_*
ssh ceto6 -t "rsync -aph --no-o --no-g $src $dst"
[command]
default = cp /pml${COMMON_FILES_PATH:1}/* ${ROSE_DATA};
import numpy as np
import netCDF4 as nc
import datetime as dt
import glob as gb
import sys
import PyFVCOM as pf
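# Hard-coded test configuration below; swap which triple-quoted block is
# commented out to drive the script from sys.argv instead.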
#"""
cmems_data_dir = '/data/sthenno1/backup/mbe/Data/CMEMS'
start_date = dt.datetime(2018,9,19)
end_date = dt.datetime(2018,9,20)
grid = 'tamar_v2_grd.dat'
sigma_file = 'sigma_gen.dat'
native_coordinates = 'cartesian'
fvcom_harmonics = '/data/sthenno1/backup/mbe/Data/harmonics_files/tamar_2006_harmonics.nc'
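# Sampling interval in days (1/24 is hourly)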
interval = 1/24
#"""
"""
cmems_data_dir = sys.argv[1]
start_date = dt.datetime.strptime(sys.argv[2], '%Y-%m-%d')
end_date = dt.datetime.strptime(sys.argv[3], '%Y-%m-%d')
grid = sys.argv[4]
sigma_file = sys.argv[5]
native_coordinates = sys.argv[6]
fvcom_harmonics = sys.argv[7]
interval = 1/float(sys.argv[8])
"""
constituents = ['M2', 'S2']
output_file = 'boundary_nest.nc'
cmems_time_res = 'hi'
##############################################################################################
# Setup preproc Model object
aqua_prep = pf.preproc.Model(start_date, end_date, grid, native_coordinates, zone='30N', sampling=interval)
aqua_prep.add_sigma_coordinates(sigma_file)
# Make the nested boundary object
aqua_prep.add_nests(4)
aqua_prep.add_nests_harmonics(fvcom_harmonics, harmonics_vars=['u', 'v', 'ua', 'va', 'zeta'], constituents=constituents, pool_size=20)
# Make the regular readers for the CMEMS data
fvcom_cmems_names = {'salinity': ['SAL', 'vosaline'], 'temp': ['TEM', 'votemper'],
                     'v': ['CUR', 'vomecrty'], 'u': ['CUR', 'vozocrtx'],
                     'zeta': ['SSH', 'sossheig']}
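# Dates to read: one day before the run start through one day after the end.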
dt_list = [start_date + dt.timedelta(days=int(i)) for i in np.arange(-1, (end_date - start_date).days + 2)]
datestr_list = [this_date.strftime('%Y%m%d') for this_date in dt_list]
for this_fvcom, this_var in fvcom_cmems_names.items():
    cmems_file_list = []
    for this_date in datestr_list:
        if this_var[0] == 'SSH':
            poss_files = gb.glob('{}/*{}*{}*/*{}.nc'.format(cmems_data_dir, 'hi', this_var[0], this_date))
        else:
            poss_files = gb.glob('{}/*{}*{}*/*{}.nc'.format(cmems_data_dir, cmems_time_res, this_var[0], this_date))
        # There can be multiple files for one day from different forecast runs;
        # the lexicographically largest filename is the most recent bulletin.
        if poss_files:
            cmems_file_list.append(max(poss_files))
    if this_var[0] == 'SSH':
        reg_reader = pf.preproc.Regular2DReader
    else:
        reg_reader = pf.preproc.RegularReader
    this_data_reader = reg_reader(cmems_file_list[0], [this_var[1]])
    for this_file in cmems_file_list[1:]:
        this_data_reader += reg_reader(this_file, [this_var[1]])
    aqua_prep.add_nests_regular(this_fvcom, this_data_reader, this_var[1], constrain_coordinates=False)
# Depth avg the velocities
aqua_prep.avg_nest_force_vel()
# Write the forcing file
aqua_prep.write_nested_forcing(output_file, adjust_tides=['zeta', 'u', 'v', 'ua', 'va'])
NUMBER OF SIGMA LEVELS = 25
SIGMA COORDINATE TYPE = GENERALIZED
DU = 35.0
DL = 15.0
MIN CONSTANT DEPTH = 120.0
KU = 7
KL = 3
ZKU = 5.0 5.0 5.0 5.0 5.0 5.0 5.0
ZKL = 5.0 5.0 5.0
OBC Node Number = 45
1 1 1
2 2 1
3 3 1
4 4 1
5 5 1
6 6 1
7 7 1
8 8 1
9 9 1
10 10 1
11 11 1
12 12 1
13 13 1
14 14 1
15 15 1
16 16 1
17 17 1
18 18 1
19 19 1
20 20 1
21 21 1
22 22 1
23 23 1
24 24 1
25 25 1
26 26 1
27 27 1
28 28 1
29 29 1
30 30 1
31 31 1
32 32 1
33 33 1
34 34 1
35 35 1
36 36 1
37 37 1
38 38 1
39 39 1
40 40 1
41 41 1
42 42 1
43 43 1
44 44 1
45 45 1
[command]
default = python3 make_nest_bound.py ${CMEMS_DATA_DIR} ${START_DAY} ${END_DAY} ${ROSE_DATAC}/${GRID_NAME}_grd.dat ${ROSE_DATAC}/sigma_gen.dat ${COORDINATES} ${HARMONICS_FILE_PATH} ${NEST_INTERVAL}; cp boundary_nest.nc ${ROSE_DATAC}/${GRID_NAME}_nest_0001.nc
import numpy as np
import pickle as pk
import netCDF4 as nc
import sys
import datetime as dt
import PyFVCOM as pf
import fvcom_river as fr
grid_name = sys.argv[1]
common_dir = sys.argv[2]
start_date = dt.datetime.strptime(sys.argv[3],'%Y-%m-%d_%H:%M:%S') - dt.timedelta(days=2) # Must start before FVCOM run
end_date = dt.datetime.strptime(sys.argv[4],'%Y-%m-%d_%H:%M:%S')
native_coordinates = sys.argv[5]
wrf_nc_file_str = sys.argv[6]
# Load the river model
with open('river_model.pk1', 'rb') as f:
    river_dict = pk.load(f)
river_list = []
for this_obj in river_dict.values():
    this_obj.mouth_lon = float(this_obj.mouth_lon)
    this_obj.mouth_lat = float(this_obj.mouth_lat)
    this_obj.salinity = 0
    river_list.append(this_obj)
# Add the new WRF data
forecast_nc = nc.Dataset(wrf_nc_file_str, 'r')
wrf_vars = ['RAINNC', 'T2', 'Times']
forecast_data = {}
for this_var in wrf_vars:
    forecast_data[this_var] = forecast_nc.variables[this_var][:]
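# WRF stores times as character arrays; join each row into a byte string before parsing.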
date_str_raw = [b''.join(this_date_raw) for this_date_raw in forecast_data['Times']]
forecast_data['times'] = np.asarray([dt.datetime.strptime(this_date_str.decode('utf-8'), '%Y-%m-%d_%H:%M:%S') for this_date_str in date_str_raw])
for this_river in river_list:
    this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
    this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'])
    this_temp = np.zeros(len(forecast_data['times']))
    for i in range(len(forecast_data['times'])):
        this_temp[i] = np.average(forecast_data['T2'][i, :, :], weights=this_river.wrf_catchment_factors)
    this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
# Get and write out the forecast predictions
grid = common_dir + '/' + grid_name + '_grd.dat'
obc_file = common_dir + '/' + grid_name + '_obc.dat'
output_file = grid_name + '_riv.nc'
output_file_nml = grid_name + '_riv.nml'
positions, names, times, flux_array, temperature, salinity, ersem_dict = fr.get_pyfvcom_prep(river_list, start_date, end_date, ersem=False, noisy=True)
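# Override the predicted salinity with a constant value of 1 and zero any negative discharges.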
salinity = np.ones(salinity.shape)
flux_array[flux_array < 0] = 0
aqua_prep = pf.preproc.Model(start_date, end_date, grid, native_coordinates, zone='30N')
aqua_prep.add_open_boundaries(obc_file)
aqua_prep.add_rivers(positions, names, times, flux_array, temperature, salinity, threshold=np.inf, history='', info='')
aqua_prep.check_rivers(max_discharge=400, min_depth=None, open_boundary_proximity=None, noisy=False)
aqua_prep.write_river_forcing(output_file, ersem=False)
aqua_prep.write_river_namelist(output_file_nml, output_file, vertical_distribution='uniform')
[command]
default = cp -r ${RIVER_MODEL_PATH}/* .; python3 river_make.py ${GRID_NAME} ${ROSE_DATAC} ${START_DATE_STR} ${END_DATE_STR} ${COORDINATES} ${WRF_ARCHIVE_DIR}/today/wrfout_d03*; mv ${GRID_NAME}_riv* /${REMOTE_TRANSFER_DIR}
from datetime import datetime
import PyFVCOM as pf
import sys
casename = sys.argv[1]
start_str = sys.argv[2]
end_str = sys.argv[3]
# Define a start, end and sampling interval for the tidal data
start = datetime.strptime(start_str, '%Y-%m-%d')
end = datetime.strptime(end_str, '%Y-%m-%d')
interval = 1 / 24 # 1 hourly in units of days
model = pf.preproc.Model(start, end, 'tamar_v2_grd.dat', sampling=interval,
                         native_coordinates='cartesian', zone='30U', noisy=True)
# Define everything we need for the open boundaries.
# Tides at the boundary are predicted here from existing FVCOM harmonics.
# (The stock PyFVCOM example instead uses TPXO data from
# ftp://ftp.oce.orst.edu/dist/tides/Global/tpxo9_netcdf.tar.gz, extracted
# into the PyFVCOM/examples directory.)
fvcom_harmonics = 'tamar_2006_harmonics.nc'
constituents = ['M2', 'S2']
for boundary in model.open_boundaries:
    # Create a 5km sponge layer for all open boundaries.
    #boundary.add_sponge_layer(5000, 0.001)
    # Set the type of open boundary we've got.
    boundary.add_type(1)  # prescribed surface elevation
    # And add some tidal data.
    boundary.add_fvcom_tides(fvcom_harmonics, predict='zeta', constituents=constituents, interval=interval, serial=True)
# Make a vertical grid with 24 uniform levels
model.sigma.type = 'uniform'
model.dims.levels = 24
# Write out the files for FVCOM.
model.write_grid('{}_grd.dat'.format(casename), depth_file='{}_dep.dat'.format(casename))
#model.write_sponge('operational_spg.dat')
model.write_coriolis('{}_cor.dat'.format(casename))
model.write_sigma('{}_sigma.dat'.format(casename))
model.write_tides('{}_elevtide.nc'.format(casename))
OBC Node Number = 45
1 1 1
2 2 1
3 3 1
4 4 1
5 5 1
6 6 1
7 7 1
8 8 1
9 9 1
10 10 1
11 11 1
12 12 1
13 13 1
14 14 1
15 15 1
16 16 1
17 17 1
18 18 1
19 19 1
20 20 1
21 21 1
22 22 1
23 23 1
24 24 1
25 25 1
26 26 1
27 27 1
28 28 1
29 29 1
30 30 1
31 31 1
32 32 1
33 33 1
34 34 1
35 35 1
36 36 1
37 37 1
38 38 1
39 39 1
40 40 1
41 41 1
42 42 1
43 43 1
44 44 1
45 45 1
[command]
default = python3 fvcom_preproc.py ${GRID_NAME} ${START_DAY} ${END_DAY}; cp ${GRID_NAME}_* ${ROSE_DATAC}
#!/bin/bash --login
set -eu
# Get today's forecast data from CMEMS for the NW Shelf domain. Delete yesterday's whilst we're at it.
# CMEMS FTP username and password are stored in ~/.netrc to make this more secure.
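# A ~/.netrc entry of the form below (file mode 600, placeholders shown) lets
# wget pick the credentials up automatically for the host used further down:
#   machine nrt.cmems-du.eu
#   login YOUR_CMEMS_USERNAME
#   password YOUR_CMEMS_PASSWORD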
forecast_days=$1
today=${today:-$(date +%Y%m%d)}
for day in $(seq -2 $forecast_days); do
    end=$(date +%Y%m%d -d "$today + $day days")
    echo -n "Getting forecast $today-$end "
    for var in CUR SAL SSH TEM; do
        dir=MetO-NWS-PHYS-hi-${var}
        if [ ! -d $dir ]; then
            mkdir $dir
        fi
        file=metoffice_foam1_amm7_NWS_${var}_b${today}_hi${end}.nc
        # Don't fail if we didn't get the file. This might just mean we're doing a hindcast download.
        #wget -qc ftp://nrt.cmems-du.eu/Core/NORTHWESTSHELF_ANALYSIS_FORECAST_PHYS_004_001_b/$dir/$file -O$dir/$file || true
        wget -c ftp://nrt.cmems-du.eu/Core/NORTHWESTSHELF_ANALYSIS_FORECAST_PHYS_004_001_b/$dir/$file -O$dir/$file
        # If we're doing a hindcast download we might end up with an empty file, so nuke it here.
        if [ ! -s $dir/$file ]; then
            rm $dir/$file
        fi
    done
    echo "done."
done
# Clear out the old forecast data.
yesterday=$(date +%Y%m%d -d "$today - 1 day")
for day in $(seq -1 $forecast_days); do
    end=$(date +%Y%m%d -d "$yesterday + $day days")
    echo -n "Clearing old forecast $yesterday-$end "
    for var in CUR SAL SSH TEM; do
        dir=MetO-NWS-PHYS-hi-${var}
        file=metoffice_foam1_amm7_NWS_${var}_b${yesterday}_hi${end}.nc
        if [ -f $dir/$file ]; then
            rm $dir/$file
        fi
    done
    echo "done."
done
# Create a residual of the currents and sea surface height for the files we've just downloaded.
#module load mpi/mpich-x86_64
#cd ~/Models/FVCOM/fvcom-projects/stemm-ccs/python/tides/
#for day in $(seq -1 $forecast_days); do
# day=$(date +%Y%m%d -d "$today + $day days")
# mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} SSH sossheig
# #mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} CUR vozocrtx
# #mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} CUR vomecrty
#done
##python3 make_residual.py ${today:0:4} ${today:4:2} SSH sossheig
##python3 make_residual.py ${today:0:4} ${today:4:2} CUR vozocrtx
##python3 make_residual.py ${today:0:4} ${today:4:2} CUR vomecrty
[command]
default = cd ${CMEMS_DATA_DIR}; bash get_nws_forecast.sh 5
[command]
default = ln -s ${WRF_FORECAST_FILE_DIR}/today_wrf.nc ${ROSE_DATAC}/tamar_v2_wnd.nc
[command]
default = ln -s ${RIVER_MODEL_PATH}/${GRID_NAME}_riv.nc ${ROSE_DATAC}/${GRID_NAME}_riv.nc; ln -s ${RIVER_MODEL_PATH}/${GRID_NAME}_riv.nml ${ROSE_DATAC}/${GRID_NAME}_riv.nml
[command]
default = ln -s ${WRF_FORECAST_FILE_DIR}/today_wrf.nc ${ROSE_DATAC}/tamar_v2_wnd.nc
[command]
default = cp /pml${REMOTE_TRANSFER_DIR}/${GRID_NAME}_riv* ${ROSE_DATAC};
[command]
default = cp ${ROSE_DATAC}/${GRID_NAME}_run.nml . ; set -eu; ulimit -s unlimited; module load intel; module load intel-mpi;
module load hdf5-intelmpi; module load netcdf-intelmpi; export WORKDIR=$(readlink -f $(pwd));
export OMP_NUM_THREADS=1; export I_MPI_PIN_PROCS=0-19; export I_MPI_EXTRA_FILESYSTEM=on; export I_MPI_EXTRA_FILESYSTEM_LIST=gpfs;
export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi.so; np=$SLURM_NTASKS;
srun -K -n $np fvcom --casename=${GRID_NAME} --dbg=0
[command]
default = echo "Run WRF convert"
[command]
default = ln -s ${COMMON_FILES_PATH}/* ${ROSE_DATAC};
[command]
default = ln -s ${ROSE_DATA}/* ${ROSE_DATAC};
[command]
default = mkdir -p /gpfs1/users/modellers/mbe/rose_run_output/${START_DAY}; mv ${ROSE_DATAC}/output/${GRID_NAME}_0001.nc /gpfs1/users/modellers/mbe/rose_run_output/${START_DAY}; cp ${ROSE_DATAC}/output/${GRID_NAME}_restart_0001.nc /gpfs1/users/modellers/mbe/rose_run_output/${START_DAY}
#!/usr/bin/env bash
# Scratch notes: print a run of dates, then concatenate the WRF variables the
# river model needs (T2, RAINNC, Times) into a single monthly file.
for i in {1..5}; do echo $(date -I -d "2014-06-15 +$i days"); done
ncrcat -v T2,RAINNC,Times wrfout_d03_${THIS_YEAR}-${THIS_MONTH}*18_00_00 -O ${THIS_YEAR}_${THIS_MONTH}_data.nc
import numpy as np
import datetime as dt
import glob as gb
import netCDF4 as nc
import pickle as pk
import sys
import fvcom_river as fr
wrf_forecast_out_dir = sys.argv[1]
end_date = dt.datetime.strptime(sys.argv[2], '%Y-%m-%d')
# Load the river model
with open('river_model.pk1', 'rb') as f:
    river_dict = pk.load(f)
# Find the earliest date for which any river still needs updating
start_date = end_date
for this_river in river_dict.values():
    this_river_update = np.max(this_river.catchment_precipitation[0])
    if this_river_update < start_date:
        start_date = this_river_update


def add_wrf_day(this_date, offset_days):
    # Add one day's catchment rainfall and temperature to every river, read from
    # the WRF forecast run started offset_days before this_date. Returns False
    # if no usable file is found.
    search_str = (this_date - dt.timedelta(days=offset_days)).strftime('%Y%m%d')
    potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, search_str))
    try:
        this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
        wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
        wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
        wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'), '%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
        wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
        date_match = wrf_dt_date == this_date.date()
        forecast_data = {'times': wrf_dt[date_match],
                         'RAINNC': this_wrf_nc.variables['RAINNC'][date_match, :, :],
                         'T2': this_wrf_nc.variables['T2'][date_match, :, :]}
        this_wrf_nc.close()
        for this_river in river_dict.values():
            this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
            this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
            this_temp = np.zeros(len(forecast_data['times']))
            for i in range(len(forecast_data['times'])):
                this_temp[i] = np.average(forecast_data['T2'][i, :, :], weights=this_river.wrf_catchment_factors)
            this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
    except Exception:
        return False
    return True


if start_date == end_date:
    print('Already up to date')
else:
    date_list = [start_date + dt.timedelta(days=int(this_ind)) for this_ind in np.arange(0, (end_date - start_date).days + 1)]
    missing_dates = []
    for this_date in date_list:
        print(this_date)
        if not add_wrf_day(this_date, 0):
            missing_dates.append(this_date)
    # Fall back through the forecast runs started 1, 3, 3 and 4 days earlier
    # for any day that didn't have a usable run of its own.
    for this_date in missing_dates:
        print('Trying to fill for {}'.format(this_date))
        for offset_days in (1, 3, 3, 4):
            if add_wrf_day(this_date, offset_days):
                break
        else:
            print('Giving up on {}'.format(this_date))

with open('river_model.pk1', 'wb') as f:
    pk.dump(river_dict, f, pk.HIGHEST_PROTOCOL)
[command]
default = cp update_river_data.py ${RIVER_MODEL_PATH}; cd ${RIVER_MODEL_PATH}; python3 update_river_data.py ${WRF_ARCHIVE_DIR_PICA} ${START_DAY}; rm update_river_data.py
[command]
default = cp cstms_sediment.inp ${ROSE_DATAC}
rose-suite_TAMAR.conf
access-list=mbe
owner=mbe
project=test_suite
sub-project=A
title=hmm_title
[jinja2:suite.rc]
## Run properties
COLD_START=True
SEDIMENT=False
USE_CETO=True
NO_NODES=4
FORECAST=True
## Grid properties and files
GRID_NAME='aqua_v16'
COMMON_FILES_PATH='/gpfs1/users/modellers/mbe/rosa_run_common'
## Atmospheric setup
WIND_ON='F'
HEATING_ON='F'
PRECIPITATION_ON='F'
AIR_PRESSURE_ON='F'
## WRF suite settings if any of the above are on
WRF_RUN_SUITE='rose-wrf'
WRF_FORECAST_FILE_DIR='/gpfs1/users/modellers/mbe/rose_run_wrf_temp'
## River setup
NO_RIVERS='0'
## Required if NO_RIVERS > 0, either NEURAL_NET or CLIMATOLOGY
RIVER_MODEL='NEURAL_NET'
## Required if NO_RIVERS > 0, the files expected depend on the RIVER_MODEL value
RIVER_MODEL_PATH='/pmldata/sthenno1/backup/mbe/Code/fvcom-projects/mycoast/run/rose_fvcom_prep/river_mod/'
## Boundary forcing
## Between 'TIDAL', 'CMEMS', and 'NEST_FILE'
BOUNDARY_FORCING='CMEMS'
## Required for TIDAL and CMEMS
HARMONICS_FILE_PATH='/pmldata/sthenno1/backup/mbe/Data/harmonics_files/aqua_v16_0001_2006_harmonics.nc'
## Required for CMEMS
CMEMS_DATA_DIR='/pmldata/euryale1/scratch/pica/data/CMEMS'
## Required for NEST_FILE
NEST_RUN_SUITE='rose-rosa'
# The time resolution of the nest output, as divisions of 1 day (i.e. 24 would be hourly, 48 every half hour)