Commit 88b74143 authored by Mike Bedington

Merge branch 'master' of gitlab.ecosystem-modelling.pml.ac.uk:pml-modelling/rose_fvcom_setup

parents 99fbb391 915d5091
@@ -17,14 +17,14 @@ wrf_nc_file_str = sys.argv[6]

# Load the river model
with open('river_model.pk1','rb') as f:
    river_dict = pk.load(f)

river_list = []
for this_obj in river_dict.values():
    this_obj.mouth_lon = float(this_obj.mouth_lon)
    this_obj.mouth_lat = float(this_obj.mouth_lat)
    this_obj.salinity = 0
    river_list.append(this_obj)

# Add the new WRF data
forecast_nc = nc.Dataset(wrf_nc_file_str, 'r')
@@ -32,19 +32,26 @@ forecast_nc = nc.Dataset(wrf_nc_file_str, 'r')

wrf_vars = ['RAINNC', 'T2', 'Times']
forecast_data = {}
for this_var in wrf_vars:
    forecast_data[this_var] = forecast_nc.variables[this_var][:]

date_str_raw = [b''.join(this_date_raw) for this_date_raw in forecast_data['Times']]
forecast_data['times'] = np.asarray([dt.datetime.strptime(this_date_str.decode('utf-8'), '%Y-%m-%d_%H:%M:%S') for this_date_str in date_str_raw])

for this_river in river_list:
-    this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
-    this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'])
+    if hasattr(this_river, 'wrf_catchment_factors'):
+        this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
+        this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'])
-    this_temp = np.zeros(len(forecast_data['times']))
-    for i in range(0, len(forecast_data['times'])):
-        this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
-    this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
+        this_temp = np.zeros(len(forecast_data['times']))
+        for i in range(0, len(forecast_data['times'])):
+            this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
+        this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)

+for this_river in river_list:
+    try:
+        this_river._expandDateSeries(start_date, end_date)
+    except:
+        pass

# Get and write out the forecast predictions
grid = common_dir + '/' + grid_name + '_grd.dat'
......
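The precipitation and temperature series built in the hunk above follow one pattern: RAINNC is summed over the WRF grid weighted by the river's catchment factors, and T2 is averaged with the same weights. A minimal standalone sketch of that pattern (the helper name and array shapes are illustrative, not part of the repository):

import numpy as np

def catchment_series(rainnc, t2, weights):
    # rainnc, t2: (time, south_north, west_east) WRF fields; weights: 2D catchment factors
    rain = np.sum(np.sum(rainnc * weights, axis=2), axis=1)  # weighted rainfall total per time step
    temp = np.array([np.average(t2[i, :, :], weights=weights)  # weighted mean temperature per time step
                     for i in range(t2.shape[0])])
    return rain, temp

# Example with random arrays standing in for a WRF forecast
rng = np.random.default_rng(0)
rain, temp = catchment_series(rng.random((6, 4, 5)), 280 + rng.random((6, 4, 5)), rng.random((4, 5)))
print(rain.shape, temp.shape)  # (6,) (6,)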
@@ -10,181 +10,67 @@ import fvcom_river as fr

wrf_forecast_out_dir = sys.argv[1]
end_date = dt.datetime.strptime(sys.argv[2],'%Y-%m-%d')
+no_miss_loops = 4

# Load the river model
with open('river_model.pk1','rb') as f:
    river_dict = pk.load(f)

start_date = end_date
for this_river in river_dict.values():
-    this_river_update = np.max(this_river.catchment_precipitation[0])
-    if this_river_update < start_date:
-        start_date = this_river_update
+    if hasattr(this_river, 'catchment_precipitation'):
+        this_river_update = np.max(this_river.catchment_precipitation[0])
+        if this_river_update < start_date:
+            start_date = this_river_update
if start_date == end_date:
-    print('Already up to date')
-else:
-    date_list = np.asarray([start_date + dt.timedelta(days=int(this_ind)) for this_ind in np.arange(0, (end_date - start_date).days + 1)])
-    missing_dates = []
-    for this_date in date_list:
-        print(this_date)
-        this_date_str = this_date.strftime('%Y%m%d')
-        potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, this_date_str))
-        try:
-            this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
-            wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
-            wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
-            wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'),'%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
-            wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
-            date_match = wrf_dt_date == this_date.date()
-            forecast_data = {'times': wrf_dt[date_match], 'RAINNC': this_wrf_nc.variables['RAINNC'][date_match,:,:],
-                             'T2': this_wrf_nc.variables['T2'][date_match,:,:]}
-            this_wrf_nc.close()
-            for this_river_name, this_river in river_dict.items():
-                this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
-                this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
-                this_temp = np.zeros(len(forecast_data['times']))
-                for i in range(0, len(forecast_data['times'])):
-                    this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
-                this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
-        except:
-            missing_dates.append(this_date)
-    missing_dates_1 = []
-    for this_date in missing_dates:
-        print('Trying to fill for {}'.format(this_date))
-        this_date_m1 = this_date - dt.timedelta(days=1)
-        this_date_str = this_date_m1.strftime('%Y%m%d')
-        potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, this_date_str))
-        try:
-            this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
-            wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
-            wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
-            wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'),'%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
-            wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
-            date_match = wrf_dt_date == this_date.date()
-            forecast_data = {'times': wrf_dt[date_match], 'RAINNC': this_wrf_nc.variables['RAINNC'][date_match,:,:],
-                             'T2': this_wrf_nc.variables['T2'][date_match,:,:]}
-            this_wrf_nc.close()
-            for this_river_name, this_river in river_dict.items():
-                this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
-                this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
-                this_temp = np.zeros(len(forecast_data['times']))
-                for i in range(0, len(forecast_data['times'])):
-                    this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
-                this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
-        except:
-            missing_dates_1.append(this_date)
-    missing_dates_2 = []
-    for this_date in missing_dates_1:
-        this_date_m1 = this_date - dt.timedelta(days=3)
-        this_date_str = this_date_m1.strftime('%Y%m%d')
-        potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, this_date_str))
-        try:
-            this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
-            wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
-            wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
-            wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'),'%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
-            wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
-            date_match = wrf_dt_date == this_date.date()
-            forecast_data = {'times': wrf_dt[date_match], 'RAINNC': this_wrf_nc.variables['RAINNC'][date_match,:,:],
-                             'T2': this_wrf_nc.variables['T2'][date_match,:,:]}
-            this_wrf_nc.close()
-            for this_river_name, this_river in river_dict.items():
-                this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
-                this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
-                this_temp = np.zeros(len(forecast_data['times']))
-                for i in range(0, len(forecast_data['times'])):
-                    this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
-                this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
-        except:
-            missing_dates_2.append(this_date)
-    missing_dates_3 = []
-    for this_date in missing_dates_2:
-        this_date_m1 = this_date - dt.timedelta(days=3)
-        this_date_str = this_date_m1.strftime('%Y%m%d')
-        potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, this_date_str))
-        try:
-            this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
-            wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
-            wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
-            wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'),'%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
-            wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
-            date_match = wrf_dt_date == this_date.date()
-            forecast_data = {'times': wrf_dt[date_match], 'RAINNC': this_wrf_nc.variables['RAINNC'][date_match,:,:],
-                             'T2': this_wrf_nc.variables['T2'][date_match,:,:]}
-            this_wrf_nc.close()
-            for this_river_name, this_river in river_dict.items():
-                this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
-                this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
-                this_temp = np.zeros(len(forecast_data['times']))
-                for i in range(0, len(forecast_data['times'])):
-                    this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
-                this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
-        except:
-            missing_dates_3.append(this_date)
-    for this_date in missing_dates_3:
-        this_date_m1 = this_date - dt.timedelta(days=4)
-        this_date_str = this_date_m1.strftime('%Y%m%d')
-        potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, this_date_str))
-        try:
-            this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
-            wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
-            wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
-            wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'),'%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
-            wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
-            date_match = wrf_dt_date == this_date.date()
-            forecast_data = {'times': wrf_dt[date_match], 'RAINNC': this_wrf_nc.variables['RAINNC'][date_match,:,:],
-                             'T2': this_wrf_nc.variables['T2'][date_match,:,:]}
-            this_wrf_nc.close()
-            for this_river_name, this_river in river_dict.items():
-                this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
-                this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
-                this_temp = np.zeros(len(forecast_data['times']))
-                for i in range(0, len(forecast_data['times'])):
-                    this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
-                this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
-        except:
-            print('Giving up on {}'.format(this_date))
-    with open('river_model.pk1','wb') as f:
-        pk.dump(river_dict, f, pk.HIGHEST_PROTOCOL)
+    print('Already up to date')
+else:
+    missing_dates = np.asarray([start_date + dt.timedelta(days=int(this_ind)) for this_ind in np.arange(0, (end_date - start_date).days + 1)])
+    for this_missing_loop in np.arange(0, no_miss_loops):
+        new_missing_dates = []
+        for this_date in missing_dates:
+            if this_missing_loop > 0:
+                print('Trying again to fill for {}'.format(this_date))
+            else:
+                print(this_date)
+            this_date_m1 = this_date - dt.timedelta(days=int(this_missing_loop))
+            this_date_str = this_date_m1.strftime('%Y%m%d')
+            potential_files = gb.glob('{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, this_date_str))
+            try:
+                this_wrf_nc = nc.Dataset(potential_files[-1], 'r')
+                wrf_date_str_raw = this_wrf_nc.variables['Times'][:]
+                wrf_date_str = np.asarray([b''.join(this_str) for this_str in wrf_date_str_raw])
+                wrf_dt = np.asarray([dt.datetime.strptime(this_str.decode('utf-8'),'%Y-%m-%d_%H:%M:%S') for this_str in wrf_date_str])
+                wrf_dt_date = np.asarray([this_dt.date() for this_dt in wrf_dt])
+                date_match = wrf_dt_date == this_date.date()
+                forecast_data = {'times': wrf_dt[date_match], 'RAINNC': this_wrf_nc.variables['RAINNC'][date_match,:,:],
+                                 'T2': this_wrf_nc.variables['T2'][date_match,:,:]}
+                this_wrf_nc.close()
+                for this_river_name, this_river in river_dict.items():
+                    if hasattr(this_river, 'addToSeries'):
+                        this_rain = np.sum(np.sum(forecast_data['RAINNC']*this_river.wrf_catchment_factors, axis=2), axis=1)
+                        this_river.addToSeries('catchment_precipitation', this_rain, forecast_data['times'], override=True)
+                        this_temp = np.zeros(len(forecast_data['times']))
+                        for i in range(0, len(forecast_data['times'])):
+                            this_temp[i] = np.average(forecast_data['T2'][i,:,:], weights=this_river.wrf_catchment_factors)
+                        this_river.addToSeries('catchment_temp', this_temp, forecast_data['times'], override=True)
+                    if hasattr(this_river, 'river_obj'):
+                        this_river.catchment_precipitation = this_river.river_obj.catchment_precipitation
+            except:
+                new_missing_dates.append(this_date)
+        missing_dates = new_missing_dates[:]

+with open('river_model.pk1','wb') as f:
+    pk.dump(river_dict, f, pk.HIGHEST_PROTOCOL)
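The refactor above replaces four near-identical retry passes with a single loop: on pass n it looks for a forecast run started n days before the missing date and gives up after no_miss_loops attempts. A condensed sketch of that lookback (the glob pattern comes from the script; the function wrapper and sorting are illustrative additions):

import datetime as dt
import glob as gb

def find_forecast_file(wrf_forecast_out_dir, target_date, max_lookback=4):
    # Try the run from the target day first, then fall back one day per pass.
    for lookback in range(max_lookback):
        run_date = target_date - dt.timedelta(days=lookback)
        pattern = '{}/{}*_forecast/wrfout_d03*'.format(wrf_forecast_out_dir, run_date.strftime('%Y%m%d'))
        candidates = sorted(gb.glob(pattern))
        if candidates:
            return candidates[-1]  # last match, as with potential_files[-1] in the script
    return None  # nothing found: the caller gives up on this date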
-rose-suite_TAMAR.conf
\ No newline at end of file
+rose-suite_ROSA.conf
\ No newline at end of file
@@ -3,12 +3,13 @@
COLD_START=True
SEDIMENT=False
USE_CETO=True
-NO_NODES=4
+MIN_NODES=4
+MAX_NODES=10
FORECAST=True

## Grid properties and files
GRID_NAME='aqua_v16'
-COMMON_FILES_PATH='/gpfs1/users/modellers/mbe/rosa_run_common'
+COMMON_FILES_PATH='/data/sthenno1/backup/mbe/Code/fvcom-projects/mycoast/run/aqua_v16_common'

## Atmospheric setup
WIND_ON='F'
@@ -20,20 +21,20 @@ WRF_RUN_SUITE='rose-wrf'
WRF_FORECAST_FILE_DIR='/gpfs1/users/modellers/mbe/rose_run_wrf_temp'

## River setup
NO_RIVERS='0'
-## Required if NO_RIVERS > 0, either NEURAL_NET or CLIMATOLOGY
-RIVER_MODEL='NEURAL_NET'
-## Required if NO_RIVERS > 0, the files expected depend on the RIVER_MODEL value
-RIVER_MODEL_PATH='/pmldata/sthenno1/backup/mbe/Code/fvcom-projects/mycoast/run/rose_fvcom_prep/river_mod/'
+## Between 'NONE', 'NEURAL_NET', and 'CLIMATOLOGY'
+RIVER_MODEL='NONE'
+## Required if 'NEURAL_NET' or 'CLIMATOLOGY', the files expected depend on the RIVER_MODEL value
+RIVER_MODEL_PATH='/data/sthenno1/backup/mbe/Code/fvcom-projects/mycoast/run/aqua_v16_river_mod'
## Required if 'NEURAL_NET', must omit first slash to allow use on remote and local
REMOTE_TRANSFER_DIR='data/sthenno1/backup/mbe/rose_rosa_transfer_dir'

## Boundary forcing
## Between 'TIDAL', 'CMEMS', and 'NEST_FILE'
BOUNDARY_FORCING='CMEMS'
## Required for TIDAL and CMEMS
-HARMONICS_FILE_PATH='/pmldata/sthenno1/backup/mbe/Data/harmonics_files/aqua_v16_0001_2006_harmonics.nc'
+HARMONICS_FILE_PATH='/data/sthenno1/backup/mbe/Data/harmonics_files/aqua_v16_0001_2006_harmonics.nc'
## Required for CMEMS
-CMEMS_DATA_DIR='/pmldata/euryale1/scratch/pica/data/CMEMS'
+CMEMS_DATA_DIR='/data/sthenno1/backup/mbe/Data/CMEMS'
## Required for NEST_FILE
NEST_RUN_SUITE='rose-rosa'
# The time resolution of the nest output, as divisions of 1 day (i.e. 24 would be hourly, 48 every half hour)
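As a quick check of the divisor convention described in the comment above, the implied nest output interval is simply the day length divided by the setting (illustration only, not part of the suite):

# Nest output interval implied by 'divisions of 1 day'
for divisions in (24, 48):
    print(divisions, '->', 24 * 60 // divisions, 'minutes')  # 24 -> 60 minutes (hourly), 48 -> 30 minutes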
......
@@ -3,7 +3,8 @@
COLD_START=True
SEDIMENT=False
USE_CETO=True
-NO_NODES=5
+MIN_NODES=4
+MAX_NODES=10
FORECAST=True

## Grid properties and files
......
@@ -4,8 +4,8 @@
    abort if any task fails = False

[scheduling]
-    initial cycle point = 2018-10-03T00:00:00Z
-    final cycle point = +P4D
+    initial cycle point = 2018-10-04T00:00:00Z
+    final cycle point = +P5D
    [[special tasks]]
        clock-trigger = start_cycle(PT0M)
    [[dependencies]]
@@ -102,7 +102,7 @@
        submission polling intervals = PT10S
        execution polling intervals = PT10S, PT1M
        [[[directives]]]
-            --nodes = {{NO_NODES}}
+            --nodes = {{MIN_NODES}}-{{MAX_NODES}}
            --ntasks-per-node=20
            --threads-per-core=1
            --time=24:00:00
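The node request is now templated from the MIN_NODES and MAX_NODES variables set in the rose-suite configurations above. A minimal illustration of how the Jinja2 expansion would render for the values in this commit (standalone sketch, assuming MIN_NODES=4 and MAX_NODES=10; not part of the suite itself):

from jinja2 import Template

# Render the SLURM directive the same way cylc expands {{...}} placeholders
directive = Template('--nodes = {{MIN_NODES}}-{{MAX_NODES}}').render(MIN_NODES=4, MAX_NODES=10)
print(directive)  # --nodes = 4-10, i.e. SLURM may allocate anywhere in the 4-10 node range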
......