Commit 3ca5619d authored by Mike Bedington's avatar Mike Bedington

Initial commit

parents
#!/bin/bash --login
set -eu
# Get today's forecast data from CMEMS for the NW Shelf domain. Delete yesterday's whilst we're at it.
# CMEMS FTP username and password are stored in ~/.netrc to make this more secure.
#
# Usage: get_nws_forecast.sh <forecast_days> <cmems_dir>
#   forecast_days - number of days ahead to fetch (we also fetch 2 days back)
#   cmems_dir     - root directory in which the per-variable folders live
forecast_days=$1
cmems_dir=$2

cd "${cmems_dir}"

# Allow the caller to pre-set "today" (e.g. for hindcast downloads).
today=${today:-$(date +%Y%m%d)}

# From two days back through $forecast_days ahead.
for day in $(seq -- -2 "$forecast_days"); do
    end=$(date +%Y%m%d -d "$today + $day days")
    echo -n "Getting forecast $today-$end "
    for var in CUR SAL SSH TEM; do
        dir=MetO-NWS-PHYS-hi-${var}
        mkdir -p "$dir"
        file=metoffice_foam1_amm7_NWS_${var}_b${today}_hi${end}.nc
        # Don't fail if we didn't get the file. This might just mean we're doing a hindcast download.
        #wget -qc ftp://nrt.cmems-du.eu/Core/NORTHWESTSHELF_ANALYSIS_FORECAST_PHYS_004_001_b/$dir/$file -O$dir/$file || true
        wget -c "ftp://nrt.cmems-du.eu/Core/NORTHWESTSHELF_ANALYSIS_FORECAST_PHYS_004_001_b/$dir/$file" -O "$dir/$file" || true
        # If we're doing a hindcast download we might end up with an empty file, so nuke it here.
        # (wget -O creates the output file even on failure; check existence anyway so a
        # missing file can't abort the script under set -e.)
        if [ -e "$dir/$file" ] && [ ! -s "$dir/$file" ]; then
            rm -- "$dir/$file"
        fi
        # If we've got a new forecast for day x then delete all other files for day x.
        # Glob instead of parsing `ls` output, which breaks on unusual filenames.
        if [ -f "$dir/$file" ]; then
            echo "Clearing old forecast ${end}"
            for old in "$dir"/metoffice_foam1_amm7_NWS_"${var}"_b*_hi"${end}".nc; do
                [ -e "$old" ] || continue          # unmatched glob stays literal; skip it
                [ "$old" = "$dir/$file" ] || rm -- "$old"
            done
        fi
    done
    echo "done."
done
# Create a residual of the currents and sea surface height for the files we've just downloaded.
#module load mpi/mpich-x86_64
#cd ~/Models/FVCOM/fvcom-projects/stemm-ccs/python/tides/
#for day in $(seq -1 $forecast_days); do
#    day=$(date +%Y%m%d -d "$today + $day days")
#    mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} SSH sossheig
#    #mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} CUR vozocrtx
#    #mpirun -n $(nproc) python3 nemo_tides.py ${day:0:4} ${day:4:2} ${day:6:2} CUR vomecrty
#done
##python3 make_residual.py ${today:0:4} ${today:4:2} SSH sossheig
##python3 make_residual.py ${today:0:4} ${today:4:2} CUR vozocrtx
##python3 make_residual.py ${today:0:4} ${today:4:2} CUR vozocrtx
# Rose app entry point: fetch 5 days of CMEMS NW Shelf forecast into $CMEMS_DATA_DIR.
[command]
default = bash get_nws_forecast.sh 5 ${CMEMS_DATA_DIR}
#!/bin/bash
# Get the forcing files for a GFS-forced WRF run. This is based on Dima's (SAMS) approach.
#
# Usage: dima_get_new.sh <forecast_hours> <euryale_archivedir> <ceto_archive_dir>
#   forecast_hours     - how many forecast hours to fetch (3-hourly files)
#   euryale_archivedir - local archive directory to download into
#   ceto_archive_dir   - destination directory on ceto for the rsync
set -eu
forecast_hours=$1
euryale_archivedir=$2
ceto_archive_dir=$3

# NB: deliberately unquoted when used, so the options word-split.
curl_opts="--location --silent --continue-at -"
base=ftp://ftp.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/

# Print the name of the Nth-most-recent gfs.* folder in the FTP listing
# ($1 = 1 for the latest, 2 for the one before that).
# `curl -l` gives a bare name-only FTP listing; strip any backquote/quote
# decoration. (The old version also piped through `cut` with multibyte
# curly-quote delimiters, which GNU cut rejects outright and which killed
# the whole pipeline — removed.)
nth_last_day() {
    curl $curl_opts -l "${base}" | grep "gfs\." | tail -n "$1" | head -n 1 \
        | cut -f2 -d'`' | cut -f1 -d "'"
}

# Print the most recent run hour (18/12/06/00) whose first GRIB file exists
# under the day URL given as $1; print nothing if no run is available yet.
latest_hour_for() {
    local url=$1 hh
    for hh in 18 12 06 00; do
        if curl $curl_opts --output /dev/null --head --fail "${url}/gfs.t${hh}z.pgrb2.0p25.f000"; then
            echo "$hh"
            return 0
        fi
    done
    return 0
}

echo " ======== start ========"
echo "Getting forecast GFS data"
cd "${euryale_archivedir}"

# Find the most recent day folder and the most recent run hour within it.
last_day=${last_day:-$(nth_last_day 1)}
last_url=${base}/${last_day}
last_hour=$(latest_hour_for "$last_url")
if [ -z "$last_hour" ]; then
    # Try the last-but-one day.
    last_day=$(nth_last_day 2)
    last_url=${base}/${last_day}
    last_hour=$(latest_hour_for "$last_url")
    # Add a day's worth of data to the forecast since we've had to go back an extra day.
    forecast_hours=$((forecast_hours + 24))
fi
# If we still have nothing, then actually bail.
if [ -z "$last_hour" ]; then
    echo "Found no recent run. Bailing."
    exit 1
fi

latest_forecast_dir=${last_day//./_}_${last_hour}
if [ ! -d "$latest_forecast_dir" ]; then
    mkdir "$latest_forecast_dir"
    cd "$latest_forecast_dir"
    # Cycle the forecast files fxxx from 000 upwards in 3-hour increments.
    # printf forces at least 3 digits so `seq -w` zero-pads to match the
    # server's fxxx naming. The f000 file is fetched here too (the old
    # separate f000 download was redundant; --continue-at skips completed
    # files anyway).
    for ff in $(seq -w 0 3 "$(printf %03d "$forecast_hours")"); do
        file=gfs.t${last_hour}z.pgrb2.0p25.f${ff}
        url=${last_url}/${file}
        curl $curl_opts --remote-name "$url"
    done
    echo "Download complete"
    echo "Transferring to ceto"
    cd ..
    rsync -aPq "$latest_forecast_dir" "$USER@login.ceto.npm.ac.uk:${ceto_archive_dir}"
    echo " Transfer complete"
    echo " ========= end ========="
else
    echo "Most recent run already downloaded"
fi
# Rose app entry point: fetch 120 hours of GFS forcing and mirror it to ceto.
[command]
default = bash dima_get_new.sh 120 ${EURYALE_GFS_ARCHIVE} ${CETO_GFS_DIR}
# Rose suite configuration: jinja2 variables injected into suite.rc, plus suite metadata.
[jinja2:suite.rc]
## Run properties
INITIAL_START_DATE='2018-11-20T00:00:00Z'
FINAL_CYCLE_POINT='+P5D'
RUNDAYS=1
MAIL_TO='mbe@pml.ac.uk'
# GFS settings
# NOTE(review): the next two entries have no '=value' — rose should reject this file.
# Presumably they need the euryale archive path and the ceto target dir; confirm and set.
EURYALE_GFS_ARCHIVE
CETO_GFS_DIR
## CMEMS settings
CMEMS_DATA_DIR='/data/sthenno1/backup/mbe/Data/CMEMS'
# Suite metadata / access control.
access-list=mbe
owner=mbe
project=test_suite
sub-project=A
title=hmm_title
#!jinja2
# Cylc suite: daily cycling FVCOM forecast run, forced by GFS and CMEMS downloads.
[cylc]
UTC mode = True # Ignore DST
abort if any task fails = False
[scheduling]
initial cycle point = {{INITIAL_START_DATE}}
final cycle point = {{FINAL_CYCLE_POINT}}
[[special tasks]]
# start_cycle triggers off the wall clock at each cycle point (zero offset).
clock-trigger = start_cycle(PT0M)
[[dependencies]]
# One-off setup chain, first cycle only.
[[[R1]]]
graph = """
copy_common_to_remote => softlink_forcing_remote
write_run_namelist => hot_cold_start => run_fvcom
"""
# Daily cycle: yesterday's model run gates today's preparation tasks.
[[[P1D]]]
graph = """
run_fvcom[-P1D] => start_cycle => softlink_forcing & softlink_forcing_remote => write_run_namelist
"""
# From the second cycle onwards, carry restart files forward between runs.
[[[+P1D/P1D]]]
graph = """
run_fvcom[-P1D] => transfer_restart => run_fvcom
"""
[runtime]
[[root]]
env-script = eval $(rose task-env --cycle-offset=P1D)
script = rose task-run --verbose
[[[job]]]
execution time limit = PT3H
[[[events]]]
mail events = submission timeout, execution timeout, failed
mail to = {{MAIL_TO}}
submission timeout = P1D
[[[environment]]]
# Cycle-point-derived date strings shared by every task.
START_DATE=$(rose date --print-format='%Y-%m-%d %H:%M:%S' $CYLC_TASK_CYCLE_POINT)
END_DATE=$(rose date --offset=P1D --print-format='%Y-%m-%d %H:%M:%S' $CYLC_TASK_CYCLE_POINT)
START_DATE_STR=$(rose date --print-format='%Y-%m-%d_%H:%M:%S' $CYLC_TASK_CYCLE_POINT)
END_DATE_STR=$(rose date --offset=P1D --print-format='%Y-%m-%d_%H:%M:%S' $CYLC_TASK_CYCLE_POINT)
START_DAY=$(rose date --print-format='%Y-%m-%d' $CYLC_TASK_CYCLE_POINT)
END_DAY=$(rose date --offset=P1D --print-format='%Y-%m-%d' $CYLC_TASK_CYCLE_POINT)
CMEMS_DATA_DIR={{CMEMS_DATA_DIR}}
# Base family for slurm-submitted jobs.
[[slurm_job]]
[[[job]]]
batch system = slurm
submission polling intervals = PT10S
execution polling intervals = PT10S, PT1M
[[[directives]]]
# NOTE(review): MIN_NODES/MAX_NODES (and USE_CETO below) are not defined in the
# [jinja2:suite.rc] section of the visible rose-suite.conf — confirm they are
# supplied elsewhere, otherwise jinja2 rendering will fail.
--nodes = {{MIN_NODES}}-{{MAX_NODES}}
--ntasks-per-node=20
--threads-per-core=1
--time=24:00:00
{%- if USE_CETO %}
[[[remote]]]
host = login.ceto.npm.ac.uk
owner = mbe
{%- endif %}
# Single-node, single-task variant of slurm_job.
[[slurm_job_1]]
inherit = slurm_job
[[[directives]]]
--nodes = 1
--ntasks-per-node = 1
--threads-per-core=1
--time=24:00:00
# Runs on the ceto login node when USE_CETO is set; locally otherwise.
[[remote_job]]
{%- if USE_CETO %}
[[[remote]]]
host = login.ceto.npm.ac.uk
owner = mbe
{%- endif %}
[[get_CMEMS_data]]
[[get_GFS_data]]
[[transfer_GFS_data]]
inherit = slurm_job_1
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment