FVCOM / PyFVCOM / Commits

Commit 9bcb7290 authored Jan 18, 2018 by Pierre Cazenave
Merge branch 'dev' of gitlab.ecosystem-modelling.pml.ac.uk:fvcom/pyfvcom into dev
Parents: b33e2979, 616133fb
Pipeline #778 failed
Showing 2 changed files with 63 additions and 7 deletions:

PyFVCOM/preproc.py: +62 -6
PyFVCOM/validation.py: +1 -1
PyFVCOM/preproc.py

...
@@ -1024,6 +1024,44 @@ class Model(Domain):
        if self.dims.river == 0:
            return

        if max_discharge:
            # Find rivers in excess of the given discharge maximum.
            big_rivers = np.unique(np.argwhere(self.river.flux > max_discharge)[:, 1])
            if big_rivers:
                for this_river in big_rivers:
                    no_of_splits = np.ceil(np.max(self.river.flux[:, this_river]) / max_discharge)
                    original_river_name = self.river.names[this_river]
                    each_flux = self.river.flux[:, this_river] / no_of_splits
                    # everything else is concentrations so can just be copied
                    for this_i in np.arange(2, no_of_splits + 1):
                        self.river.names.append(original_river_name + str(this_i))
                    self.river.flux[:, this_river] = self.river.flux[:, this_river] / no_of_splits
                    # everything else is concentrations so can just be copied

                    # Collect all variables to add columns for
                    all_vars = ['flux', 'temperature', 'salinity']

                    # Ersem variables if they're in there
                    N_names = list(filter(lambda x: 'mud_' in x, list(self.river.__dict__.keys())))
                    Z_names = list(filter(lambda x: 'mud_' in x, list(self.river.__dict__.keys())))
                    O_names = list(filter(lambda x: 'mud_' in x, list(self.river.__dict__.keys())))

                    # And sediment ones
                    muddy_sediment_names = list(filter(lambda x: 'mud_' in x, list(self.river.__dict__.keys())))
                    sandy_sediment_names = list(filter(lambda x: 'sand_' in x, list(self.river.__dict__.keys())))

                    all_vars = flatten_list([all_vars, N_names, Z_names, O_names, muddy_sediment_names, sandy_sediment_names])

                    for this_var in all_vars:
                        self.__add_river_col(self, this_var, this_river, no_of_splits - 1)

        # update no of rivers
        for i, node in enumerate(self.river.node):
            bad = find_bad_node(self.grid.triangles, node)
            if bad:
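The block above splits any river whose peak discharge exceeds max_discharge into several splinter rivers carrying equal shares of the flow, then duplicates the other variables per splinter via __add_river_col. A minimal standalone sketch of the splitting arithmetic, using made-up flux values and no PyFVCOM objects:

import numpy as np

# Toy discharge series for one river [time], made-up values.
flux = np.array([120.0, 450.0, 900.0])
max_discharge = 400.0

# Same rule as the diff: enough splits that no single splinter river exceeds the cap.
no_of_splits = int(np.ceil(np.max(flux) / max_discharge))  # ceil(900 / 400) = 3
each_flux = flux / no_of_splits                            # every splinter carries an equal share

print(no_of_splits)                                  # 3
print(each_flux)                                     # [ 40. 150. 300.] - all below max_discharge
print(np.allclose(each_flux * no_of_splits, flux))   # True: total discharge is conserved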
...
@@ -1036,14 +1074,12 @@ class Model(Domain):
                if still_bad:
                    # Is this even possible?
                    candidates = get_attached_unique_nodes(candidate, self.grid.triangles)
                # TO DO - Add check for two rivers on one node
                dist = [haversine_distance((self.grid.lon[i], self.grid.lat[i]),
                                           (self.grid.lon[node], self.grid.lat[node])) for i in candidates]
                self.river.node[i] = np.argmin(candidates[np.argmin(dist)])

        if max_discharge:
            # Find rivers in excess of the given discharge maximum.
            big_rivers = np.argwhere(self.river.flux > max_discharge)
            # TODO: implement this!

        if min_depth:
            deep_rivers = np.argwhere(self.grid.h[self.river.node] > min_depth)
            # TODO: implement this!
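For context, the surrounding hunk appears to relocate a river whose node is flagged as bad to the nearest attached candidate node by comparing haversine distances between grid positions. A self-contained sketch of that nearest-candidate selection, with a local haversine helper standing in for PyFVCOM's haversine_distance and made-up coordinates:

import numpy as np

def haversine_km(p1, p2):
    # Great-circle distance in km between two (lon, lat) points given in degrees.
    lon1, lat1, lon2, lat2 = np.radians([p1[0], p1[1], p2[0], p2[1]])
    a = np.sin((lat2 - lat1) / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2)**2
    return 2 * 6371 * np.arcsin(np.sqrt(a))

# Made-up candidate node positions and the river's current position.
candidate_positions = [(-4.10, 50.30), (-4.15, 50.33), (-4.20, 50.36)]
river_position = (-4.16, 50.34)

# Pick the candidate closest to the river: argmin over the candidate distances.
dist = [haversine_km(pos, river_position) for pos in candidate_positions]
closest = int(np.argmin(dist))
print(closest, dist[closest])  # index of the nearest candidate and its distance in km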
...
@@ -1078,6 +1114,26 @@ class Model(Domain):
        # Update the dimension too.
        self.dims.river -= len(boundary_river_indices)

    def __add_river_col(self, var_name, col_to_copy, no_cols_to_add):
        """
        Helper function to copy the existing data for a river variable to a new splinter river (when rivers are
        split for excessive discharge at one node).

        Parameters
        ----------
        var_name : str
            Name of the river attribute to alter.
        col_to_copy : int
            The column (i.e. river) to copy from.
        no_cols_to_add : int
            The number of columns (i.e. extra rivers) to add to the end of the array.

        """
        old_data = getattr(self.river, var_name)
        col_to_add = old_data[:, col_to_copy][:, np.newaxis]
        col_to_add = np.tile(col_to_add, [1, no_cols_to_add])
        setattr(self.river, var_name, np.hstack([old_data, col_to_add]))

    def write_river_forcing(self, output_file, ersem=False, ncopts={'zlib': True, 'complevel': 7}, sediments=False, **kwargs):
        """
        Write out an FVCOM river forcing netCDF file.

...
@@ -1098,7 +1154,7 @@ class Model(Domain):
            - flux : river discharge data [time, river]
            - temperature : river temperature data [time, river]
            - salinity : river salinity data [time, river]
        The `ersem' dictionary should contain at least:
        If using ersem then it should also contain:
            - N1_p : phosphate [time, river]
            - N3_n : nitrate [time, river]
            - N4_n : ammonium [time, river]
...
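The __add_river_col helper added above grows a [time, river] array by appending copies of one river's column. A minimal sketch of the same np.tile / np.hstack pattern on a toy array (values and variable names here are illustrative only):

import numpy as np

# Toy [time, river] array: 3 time steps, 2 rivers, made-up values.
old_data = np.array([[1.0, 10.0],
                     [2.0, 20.0],
                     [3.0, 30.0]])
col_to_copy = 1      # duplicate the second river
no_cols_to_add = 2   # append two copies of it

# Same steps as __add_river_col: slice the column, keep it 2-D, tile it, append it.
col_to_add = old_data[:, col_to_copy][:, np.newaxis]   # shape (3, 1)
col_to_add = np.tile(col_to_add, [1, no_cols_to_add])  # shape (3, 2)
new_data = np.hstack([old_data, col_to_add])           # shape (3, 4)

print(new_data.shape)  # (3, 4): the original two rivers plus two copied columns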
PyFVCOM/validation.py
...
@@ -1097,7 +1097,7 @@ class ICES_comp():
            node_ind = self.zeta_filereader.closest_node([sample['Longdeg'], sample['Latdeg']], haversine=True)
            if self.daily_avg:
                # For daily averages match by day, otherwise use nearest time
                sample_dt = dt.datetime(y, m, d)
                sample_dt = dt.datetime(int(sample['Year']), int(sample['Mnth']), int(sample['Dy']))
            model_time_ind = self.zeta_filereader.closest_time(sample_dt)
            model_dt = self.zeta_filereader.time.datetime[model_time_ind]
...
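The one-line validation.py change builds the sample date from the ICES record's own 'Year', 'Mnth' and 'Dy' fields before asking for the nearest model time. A rough sketch of that nearest-time lookup, assuming a plain list of model datetimes in place of the zeta_filereader.closest_time call used in the diff:

import datetime as dt
import numpy as np

# Hypothetical daily model times and one ICES-style sample record.
model_datetimes = [dt.datetime(2005, 6, day) for day in range(1, 31)]
sample = {'Year': '2005', 'Mnth': '6', 'Dy': '17'}

# As in the fixed line: build the date from the sample's own fields...
sample_dt = dt.datetime(int(sample['Year']), int(sample['Mnth']), int(sample['Dy']))

# ...then pick the model time closest to it (a stand-in for the closest_time call).
offsets = [abs((t - sample_dt).total_seconds()) for t in model_datetimes]
model_time_ind = int(np.argmin(offsets))
print(model_datetimes[model_time_ind])  # 2005-06-17 00:00:00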