Video diagnostics of model output
What does the time-evolution of the autoencoded pressure field look like?
First, a script that makes a contour plot of the original and autoencoded pressure fields at a specified hour:
#!/usr/bin/env python
# Overplot original and encoded pressure fields.
# Can be run at any epoch and any date - for video diagnostics.

import tensorflow as tf
tf.enable_eager_execution()
import numpy
import IRData.twcr as twcr
import iris
import datetime
import argparse
import os
import math
import pickle
import Meteorographica as mg
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import cartopy
import cartopy.crs as ccrs

# Command-line arguments select the model checkpoint (epoch) and the
# comparison date/time.  (A duplicate 'import argparse' has been removed.)
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", help="Model at which epoch?",
                    type=int, required=True)
parser.add_argument("--year", help="Comparison year",
                    type=int, required=True)
parser.add_argument("--month", help="Comparison month",
                    type=int, required=True)
parser.add_argument("--day", help="Comparison day",
                    type=int, required=True)
parser.add_argument("--hour", help="Comparison hour",
                    type=int, required=True)
args = parser.parse_args()
# Get the 20CR data
# Load the 20th Century Reanalysis (version 2c) mean-sea-level pressure
# field for the requested validity time.
ic=twcr.load('prmsl',datetime.datetime(args.year,
args.month,
args.day,
args.hour),
version='2c')
# Use a single ensemble member (member 1), not the whole ensemble
ic=ic.extract(iris.Constraint(member=1))
# Get the autoencoder at the chosen epoch
# Checkpoints are saved per-epoch under $SCRATCH by the training run
model_save_file = ("%s/Machine-Learning-experiments/"+
"simple_autoencoder_instrumented/"+
"saved_models/Epoch_%04d") % (
os.getenv('SCRATCH'),args.epoch)
autoencoder=tf.keras.models.load_model(model_save_file)
# Normalisation - Pa to mean=0, sd=1 - and back
def normalise(x):
    """Scale a pressure value/field from Pa to roughly mean 0, sd 1.

    Uses fixed constants (mean 101325 Pa, sd 3000 Pa) rather than
    per-field statistics, so the mapping is exactly inverted by
    unnormalise().  Returns a new value instead of mutating the
    argument in place (the original version modified numpy-array
    arguments as a side effect).
    """
    return (x - 101325) / 3000
def unnormalise(x):
    """Invert normalise(): map a mean-0, sd-1 value back to Pa.

    Returns a new value instead of mutating the argument in place
    (the original version modified numpy-array arguments as a side
    effect).
    """
    return x * 3000 + 101325
# Set up the figure: 19.2x10.8 inches at 100dpi = 1920x1080 (HD frame)
fig=Figure(figsize=(19.2,10.8), # 1920x1080, HD
dpi=100,
facecolor=(0.88,0.88,0.88,1),
edgecolor=None,
linewidth=0.0,
frameon=False,
subplotpars=None,
tight_layout=None)
# Agg canvas: render off-screen (no display needed on batch nodes)
canvas=FigureCanvas(fig)
# Map showing original and reconstructed fields
# pole_latitude=90 leaves the pole unrotated - a standard global view
projection=ccrs.RotatedPole(pole_longitude=180.0, pole_latitude=90.0)
# One axes filling the whole figure, no frame or ticks
ax_map=fig.add_axes([0,0,1,1],projection=projection)
ax_map.set_axis_off()
extent=[-180,180,-90,90]
ax_map.set_extent(extent, crs=projection)
# Stretch background images to fill the axes rather than keeping aspect
matplotlib.rc('image',aspect='auto')
# Run the data through the autoencoder and convert back to iris cube
pm=ic.copy()
pm.data=normalise(pm.data)
ict=tf.convert_to_tensor(pm.data, numpy.float32)
# Flatten to a 1-element batch: the model's input layer takes the whole
# field as one vector (assumes a 91x180 lat/lon grid - confirm against
# the training script).
ict=tf.reshape(ict,[1,91*180])
result=autoencoder.predict_on_batch(ict)
# Reshape the output back to the 2d field and restore Pa units
result=tf.reshape(result,[91,180])
pm.data=unnormalise(result)
# Background, grid and land
ax_map.background_patch.set_facecolor((0.88,0.88,0.88,1))
# (optional lat/lon grid overlay - currently disabled)
#mg.background.add_grid(ax_map)
land_img_orig=ax_map.background_img(name='GreyT', resolution='low')
# Original pressures as red contours
# scale=0.01 converts Pa to hPa, matching the 870-1050 contour levels
mg.pressure.plot(ax_map,ic,
scale=0.01,
resolution=0.25,
levels=numpy.arange(870,1050,7),
colors='red',
label=False,
linewidths=2)
# Encoded pressures as blue contours (same levels, for direct comparison)
mg.pressure.plot(ax_map,pm,
scale=0.01,
resolution=0.25,
levels=numpy.arange(870,1050,7),
colors='blue',
label=False,
linewidths=2)
# Date label in the bottom-right corner of the frame
mg.utils.plot_label(ax_map,
'%04d-%02d-%02d:%02d' % (args.year,
args.month,
args.day,
args.hour),
facecolor=(0.88,0.88,0.88,0.9),
fontsize=16,
x_fraction=0.98,
y_fraction=0.03,
verticalalignment='bottom',
horizontalalignment='right')
# Render the figure as a png
# File name encodes the timepoint and epoch, so is_done() in the
# frame-generation script can find it.
figfile = ("%s/Machine-Learning-experiments/" +
           "simple_autoencoder_instrumented/" +
           "images/%02d%02d%02d%02d_%04d.png") % (
               os.getenv('SCRATCH'),
               args.year, args.month,
               args.day, args.hour,
               args.epoch)
# exist_ok avoids a check-then-create race: many frame jobs run in
# parallel and may try to create this directory simultaneously.
os.makedirs(os.path.dirname(figfile), exist_ok=True)
fig.savefig(figfile)
To make the video, it is necessary to run the script above hundreds of times - once for every hour in a month. This script makes the list of commands needed to render all the images; the commands can then be run in parallel.
#!/usr/bin/env python
# Make all the individual frames for a movie

import os
import subprocess
import datetime

# Where to put the output files
opdir = "%s/slurm_output" % os.getenv('SCRATCH')
# exist_ok avoids failing if the directory already exists or is
# created concurrently by another process.
os.makedirs(opdir, exist_ok=True)
# Function to check if the job is already done for this timepoint
def is_done(year, month, day, hour, epoch):
    """Return True if the comparison image for this timepoint+epoch exists.

    The path must stay in sync with the output file name written by
    compare.py.
    """
    op_file_name = ("%s/Machine-Learning-experiments/" +
                    "simple_autoencoder_instrumented/" +
                    "images/%02d%02d%02d%02d_%04d.png") % (
                        os.getenv('SCRATCH'),
                        year, month,
                        day, hour,
                        epoch)
    # Return the existence test directly instead of if/return True/False
    return os.path.isfile(op_file_name)
f=open("run.txt","w+")
start_day=datetime.datetime(2009, 1, 1, 0)
end_day =datetime.datetime(2009, 1,31,23)
current_day=start_day
while current_day<=end_day:
if is_done(current_day.year,current_day.month,
current_day.day,current_day.hour,1000):
current_day=current_day+datetime.timedelta(hours=1)
continue
cmd=("./compare.py --year=%d --month=%d" +
" --day=%d --hour=%d --epoch=1000\n") % (
current_day.year,current_day.month,
current_day.day,current_day.hour)
f.write(cmd)
current_day=current_day+datetime.timedelta(hours=1)
f.close()
To turn the thousands of images into a movie, use ffmpeg
#!/bin/bash
# Assemble the rendered hourly frames into an HD movie.
# -r 24              : 24 input frames per second of output video
# -pattern_type glob : ffmpeg expands the wildcard itself (the '*' is
#                      escaped so the shell passes it through verbatim)
# libx264 + -tune animation + -crf 25 : settings suited to contour-line
#                      animation; yuv420p keeps the file widely playable
ffmpeg -r 24 -pattern_type glob -i /scratch/hadpb/Machine-Learning-experiments/simple_autoencoder_instrumented/images/2\*.png -c:v libx264 -preset slow -tune animation -profile:v high -level 4.2 -pix_fmt yuv420p -crf 25 -c:a copy compare.mp4