From 1d982414f39715183737ffecf72a0b97b5d0363b Mon Sep 17 00:00:00 2001 From: Nils Hempelmann Date: Tue, 18 Jun 2024 15:05:56 +0200 Subject: [PATCH] integrated raw code --- albatross/atmos_ocean_data.py | 392 +++++++++++++++++++++++++++++ albatross/climdiv_data.py | 183 ++++++++++++++ albatross/processes/wps_drought.py | 311 ++++++++++++----------- albatross/simpleNIPA.py | 289 +++++++++++++++++++++ albatross/utils.py | 178 +++++++++++++ 5 files changed, 1202 insertions(+), 151 deletions(-) create mode 100755 albatross/atmos_ocean_data.py create mode 100755 albatross/climdiv_data.py create mode 100755 albatross/simpleNIPA.py create mode 100755 albatross/utils.py diff --git a/albatross/atmos_ocean_data.py b/albatross/atmos_ocean_data.py new file mode 100755 index 0000000..67bd2fe --- /dev/null +++ b/albatross/atmos_ocean_data.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python +""" +Module for loading atmospheric and oceanic data necessary to run NIPA +""" + +import os +from os import environ as EV +import sys +import resource + +def openDAPsst(version = '3b', debug = False, anomalies = True, **kwargs): + """ + This function downloads data from the new ERSSTv3b on the IRI data library + kwargs should contain: startyr, endyr, startmon, endmon, nbox + """ + from utils import int_to_month + from os.path import isfile + from pydap.client import open_url + from numpy import arange + from numpy import squeeze + import pickle + import re + from collections import namedtuple + + + SSTurl = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCDC/.ERSST/.version' + version + '/' + \ + '.anom/T/%28startmon%20startyr%29%28endmon%20endyr%29RANGEEDGES/T/nbox/0.0/boxAverage/dods' + #SSTurl = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.MONTHLY/.Intrinsic/.PressureLevel/.phi/P/%28700%29VALUES' +'/' + \ + #'.anom/T/%28startmon%20startyr%29%28endmon%20endyr%29RANGEEDGES/T/nbox/0.0/boxAverage/dods' + #SSTurl = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCDC/.ERSST/.version' + version + '/' + \ + #'.anom/T/%28startmon%20startyr%29%28endmon%20endyr%29RANGEEDGES/T/nbox/0.0/boxAverage/data.nc' + + if not anomalies: + SSTurl = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCDC/.ERSST/.version' + version + '/' + \ + '.sst/T/%28startmon%20startyr%29%28endmon%20endyr%29RANGEEDGES/T/nbox/0.0/boxAverage/dods' + #SSTurl = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.MONTHLY/.Intrinsic/.PressureLevel/.phi/P/%28700%29VALUES' +'/' + \ + #'.anom/T/%28startmon%20startyr%29%28endmon%20endyr%29RANGEEDGES/T/nbox/0.0/boxAverage/dods' + + print( 'Preparing to download from %s' % (SSTurl)) + + i2m = int_to_month() + + # Keyword arguments for setting up the download + DLargs = { + 'startmon' : i2m[kwargs['months'][0]], + 'endmon' : i2m[kwargs['months'][-1]], + 'startyr' : str(kwargs['startyr']), + 'endyr' : str(kwargs['endyr']), + 'nbox' : str(kwargs['n_mon']) + } + fp = os.getcwd() + '/DATA/nipa/SST/' + DLargs['startmon'] + DLargs['startyr'] + \ + '_' + DLargs['endmon'] + DLargs['endyr'] + '_nbox_' + DLargs['nbox'] + '_version' + version + + fp = fp + '_anoms' if anomalies else fp + '_ssts' + + seasonal_var = namedtuple('seasonal_var', ('data','lat','lon')) + + #if isfile(fp): + #print 'file found' + #sys.setrecursionlimit(15000) + #print sys.getrecursionlimit() + # stupid edit: remove file + #os.remove(fp) + #if debug: print('Using pickled SST') + #f = open(fp,'rb') + #sstdata = pickle.load(f) + #f.close() + #var = seasonal_var(sstdata['grid'], sstdata['lat'], sstdata['lon']) + #return var + + 
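+    # The generic tokens in SSTurl (startmon, startyr, endmon, endyr, nbox)
+    # are filled in below with re.sub.  Illustratively, months = [1, 2, 3],
+    # startyr = 1971, endyr = 2008 and n_mon = 3 resolve the template to a
+    # URL ending in:
+    #   .../T/%28Jan%201971%29%28Mar%202008%29RANGEEDGES/T/3/0.0/boxAverage/dods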
+    print('New SST field, will save to %s' % fp)
+
+    # fill the URL template tokens with the actual months, years and box size
+    for kw in DLargs:
+        SSTurl = re.sub(kw, DLargs[kw], SSTurl)
+
+    print('Starting download...')
+    print(SSTurl)
+    dataset = open_url(SSTurl)
+    arg = 'anom' if anomalies else 'sst'
+    sst = dataset[arg]
+
+    time = dataset['T']
+    grid = sst.array[:,:,:,:].data.squeeze()  # .data is required by newer pydap versions
+    t = time.data[:].squeeze()
+    sstlat = dataset['Y'][:]
+    sstlon = dataset['X'][:]
+    print('Download finished.')
+
+    #_Grid has shape (ntim, nlat, nlon)
+
+    nseasons = 12 // kwargs['n_mon']  # integer division: n_mon must divide 12
+    if debug:
+        print('Number of seasons is %i, number of months is %i' % (nseasons, kwargs['n_mon']))
+    ntime = len(t)
+
+    # keep one field per season
+    idx = arange(0, ntime, nseasons).astype(int)
+    sst = grid[idx]
+    sstdata = {'grid':sst, 'lat':sstlat, 'lon':sstlon}
+    var = seasonal_var(sst, sstlat, sstlon)
+
+    os.makedirs(os.path.dirname(fp), exist_ok = True)
+    f = open(fp, 'wb')
+    pickle.dump(sstdata, f, pickle.HIGHEST_PROTOCOL)
+    f.close()
+    return var
+
+def load_slp(newFormat = False, debug = False, anomalies = True, **kwargs):
+    """
+    This function loads HADSLP2r data.
+    """
+    from utils import slp_tf, int_to_month
+    from netCDF4 import Dataset
+    from sklearn.preprocessing import scale
+    from numpy import arange, zeros, where
+    from os.path import isfile
+    import pandas as pd
+    import pickle
+
+    transform = slp_tf()  # transforms kwargs into DLargs
+
+    DLargs = {
+        'startmon' : transform[kwargs['months'][0]],
+        'endmon'   : transform[kwargs['months'][-1]],
+        'startyr'  : str(kwargs['startyr']),
+        'endyr'    : str(kwargs['endyr']),
+        'nbox'     : str(kwargs['n_mon'])
+        }
+    i2m = int_to_month()  #_Used in the naming convention
+    fp = EV['DATA'] + '/nipa/SLP/' + i2m[kwargs['months'][0]] + \
+        DLargs['startyr'] + '_' + i2m[kwargs['months'][-1]] + \
+        DLargs['endyr'] + '_nbox_' + DLargs['nbox']
+
+    if isfile(fp):
+        # pickles must be opened in binary mode
+        f = open(fp, 'rb')
+        slpdata = pickle.load(f)
+        f.close()
+        if newFormat:
+            from collections import namedtuple
+            seasonal_var = namedtuple('seasonal_var', ('data','lat','lon'))
+            slp = seasonal_var(slpdata['grid'], slpdata['lat'], slpdata['lon'])
+            return slp
+        return slpdata
+    print('Creating new SLP pickle from netCDF file')
+
+    #_Next block takes the netCDF file and extracts the time to make
+    #_a time index.
+    nc_fp = EV['DATA'] + '/netCDF/slp.mnmean.real.nc'
+    dat = Dataset(nc_fp)
+    t = dat.variables['time']
+    extractargs = {
+        'start'   : '1850-01',
+        'periods' : len(t[:]),
+        'freq'    : 'M',
+        }
+    time_index = pd.date_range(**extractargs)
+
+    #_Get the start position out of the time index
+    startyr = kwargs['startyr']
+    startmon = int(DLargs['startmon'])
+
+    idx_start = where((time_index.year == startyr) & (time_index.month == startmon))[0][0]
+    # one array of month positions per year
+    idx = [arange(kwargs['n_mon']) + idx_start + 12*n for n in range(kwargs['n_year'])]
+
+    #_Note: the boolean-mask approach used for the SST openDAP download does
+    #_not work here, because the season may span two calendar years.
+
+    if debug:
+        print(time_index[idx[0]])
+
+    lat = dat.variables['lat'][:]
+    lon = dat.variables['lon'][:]
+    slp = dat.variables['slp'][:]
+
+    nlat = len(lat)
+    nlon = len(lon)
+    slpavg = zeros((kwargs['n_year'], nlat, nlon))
+
+    for year, mons in enumerate(idx):
+        slpavg[year] = slp[mons].mean(axis=0)
+        if debug:
+            print('Averaging ', mons)
+
+    # standardize each gridpoint's time series before saving
+    for i in range(nlat):
+        for j in range(nlon):
+            slpavg[:,i,j] = scale(slpavg[:,i,j])
+    slpdata = {
+        'grid' : slpavg,
+        'lat'  : lat,
+        'lon'  : lon
+        }
+    os.makedirs(os.path.dirname(fp), exist_ok = True)
+    f = open(fp, 'wb')
+    pickle.dump(slpdata, f)
+    print('SLP data saved to %s' % (fp))
+    f.close()
+    if newFormat:
+        from collections import namedtuple
+        seasonal_var = namedtuple('seasonal_var', ('data','lat','lon'))
+        slp = seasonal_var(slpdata['grid'], slpdata['lat'], slpdata['lon'])
+        return slp
+    return slpdata
+
+def load_clim_file(fp, debug = False):
+    # This function takes a specified input file and creates a pandas
+    # series with all the information needed to run NIPA
+    import numpy as np
+    import pandas as pd
+
+    #_First get the description and the start year from the two header lines
+    with open(fp) as f:
+        description = f.readline()
+        years = f.readline()
+    startyr = years[:4]
+    print(description)
+
+    #_Load the monthly values and flatten them into a 1-D series
+    data = np.loadtxt(fp, skiprows = 2)
+    data = data.reshape(data.size)
+    timeargs = {'start'   : startyr + '-01',
+                'periods' : len(data),
+                'freq'    : 'M'}
+    index = pd.date_range(**timeargs)
+    clim_data = pd.Series(data = data, index = index)
+
+    return clim_data
+
+def load_climdata(**kwargs):
+    from numpy import arange, where, zeros
+    from utils import slp_tf
+
+    data = load_clim_file(kwargs['fp'])
+    tran = slp_tf()
+    startmon = int(tran[kwargs['months'][0]])
+    startyr = kwargs['startyr']
+    # position of the first requested month in the series
+    idx_start = where((data.index.year == startyr) & (data.index.month == startmon))[0][0]
+    # one array of month positions per year
+    idx = [arange(len(kwargs['months'])) + idx_start + 12*n for n in range(kwargs['n_year'])]
+    climdata = zeros((kwargs['n_year']))
+    for year, mons in enumerate(idx):
+        climdata[year] = data.values[mons].mean()
+    return climdata
+
+def create_phase_index(debug = False, **kwargs):
+    # kwargs = kwgroups['index']
+    from numpy import arange, inf, sort, where, zeros
+    from utils import slp_tf
+
+    index = load_clim_file(kwargs['fp'])
+    tran = slp_tf()
+    startmon = int(tran[kwargs['months'][0]])
+    startyr = kwargs['startyr']
+    idx_start = where((index.index.year == startyr) & (index.index.month == startmon))[0][0]
+    idx = [arange(kwargs['n_mon']) + idx_start + 12*n for n in range(kwargs['n_year'])]
+    index_avg = zeros((kwargs['n_year']))
+    for year, mons in enumerate(idx):
+        index_avg[year] = index.values[mons].mean()
+
+    index = sort(index_avg)
+    pos = index[index > 0]
+    neg = index[index < 0]
+    #_Roughly the strongest third of each sign defines the positive/negative phases
+    n_el = int(round(len(pos) * 0.34))
+    n_la = int(round(len(neg) * 0.34))
+    n_np = len(pos) - n_el
+    n_nn = len(neg) - n_la
+
+    cutoffs = {
+        'la' : (neg[0], neg[n_la-1]),
+        'nn' : (neg[n_la], neg[-1]),
+        'np' : (pos[0], pos[n_np-1]),
+        'el' : (pos[-n_el], pos[-1]),
+        'N'  : (neg[n_la + 1], pos[n_np-1])
+        }
+
+    phaseind = {
+        'pos'      : (index_avg >= cutoffs['el'][0]) & (index_avg <= cutoffs['el'][1]),
+        'neg'      : (index_avg >= cutoffs['la'][0]) & (index_avg <= cutoffs['la'][1]),
+        'neut'     : (index_avg >= cutoffs['N'][0]) & (index_avg <= cutoffs['N'][1]),
+        'neutpos'  : (index_avg >= cutoffs['np'][0]) & (index_avg <= cutoffs['np'][1]),
+        'neutneg'  : (index_avg >= cutoffs['nn'][0]) & (index_avg <= cutoffs['nn'][1]),
+        'allyears' : (index_avg >= -inf)
+        }
+
+    return index_avg, phaseind
+
+def create_phase_index2(**kwargs):
+    from copy import deepcopy
+    import numpy as np
+    index = load_clim_file(kwargs['fp'])
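+    # create_phase_index2 ranks the season-averaged index values and slices
+    # the ranked years into n_phases groups.  Illustratively, with
+    # n_year = 38 and n_phases = 2, the 19 years with the lowest index go to
+    # 'neg' and the 19 highest to 'pos'; with n_phases = 4 and
+    # phases_even = False, each tail phase holds int(round(0.34 * nyrs/2))
+    # years and the middle years split evenly between 'neutneg' and 'neutpos'.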
+    from numpy import arange, where, zeros
+    from utils import slp_tf
+    tran = slp_tf()
+    startmon = int(tran[kwargs['months'][0]])
+    startyr = kwargs['startyr']
+    idx_start = where((index.index.year == startyr) & (index.index.month == startmon))[0][0]
+    idx = [arange(kwargs['n_mon']) + idx_start + 12*n for n in range(kwargs['n_year'])]
+    index_avg = zeros((kwargs['n_year']))
+    for year, mons in enumerate(idx):
+        index_avg[year] = index.values[mons].mean()
+
+    idx = np.argsort(index_avg)
+    nyrs = kwargs['n_year']
+    nphase = kwargs['n_phases']
+    phases_even = kwargs['phases_even']
+    p = np.zeros((len(index_avg)), dtype = 'bool')
+    p1 = deepcopy(p)
+    p2 = deepcopy(p)
+    p3 = deepcopy(p)
+    p4 = deepcopy(p)
+    p5 = deepcopy(p)
+    phaseind = {}
+    if nphase == 1:
+        p[idx[:]] = True
+        phaseind['allyears'] = p
+    if nphase == 2:
+        x = nyrs // nphase
+        p1[idx[:x]] = True; phaseind['neg'] = p1
+        p2[idx[x:]] = True; phaseind['pos'] = p2
+    if nphase == 3:
+        if phases_even:
+            x = nyrs // nphase
+            x2 = nyrs - x
+        else:
+            # assumption: a quarter of the record in each tail phase
+            x = nyrs // 4
+            x2 = nyrs - x
+        p1[idx[:x]] = True;   phaseind['neg'] = p1
+        p2[idx[x:x2]] = True; phaseind['neutral'] = p2
+        p3[idx[x2:]] = True;  phaseind['pos'] = p3
+
+    if nphase == 4:
+        if phases_even:
+            x = nyrs // nphase
+            x3 = nyrs - x
+            xr = (x3 - x) // 2
+            x2 = x + xr
+        else:
+            half = nyrs // 2
+            x = int(round(half * 0.34))
+            x3 = nyrs - x
+            xr = (x3 - x) // 2
+            x2 = x + xr
+        p1[idx[:x]] = True;    phaseind['neg'] = p1
+        p2[idx[x:x2]] = True;  phaseind['neutneg'] = p2
+        p3[idx[x2:x3]] = True; phaseind['neutpos'] = p3
+        p4[idx[x3:]] = True;   phaseind['pos'] = p4
+    if nphase == 5:
+        if phases_even:
+            x = nyrs // nphase
+            x4 = nyrs - x
+            xr = (x4 - x) // 3
+            x2 = x + xr
+            x3 = x4 - xr
+        else:
+            half = nyrs // 2
+            x = int(round(half * 0.3))
+            x4 = nyrs - x
+            xr = (x4 - x) // 3
+            x2 = x + xr
+            x3 = x4 - xr
+        p1[idx[:x]] = True;    phaseind['neg'] = p1
+        p2[idx[x:x2]] = True;  phaseind['neutneg'] = p2
+        p3[idx[x2:x3]] = True; phaseind['neutral'] = p3
+        p4[idx[x3:x4]] = True; phaseind['neutpos'] = p4
+        p5[idx[x4:]] = True;   phaseind['pos'] = p5
+    return index_avg, phaseind
diff --git a/albatross/climdiv_data.py b/albatross/climdiv_data.py
new file mode 100755
index 0000000..861eb51
--- /dev/null
+++ b/albatross/climdiv_data.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+"""
+Module for loading climate division data for running NIPA
+"""
+
+import os
+from atmos_ocean_data import *
+from os import environ as EV
+
+def get_data(kwgroups):
+    clim_data = load_climdata(**kwgroups['climdata'])
+    sst = openDAPsst(newFormat = True, anomalies = True, **kwgroups['sst'])
+    index, phaseind = create_phase_index2(**kwgroups['index'])
+    return clim_data, sst, index, phaseind
+
+def create_kwgroups(debug = False, climdata_startyr = 1871, n_yrs = 145,
+                    climdata_months = [1,2,3], n_mon_sst = 3, sst_lag = 3, n_mon_slp = 3,
+                    slp_lag = 3, n_mon_index = 3, index_lag = 3, n_phases = 2, phases_even = True,
+                    index_fp = 'mei.txt', climdata_fp = 'APGD_prcp.txt'):
+    """
+    This function takes information about the seasons, years, and type of divisional
+    data to look at, and creates appropriate kwgroups (parameters) to be input into
+    data loading and openDAP modules.
+    """
+
+    #_Check a few things
+    assert climdata_months[0] >= 1, 'Divisional data can only wrap to the following year'
+    assert climdata_months[-1] <= 15, 'DJFM (i.e.
[12, 13, 14, 15]) is the biggest wrap allowed' + + #_Following block sets the appropriate start month for the SST and SLP fields + #_based on the input climdata_months and the specified lags + sst_months = [] + slp_months = [] + index_months = [] + sst_start = climdata_months[0] - sst_lag + sst_months.append(sst_start) + slp_start = climdata_months[0] - slp_lag + slp_months.append(slp_start) + index_start = climdata_months[0] - index_lag + index_months.append(index_start) + + #_The for loops then populate the rest of the sst(slp)_months based n_mon_sst(slp) + for i in range(1, n_mon_sst): + sst_months.append(sst_start + i) + for i in range(1, n_mon_slp): + slp_months.append(slp_start + i) + for i in range(1, n_mon_index): + index_months.append(index_start + i) + + assert sst_months[0] >= -8, 'sst_lag set too high, only goes to -8' + assert slp_months[0] >= -8, 'slp_lag set too high, only goes to -8' + assert index_months[0] >= -8, 'index_lag set too high, only goes to -8' + + #_Next block of code checks start years and end years and sets appropriately. + #_So hacky.. + ######################################################### + ######################################################### + if climdata_months[-1] <= 12: + climdata_endyr = climdata_startyr + n_yrs - 1 + if sst_months[0] < 1 and sst_months[-1] < 1: + sst_startyr = climdata_startyr - 1 + sst_endyr = climdata_endyr - 1 + elif sst_months[0] < 1 and sst_months[-1] >= 1: + sst_startyr = climdata_startyr - 1 + sst_endyr = climdata_endyr + elif sst_months[0] >=1 and sst_months[-1] >= 1: + sst_startyr = climdata_startyr + sst_endyr = climdata_endyr + elif climdata_months[-1] > 12: + climdata_endyr = climdata_startyr + n_yrs + if sst_months[0] < 1 and sst_months[-1] < 1: + sst_startyr = climdata_startyr - 1 + sst_endyr = climdata_endyr - 2 + elif sst_months[0] < 1 and sst_months[-1] >= 1: + sst_startyr = climdata_startyr - 1 + sst_endyr = climdata_endyr - 1 + elif sst_months[0] >=1 and 1 <= sst_months[-1] <= 12: + sst_startyr = climdata_startyr + sst_endyr = climdata_endyr - 1 + elif sst_months[0] >=1 and sst_months[-1] > 12: + sst_startyr = climdata_startyr + sst_endyr = climdata_endyr + if climdata_months[-1] <= 12: + climdata_endyr = climdata_startyr + n_yrs - 1 + if index_months[0] < 1 and index_months[-1] < 1: + index_startyr = climdata_startyr - 1 + index_endyr = climdata_endyr - 1 + elif index_months[0] < 1 and index_months[-1] >= 1: + index_startyr = climdata_startyr - 1 + index_endyr = climdata_endyr + elif index_months[0] >=1 and index_months[-1] >= 1: + index_startyr = climdata_startyr + index_endyr = climdata_endyr + elif climdata_months[-1] > 12: + climdata_endyr = climdata_startyr + n_yrs + if index_months[0] < 1 and index_months[-1] < 1: + index_startyr = climdata_startyr - 1 + index_endyr = climdata_endyr - 2 + elif index_months[0] < 1 and index_months[-1] >= 1: + index_startyr = climdata_startyr - 1 + index_endyr = climdata_endyr - 1 + elif index_months[0] >=1 and 1 <= index_months[-1] <= 12: + index_startyr = climdata_startyr + index_endyr = climdata_endyr - 1 + elif index_months[0] >=1 and index_months[-1] > 12: + index_startyr = climdata_startyr + index_endyr = climdata_endyr + if climdata_months[-1] <= 12: + climdata_endyr = climdata_startyr + n_yrs - 1 + if slp_months[0] < 1 and slp_months[-1] < 1: + slp_startyr = climdata_startyr - 1 + slp_endyr = climdata_endyr - 1 + elif slp_months[0] < 1 and slp_months[-1] >= 1: + slp_startyr = climdata_startyr - 1 + slp_endyr = climdata_endyr + elif slp_months[0] >=1 and 
slp_months[-1] >= 1: + slp_startyr = climdata_startyr + slp_endyr = climdata_endyr + elif climdata_months[-1] > 12: + climdata_endyr = climdata_startyr + n_yrs + if slp_months[0] < 1 and slp_months[-1] < 1: + slp_startyr = climdata_startyr - 1 + slp_endyr = climdata_endyr - 2 + elif slp_months[0] < 1 and slp_months[-1] >= 1: + slp_startyr = climdata_startyr - 1 + slp_endyr = climdata_endyr - 1 + elif slp_months[0] >=1 and 1 <= slp_months[-1] <= 12: + slp_startyr = climdata_startyr + slp_endyr = climdata_endyr - 1 + elif slp_months[0] >=1 and slp_months[-1] > 12: + slp_startyr = climdata_startyr + slp_endyr = climdata_endyr + ######################################################### + ######################################################### + + if debug: + from utils import int_to_month + i2m = int_to_month() + print('Precip starts in %s-%d, ends in %s-%d' % \ + (i2m[climdata_months[0]], climdata_startyr, i2m[climdata_months[-1]], climdata_endyr)) + print('SST starts in %s-%d, ends in %s-%d' % \ + (i2m[sst_months[0]], sst_startyr, i2m[sst_months[-1]], sst_endyr)) + print('SLP starts in %s-%d, ends in %s-%d' % \ + (i2m[slp_months[0]], slp_startyr, i2m[slp_months[-1]], slp_endyr)) + print('INDEX starts in %s-%d, ends in %s-%d' % \ + (i2m[index_months[0]], index_startyr, i2m[index_months[-1]], index_endyr)) + + #_Create function output + kwgroups = { + 'climdata' : { 'fp' : climdata_fp, + 'startyr' : climdata_startyr, + 'endyr' : climdata_endyr, + 'months' : climdata_months, + 'n_year' : n_yrs + }, + + 'sst' : { 'n_mon' : n_mon_sst, + 'months' : sst_months, + 'startyr' : sst_startyr, + 'endyr' : sst_endyr + }, + + 'slp' : { 'n_mon' : n_mon_slp, + 'months' : slp_months, + 'startyr' : slp_startyr, + 'endyr' : slp_endyr, + 'n_year' : n_yrs + }, + 'index' : { 'n_mon' : n_mon_index, + 'months' : index_months, + 'startyr' : index_startyr, + 'endyr' : index_endyr, + 'n_year' : n_yrs, + 'fp' : index_fp, + 'n_phases' : n_phases, + 'phases_even': phases_even + } + } + + + + return kwgroups diff --git a/albatross/processes/wps_drought.py b/albatross/processes/wps_drought.py index f3a1f84..0d497af 100644 --- a/albatross/processes/wps_drought.py +++ b/albatross/processes/wps_drought.py @@ -10,8 +10,9 @@ class Drought(Process): def __init__(self): inputs = [ LiteralInput('pr', 'Monthly global precipitation file', - abstract='??? netcdf file', + abstract='text file of precipitation', # keywords=['name', 'firstname'], + default = 'https://raw.githubusercontent.com/mxgiuliani00/CSI/master/NIPA/DATA/APGD_prcpComo.txt', data_type='string'), LiteralInput('sst', 'Monthly global sea surface temperature file', abstract='??? netcdf file', @@ -25,9 +26,10 @@ def __init__(self): # abstract='??? 
netcdf file', # # keywords=['name', 'firstname'], # data_type='string'), - LiteralInput('indicator', 'Indicator ???', - abstract='examples: climate indicator of tele-connection patterns, ???', + LiteralInput('indicator', 'NAO Indicator ', + abstract='examples: climate indicator of tele-connection patterns', # keywords=['name', 'firstname'], + default = 'https://raw.githubusercontent.com/mxgiuliani00/CSI/master/NIPA/DATA/nao.txt', data_type='string'), LiteralInput('start_year', 'Start Year', abstract='2020', @@ -43,14 +45,22 @@ def __init__(self): data_type='string') ] outputs = [ - LiteralOutput('negative_list', 'Negative List ...', - # abstract='negativ list', - # keywords=['output', 'result', 'response'], - data_type='string'), - LiteralOutput('positive_list', 'Postive List ...', + # LiteralOutput('negative_list', 'Negative List ...', + # # abstract='negativ list', + # # keywords=['output', 'result', 'response'], + # data_type='string'), + # LiteralOutput('positive_list', 'Postive List ...', + # # abstract='negativ list', + # # keywords=['output', 'result', 'response'], + # data_type='string'), + LiteralOutput('pc_file', 'PC File ', # abstract='negativ list', # keywords=['output', 'result', 'response'], data_type='string'), + ComplexOutput('plot', 'Nice Graphic', + # abstract='Plot of original input file. First timestep.', + as_reference=True, + supported_formats=[FORMAT_PNG]), ] super(Drought, self).__init__( @@ -76,155 +86,154 @@ def _handler(request, response): ############################ LOGGER.info("get input parameter") - - pr = request.inputs['pr'][0].data - sst= request.inputs['sst'][0].data ############################ ### original code https://github.com/mxgiuliani00/CSI/blob/master/NIPA/run_nipa.py LOGGER.info("start processing") - -# import pandas as pd -# from matplotlib import cm, pyplot as plt -# import numpy as np -# from climdiv_data import * -# from simpleNIPA import * -# from atmos_ocean_data import * -# from utils import sstMap -# import matplotlib as mpl -# from utils import * -# import pydap.client -# import mpl_toolkits -# import csv -# import math - - -# #### USER INPUT #### - -# # Select the input-output files: -# index_file = './DATA/nao.txt' -# clim_file = './DATA/APGD_prcpComo.txt' -# filename = 'testComoNAO' - -# # Settings: -# M = 2 # number of climate signal's phases -# n_obs = 3 # number of observations (months) -# lag = 3 # lag-time (months) --> 3 = seasonal -# months = [1,2,3] # months to consider (J,F,M) -# startyr = 1971 # beginning of the time period to analyze -# n_yrs = 38 # number of years to analyze - -# # Select the type of experiment: -# # crv_flag: -# # True = runs NIPA with crossvalidation -# # False = runs NIPA without crossvalidation and save the first SST-Principal Component for multi-variate model -# # -# crv_flag = True -# map_flag = True - -# #################### - -# # Don't mess with the next few lines -# years = np.arange(startyr, startyr+n_yrs) - -# kwgroups = create_kwgroups(debug = True, climdata_months = months, -# climdata_startyr = startyr, n_yrs = n_yrs, -# n_mon_sst = n_obs, n_mon_index = n_obs, sst_lag = lag, -# n_phases = M, phases_even = True, -# index_lag = lag, -# index_fp = index_file, -# climdata_fp = clim_file) - -# climdata, sst, index, phaseind = get_data(kwgroups) -# fp = './maps/%s' % (filename) -# fig, axes = plt.subplots(M, 1, figsize = (6, 12)) -# timeseries = {'years' : [], 'data' : [], 'hindcast': []} -# pc1 = {'pc1':[]} - -# print('NIPA running...') - -# if M == 1: -# phase = 'allyears' -# model = 
NIPAphase(climdata, sst, index, phaseind[phase]) -# model.phase = phase -# model.years = years[phaseind[phase]] -# model.bootcorr(corrconf = 0.95) -# model.gridCheck() -# model.crossvalpcr(xval = crv_flag) -# timeseries['years'] = model.years -# timeseries['data'] = model.clim_data -# timeseries['hindcast'] = model.hindcast -# print( timeseries['years']) -# print( timeseries['data']) - -# if map_flag: -# fig, axes, m = sstMap(model, fig = fig, ax = axes) -# axes.set_title('%s, %.2f' % (phase, model.correlation)) -# fig.savefig(fp) -# plt.close(fig) - - -# else: -# for phase, ax in zip(phaseind, axes): -# model = NIPAphase(climdata, sst, index, phaseind[phase]) -# model.phase = phase -# model.years = years[phaseind[phase]] -# model.bootcorr(corrconf = 0.95) -# model.gridCheck() -# model.crossvalpcr(xval = crv_flag) -# timeseries['years'].append(model.years) -# timeseries['data'].append(model.clim_data) -# timeseries['hindcast'].append(model.hindcast) -# if not crv_flag: -# if hasattr(model,'pc1'): -# pc1['pc1'].append(model.pc1) + # general + import pandas as pd + from matplotlib import cm, pyplot as plt + import matplotlib as mpl + import numpy as np + import pydap.client + import mpl_toolkits + import csv + import math + + # drought specific functions + from climdiv_data import * + from simpleNIPA import * + from atmos_ocean_data import * + from utils import sstMap + from utils import * + + ########################### + LOGGER.info("Select the input-output files") -# if map_flag: -# fig, ax, m = sstMap(model, fig = fig, ax = ax) -# ax.set_title('%s, %.2f' % (phase, model.correlation)) -# fig.savefig(fp) -# plt.close(fig) - - -# # save timeseries (exceptions handled only for 2 phase analysis) -# if np.size(timeseries['hindcast'][0]) == 1: -# if math.isnan(timeseries['hindcast'][0]): -# # no result for the first phase -> consider only the second set of results -# timeseries['years'] = timeseries['years'][1] -# timeseries['data'] = timeseries['data'][1] -# timeseries['hindcast'] = timeseries['hindcast'][1] - -# elif np.size(timeseries['hindcast'][1]) == 1: -# if math.isnan(timeseries['hindcast'][1]): -# # no result for the second phase -> consider only the first set of results -# timeseries['years'] = timeseries['years'][0] -# timeseries['data'] = timeseries['data'][0] -# timeseries['hindcast'] = timeseries['hindcast'][0] - -# else: -# timeseries['years'] = np.concatenate(timeseries['years']) -# timeseries['data'] = np.concatenate(timeseries['data']) -# timeseries['hindcast'] = np.concatenate(timeseries['hindcast']) - -# df = pd.DataFrame(timeseries) -# ts_file = './output/%s_timeseries.csv' % (filename) -# df.to_csv(ts_file) - -# if not crv_flag: -# # save PC -# pc1['pc1'] = np.concatenate(pc1['pc1']) -# pc_file = './output/%s_pc1SST.csv' % (filename) -# df1 = pd.DataFrame(pc1) -# df1.to_csv(pc_file) - -# print( 'NIPA run completed') - - - ############################ - LOGGER. 
info("start processing") - - response.outputs['graphic'].data = 'graphic' + request.inputs['pr'][0].data +# sst= request.inputs['sst'][0].data + index_file = request.inputs['indicator'][0].data # = './DATA/nao.txt' + clim_file = request.inputs['pr'][0].data # './DATA/APGD_prcpComo.txt' + filename = 'testComoNAO' + + # #### USER INPUT #### + LOGGER.info("configuration of the process") + + # Settings: + M = 2 # number of climate signal's phases + n_obs = 3 # number of observations (months) + lag = 3 # lag-time (months) --> 3 = seasonal + months = [1,2,3] # months to consider (J,F,M) + startyr = 1971 # beginning of the time period to analyze + n_yrs = 38 # number of years to analyze + + # Select the type of experiment: + # crv_flag: + # True = runs NIPA with crossvalidation + # False = runs NIPA without crossvalidation and save the first SST-Principal Component for multi-variate model + # + crv_flag = True + map_flag = True + + #################### + + # Don't mess with the next few lines + years = np.arange(startyr, startyr+n_yrs) + + kwgroups = create_kwgroups(debug = True, climdata_months = months, + climdata_startyr = startyr, n_yrs = n_yrs, + n_mon_sst = n_obs, n_mon_index = n_obs, sst_lag = lag, + n_phases = M, phases_even = True, + index_lag = lag, + index_fp = index_file, + climdata_fp = clim_file) + + climdata, sst, index, phaseind = get_data(kwgroups) + fp = './maps/%s' % (filename) + fig, axes = plt.subplots(M, 1, figsize = (6, 12)) + timeseries = {'years' : [], 'data' : [], 'hindcast': []} + pc1 = {'pc1':[]} + + LOGGER.info("NIPA running...") # print('NIPA running...') + + if M == 1: + phase = 'allyears' + model = NIPAphase(climdata, sst, index, phaseind[phase]) + model.phase = phase + model.years = years[phaseind[phase]] + model.bootcorr(corrconf = 0.95) + model.gridCheck() + model.crossvalpcr(xval = crv_flag) + timeseries['years'] = model.years + timeseries['data'] = model.clim_data + timeseries['hindcast'] = model.hindcast + LOGGER.info( timeseries['years']) + LOGGER.info( timeseries['data']) + + if map_flag: + fig, axes, m = sstMap(model, fig = fig, ax = axes) + axes.set_title('%s, %.2f' % (phase, model.correlation)) + fig.savefig(fp) + plt.close(fig) + + + else: + for phase, ax in zip(phaseind, axes): + model = NIPAphase(climdata, sst, index, phaseind[phase]) + model.phase = phase + model.years = years[phaseind[phase]] + model.bootcorr(corrconf = 0.95) + model.gridCheck() + model.crossvalpcr(xval = crv_flag) + timeseries['years'].append(model.years) + timeseries['data'].append(model.clim_data) + timeseries['hindcast'].append(model.hindcast) + if not crv_flag: + if hasattr(model,'pc1'): + pc1['pc1'].append(model.pc1) + + if map_flag: + fig, ax, m = sstMap(model, fig = fig, ax = ax) + ax.set_title('%s, %.2f' % (phase, model.correlation)) + fig.savefig(fp) + plt.close(fig) + + + # save timeseries (exceptions handled only for 2 phase analysis) + if np.size(timeseries['hindcast'][0]) == 1: + if math.isnan(timeseries['hindcast'][0]): + # no result for the first phase -> consider only the second set of results + timeseries['years'] = timeseries['years'][1] + timeseries['data'] = timeseries['data'][1] + timeseries['hindcast'] = timeseries['hindcast'][1] + + elif np.size(timeseries['hindcast'][1]) == 1: + if math.isnan(timeseries['hindcast'][1]): + # no result for the second phase -> consider only the first set of results + timeseries['years'] = timeseries['years'][0] + timeseries['data'] = timeseries['data'][0] + timeseries['hindcast'] = timeseries['hindcast'][0] + + else: + 
+            timeseries['years'] = np.concatenate(timeseries['years'])
+            timeseries['data'] = np.concatenate(timeseries['data'])
+            timeseries['hindcast'] = np.concatenate(timeseries['hindcast'])
+
+        df = pd.DataFrame(timeseries)
+        ts_file = './%s_timeseries.csv' % (filename)
+        df.to_csv(ts_file)
+
+        pc_file = ''  # stays empty when crossvalidation is on
+        if not crv_flag:
+            # save the first SST principal component
+            pc1['pc1'] = np.concatenate(pc1['pc1'])
+            os.makedirs('./output', exist_ok = True)
+            pc_file = './output/%s_pc1SST.csv' % (filename)
+            df1 = pd.DataFrame(pc1)
+            df1.to_csv(pc_file)
+
+        LOGGER.info('NIPA run completed')
+
+        # the map is returned as a file-based ComplexOutput
+        response.outputs['plot'].file = fp
+        response.outputs['pc_file'].data = pc_file
         return response
diff --git a/albatross/simpleNIPA.py b/albatross/simpleNIPA.py
new file mode 100755
index 0000000..01c90b1
--- /dev/null
+++ b/albatross/simpleNIPA.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python
+"""
+New NIPA module
+"""
+
+import pandas as pd
+import numpy as np
+from collections import namedtuple
+seasonal_var = namedtuple('seasonal_var', ('data','lat','lon'))
+
+
+class NIPAphase(object):
+    """
+    Class and methods for operations on phases as determined by the MEI.
+
+    _INPUTS
+    phaseind:  dictionary containing phase names as keys and corresponding
+               booleans as index vectors
+    clim_data: n x 1 pandas time series of the climate data (predictands)
+    sst:       seasonal_var namedtuple with 'data', 'lat', and 'lon'
+    slp:       seasonal_var namedtuple with 'data', 'lat', and 'lon'
+    mei:       n x 1 pandas time series containing averaged MEI values
+
+    _ATTRIBUTES
+    sstcorr_grid
+    slpcorr_grid
+    """
+    def __init__(self, clim_data, sst, mei, phaseind, alt = False):
+        if alt:
+            self.clim_data = clim_data
+        else:
+            self.clim_data = clim_data[phaseind]
+        # sst is a seasonal_var named tuple
+        self.sst = seasonal_var(sst.data[phaseind], sst.lat, sst.lon)
+        self.mei = mei[phaseind]
+        self.flags = {}
+        return
+
+    def categorize(self, ncat = 3, hindcast = False):
+        from pandas import Series
+        from numpy import sort
+
+        if hindcast:
+            data = self.hindcast.copy()
+        else:
+            data = self.clim_data.copy()
+        x = sort(data)
+        n = len(x)
+        # integer indices for the category boundaries (terciles for ncat = 3)
+        upper = x[(2 * n) // ncat - 1]
+        lower = x[n // ncat - 1]
+
+        cat_dat = Series(index = data.index)
+
+        for year in data.index:
+            if data[year] <= lower:
+                cat_dat[year] = 'B'
+            elif data[year] > upper:
+                cat_dat[year] = 'A'
+            else:
+                cat_dat[year] = 'N'
+        if hindcast:
+            self.hcat = cat_dat
+        else:
+            self.cat = cat_dat
+        return
+
+    def bootcorr(self, ntim = 1000, corrconf = 0.95, bootconf = 0.80,
+                 debug = False, quick = True):
+        from numpy import ma, isnan
+        from utils import vcorr, sig_test
+
+        corrlevel = 1 - corrconf
+
+        fieldData = self.sst.data
+        clim_data = self.clim_data
+
+        corr_grid = vcorr(X = fieldData, y = clim_data)
+
+        n_yrs = len(clim_data)
+
+        p_value = sig_test(corr_grid, n_yrs)
+
+        # Mask insignificant gridpoints
+        corr_grid = ma.masked_array(corr_grid, ~(p_value < corrlevel))
+        # Mask land
+        corr_grid = ma.masked_array(corr_grid, isnan(corr_grid))
+        # Mask northern/southern ocean
+        corr_grid.mask[self.sst.lat > 60] = True
+        corr_grid.mask[self.sst.lat < -30] = True
+        nlat = len(self.sst.lat)
+        nlon = len(self.sst.lon)
+
+        if quick:
+            self.corr_grid = corr_grid
+            self.n_pre_grid = nlat * nlon - corr_grid.mask.sum()
+            self.flags['noSST'] = (self.n_pre_grid == 0)
+            return
+
+        ### INITIALIZE A NEW CORR GRID ###
+        count = np.zeros((nlat, nlon))
+
+        dat = clim_data.copy()
+
+        for boot in range(ntim):
+            ### SHUFFLE THE YEARS AND CREATE THE BOOT DATA ###
+            # sample years with replacement; the high bound of randint is exclusive
+            idx = np.random.randint(0, len(dat), len(dat))
+            boot_fieldData = np.zeros((len(idx), nlat, nlon))
+            boot_fieldData[:] = fieldData[idx]
+            boot_climData = dat[idx]
+
+            boot_corr_grid = vcorr(X = boot_fieldData, y = boot_climData)
+
+            p_value = sig_test(boot_corr_grid, n_yrs)
+
+            count[p_value <= corrlevel] += 1
+            if debug:
+                print('Count max is %i' % count.max())
+
+        ### CREATE MASKED ARRAY USING THE COUNT AND BOOTCONF ATTRIBUTES ###
+        corr_grid = np.ma.masked_array(corr_grid, count < bootconf * ntim)
+
+        self.corr_grid = corr_grid
+        self.n_pre_grid = nlat * nlon - corr_grid.mask.sum()
+        self.flags['noSST'] = (self.n_pre_grid == 0)
+        return
+
+    def gridCheck(self, lim = 5, ntim = 2, debug = False):
+        if self.n_pre_grid < 50:
+            lim = 6; ntim = 2
+        for time in range(ntim):
+            dat = self.corr_grid.mask
+            count = 0
+            for i in np.arange(1, dat.shape[0]-1):
+                for j in np.arange(1, dat.shape[1]-1):
+                    if not dat[i,j]:
+                        # mask an isolated point if >= lim of its 8 neighbours are masked
+                        check = np.zeros(8)
+                        check[0] = dat[i+1,j]
+                        check[1] = dat[i+1,j+1]
+                        check[2] = dat[i+1,j-1]
+                        check[3] = dat[i,j+1]
+                        check[4] = dat[i,j-1]
+                        check[5] = dat[i-1,j]
+                        check[6] = dat[i-1,j+1]
+                        check[7] = dat[i-1,j-1]
+                        if check.sum() >= lim:
+                            dat[i,j] = True
+                            count += 1
+            if debug: print('Deleted %i grids' % count)
+
+        self.corr_grid.mask = dat
+        self.n_post_grid = dat.size - dat.sum()
+
+        return
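+    # crossvalpcr below is a leave-one-out principal-component regression:
+    # for each year, EOF-1 of the significant (unmasked) SST cells is
+    # computed from the remaining years, the predictand is regressed on
+    # that principal component, and the held-out year is predicted from
+    # its own SST field.  One fold, schematically (illustrative only):
+    #
+    #   cvr = np.cov(rawdata.T)            # rawdata: train years x SST cells
+    #   eigval, eigvec = np.linalg.eig(cvr)
+    #   eof_1 = eigvec[:, :1]              # leading EOF
+    #   pc_1 = eof_1.T.dot(rawdata.T)      # PC of the training years
+    #   slope, intercept, r, p, se = linregress(pc_1, y_train)
+    #   yhat = slope * x_test.dot(eof_1) + intercept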
+    def crossvalpcr(self, xval = True, debug = False):
+        # Set the phase with bootcorr first; this method uses the corr_grid attribute
+        import numpy as np
+        from numpy import array
+        from scipy.stats import pearsonr as corr
+        from scipy.stats import linregress
+        from utils import weightsst
+        predictand = self.clim_data
+
+        if self.corr_grid.mask.sum() >= len(self.sst.lat) * len(self.sst.lon) - 4:
+            # fewer than 4 usable gridpoints: no predictor field, bail out
+            self.correlation = np.nan
+            self.hindcast = np.nan
+            self.hindcast_error = np.nan
+            self.flags['noSST'] = True
+            return
+
+        self.flags['noSST'] = False
+        sstidx = self.corr_grid.mask == False
+        n = len(predictand)
+        yhat = np.zeros(n)
+        e = np.zeros(n)
+        idx = np.arange(n)
+
+        params = []
+        std_errs = []
+        p_vals = []
+        t_vals = []
+        if not xval:
+            # no crossvalidation: fit one EOF/PC on the full record
+            rawSSTdata = weightsst(self.sst).data
+            rawdata = rawSSTdata[:, sstidx]
+            cvr = np.cov(rawdata.T)
+            eigval, eigvec = np.linalg.eig(cvr)
+            eigvalsort = np.argsort(eigval)[::-1]
+            eigval = np.real(eigval[eigvalsort])
+            # reorder the eigenvectors to match the sorted eigenvalues,
+            # so column 0 really is the leading EOF
+            eigvec = eigvec[:, eigvalsort]
+            ncomp = 1
+            eof_1 = np.real(eigvec[:,:ncomp])  # the feature vector, EOF-1
+            pc_1 = eof_1.T.dot(rawdata.T).squeeze()
+            slope, intercept, r, p, err = linregress(pc_1, predictand)
+            yhat = slope * pc_1 + intercept
+            self.pc1 = pc_1
+            self.correlation = r
+            self.hindcast = yhat
+            return
+
+        # weight once outside the loop to avoid recomputing per fold
+        weightedSST = weightsst(self.sst).data
+        for i in idx:
+            test = idx == i
+            train = idx != i
+            rawSSTdata = weightedSST[train]
+            droppedSSTdata = weightedSST[test]
+            rawdata = rawSSTdata[:, sstidx]
+            dropped_data = droppedSSTdata[:, sstidx].squeeze()
+
+            # EOF-1 via the eigendecomposition of the covariance matrix
+            # (an SVD of rawdata would give the same leading component)
+            cvr = np.cov(rawdata.T)
+            eigval, eigvec = np.linalg.eig(cvr)
+            eigvalsort = np.argsort(eigval)[::-1]
+            eigval = np.real(eigval[eigvalsort])
+            eigvec = eigvec[:, eigvalsort]
+            ncomp = 1
+            eof_1 = np.real(eigvec[:,:ncomp])
+            pc_1 = eof_1.T.dot(rawdata.T).squeeze()
+
+            slope, intercept, r_value, p_value, std_err = linregress(pc_1, predictand[train])
+            predictor = dropped_data.dot(eof_1)
+            yhat[i] = slope * predictor + intercept
+            e[i] = predictand[i] - yhat[i]
+            params.append(slope); std_errs.append(std_err); p_vals.append(p_value)
+            t_vals.append(slope/std_err)
+
+        r, p = corr(predictand, yhat)
+
+        self.hindcast = yhat
+        self.hindcast_error = e
+        self.correlation = round(r, 2)
+        self.reg_stats = { 'params'   : array(params),
+                           'std_errs' : array(std_errs),
+                           't_vals'   : array(t_vals),
+                           'p_vals'   : array(p_vals)}
+
+        return
+
+    def genEnsemble(self):
+        from numpy import zeros
+        from numpy.random import randint
+        sd = self.hindcast_error.std()
+        mn = self.hindcast_error.mean()
+        n = len(self.hindcast)
+        # build 1000 ensemble members by resampling the hindcast errors
+        ensemble = zeros((1000, n))
+        for i in range(n):
+            for j in range(1000):
+                idx = randint(0, n)
+                ensemble[j,i] = self.hindcast[i] + self.hindcast_error[idx]
+        self.ensemble = ensemble
+        return
+
+    def simple_skillscores(self):
+        n = len(self.clim_data)
+        lower_ind = n // 3
+        upper_ind = (2 * n) // 3
+
+        maxima = max(self.clim_data[:lower_ind])
diff --git a/albatross/utils.py b/albatross/utils.py
new file mode 100755
index 0000000..931bffd
--- /dev/null
+++ b/albatross/utils.py
@@ -0,0 +1,178 @@
+
+"""
+Module containing utility functions for running NIPA.
+"""
+from matplotlib import cm, pyplot as plt
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+import numpy as np
+
+def plot_timeseries(timeseries, fp):
+    results = {}
+    for key in timeseries:
+        results[key] = np.zeros((0))
+        for item in timeseries[key]:
+            results[key] = np.concatenate((results[key], item))
+    idx = np.argsort(results['years'])
+
+    for key in results:
+        results[key] = results[key][idx]
+    plt.plot(results['years'], results['hindcast'], label = 'hindcast')
+    plt.plot(results['years'], results['data'], label = 'original')
+
+    plt.legend()
+    plt.savefig(fp + '_timeseries')
+    return
+
+def make_scatterplot(model, fp):
+    plt.scatter(model.clim_data, model.hindcast)
+    plt.title('%s, %.2f' % (model.phase, model.correlation))
+    plt.savefig(fp + '_scatterplot')
+    plt.close()
+    return
+
+def weightsst(sst):
+    # SST needs to be downloaded using the openDAPsst function.
+    # Weight each latitude band by cos(lat) and return a new seasonal_var
+    # rather than scaling sst.data in place, so repeated calls are safe.
+    from numpy import cos, radians
+    weights = cos(radians(sst.lat))
+    weighted = sst.data * weights[None, :, None]
+    return type(sst)(weighted, sst.lat, sst.lon)
+
+def sig_test(r, n, twotailed = True):
+    import numpy as np
+    from scipy.stats import t as tdist
+    df = n - 2
+
+    # Create the t-statistic; the absolute value handles negative correlations
+    t = np.abs(r * np.sqrt(df/(1-r**2)))
+    p = 1 - tdist.cdf(t, df)
+    if twotailed:
+        p = p * 2
+    return p
+
+def vcorr(X, y):
+    # Correlate a single time series with a gridded data field
+    # X - gridded data, 3 dimensions (ntim, nlat, nlon)
+    # y - time series, 1 dimension (ntim)
+
+    ntim, nlat, nlon = X.shape
+    ngrid = nlat * nlon
+
+    y = y.reshape(1, ntim)
+    X = X.reshape(ntim, ngrid).T
+    Xm = X.mean(axis = 1).reshape(ngrid, 1)
+    ym = y.mean()
+    r_num = np.sum((X-Xm) * (y-ym), axis = 1)
+    r_den = np.sqrt(np.sum((X-Xm)**2, axis = 1) * np.sum((y-ym)**2))
+    r = (r_num/r_den).reshape(nlat, nlon)
+
+    return r
+
+def int_to_month():
+    """
+    This function is used by data_load.create_data_parameters
+    """
+    i2m = {
+        -8 : 'Apr',
+        -7 : 'May',
+        -6 : 'Jun',
+        -5 : 'Jul',
+        -4 : 'Aug',
+        -3 : 'Sep',
+        -2 : 'Oct',
+        -1 : 'Nov',
+         0 : 'Dec',
+         1 : 'Jan',
+         2 : 'Feb',
+         3 : 'Mar',
+         4 : 'Apr',
+         5 : 'May',
+         6 : 'Jun',
+         7 : 'Jul',
+         8 : 'Aug',
+         9 : 'Sep',
+        10 : 'Oct',
+        11 : 'Nov',
+        12 : 'Dec',
+        13 : 'Jan',
+        14 : 'Feb',
+        15 : 'Mar',
+        }
+    return i2m
+
+def slp_tf():
+    d = {
+        -4 : '08',
+        -3 : '09',
+        -2 : '10',
+        -1 : '11',
+         0 : '12',
+         1 : '01',
+         2 : '02',
+         3 : '03',
+         4 : '04',
+         5 : '05',
+         6 : '06',
+         7 : '07',
+         8 : '08',
+         9 : '09',
+        10 : '10',
+        11 : '11',
+        12 : '12',
+        13 : '01',
+        14 : '02',
+        15 : '03',
+        }
+    return d
+
+def meteo_swiss_convert(f_in, f_out):
+    data = np.loadtxt(f_in, skiprows = 28)
+    years = data[:, 0]
+    months = data[:, 1]
+    temp = data[:, 2]
+    prcp = data[:, 3]
+    # loadtxt returns floats; cast so nyrs can be used in reshape
+    startyr = int(years[0])
+    endyr = int(years[-1])
+    nyrs = endyr - startyr + 1
+    x = prcp.reshape(nyrs, 12)
+    y = np.arange(startyr, startyr + nyrs).reshape(nyrs, 1)
+    array = np.concatenate((y, x), axis = 1)
+
+    fmtstr = '%i %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f'
+    with open(f_out, 'w') as f:
+        f.write('Description \n')
+        f.write('Line 2 \n')
+        np.savetxt(f, array, fmt = fmtstr)
+
+    return
+
+def sstMap(nipaPhase, cmap = cm.jet, fig = None, ax = None):
+    from mpl_toolkits.basemap import Basemap
+    if fig is None:
+        fig = plt.figure()
+        ax = fig.add_subplot(111)
+    m = Basemap(ax = ax, projection = 'cyl', lon_0 = 270, resolution = 'i')
+    m.drawmapboundary(fill_color='#ffffff', linewidth = 0.15)
+    m.drawcoastlines(linewidth = 0.15)
+    m.fillcontinents(color='#eeeeee', lake_color='#ffffff')
+    parallels = np.linspace(m.llcrnrlat, m.urcrnrlat, 4)
+    meridians = np.linspace(m.llcrnrlon, m.urcrnrlon, 4)
+    m.drawparallels(parallels, linewidth = 0.3, labels = [0,0,0,0])
+    m.drawmeridians(meridians, linewidth = 0.3, labels = [0,0,0,0])
+
+    lons = nipaPhase.sst.lon
+    lats = nipaPhase.sst.lat
+
+    data = nipaPhase.corr_grid
+    levels = np.linspace(-1.0, 1.0, 41)
+
+    lons, lats = np.meshgrid(lons, lats)
+
+    im1 = m.pcolormesh(lons, lats, data, vmin = np.min(levels),
+                       vmax = np.max(levels), cmap = cmap, latlon=True)
+    divider = make_axes_locatable(ax)
+    cax = divider.append_axes('bottom', size='5%', pad=0.05)
+    fig.colorbar(im1, cax=cax, orientation='horizontal')
+    return fig, ax, m
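+
+if __name__ == '__main__':
+    # Minimal smoke test for the statistics helpers above (illustrative
+    # only, not part of the NIPA workflow): correlate a synthetic gridded
+    # field with a time series embedded in half of it, then check which
+    # gridpoints pass the significance test.
+    rng = np.random.RandomState(0)
+    ntim, nlat, nlon = 38, 10, 20
+    y = rng.randn(ntim)
+    X = rng.randn(ntim, nlat, nlon)
+    X[:, :5, :] += y[:, None, None]   # embed the signal in the first 5 rows
+    r = vcorr(X, y)                   # (nlat, nlon) correlation map
+    p = sig_test(r, ntim)
+    print('signal rows significant: %i / %i' % ((p[:5] < 0.05).sum(), 5 * nlon))
+    print('noise rows significant:  %i / %i' % ((p[5:] < 0.05).sum(), 5 * nlon))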