From 6aa6be6440ab7cf83f1a924c12a2214c06999c24 Mon Sep 17 00:00:00 2001
From: Regis Thedin
Date: Wed, 8 Jan 2025 17:09:52 -0700
Subject: [PATCH 1/9] Add manual specs of different turbines (see extended
 message)

This needs to be modified in the future. For now it is only a workaround to
get the rotor speed and the blade pitch for a given wind speed for different
turbines.
---
 .../fastfarm/FASTFarmCaseCreation.py          | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
index 7205583..c57f574 100644
--- a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
+++ b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
@@ -268,6 +268,8 @@ def __repr__(self):
 
     def _checkInputs(self):
+
+        #### Check if the turbine given in the template FF input file exists
         # Create case path if it doesn't exist
         if not os.path.exists(self.path):
@@ -1247,6 +1249,36 @@ def _setRotorParameters(self):
                                     #'WvLowCOffS': (['wspd'], [0, 0, 0, 0, 0 ]), # 2nd order wave info. Unused for now 0.314159 from repo; 0.862 from KS
                                    }, coords={'wspd': [6.6, 8.6, 10.6, 12.6, 15]} ) # 15 m/s is 'else', since method='nearest' is used on the variable `bins`
+
+        elif self.D == 178: # DTU 10MW turbine
+            print(f'CHANGE THE _setRotorParameters of the DTU 10MW turbine')
+            self.bins = xr.Dataset({'WaveHs':   (['wspd'], [ 1.429, 1.429]), # 1.429 comes from Matt's hydrodyn input file
+                                    'WaveTp':   (['wspd'], [ 7.073, 7.073]), # 7.073 comes from Matt's hydrodyn input file
+                                    'RotSpeed': (['wspd'], [ 4.0,   4.0]),   # 4 rpm comes from Matt's ED input file
+                                    'BlPitch':  (['wspd'], [ 0.0,   0.0]),   # 0 deg comes from Matt's ED input file
+                                    #'WvHiCOffD':  (['wspd'], [0, 0]), # 2nd order wave info. Unused for now
+                                    #'WvLowCOffS': (['wspd'], [0, 0]), # 2nd order wave info. Unused for now
+                                   }, coords={'wspd': [10, 15]} ) # 15 m/s is 'else', since method='nearest' is used on the variable `bins`
+
+
+        elif self.D == 82: # Vestas V82, 1.5MW, 82 m diameter
+            print(f'CHANGE THE _setRotorParameters of the V82 1.5MW turbine')
+            self.bins = xr.Dataset({'WaveHs':   (['wspd'], [ 1.429, 1.429]), # 1.429 comes from Matt's hydrodyn input file
+                                    'WaveTp':   (['wspd'], [ 7.073, 7.073]), # 7.073 comes from Matt's hydrodyn input file
+                                    'RotSpeed': (['wspd'], [ 4.0,   4.0]),   # 4 rpm comes from Matt's ED input file
+                                    'BlPitch':  (['wspd'], [ 0.0,   0.0]),   # 0 deg comes from Matt's ED input file
+                                    #'WvHiCOffD':  (['wspd'], [0, 0]), # 2nd order wave info. Unused for now
+                                    #'WvLowCOffS': (['wspd'], [0, 0]), # 2nd order wave info. Unused for now
+                                   }, coords={'wspd': [10, 15]} ) # 15 m/s is 'else', since method='nearest' is used on the variable `bins`
+
+
+        elif self.D == 93: # Siemens SWT-2.3-93, 2.3 MW, 93 m diameter
+            self.bins = xr.Dataset({'WaveHs':   (['wspd'], [ 1, 1]),     # arbitrary since no hydrodyn is used
+                                    'WaveTp':   (['wspd'], [ 7, 7]),     # arbitrary since no hydrodyn is used
+                                    'RotSpeed': (['wspd'], [ 5.0, 5.0]), # from input file
+                                    'BlPitch':  (['wspd'], [ -1, -1]),   # from input file
+                                   }, coords={'wspd': [10, 15]} ) # 15 m/s is 'else', since method='nearest' is used on the variable `bins`
+
         else:
            raise ValueError(f'Unknown turbine with diameter {self.D}. 
Add values to the `_setRotorParameters` function.') From b0fb73fcecca65fd0ab8159ad02a8d79f7872c14 Mon Sep 17 00:00:00 2001 From: Regis Thedin Date: Wed, 8 Jan 2025 17:12:10 -0700 Subject: [PATCH 2/9] FF: Change `normal` to `offset_vector` to conform to AMR-Wind changes --- .../fastfarm/AMRWindSimulation.py | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/openfast_toolbox/fastfarm/AMRWindSimulation.py b/openfast_toolbox/fastfarm/AMRWindSimulation.py index d899f2a..ae7be5a 100644 --- a/openfast_toolbox/fastfarm/AMRWindSimulation.py +++ b/openfast_toolbox/fastfarm/AMRWindSimulation.py @@ -667,19 +667,19 @@ def write_sampling_params(self, out=None, overwrite=False): zoffsets_lr_str = " ".join(str(item) for item in self.zoffsets_lr) s += f"# Low sampling grid spacing = {self.ds_lr} m\n" - s += f"{self.postproc_name_lr}.Low.type = PlaneSampler\n" - s += f"{self.postproc_name_lr}.Low.num_points = {self.nx_lr} {self.ny_lr}\n" - s += f"{self.postproc_name_lr}.Low.origin = {self.xlow_lr:.4f} {self.ylow_lr:.4f} {self.zlow_lr:.4f}\n" # Round the float output - s += f"{self.postproc_name_lr}.Low.axis1 = {self.xdist_lr:.4f} 0.0 0.0\n" # Assume: axis1 oriented parallel to AMR-Wind x-axis - s += f"{self.postproc_name_lr}.Low.axis2 = 0.0 {self.ydist_lr:.4f} 0.0\n" # Assume: axis2 oriented parallel to AMR-Wind y-axis - s += f"{self.postproc_name_lr}.Low.normal = 0.0 0.0 1.0\n" - s += f"{self.postproc_name_lr}.Low.offsets = {zoffsets_lr_str}\n\n\n" + s += f"{self.postproc_name_lr}.Low.type = PlaneSampler\n" + s += f"{self.postproc_name_lr}.Low.num_points = {self.nx_lr} {self.ny_lr}\n" + s += f"{self.postproc_name_lr}.Low.origin = {self.xlow_lr:.4f} {self.ylow_lr:.4f} {self.zlow_lr:.4f}\n" # Round the float output + s += f"{self.postproc_name_lr}.Low.axis1 = {self.xdist_lr:.4f} 0.0 0.0\n" # Assume: axis1 oriented parallel to AMR-Wind x-axis + s += f"{self.postproc_name_lr}.Low.axis2 = 0.0 {self.ydist_lr:.4f} 0.0\n" # Assume: axis2 oriented parallel to AMR-Wind y-axis + s += f"{self.postproc_name_lr}.Low.offset_vector = 0.0 0.0 1.0\n" + s += f"{self.postproc_name_lr}.Low.offsets = {zoffsets_lr_str}\n\n\n" s += f"# ---- High-res sampling parameters ----\n" - s += f"{self.postproc_name_hr}.output_format = netcdf\n" - s += f"{self.postproc_name_hr}.output_frequency = {self.output_frequency_hr}\n" - s += f"{self.postproc_name_hr}.fields = velocity # temperature tke\n" - s += f"{self.postproc_name_hr}.labels = {sampling_labels_hr_str}\n" + s += f"{self.postproc_name_hr}.output_format = netcdf\n" + s += f"{self.postproc_name_hr}.output_frequency = {self.output_frequency_hr}\n" + s += f"{self.postproc_name_hr}.fields = velocity # temperature tke\n" + s += f"{self.postproc_name_hr}.labels = {sampling_labels_hr_str}\n" # Write out high resolution sampling plane info for turbkey in self.hr_domains: @@ -704,13 +704,13 @@ def write_sampling_params(self, out=None, overwrite=False): zoffsets_hr_str = " ".join(str(item) for item in zoffsets_hr) s += f"\n# Turbine {wt_name} with base at (x,y,z) = ({wt_x:.4f}, {wt_y:.4f}, {wt_z:.4f}), with hh = {wt_h}, D = {wt_D}, grid spacing = {self.ds_hr} m\n" - s += f"{self.postproc_name_hr}.{sampling_name}.type = PlaneSampler\n" - s += f"{self.postproc_name_hr}.{sampling_name}.num_points = {nx_hr} {ny_hr}\n" - s += f"{self.postproc_name_hr}.{sampling_name}.origin = {xlow_hr:.4f} {ylow_hr:.4f} {zlow_hr:.4f}\n" # Round the float output - s += f"{self.postproc_name_hr}.{sampling_name}.axis1 = {xdist_hr:.4f} 0.0 0.0\n" # Assume: axis1 oriented parallel 
to AMR-Wind x-axis
-            s += f"{self.postproc_name_hr}.{sampling_name}.axis2 = 0.0 {ydist_hr:.4f} 0.0\n" # Assume: axis2 oriented parallel to AMR-Wind y-axis
-            s += f"{self.postproc_name_hr}.{sampling_name}.normal = 0.0 0.0 1.0\n"
-            s += f"{self.postproc_name_hr}.{sampling_name}.offsets = {zoffsets_hr_str}\n"
+            s += f"{self.postproc_name_hr}.{sampling_name}.type          = PlaneSampler\n"
+            s += f"{self.postproc_name_hr}.{sampling_name}.num_points    = {nx_hr} {ny_hr}\n"
+            s += f"{self.postproc_name_hr}.{sampling_name}.origin        = {xlow_hr:.4f} {ylow_hr:.4f} {zlow_hr:.4f}\n"  # Round the float output
+            s += f"{self.postproc_name_hr}.{sampling_name}.axis1         = {xdist_hr:.4f} 0.0 0.0\n"  # Assume: axis1 oriented parallel to AMR-Wind x-axis
+            s += f"{self.postproc_name_hr}.{sampling_name}.axis2         = 0.0 {ydist_hr:.4f} 0.0\n"  # Assume: axis2 oriented parallel to AMR-Wind y-axis
+            s += f"{self.postproc_name_hr}.{sampling_name}.offset_vector = 0.0 0.0 1.0\n"
+            s += f"{self.postproc_name_hr}.{sampling_name}.offsets       = {zoffsets_hr_str}\n"
 
 
         if out is None:

From 8462fc0a21ab51341442054c01ff125c934dcef2 Mon Sep 17 00:00:00 2001
From: Regis Thedin
Date: Fri, 10 Jan 2025 16:10:02 -0700
Subject: [PATCH 3/9] FF: Ensure output of WD radii is consistent with
 `NumRadii`

---
 openfast_toolbox/fastfarm/FASTFarmCaseCreation.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
index c57f574..dca8d82 100644
--- a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
+++ b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
@@ -1908,7 +1908,14 @@ def _FF_setup_LES(self, seedsToKeep=1):
             ff_file['dr'] = self.dr
             ff_file['NumRadii']  = int(np.ceil(3*D_/(2*self.dr) + 1))
             ff_file['NumPlanes'] = int(np.ceil( 20*D_/(self.dt_low_les*Vhub_*(1-1/6)) ) )
-
+
+            # Ensure radii outputs are within [0, NumRadii-1]
+            for i, r in enumerate(ff_file['OutRadii']):
+                if r > ff_file['NumRadii']-1:
+                    ff_file['NOutRadii'] = i
+                    ff_file['OutRadii'] = ff_file['OutRadii'][:i]
+                    break
+
             # Visualization outputs
             ff_file['WrDisWind'] = 'False'
             ff_file['WrDisDT']   = ff_file['DT_Low-VTK']  # default is the same as DT_Low-VTK

From 5dbfead8a718dff921de26d37499722325676a2d Mon Sep 17 00:00:00 2001
From: Regis Thedin
Date: Mon, 13 Jan 2025 11:06:09 -0700
Subject: [PATCH 4/9] FF: Add support for overwriting the fst input file in
 the fstf

---
 openfast_toolbox/fastfarm/FASTFarmCaseCreation.py |  3 ++-
 openfast_toolbox/fastfarm/fastfarm.py             | 12 ++++++++++--
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
index dca8d82..41a7a1e 100644
--- a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
+++ b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py
@@ -1878,7 +1878,8 @@ def _FF_setup_LES(self, seedsToKeep=1):
             outputFSTF = os.path.join(seedPath, 'FFarm_mod.fstf')
 
             # Write the file (mostly for turbine locations here)
-            writeFastFarm(outputFSTF, templateFSTF, xWT, yWT, zWT, FFTS=None, OutListT1=self.outlistFF, noLeadingZero=True)
+            writeFastFarm(outputFSTF, templateFSTF, xWT, yWT, zWT, FFTS=None, OutListT1=self.outlistFF,
+                          noLeadingZero=True, turbineTemplateFullFilename=f"../{self.turbfilename}1.fst")
 
             # Open saved file and change additional values manually or make sure we have the correct ones
             ff_file = FASTInputFile(outputFSTF)
diff --git a/openfast_toolbox/fastfarm/fastfarm.py b/openfast_toolbox/fastfarm/fastfarm.py
index 9ac4b85..28a8071 100644
--- 
a/openfast_toolbox/fastfarm/fastfarm.py +++ b/openfast_toolbox/fastfarm/fastfarm.py @@ -326,13 +326,16 @@ def fastFarmBoxExtent(yBox, zBox, tBox, meanU, hubHeight, D, xWT, yWT, return d -def writeFastFarm(outputFile, templateFile, xWT, yWT, zWT, FFTS=None, OutListT1=None, noLeadingZero=False): +def writeFastFarm(outputFile, templateFile, xWT, yWT, zWT, FFTS=None, OutListT1=None, noLeadingZero=False, turbineTemplateFullFilename=None): """ Write FastFarm input file based on a template, a TurbSimFile and the Layout outputFile: .fstf file to be written templateFile: .fstf file that will be used to generate the output_file XWT,YWT,ZWT: positions of turbines FFTS: FastFarm TurbSim parameters as returned by fastFarmTurbSimExtent + turbineTemplateFullFilename: full (or relative) path and filename of the template + turbine to be used. If None, it uses the turbine name from the templateFile. + Example input: turbineTemplateFullFilename='../IEA15.T1.fst' """ # --- Read template fast farm file fst=FASTInputFile(templateFile) @@ -354,7 +357,12 @@ def writeFastFarm(outputFile, templateFile, xWT, yWT, zWT, FFTS=None, OutListT1= nCol= 10 else: nCol = 4 - ref_path = fst['WindTurbines'][0,3] + + if turbineTemplateFullFilename is None: + ref_path = fst['WindTurbines'][0,3] + else: + ref_path = f'"{turbineTemplateFullFilename}"' # add quotes + WT = np.array(['']*nWT*nCol,dtype='object').reshape((nWT,nCol)) for iWT,(x,y,z) in enumerate(zip(xWT,yWT,zWT)): WT[iWT,0]=x From c5ea64eb84b66bb6df54769a17094d7eac47f1b7 Mon Sep 17 00:00:00 2001 From: Regis Thedin Date: Mon, 13 Jan 2025 11:07:16 -0700 Subject: [PATCH 5/9] FF: add minor warning print-out --- openfast_toolbox/fastfarm/AMRWindSimulation.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/openfast_toolbox/fastfarm/AMRWindSimulation.py b/openfast_toolbox/fastfarm/AMRWindSimulation.py index ae7be5a..7c8f708 100644 --- a/openfast_toolbox/fastfarm/AMRWindSimulation.py +++ b/openfast_toolbox/fastfarm/AMRWindSimulation.py @@ -175,6 +175,10 @@ def _checkInputs(self): # Check that level_lr is >= 0 # check that level_hr is <=self.max_level + # For convenience, the turbines should not be zero-indexed + if self.wts[0]['name'] != 'T1': + print(f"--- WARNING: Recommended turbine numbering should start at 1. 
Currently it is zero-indexed.") + # Flags of given/calculated spatial resolution for warning/error printing purposes self.given_ds_hr = False @@ -652,7 +656,7 @@ def write_sampling_params(self, out=None, overwrite=False): sampling_labels_lr_str = " ".join(str(item) for item in self.sampling_labels_lr) sampling_labels_hr_str = " ".join(str(item) for item in self.sampling_labels_hr) s += f"#¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨#\n" - s += f"# POST-Processing #\n" + s += f"# POST-PROCESSING #\n" s += f"#.......................................#\n" s += f"# Sampling info generated by AMRWindSamplingCreation.py on {self.curr_datetime}\n" s += f"incflo.post_processing = {self.postproc_name_lr} {self.postproc_name_hr} # averaging\n\n\n" From aeedbd3094b41bb6ddfdaad026067a384d057290 Mon Sep 17 00:00:00 2001 From: Regis Thedin Date: Mon, 27 Jan 2025 14:28:59 -0700 Subject: [PATCH 6/9] FF: add postprocessing routines --- .../fastfarm/postpro/ff_postpro.py | 915 ++++++++++++++++++ 1 file changed, 915 insertions(+) create mode 100644 openfast_toolbox/fastfarm/postpro/ff_postpro.py diff --git a/openfast_toolbox/fastfarm/postpro/ff_postpro.py b/openfast_toolbox/fastfarm/postpro/ff_postpro.py new file mode 100644 index 0000000..d9e93ab --- /dev/null +++ b/openfast_toolbox/fastfarm/postpro/ff_postpro.py @@ -0,0 +1,915 @@ +import numpy as np +import pandas as pd +import xarray as xr +import os, sys +from multiprocessing import Pool +from itertools import repeat + +from openfast_toolbox.io import TurbSimFile, FASTOutputFile, VTKFile, FASTInputFile + +def _get_fstf_filename(caseobj): + if hasattr(caseobj, 'outputFFfilename'): + return os.path.splitext(caseobj.outputFFfilename)[0] + else: + return 'FFarm_mod' + + +def readTurbineOutputPar(caseobj, dt_openfast, dt_processing, saveOutput=True, output='zarr', + iCondition=0, fCondition=-1, iCase=0, fCase=-1, iSeed=0, fSeed=-1, iTurbine=0, fTurbine=-1, + nCores=36): + ''' + Inputs + ------ + output: str + Either 'nc' or 'zarr'. Determines the output format + Zero-indexed initial and final values for conditions, cases, seeds, and turbines. + + + ''' + + + #from multiprocessing import set_start_method + #try: + # set_start_method("spawn") + #except RuntimeError: + # print(f'Fell into RunTime error on `set_start_method("spawn")`. 
Continuing..\n') + + + if fCondition==-1: + fCondition = caseobj.nConditions + if fCondition-iCondition <= 0: + raise ValueError (f'Final condition to read needs to be larger than initial.') + + if fCase==-1: + fCase = caseobj.nCases + if fCase-iCase <= 0: + raise ValueError (f'Final case to read needs to be larger than initial.') + + if fSeed==-1: + fSeed = caseobj.nSeeds + if fSeed-iSeed <= 0: + raise ValueError (f'Final seed to read needs to be larger than initial.') + + if fTurbine==-1: + fTurbine = caseobj.nTurbines + if fTurbine-iTurbine <= 0: + raise ValueError (f'Final turbine to read needs to be larger than initial.') + + if fCase-iCase < nCores: + print(f'Total number of cases requested ({fCase-iCase}) is lower than number of cores {nCores}.') + print(f'Changing the number of cores to {fCase-iCase}.') + nCores = fCase-iCase + + if output not in ['zarr','nc']: + raise ValueError (f'Output can only be zarr or nc') + + + outfilename = f'ds_turbineOutput_temp_cond{iCondition}_{fCondition}_case{iCase}_{fCase}_seed{iSeed}_{fSeed}_turb{iTurbine}_{fTurbine}_dt{dt_processing}s' + zarrstore = f'{outfilename}.zarr' + ncfile = f'{outfilename}.nc' + outputzarr = os.path.join(caseobj.path, zarrstore) + outputnc = os.path.join(caseobj.path, ncfile) + + if output=='zarr' and os.path.isdir(outputzarr) and saveOutput: + print(f'Output file {zarrstore} exists. Loading it..') + comb_ds = xr.open_zarr(outputzarr) + return comb_ds + if output=='nc' and os.path.isfile(outputnc) and saveOutput: + print(f'Output file {ncfile} exists. Loading it..') + comb_ds = xr.open_dataset(outputnc) + return comb_ds + + + print(f'Running readTurbineOutput in parallel using {nCores} workers') + + # Split all the cases in arrays of roughly the same size + chunks = np.array_split(range(iCase,fCase), nCores) + # Now, get the beginning and end of each separate chunk + iCase_list = [i[0] for i in chunks] + fCase_list = [i[-1]+1 for i in chunks] + print(f'iCase_list is {iCase_list}') + print(f'fCase_list is {fCase_list}') + + p = Pool() + ds_ = p.starmap(readTurbineOutput, zip(repeat(caseobj), # caseobj + repeat(dt_openfast), # dt_openfast + repeat(dt_processing), # dt_processing + repeat(False), # saveOutput + repeat(output), # output + repeat(iCondition), # iCondition + repeat(fCondition), # fCondition + iCase_list, # iCase + fCase_list, # fCase + repeat(iSeed), # iSeed + repeat(fSeed), # fSeed + repeat(iTurbine), # iTurbine + repeat(fTurbine), # fTurbine + ) + ) + + # Trying this out + print('trying to close the pool. does this seem to work better on notebooks?') + p.close() + p.terminate() + p.join() + + print(f'Done reading all output. Concatenating the arrays') + + try: + comb_ds = xr.combine_by_coords(ds_) + except ValueError as e: + if str(e) == "Coordinate variable case is neither monotonically increasing nor monotonically decreasing on all datasets": + print('Concatenation using combine_by_coords failed. Concatenating using merge instead.') + print(' WARNING: Indexes are _not_ monotonically increasing. Do not use `isel`.') + print(' Try using `.sortby()` to sort it.') + comb_ds = xr.merge(ds_) + else: + raise + + + if saveOutput: + print(f'Done concatenating. 
Saving {output} file.') + if output == 'zarr': comb_ds.to_zarr(outputzarr) + elif output == 'nc': comb_ds.to_netcdf(outputnc) + + + print('Finished.') + + return comb_ds + +def readTurbineOutput(caseobj, dt_openfast, dt_processing=1, saveOutput=True, output='zarr', + iCondition=0, fCondition=-1, iCase=0, fCase=-1, iSeed=0, fSeed=-1, iTurbine=0, fTurbine=-1): + ''' + caseobj: FASTFarmCaseSetup object + Object containing all the case information + dt_openfast: scalar + OpenFAST time step + dt_processing: scalar + Time step to which the processing will be saved. Default=1 + saveOutput: bool + Whether or not to save the output to a zarr file + output: str + Format to save output. Only 'zarr' and 'nc' available + ''' + + if fCondition==-1: + fCondition = caseobj.nConditions + #else: + # fCondition += 1 # The user sets the last desired condition. This if for the np.arange. + if fCondition-iCondition <= 0: + raise ValueError (f'Final condition to read needs to be larger than initial.') + + if fCase==-1: + fCase = caseobj.nCases + #else: + #fCase += 1 # The user sets the last desired case. This if for the np.arange. + if fCase-iCase <= 0: + raise ValueError (f'Final case to read needs to be larger than initial.') + + if fSeed==-1: + fSeed = caseobj.nSeeds + #else: + # fSeed += 1 # The user sets the last desired seed. This is for the np.arange + if fSeed-iSeed <= 0: + raise ValueError (f'Final seed to read needs to be larger than initial.') + + if fTurbine==-1: + fTurbine = caseobj.nTurbines + #else: + # fTurbine += 1 # The user sets the last desired turbine. This if for the np.arange. + if fTurbine-iTurbine <= 0: + raise ValueError (f'Final turbine to read needs to be larger than initial.') + + + outfilename = f'ds_turbineOutput_temp_cond{iCondition}_{fCondition}_case{iCase}_{fCase}_seed{iSeed}_{fSeed}_turb{iTurbine}_{fTurbine}_dt{dt_processing}s' + zarrstore = f'{outfilename}.zarr' + ncfile = f'{outfilename}.nc' + outputzarr = os.path.join(caseobj.path, zarrstore) + outputnc = os.path.join(caseobj.path, ncfile) + + # Read or process turbine output + if os.path.isdir(outputzarr) or os.path.isfile(outputnc): + # Data already processed. Reading output + if output == 'zarr': turbs = xr.open_zarr(outputzarr) + elif output == 'nc': turbs = xr.open_dataset(outputnc) + else: + print(f'{outfilename}.{output} does not exist. Reading output data...') + # Processed data not saved. 
Reading it + dt_ratio = int(dt_processing/dt_openfast) + + turbs_cond = [] + for cond in np.arange(iCondition, fCondition, 1): + turbs_case = [] + for case in np.arange(iCase, fCase, 1): + turbs_seed = [] + for seed in np.arange(iSeed, fSeed, 1): + turbs_t=[] + for t in np.arange(iTurbine, fTurbine, 1): + print(f'Processing Condition {cond}, Case {case}, Seed {seed}, turbine {t+1}') + ff_file = os.path.join(caseobj.path, caseobj.condDirList[cond], caseobj.caseDirList[case], f'Seed_{seed}', f'{_get_fstf_filename(caseobj)}.T{t+1}.outb') + df = FASTOutputFile(ff_file).toDataFrame() + # Won't be able to send to xarray if columns are non-unique + if not df.columns.is_unique: + df = df.T.groupby(df.columns).first().T + ds_t = df.rename(columns={'Time_[s]':'time'}).set_index('time').to_xarray() + ds_t = ds_t.isel(time=slice(0,None,dt_ratio)) + ds_t = ds_t.expand_dims(['cond','case','seed','turbine']).assign_coords({'cond': [caseobj.condDirList[cond]], + 'case':[caseobj.caseDirList[case]], + 'seed':[seed], + 'turbine': [t+1]}) + turbs_t.append(ds_t) + turbs_t = xr.concat(turbs_t,dim='turbine') + turbs_seed.append(turbs_t) + turbs_seed = xr.concat(turbs_seed,dim='seed') + turbs_case.append(turbs_seed) + turbs_case = xr.concat(turbs_case,dim='case') + turbs_cond.append(turbs_case) + turbs_cond = xr.concat(turbs_cond, dim='cond') + + # Rename variables to get rid of problematic characters ('-','/') + varlist = list(turbs_cond.keys()) + varlistnew = [i.replace('/','_per_').replace('-','') for i in varlist] + renameDict = dict(zip(varlist, varlistnew)) + turbs = turbs_cond.rename_vars(renameDict) + + if saveOutput: + print(f'Saving output {outfilename}.{output}...') + if output == 'zarr': turbs.to_zarr(outputzarr) + elif output == 'nc': turbs.to_netcdf(outputnc) + print(f'Saving output {outfilename}.{output}... Done.') + + return turbs + + + + + + +def readFFPlanesPar(caseobj, sliceToRead, verbose=False, saveOutput=True, iCondition=0, fCondition=-1, iCase=0, fCase=-1, iSeed=0, fSeed=-1, itime=0, ftime=-1, skiptime=1, nCores=36): + + if fCondition==-1: + fCondition = caseobj.nConditions + if fCondition-iCondition <= 0: + raise ValueError (f'Final condition to read needs to be larger than initial.') + + if fCase==-1: + fCase = caseobj.nCases + if fCase-iCase <= 0: + raise ValueError (f'Final case to read needs to be larger than initial.') + + if fSeed==-1: + fSeed = caseobj.nSeeds + if fSeed-iSeed <= 0: + raise ValueError (f'Final seed to read needs to be larger than initial.') + + + zarrstore = f'ds_{sliceToRead}Slices_temp_cond{iCondition}_{fCondition}_case{iCase}_{fCase}_seed{iSeed}_{fSeed}.zarr' + outputzarr = os.path.join(caseobj.path, zarrstore) + + if os.path.isdir(outputzarr) and saveOutput: + print(f'Output file {zarrstore} exists. Attempting to read it..') + comb_ds = xr.open_zarr(outputzarr) + return comb_ds + + + # We will loop in the variable that we have more entries. E.g. if we have several condition, and only one case, + # then we will loop on the conditions. Analogous, if we have single conditions with many cases, we will loop on + # cases. Here we figure out where the loop will take place and set the appropriate number of cores to be used. 
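+    # Example (illustrative): requesting conditions 0..6 with a single case
+    # makes loopOn='cond' and the 6 conditions are split across the workers;
+    # 1 condition with cases 0..12 makes loopOn='case' instead.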
+    if fCondition-iCondition > fCase-iCase:
+        loopOn = 'cond'
+        print(f'Looping on conditions.')
+        if fCondition-iCondition < nCores:
+            print(f'Total number of conditions requested ({fCondition-iCondition}) is lower than number of cores {nCores}.')
+            print(f'Changing the number of cores to {fCondition-iCondition}.')
+            nCores = fCondition-iCondition
+    else:
+        loopOn = 'case'
+        print(f'Looping on cases.')
+        if fCase-iCase < nCores:
+            print(f'Total number of cases requested ({fCase-iCase}) is lower than number of cores {nCores}.')
+            print(f'Changing the number of cores to {fCase-iCase}.')
+            nCores = fCase-iCase
+
+
+    print(f'Running readFFPlanes in parallel using {nCores} workers')
+
+    if loopOn == 'cond':
+
+        # Split all the conditions in arrays of roughly the same size
+        chunks = np.array_split(range(iCondition,fCondition), nCores)
+        # Now, get the beginning and end of each separate chunk
+        iCond_list = [i[0] for i in chunks]
+        fCond_list = [i[-1]+1 for i in chunks]
+        print(f'iCond_list is {iCond_list}')
+        print(f'fCond_list is {fCond_list}')
+
+        # For generality, we create the list of cases as a repeat
+        iCase_list = repeat(iCase)
+        fCase_list = repeat(fCase)
+
+    elif loopOn == 'case':
+
+        # Split all the cases in arrays of roughly the same size
+        chunks = np.array_split(range(iCase,fCase), nCores)
+        # Now, get the beginning and end of each separate chunk
+        iCase_list = [i[0] for i in chunks]
+        fCase_list = [i[-1]+1 for i in chunks]
+        print(f'iCase_list is {iCase_list}')
+        print(f'fCase_list is {fCase_list}')
+
+        # For generality, we create the list of cond as a repeat
+        iCond_list = repeat(iCondition)
+        fCond_list = repeat(fCondition)
+
+    else:
+        raise ValueError (f"Unexpected value of loopOn ({loopOn}); this should not occur.")
+
+
+    p = Pool(nCores)
+    ds_ = p.starmap(readFFPlanes, zip(repeat(caseobj),      # caseobj
+                                      repeat(sliceToRead),  # slicesToRead
+                                      repeat(verbose),      # verbose
+                                      repeat(False),        # saveOutput
+                                      iCond_list,           # iCondition
+                                      fCond_list,           # fCondition
+                                      iCase_list,           # iCase
+                                      fCase_list,           # fCase
+                                      repeat(iSeed),        # iSeed
+                                      repeat(fSeed),        # fSeed
+                                      repeat(itime),        # itime
+                                      repeat(ftime),        # ftime
+                                      repeat(skiptime)      # skiptime
+                                     )
+                   )
+
+    print(f'Done reading all output. Concatenating the arrays.')
+    comb_ds = xr.combine_by_coords(ds_)
+
+    if saveOutput:
+        print('Done concatenating. Saving zarr file.')
+        comb_ds.to_zarr(outputzarr)
+
+    print('Finished.')
+
+    return comb_ds
+
+
+
+
+
+
+
+
+
+def readFFPlanes(caseobj, slicesToRead=['x','y','z'], verbose=False, saveOutput=True, iCondition=0, fCondition=-1, iCase=0, fCase=-1, iSeed=0, fSeed=-1, itime=0, ftime=-1, skiptime=1, outformat='zarr'):
+    '''
+    Read and process FAST.Farm planes into xarrays.
+
+    INPUTS
+    ======
+    iCondition, fCondition, iCase, fCase, iSeed, fSeed: int
+        Zero-indexed initial and final condition/case/seed to read
+    itime: int
+        Initial timestep to open and read
+    ftime: int
+        Final timestep to open and read
+    skiptime: int
+        Read at every skiptime timestep. Used when data is too fine and not needed.
+
+    '''
+
+    if fCondition==-1:
+        fCondition = caseobj.nConditions
+    #else:
+    #    fCondition += 1 # The user sets the last desired condition. This is for the np.arange.
+    if fCondition-iCondition <= 0:
+        raise ValueError (f'Final condition to read needs to be larger than initial.')
+
+    if fCase==-1:
+        fCase = caseobj.nCases
+    #else:
+    #    fCase += 1 # The user sets the last desired case. This is for the np.arange.
+    if fCase-iCase <= 0:
+        raise ValueError (f'Final case to read needs to be larger than initial.')
+
+    if fSeed==-1:
+        fSeed = caseobj.nSeeds
+    #else:
+    #    fSeed += 1 # The user sets the last desired seed. 
This is for the np.arange + if fSeed-iSeed <= 0: + raise ValueError (f'Final seed to read needs to be larger than initial.') + + if skiptime<1: + raise ValueError (f'Skiptime should be 1 or greater. If 1, no slices will be skipped.') + + print(f'Requesting to save {slicesToRead} slices') + + #if nConditions is None: + # nConditions = caseobj.nConditions + #else: + # if nConditions > caseobj.nConditions: + # print(f'WARNING: Requested {nConditions} conditions, but only {caseobj.nConditions} are available. Reading {caseobj.nConditions} conditions') + # nConditions = caseobj.nConditions + + #if nCases is None: + # nCases = caseobj.nCases + #else: + # if nCases > caseobj.nCases: + # print(f'WARNING: Requested {nCases} cases, but only {caseobj.nCases} are available. Reading {caseobj.nCases} cases') + # nCases = caseobj.nCases + + #if nSeeds is None: + # nSeeds = caseobj.nSeeds + #else: + # if nSeeds > caseobj.nSeeds: + # print(f'WARNING: Requested {nSeeds} seeds, but only {caseobj.nSeeds} are available. Reading {caseobj.nSeeds} seeds') + # nSeeds = caseobj.nSeeds + + # Read all VTK output for each plane and save an nc files for each normal. Load files if present. + for slices in slicesToRead: + + storefile = f'ds_{slices}Slices_temp_cond{iCondition}_{fCondition}_case{iCase}_{fCase}_seed{iSeed}_{fSeed}' + if outformat == 'zarr': + outputfile = os.path.join(caseobj.path, f'{storefile}.zarr') + elif outformat == 'nc': + outputfile = os.path.join(caseobj.path, f'{storefile}.nc') + + if os.path.isdir(outputfile): # it's zarr + if len(slicesToRead) > 1: + print(f"!! WARNING: Asked for multiple slices. Returning only the first one, {slices}\n", + f" To load other slices, request `slicesToRead='y'`") + print(f'Processed output for slice {slices} found. Loading it.') + # Data already processed. Reading output + Slices = xr.open_zarr(outputfile) + return Slices + + elif os.path.isfile(outputfile): # it's nc + if len(slicesToRead) > 1: + print(f"!! WARNING: Asked for multiple slices. Returning only the first one, {slices}\n", + f" To load other slices, request `slicesToRead='y'`") + print(f'Processed output for slice {slices} found. Loading it.') + # Data already processed. Reading output + Slices = xr.open_dataset(outputfile) + return Slices + + + else: + + # This for-loop is due to memory allocation requirements + #print(f'Processing slices normal in the {slices} direction...') + Slices_cond = [] + for cond in np.arange(iCondition, fCondition, 1): + Slices_case = [] + for case in np.arange(iCase, fCase, 1): + Slices_seed = [] + for seed in np.arange(iSeed, fSeed, 1): + seedPath = os.path.join(caseobj.path, caseobj.condDirList[cond], caseobj.caseDirList[case], f'Seed_{seed}') + + # Read FAST.Farm input to determine outputs + ff_file = FASTInputFile(os.path.join(seedPath,f'{_get_fstf_filename(caseobj)}.fstf')) + + tmax = ff_file['TMax'] + NOutDisWindXY = ff_file['NOutDisWindXY'] + OutDisWindZ = ff_file['OutDisWindZ'] + NOutDisWindYZ = ff_file['NOutDisWindYZ'] + OutDisWindX = ff_file['OutDisWindX'] + NOutDisWindXZ = ff_file['NOutDisWindXZ'] + WrDisDT = ff_file['WrDisDT'] + + # Determine number of output VTKs + nOutputTimes = int(np.floor(tmax/WrDisDT)) + + # Determine number of output digits for reading + ndigitsplane = len(str(max(NOutDisWindXY,NOutDisWindXZ,NOutDisWindYZ))) + ndigitstime = len(str(nOutputTimes)) + 1 # this +1 is experimental. I had 1800 planes and got 5 digits. + # If this breaks again and I need to come here to fix, I need to ask Andy how the amount of digits is determined. 
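+                        # Worked example (hypothetical numbers): with NOutDisWindXY=2
+                        # and nOutputTimes=1800, ndigitsplane=1 and ndigitstime=5, so
+                        # files are expected with names like FFarm_mod.Low.DisXY1.00042.vtk
+                        # (FFarm_mod being the default .fstf basename).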
+ + + # Determine how many snapshots to read depending on input + if ftime==-1: + ftime=nOutputTimes + elif ftime>nOutputTimes: + raise ValueError (f'Final time step requested ({ftime}) is greater than the total available ({nOutputTimes})') + + + # Print info + print(f'Processing {slices} slice: Condition {cond}, Case {case}, Seed {seed}, snapshot {itime} to {ftime} ({nOutputTimes} available)') + + if slices == 'z': + # Read Low-res z-planes + Slices=[] + for zplane in range(NOutDisWindXY): + Slices_t=[] + #for t in range(nOutputTimes): + for t in np.arange(itime,ftime,skiptime): + file = f'{_get_fstf_filename(caseobj)}.Low.DisXY{zplane+1:0{ndigitsplane}d}.{t:0{ndigitstime}d}.vtk' + if verbose: print(f'Reading z plane {zplane} for time step {t}: \t {file}') + + vtk = VTKFile(os.path.join(seedPath, 'vtk_ff', file)) + ds = readAndCreateDataset(vtk, caseobj, cond=cond, case=case, seed=seed, t=t, WrDisDT=WrDisDT) + + Slices_t.append(ds) + Slices_t = xr.concat(Slices_t,dim='time') + Slices.append(Slices_t) + Slices = xr.concat(Slices,dim='z') + + elif slices == 'y': + # Read Low-res y-planes + Slices=[] + for yplane in range(NOutDisWindXZ): + Slices_t=[] + #for t in range(nOutputTimes): + for t in np.arange(itime,ftime,skiptime): + file = f'{_get_fstf_filename(caseobj)}.Low.DisXZ{yplane+1:0{ndigitsplane}d}.{t:0{ndigitstime}d}.vtk' + if verbose: print(f'Reading y plane {yplane} for time step {t}: \t {file}') + + vtk = VTKFile(os.path.join(seedPath, 'vtk_ff', file)) + ds = readAndCreateDataset(vtk, caseobj, cond=cond, case=case, seed=seed, t=t, WrDisDT=WrDisDT) + + Slices_t.append(ds) + Slices_t = xr.concat(Slices_t,dim='time') + Slices.append(Slices_t) + Slices = xr.concat(Slices,dim='y') + + elif slices == 'x': + # Read Low-res x-planes + Slices=[] + for xplane in range(NOutDisWindYZ): + Slices_t=[] + print(f'Processing {slices} slice: Condition {cond}, Case {case}, Seed {seed}, x plane {xplane}') + #for t in range(nOutputTimes): + for t in np.arange(itime,ftime,skiptime): + file = f'{_get_fstf_filename(caseobj)}.Low.DisYZ{xplane+1:0{ndigitsplane}d}.{t:0{ndigitstime}d}.vtk' + if verbose: print(f'Reading x plane {xplane} for time step {t}: \t {file}') + + vtk = VTKFile(os.path.join(seedPath, 'vtk_ff', file)) + ds = readAndCreateDataset(vtk, caseobj, cond=cond, case=case, seed=seed, t=t, WrDisDT=WrDisDT) + + Slices_t.append(ds) + Slices_t = xr.concat(Slices_t,dim='time') + Slices.append(Slices_t) + Slices = xr.concat(Slices,dim='x') + + else: + raise ValueError(f'Only slices x, y, z are available. Slice {slices} was requested. 
Stopping.') + + Slices_seed.append(Slices) + Slices_seed = xr.concat(Slices_seed, dim='seed') + Slices_case.append(Slices_seed) + Slices_case = xr.concat(Slices_case, dim='case') + Slices_cond.append(Slices_case) + + Slices = xr.concat(Slices_cond, dim='cond') + + if saveOutput: + print(f'Saving {slices} slice file {outputfile}...') + if outformat == 'zarr': + Slices.to_zarr(outputfile) + elif outformat == 'nc': + Slices.to_netcdf(outputfile) + + if len(slicesToRead) == 1: + # Single slice was requested + print(f'Since single slice was requested, returning it.') + return Slices + + +def readAndCreateDataset(vtk, caseobj, cond=None, case=None, seed=None, t=None, WrDisDT=None): + + # Get info from VTK + x = vtk.xp_grid + y = vtk.yp_grid + z = vtk.zp_grid + u = vtk.point_data_grid['Velocity'][:,:,:,0] + v = vtk.point_data_grid['Velocity'][:,:,:,1] + w = vtk.point_data_grid['Velocity'][:,:,:,2] + + if t is None and WrDisDT is None: + t=1 + WrDisDT = 1 + + ds = xr.Dataset({ + 'u': (['x', 'y', 'z'], u), + 'v': (['x', 'y', 'z'], v), + 'w': (['x', 'y', 'z'], w), }, + coords={ + 'x': (['x'], x), + 'y': (['y'], y), + 'z': (['z'], z), + 'time': [t*WrDisDT] }, + ) + + if cond is not None: ds = ds.expand_dims('cond').assign_coords({'cond': [caseobj.condDirList[cond]]}) + if case is not None: ds = ds.expand_dims('case').assign_coords({'case': [caseobj.caseDirList[case]]}) + if seed is not None: ds = ds.expand_dims('seed').assign_coords({'seed': [seed]}) + + return ds + + +def readVTK_structuredPoints (vtkpath): + ''' + Function to read the VTK written by utilities/postprocess_amr_boxes2vtk.py + Input + ----- + vtkpath: str + Full path of the vtk, including its extension + ''' + + import vtk + + reader = vtk.vtkStructuredPointsReader() + reader.SetFileName(vtkpath) + reader.Update() + + output = reader.GetOutput() + + dims = output.GetDimensions() + spacing = output.GetSpacing() + origin = output.GetOrigin() + nx, ny, nz = dims + + data_type = output.GetScalarTypeAsString() + point_data = output.GetPointData() + vector_array = point_data.GetArray(0) + num_components = vector_array.GetNumberOfComponents() + + + # Convert vector array to a NumPy array + vector_data = np.zeros((nx, ny, nz, num_components), dtype=np.float32) + for i in range(nx): + for j in range(ny): + for k in range(nz): + index = i + nx * (j + ny * k) + vector = vector_array.GetTuple(index) + vector_data[i, j, k, :] = vector + + # Create coordinates along x, y, and z dimensions + x_coords = origin[0] + spacing[0] * np.arange(nx) + y_coords = origin[1] + spacing[1] * np.arange(ny) + z_coords = origin[2] + spacing[2] * np.arange(nz) + + # Create the xarray dataset + ds = xr.Dataset(data_vars = { + 'u': (['x', 'y', 'z'], vector_data[:,:,:,0]), + 'v': (['x', 'y', 'z'], vector_data[:,:,:,1]), + 'w': (['x', 'y', 'z'], vector_data[:,:,:,2]), + }, + coords = { + 'x': x_coords, + 'y': y_coords, + 'z': z_coords, + } + ) + + return ds + + +def compute_load_rose(turbs, nSectors=18): + + channel_pairs = [['TwrBsMxt_[kNm]', 'TwrBsMyt_[kNm]'], + ['RootMxc1_[kNm]', 'RootMyc1_[kNm]'], + ['LSSGagMya_[kNm]','LSSGagMza_[kNm]']] + channel_out = ['TwrBsMt_[kNm]', 'RootMc1_[kNm]', 'LSSGagMa_[kNm]'] + + if nSectors%2 != 0: + print(f'WARNING: it is recommended an even number of sectors') + + # Create the sector bins + theta_bin = np.linspace(0,180, nSectors+1) + + # Bin the loads for each pair + for p, curr_pair in enumerate(channel_pairs): + print(f'Processing pair {curr_pair[0]}, {curr_pair[1]}.') + + load_0deg = turbs[curr_pair[0]] + load_90deg = 
turbs[curr_pair[1]] + all_load = [] + for i, theta in enumerate(theta_bin[:-1]): + curr_theta = (theta_bin[i] + theta_bin[i+1])/2 + curr_load = load_0deg*cosd(curr_theta) + load_90deg*sind(curr_theta) + all_load.append(curr_load.expand_dims('theta').assign_coords({'theta': [curr_theta]})) + + all_load = xr.concat(all_load, dim='theta').to_dataset(name=channel_out[p]) + + turbs = xr.merge([turbs,all_load]) + + return turbs + + + +def compute_del(ts, elapsed, lifetime, load2stress, slope, Sult, Sc=0.0, rainflow_bins=100, return_damage=False, goodman_correction=False): + """ + Function from pCrunch. + + Computes damage equivalent load of input `ts`. + + Parameters + ---------- + ts : np.array + Time series to calculate DEL for. + elapsed : int | float + Elapsed time of the time series. + lifetime : int | float + Design lifetime of the component / material in years + load2stress : float (optional) + Linear scaling coefficient to convert an applied load to stress such that S = load2stress * L + slope : int | float + Slope of the fatigue curve. + Sult : float (optional) + Ultimate stress for use in Goodman equivalent stress calculation + Sc : float (optional) + Stress-axis intercept of log-log S-N Wohler curve. Taken as ultimate stress unless specified + rainflow_bins : int + Number of bins used in rainflow analysis. + Default: 100 + return_damage: boolean + Whether to compute both DEL and damage + Default: False + goodman_correction: boolean + Whether to apply Goodman mean correction to loads and stress + Default: False + + """ + + import fatpack + + Scin = Sc if Sc > 0.0 else Sult + + try: + F, Fmean = fatpack.find_rainflow_ranges(ts, return_means=True) + fatpack_rainflow_successful = 1 + except: + print(f'Fatpack call for find_rainflow_ranges did not work. Setting F=Fmean=0') + fatpack_rainflow_successful = 0 + F = Fmean = np.zeros(1) + + if goodman_correction and np.abs(load2stress) > 0.0: + F = fatpack.find_goodman_equivalent_stress(F, Fmean, Sult/np.abs(load2stress)) + + Nrf, Frf = fatpack.find_range_count(F, rainflow_bins) + DELs = Frf ** slope * Nrf / elapsed + DEL = DELs.sum() ** (1.0 / slope) + # With fatpack do: + #curve = fatpack.LinearEnduranceCurve(1.) + #curve.m = slope + #curve.Nc = elapsed + #DEL = curve.find_miner_sum(np.c_[Frf, Nrf]) ** (1 / slope) + + # Compute Palmgren/Miner damage using stress + if not return_damage: + return DEL + + D = np.nan # default return value + if return_damage and np.abs(load2stress) > 0.0: + try: + S, Mrf = fatpack.find_rainflow_ranges(ts*load2stress, return_means=True) + except: + S = Mrf = np.zeros(1) + if goodman_correction: + S = fatpack.find_goodman_equivalent_stress(S, Mrf, Sult) + Nrf, Srf = fatpack.find_range_count(S, rainflow_bins) + curve = fatpack.LinearEnduranceCurve(Scin) + curve.m = slope + curve.Nc = 1 + D = curve.find_miner_sum(np.c_[Srf, Nrf]) + if lifetime > 0.0: + D *= lifetime*365.0*24.0*60.0*60.0 / elapsed + + return DEL, D, fatpack_rainflow_successful + + + + + + +def calcDEL_theta (ds, var): + + # Set constants + lifetime = 25 # Design lifetime of the component / material in years + #load2stress = 1 # Linear scaling coefficient to convert an applied load to stress such that S = load2stress * L + #slope = 10 # Wohler exponent in the traditional SN-curve of S = A * N ^ -(1/m) (rthedin: 4 for tower, 10 for blades) + #Sult=6e8 # Ultimate stress for use in Goodman equivalent stress calculation + Sc = 0 # Stress-axis intercept of log-log S-N Wohler curve. 
Taken as ultimate stress unless specified + rainflow_bins = 100 + + # rotorse.rs.strains.axial_root_sparU_load2stress,m**2,[ 0. 0. -0.06122281 -0.02535384 0.04190673 0. ],Linear conversion factors between loads [Fx-z; Mx-z] and axial stress in the upper spar cap at blade root + # rotorse.rs.strains.axial_root_sparL_load2stress,m**2,[ 0. 0. -0.06122281 0.02462415 -0.0423436 0. ],Linear conversion factors between loads [Fx-z; Mx-z] and axial stress in the lower spar cap at blade root + # drivese.lss_axial_load2stress,m**2,[1.0976203 0. 0. 0. 1.56430446 1.56430446], + # drivese.lss_shear_load2stress,m**2,[0. 1.77770979 1.77770979 0.78215223 0. 0. ], + # towerse.member.axial_load2stress,m**2,"[[0. 0. 0.80912515 0.32621673 0.32621673 0. ] + # towerse.member.shear_load2stress,m**2,"[[1.33022717 1.33022717 0. 0. 0. 0.16310837] + # From Garrett 2023-11-20 for the blade root: [[0. , 0. , 0.61747941, 0.49381254, 0.49381254, 0. ]] + # + # These are the values we are interested in (JJ pointed out the positions in the array, except for the blade bending) + # blade bending 0.49381254 # from cylinder/blade axial + # lss bending 1.56430446 # from lss axial + # tower bending 0.32621673 # from tower axial + # tower torsion 0.16310837 # from tower shear + + + # Ultimate stress values from https://github.com/IEAWindTask37/IEA-15-240-RWT/blob/master/WT_Ontology/IEA-15-240-RWT.yaml#L746 + if var == 'RootMc1_[kNm]': # blade root bending + slope = 10 + Sult = 1047e6 # compression. Getting the lowest of compression/tension + load2stress = 0.49381254 + elif var == 'TwrBsMt_[kNm]': # tower bending + slope = 4 + Sult = 450e6 + load2stress = 0.32621673 + elif var == 'LSSGagMa_[kNm]': # lss bending + slope = 4 + Sult = 814e6 + load2stress = 1.56430446 + else: + raise ValueError('Variable not recognized') + + # Initialize variable + full_del_withgoodman = np.zeros((len(ds.theta), len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + full_del_woutgoodman = np.zeros((len(ds.theta), len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + full_damage = np.zeros((len(ds.theta), len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + full_fatpack = np.zeros((len(ds.theta), len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + + # Loop through everything and compute DEL + for i, theta in enumerate(ds.theta): + for j, seed in enumerate(ds.seed): + for k, turb in enumerate(ds.turbine): + #print(f'Processing theta {i+1}/{len(ds.theta)}, seed {j+1}/{len(ds.seed)}, turb {k+1}/{len(ds.turbine)}, all {len(ds.wdir)} wdir, all {len(ds.yawCase)} yawCases. ', end='\r',flush=True) + for l, wdir in enumerate(ds.wdir): + print(f'Processing theta {i+1}/{len(ds.theta)}, seed {j+1}/{len(ds.seed)}, turb {k+1}/{len(ds.turbine)}, wdir {l+1}/{len(ds.wdir)}, all {len(ds.yawCase)} yawCases. 
', end='\r', flush=True) + for m, yaw in enumerate(ds.yawCase): + ts = ds.sel(wdir=wdir, yawCase=yaw, seed=seed, turbine=turb, theta=theta).squeeze()[var]*1e3 # convert kNm to Nm + elapsed = (ts.time[-1]-ts.time[0]).values + + DEL_withgoodman, damage, fatpack_rainflow_successful = compute_del(ts, elapsed, lifetime, load2stress, slope, Sult, Sc=Sc, rainflow_bins=rainflow_bins, return_damage=True, goodman_correction=True) + DEL_woutgoodman = compute_del(ts, elapsed, lifetime, load2stress, slope, Sult, Sc=Sc, rainflow_bins=rainflow_bins, return_damage=False, goodman_correction=False) + + full_del_withgoodman[i,j,k,l,m] = DEL_withgoodman + full_del_woutgoodman[i,j,k,l,m] = DEL_woutgoodman + full_damage[i,j,k,l,m] = damage + full_fatpack[i,j,k,l,m] = fatpack_rainflow_successful + + + ds[f'DEL_withgoodman_[Nm]_{var}'] = (('theta', 'seed', 'turbine', 'wdir', 'yawCase'), full_del_withgoodman) + ds[f'DEL_woutgoodman_[Nm]_{var}'] = (('theta', 'seed', 'turbine', 'wdir', 'yawCase'), full_del_woutgoodman) + ds[f'damage_{var}'] = (('theta', 'seed', 'turbine', 'wdir', 'yawCase'), full_damage) + ds[f'fatpack_success_{var}'] = (('theta', 'seed', 'turbine', 'wdir', 'yawCase'), full_fatpack) + + return ds + + + + +def calcDEL_nontheta (ds, var): + + # Set constants + lifetime = 25 # Design lifetime of the component / material in years + #load2stress = 1 # Linear scaling coefficient to convert an applied load to stress such that S = load2stress * L + #slope = 10 # Wohler exponent in the traditional SN-curve of S = A * N ^ -(1/m) (rthedin: 4 for tower, 10 for blades) + #Sult=6e8 # Ultimate stress for use in Goodman equivalent stress calculation + Sc =0 # Stress-axis intercept of log-log S-N Wohler curve. Taken as ultimate stress unless specified + rainflow_bins = 100 + + # Ultimate stress values from https://github.com/IEAWindTask37/IEA-15-240-RWT/blob/master/WT_Ontology/IEA-15-240-RWT.yaml#L746 + if var =='RootMzc1_[kNm]': + raise NotImplementedError('Blade root torsional moment not implemented') + elif var == 'TwrBsMzt_[kNm]': # tower torsional + slope = 4 + Sult = 450e6 + load2stress = 0.16310837 + else: + raise ValueError('Variable not recognized') + + # Initialize variable + full_del_withgoodman = np.zeros((len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + full_del_woutgoodman = np.zeros((len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + full_damage = np.zeros((len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + full_fatpack = np.zeros((len(ds.seed), len(ds.turbine), len(ds.wdir), len(ds.yawCase))) + + # Loop through everything and compute DEL + for i, seed in enumerate(ds.seed): + for j, turb in enumerate(ds.turbine): + for k, wdir in enumerate(ds.wdir): + print(f'Processing seed {i+1}/{len(ds.seed)}, turb {j+1}/{len(ds.turbine)}, wdir {k+1}/{len(ds.wdir)}, all {len(ds.yawCase)} yawCases. 
', end='\r', flush=True) + for l, yaw in enumerate(ds.yawCase): + ts = ds.sel(wdir=wdir, yawCase=yaw, seed=seed, turbine=turb).squeeze()[var]*1e3 # convert kNm to Nm + elapsed = (ts.time[-1]-ts.time[0]).values + + DEL_withgoodman, damage, fatpack_rainflow_successful = compute_del(ts, elapsed, lifetime, load2stress, slope, Sult, Sc=Sc, rainflow_bins=rainflow_bins, return_damage=True, goodman_correction=True) + DEL_woutgoodman = compute_del(ts, elapsed, lifetime, load2stress, slope, Sult, Sc=Sc, rainflow_bins=rainflow_bins, return_damage=False, goodman_correction=False) + + full_del_withgoodman[i,j,k,l] = DEL_withgoodman + full_del_woutgoodman[i,j,k,l] = DEL_woutgoodman + full_damage[i,j,k,l] = damage + full_fatpack[i,j,k,l] = fatpack_rainflow_successful + + + ds[f'DEL_withgoodman_[Nm]_{var}'] = (('seed', 'turbine', 'wdir', 'yawCase'), full_del_withgoodman) + ds[f'DEL_woutgoodman_[Nm]_{var}'] = (('seed', 'turbine', 'wdir', 'yawCase'), full_del_woutgoodman) + ds[f'damage_{var}'] = (('seed', 'turbine', 'wdir', 'yawCase'), full_damage) + ds[f'fatpack_success_{var}'] = (('seed', 'turbine', 'wdir', 'yawCase'), full_fatpack) + + return ds + + + From b15811ac0fe55c380ce29de026949f2cf69a8440 Mon Sep 17 00:00:00 2001 From: Regis Thedin Date: Fri, 31 Jan 2025 15:55:16 -0700 Subject: [PATCH 7/9] FF: add legend option for viz --- openfast_toolbox/fastfarm/FASTFarmCaseCreation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py index 41a7a1e..e1ccc43 100644 --- a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py +++ b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py @@ -2268,7 +2268,7 @@ def FF_check_output(self): - def plot(self, figsize=(14,7), fontsize=13, saveFig=True, returnFig=False, figFormat='png', showTurbNumber=False): + def plot(self, figsize=(14,7), fontsize=13, saveFig=True, returnFig=False, figFormat='png', showTurbNumber=False, showLegend=True): import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=figsize) @@ -2337,7 +2337,8 @@ def plot(self, figsize=(14,7), fontsize=13, saveFig=True, returnFig=False, figFo # Remove duplicate entries from legend handles, labels = plt.gca().get_legend_handles_labels() by_label = dict(zip(labels, handles)) - plt.legend(by_label.values(), by_label.keys(), loc='upper left', bbox_to_anchor=(1.02,1.015), fontsize=fontsize, ncols=int(self.nTurbines/25)) + if showLegend: + plt.legend(by_label.values(), by_label.keys(), loc='upper left', bbox_to_anchor=(1.02,1.015), fontsize=fontsize, ncols=int(self.nTurbines/25)) ax.set_xlabel("x [m]", fontsize=fontsize) ax.set_ylabel("y [m]", fontsize=fontsize) From 7ee3864263e5735dcc35b93476dfc0e9bdfcca0c Mon Sep 17 00:00:00 2001 From: Regis Thedin Date: Fri, 31 Jan 2025 15:56:39 -0700 Subject: [PATCH 8/9] FF: adds FF output filename as a variable --- .../fastfarm/FASTFarmCaseCreation.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py index e1ccc43..d944ae2 100644 --- a/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py +++ b/openfast_toolbox/fastfarm/FASTFarmCaseCreation.py @@ -1014,6 +1014,9 @@ def checkIfExists(f): self.FFfilepath = os.path.join(self.templatePath,FFfilename) checkIfExists(self.FFfilepath) self.FFfilename = FFfilename + + # Set output FAST.Farm filename for convenience + self.outputFFfilename = 'FF.fstf' self._open_template_files() @@ -1790,7 
+1793,6 @@ def FF_setup(self, outlistFF=None, **kwargs):
         self.planes_yz = planes_yz[0:9]
         self.planes_xz = planes_xz[0:9]
 
-
         if self.inflowStr == 'LES':
             self._FF_setup_LES(**kwargs)
 
@@ -1875,7 +1877,7 @@ def _FF_setup_LES(self, seedsToKeep=1):
 
             # --------------- FAST.Farm ----------------- #
             templateFSTF = os.path.join(self.templatePath, self.FFfilename)
-            outputFSTF = os.path.join(seedPath, 'FFarm_mod.fstf')
+            outputFSTF = os.path.join(seedPath, self.outputFFfilename)
 
             # Write the file (mostly for turbine locations here)
             writeFastFarm(outputFSTF, templateFSTF, xWT, yWT, zWT, FFTS=None, OutListT1=self.outlistFF,
@@ -1885,7 +1887,7 @@ def _FF_setup_LES(self, seedsToKeep=1):
             ff_file = FASTInputFile(outputFSTF)
 
             # Open output file and change additional values manually or make sure we have the correct ones
-            ff_file['InflowFile'] = f'"unused"'
+            ff_file['InflowFile']  = f'"unused"'
             ff_file['Mod_AmbWind'] = self.Mod_AmbWind  # LES
             ff_file['TMax'] = self.tmax
@@ -1975,7 +1977,7 @@ def _FF_setup_TS(self):
 
             # --------------- FAST.Farm ----------------- #
             templateFSTF = os.path.join(self.templatePath, self.FFfilename)
-            outputFSTF = os.path.join(seedPath, 'FFarm_mod.fstf')
+            outputFSTF = os.path.join(seedPath, self.outputFFfilename)
 
             # Open TurbSim outputs for the Low box and one High box (they are all of the same size)
             lowbts = TurbSimFile(os.path.join(seedPath,'TurbSim', 'Low.bts'))
@@ -2205,6 +2207,9 @@ def FF_slurm_prepare(self, slurmfilepath):
             # Write seed
             sed_command = f"""sed -i "s|^seed.*|seed={seed}|g" {fname}"""
             _ = subprocess.call(sed_command, cwd=self.path, shell=True)
+            # Write FAST.Farm filename
+            sed_command = f"""sed -i "s/FFarm_mod.fstf/{self.outputFFfilename}/g" {fname}"""
+            _ = subprocess.call(sed_command, cwd=self.path, shell=True)
 
 
 
@@ -2236,6 +2241,9 @@ def FF_slurm_submit(self, qos='normal', A=None, t=None, p=None, delay=4):
             _ = subprocess.call(sub_command, cwd=self.path, shell=True)
             time.sleep(delay) # Sometimes the same job gets submitted twice. This gets around it.
 
+# ----------------------------------------------
+# HELPER FUNCTIONS
+# ----------------------------------------------
 
     def FF_check_output(self):
         '''

From 435047a6963b3eb63ed00ff0715a5f537f4315eb Mon Sep 17 00:00:00 2001
From: Regis Thedin
Date: Mon, 3 Feb 2025 17:16:28 -0700
Subject: [PATCH 9/9] FF: add amrwind boxes post-processing script

---
 .../postpro/postprocess_amr_boxes2vtk.py      | 231 ++++++++++++++++++
 1 file changed, 231 insertions(+)
 create mode 100644 openfast_toolbox/fastfarm/postpro/postprocess_amr_boxes2vtk.py

diff --git a/openfast_toolbox/fastfarm/postpro/postprocess_amr_boxes2vtk.py b/openfast_toolbox/fastfarm/postpro/postprocess_amr_boxes2vtk.py
new file mode 100644
index 0000000..be3533f
--- /dev/null
+++ b/openfast_toolbox/fastfarm/postpro/postprocess_amr_boxes2vtk.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+
+#SBATCH --job-name=pp_vtk
+#SBATCH --output amr2post_vtk.log.%j
+#SBATCH --nodes=1
+#SBATCH --time=1:00:00
+#SBATCH --account=
+#SBATCH --mem=160G
+# #SBATCH --qos=high
+
+# ----------------------------------------------------------------------- #
+# postprocess_amr_boxes2vtk.py                                             #
+#                                                                          #
+# Post process AMR-Wind output boxes for FAST.Farm and save vtk files for  #
+# each group (each individual sampling box). This script should be called  #
+# once for each output netcdf file and each group. If group is not given,  #
+# it expects only one (e.g. low box); otherwise, the script will stop and  #
+# warn the user. The output vtk is saved in $case/processedData/.          #
+#                                                                          #
+# Usage:                                                                   #
+#    postprocess_amr_boxes2vtk.py -p <path> -f <ncfile> [-g <group>]       #
+#                                                                          #
+# Example usage:                                                           #
+#    cd /full/path/to/amrwind/case                                         #
+#    path=$(pwd -P)                                                        #
+#    cd post_processing                                                    #
+#    ls   # see what sampling box files are available.                     #
+#    sbatch postprocess_amr_boxes2vtk.py [-J <jobname>] -p $path           #
+#           -f lowres40000.nc -g Low                                       #
+#    sbatch postprocess_amr_boxes2vtk.py [-J <jobname>] -p $path           #
+#           -f highres40000.nc -g HighT1_inflow0deg                        #
+#                                                                          #
+# Regis Thedin                                                             #
+# Mar 13, 2023                                                             #
+# regis.thedin@nrel.gov                                                    #
+# ----------------------------------------------------------------------- #
+
+import argparse
+import numpy as np
+import os, time
+from itertools import repeat
+import multiprocessing
+from windtools.amrwind.post_processing import StructuredSampling
+
+def main(samplingboxfile, pppath, pptag, requestedgroup, outpath, dt, t0, itime, ftime, steptime, offsetz, vtkstartind, terrain, ncores):
+
+    if ncores is None:
+        ncores = multiprocessing.cpu_count()
+
+    # -------- CONFIGURE RUN
+    s = StructuredSampling(pppath)
+
+    outpathgroup = os.path.join(outpath, requestedgroup)
+    if not os.path.exists(outpathgroup):
+        # Due to potential race condition, adding exist_ok flag as well
+        os.makedirs(outpathgroup, exist_ok=True)
+
+    # Redefine some variables as the user might call just this method
+    s.pptag = pptag
+    s.get_all_times_native()
+    if ftime == -1:
+        ftime = s.all_times[-1]
+    available_time_indexes = [n for n in s.all_times if itime <= n <= ftime]
+
+    ## Split all the time steps in arrays of roughly the same size
+    chunks = np.array_split(available_time_indexes, ncores)
+
+    # Get rid of the empty chunks (happens when the number of time steps is lower than the number of cores)
+    chunks = [c for c in chunks if c.size > 0]
+
+    # Now, get the beginning and end of each separate chunk
+    itime_list = [i[0] for i in chunks]
+    ftime_list = [i[-1]+1 for i in chunks]
+    print(f'itime_list is {itime_list}')
+    print(f'ftime_list is {ftime_list}')
+
+    p = multiprocessing.Pool()
+    ds_ = p.starmap(s.to_vtk, zip(repeat(requestedgroup),   # group
+                                  repeat(outpathgroup),     # outputPath
+                                  repeat(samplingboxfile),  # file
+                                  repeat(pptag),            # pptag
+                                  repeat(True),             # verbose
+                                  repeat(offsetz),          # offset in z
+                                  itime_list,               # itime
+                                  ftime_list,               # ftime
+                                  repeat(t0),               # t0
+                                  repeat(dt),               # dt
+                                  repeat(vtkstartind),      # vtkstartind
+                                  repeat(terrain)           # terrain
+                                 )
+                   )
+    print('Finished.')
+
+
+if __name__ == '__main__':
+
+    # ------------------------------------------------------------------------------
+    # -------------------------------- PARSING INPUTS ------------------------------
+    # ------------------------------------------------------------------------------
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--path", "-p", type=str, default=os.getcwd(),
+                        help="case full path (default cwd)")
+    parser.add_argument("--ncfile", "-f", type=str, default=None,
+                        help="netcdf sampling planes")
+    parser.add_argument("--dt", "-dt", default=None,
+                        help="time step for naming the boxes output")
+    parser.add_argument("--initialtime", "-t0", default=None,
+                        help="Time step related to first box output (optional)")
+    parser.add_argument("--group", "-g", type=str, default=None,
+                        help="group within netcdf file to be read, if more than one is available")
+    parser.add_argument("--itime", "-itime", type=int, default=0,
+                        help="sampling time step to start saving the data")
+    parser.add_argument("--ftime", "-ftime", type=int, default=-1,
+                        help="sampling time step to end saving the data")
+    parser.add_argument("--steptime", "-step", type=int, default=1,
+                        help="sampling time step increment to save the data")
parser.add_argument("--offsetz", "-offsetz", type=float, default=None, + help="Offset in the x direction, ensuring a point at hub height") + parser.add_argument("--vtkstartind", "-vtkstartind", default=0, + help="Index by which the names of the vtk files will be shifted") + parser.add_argument("--pptag", "-tag", type=str, default=None, + help="Post-processing tag (e.g. 'box_hr')") + parser.add_argument("--terrain", "-terrain", type=bool, default=False, + help="Whether or not to add NaN for inside-terrain domain") + parser.add_argument("--ncores", "-ncores", type=int, default=None, + help="Number of cores to be used. Default is machine's total") + + + args = parser.parse_args() + + # Parse inputs + path = args.path + ncfile = args.ncfile + dt = args.dt + t0 = args.initialtime + group = args.group + itime = args.itime + ftime = args.ftime + steptime = args.steptime + offsetz = args.offsetz + vtkstartind = args.vtkstartind + pptag = args.pptag + terrain = args.terrain + ncores = args.ncores + + # ------------------------------------------------------------------------------ + # --------------------------- DONE PARSING INPUTS ------------------------------ + # ------------------------------------------------------------------------------ + + if path == '.': + path = os.getcwd() + + # We assume the case path was given, but maybe it was $case/post* or $case/proc* + # Let's check for that and fix it + if os.path.basename(path) == 'post_processing' or os.path.basename(path) == 'processedData': + path = os.path.split(path)[0] + + pppath = os.path.join(path,'post_processing') + outpath = os.path.join(path,'processedData') + + # ------- PERFORM CHECKS + if not os.path.exists(path): + parser.error(f"Path {path} does not exist.") + + if not os.path.exists(pppath): + raise ValueError (f'Path {pppath} does not exist.') + + if not os.path.exists(outpath): + os.makedirs(outpath) + + if ncfile is not None: + if isinstance(ncfile,str): + if not ncfile.endswith('.nc'): + raise ValueError (f"Received single string as ncfiles, but it does not end with .nc. Received {ncfile}") + if not os.path.isfile(os.path.join(pppath,ncfile)): + raise ValueError(f"File {ncfile} does not exist.") + else: + raise ValueError(f"the ncfile should be a string. Received {ncfile}.") + + if isinstance(dt,str): + try: dt=float(dt) + except: pass + if dt == 'None': dt=None + if dt is not None and not isinstance(dt,(float,int)): + raise ValueError(f'dt should be a scalar. Received {dt}.') + + if isinstance(t0,str): + try: t0=float(t0) + except: pass + if t0 == 'None': t0=None + if t0 is not None and not isinstance(t0,(float,int)): + raise ValueError(f't0 should be a scalar. Received {t0}, type {type(t0)}.') + + if steptime < 1: + raise ValueError(f'The time step increment should be >= 1.') + + if itime < 0: + raise ValueError(f'The initial time step should be >= 0.') + + if ftime != -1: + if ftime < itime: + raise ValueError(f'The final time step should be larger than the'\ + f'initial. Received itime={itime} and ftime={ftime}.') + + if offsetz is None: + print(f'!!! WARNING: no offset in z has been given. Ensure you will have a point at hub height.') + offsetz=0 + + if isinstance(vtkstartind,str): + try: vtkstartind=int(vtkstartind) + except: pass + if vtkstartind == 'None': vtkstartind=None + if vtkstartind is not None and not isinstance(vtkstartind,int): + raise ValueError(f'vtkstartind should be either None or an integer. 
+
+    # ------------------------------------------------------------------------------
+    # ---------------------------- DONE WITH CHECKS --------------------------------
+    # ------------------------------------------------------------------------------
+
+    if ncfile:
+        print(f'Reading {pppath}/{ncfile}')
+    else:
+        print(f'Reading {pppath}/{group}*')
+
+    print(f'Starting job at {time.ctime()}')
+    multiprocessing.freeze_support()
+    main(ncfile, pppath, pptag, group, outpath, dt, t0, itime, ftime, steptime, offsetz, vtkstartind, terrain, ncores)
+    print(f'Ending job at {time.ctime()}')
+
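Usage note: a minimal sketch of how the post-processing routines added in
PATCH 6/9 can be driven. Here `caseobj` is assumed to be an existing
FFCaseCreation object describing runs already on disk, and the time steps,
core count, and channel name are illustrative only:

    from openfast_toolbox.fastfarm.postpro.ff_postpro import readTurbineOutputPar

    # Read all turbine .outb files in parallel, downsampled to 1-s resolution,
    # and cache the combined dataset as a zarr store next to the cases
    turbs = readTurbineOutputPar(caseobj, dt_openfast=0.01, dt_processing=1,
                                 saveOutput=True, output='zarr', nCores=8)

    # Mean generator power per turbine across time and seeds
    # (channel name follows the usual OpenFAST output convention)
    mean_pwr = turbs['GenPwr_[kW]'].mean(dim=['time','seed'])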