From 054f7594c8a8642d83f642bc93d8a9693813ae0d Mon Sep 17 00:00:00 2001 From: John Lindsay Date: Sun, 23 Oct 2022 10:19:09 -0400 Subject: [PATCH] prep for v2.2 --- .DS_Store | Bin 18436 -> 18436 bytes Cargo.lock | 6 +- UserManual.txt | 2 +- build.py | 105 +- readme.txt | 15 +- settings.json | 6 + whitebox-common/src/structures/array2d.rs | 4 +- whitebox-plugins/.DS_Store | Bin 6148 -> 6148 bytes whitebox-plugins/Cargo.toml | 4 + whitebox-plugins/src/.DS_Store | Bin 14340 -> 14340 bytes .../conditioned_latin_hypercube.json | 98 ++ .../src/conditioned_latin_hypercube/main.rs | 942 ++++++++++++++++++ whitebox-tools-app/Cargo.toml | 2 +- whitebox-tools-app/src/main.rs | 4 +- .../direct_decorrelation_stretch.rs | 37 +- .../highpass_bilateral_filter.rs | 446 +++++++++ .../src/tools/image_analysis/line_thin.rs | 6 +- .../src/tools/image_analysis/mod.rs | 2 + whitebox-tools-app/src/tools/mod.rs | 2 + .../src/tools/terrain_analysis/aspect.rs | 22 +- .../terrain_analysis/gaussian_curvature.rs | 14 +- .../src/tools/terrain_analysis/hillshade.rs | 339 +++++-- .../terrain_analysis/maximal_curvature.rs | 17 +- .../tools/terrain_analysis/mean_curvature.rs | 21 +- .../terrain_analysis/minimal_curvature.rs | 14 +- .../multidirectional_hillshade.rs | 517 +++++++--- .../tools/terrain_analysis/plan_curvature.rs | 17 +- .../tools/terrain_analysis/prof_curvature.rs | 17 +- .../tools/terrain_analysis/relative_aspect.rs | 257 ++++- .../src/tools/terrain_analysis/slope.rs | 17 +- .../tools/terrain_analysis/tan_curvature.rs | 14 +- .../tools/terrain_analysis/total_curvature.rs | 2 +- whitebox-vector/src/shapefile/geometry.rs | 42 +- whitebox_tools.py | 113 ++- 34 files changed, 2619 insertions(+), 485 deletions(-) create mode 100644 settings.json create mode 100644 whitebox-plugins/src/conditioned_latin_hypercube/conditioned_latin_hypercube.json create mode 100644 whitebox-plugins/src/conditioned_latin_hypercube/main.rs create mode 100644 whitebox-tools-app/src/tools/image_analysis/highpass_bilateral_filter.rs diff --git a/.DS_Store b/.DS_Store index 6381cc30dbed3dc63f57f8dacc6b08d8effffbc4..b361c336cae3c24b212deca0705892362740b0b3 100755 GIT binary patch delta 941 zcmbW0T}YEr7{||l&iTF_%~z-I=BAm=rs0aM=BzGc^nukZD`$Gshf51-Z0exM5z{RD z4)eJvDT9il?}(Q^iMojBCi;|;G75q&ioVMmZ#r#*=%Ow<7w7Q*A0Bvq&-tG%z1Y%= z-WtrmxOkVrn8NDWEaqQd+a3yrLu(mhpifiZ>FT^xW6rnOyiQ?>B{PW?h!b(eB(7yi zs$R3FpG(DMO0%{}YBcI}jCVV!%bJhvbl6&m9kfI(rVH6Y8k&$R^9+l>&cCd_f!b_F zTBh%zNo6^Dp2Jlt@|fv?%?aLD?5yD>mPxYGDET8T9nFK8N>(;^@F}tqk^SphLQQeU z;^NaJrAe*}HU^r53CqK$ONvjfU*FswX$glCW)7!V@yR|zMeGi{&z`bj_LhBNU)c{f z!bYJ-1~Q=_3l>;WfN>~;8|Cm|7JR72BGkZ-Gm)3a{}V-|>T5CmT~Lbyf6cqK*8j1IMUK%@IQ>teRIf ze}OMKl+Se0~IK}N_a6Fb1+u`EyhyR#-UaUqySbUh>*Z)6JSwnLJXbg!cOeQo;X}T z4&o37Z~`ZB3a6pr60YDXuHiavs$pNJN#o-O zGPGL-)jA2-aQ c%&4V#NAyanAx)CU@brxQs(Ipp|D%(hpI6H6`Tzg` delta 386 zcmZpfz}PZ@ae_ai`^JE7Hc2*y5{5*EB8GH^RECnW;G(>o{JeAq1_s8-pV^)=wQx+{ zXC^aQU6q#yBnHGlz`?y)kVA}Rvl`D@*2$@wrR*kVIts>yMw1_E+D{IaRGhp{C}(n! 
zmMA9=uT)5ASU6AQWIMH$o98PRF*4hKV1SyqlQoDboGD^*hpyt}KuP7vnPS|NRaroq z 1: + if "t" in sys.argv[1].lower(): + print("Cleaning old files...") + result = subprocess.run(['cargo', 'clean'], stdout=subprocess.PIPE) + if len(result.stdout) > 0: + print(result.stdout) -# result = subprocess.run(["pwd"], stdout=subprocess.PIPE) -# print("pwd: ", result.stdout) + if os.path.exists(output_dir): + rmtree(output_dir) -# result = subprocess.run(["codesign -h"], stdout=subprocess.PIPE) -# print("", result.stdout) +print("Compiling...") +result = subprocess.run(['cargo', 'build', '--release'], stdout=subprocess.PIPE) +if len(result.stdout) > 0: + print(result.stdout) -# args = [] -# args.append("./WBT/whitebox_tools") -# args.append("-run='recreate_pass_lines'") -# args.append("--inputFile='/Users/johnlindsay/Documents/data/yield/Woodrill_UTM.shp' --yieldFieldName='Yld_Mass_D' --outputFile='/Users/johnlindsay/Documents/data/yield/pass_lines.shp' --outputPointsFile='/Users/johnlindsay/Documents/data/yield/points.shp' --maxChangeInHeading=25.0") -# result = subprocess.run(args, stdout=subprocess.PIPE) -# print(result.stdout) +if not os.path.exists(output_plugin_dir): + os.makedirs(output_plugin_dir) + + +ext = '' +if platform.system() == 'Windows': + ext = '.exe' + +# Copy the whitebox executable over +exe_file = os.path.join(target_dir, 'whitebox_tools') + ext +dst = os.path.join(output_dir, 'whitebox_tools') + ext +copyfile(exe_file, dst) +os.system("chmod 755 " + dst) # grant executable permission + +# Copy the ancillary files +src = os.path.join(app_dir, 'LICENSE.txt') +dst = os.path.join(output_dir, 'LICENSE.txt') +copyfile(src, dst) + +src = os.path.join(app_dir, 'readme.txt') +dst = os.path.join(output_dir, 'readme.txt') +copyfile(src, dst) + +src = os.path.join(app_dir, 'settings.json') +dst = os.path.join(output_dir, 'settings.json') +copyfile(src, dst) + +src = os.path.join(app_dir, 'UserManual.txt') +dst = os.path.join(output_dir, 'UserManual.txt') +copyfile(src, dst) + +src = os.path.join(app_dir, 'wb_runner.py') +dst = os.path.join(output_dir, 'wb_runner.py') +copyfile(src, dst) +os.system("chmod 755 " + dst) # grant executable permission + +src = os.path.join(app_dir, 'whitebox_tools.py') +dst = os.path.join(output_dir, 'whitebox_tools.py') +copyfile(src, dst) +os.system("chmod 755 " + dst) # grant executable permission + +src = os.path.join(app_dir, 'img') +dst = os.path.join(output_dir, 'img') +copytree(src, dst) + +plugins = os.listdir(plugins_dir) +for plugin in plugins: + if ".DS" not in plugin: + print(f'Copying plugin: {plugin}') + + # Copy the json file into the plugins directory + json_file = os.path.join(plugins_dir, plugin, plugin) + '.json' + dst = os.path.join(output_plugin_dir, plugin) + '.json' + copyfile(json_file, dst) + + # Copy the executable file into the plugins directory + exe_file = os.path.join(target_dir, plugin) + ext + dst = os.path.join(output_plugin_dir, plugin) + ext + copyfile(exe_file, dst) + os.system("chmod 755 " + dst) # grant executable permission print("Done!") \ No newline at end of file diff --git a/readme.txt b/readme.txt index 5c62373a3..fd8444341 100755 --- a/readme.txt +++ b/readme.txt @@ -55,11 +55,15 @@ for more details. ****************** * Release Notes: * ****************** -Version 2.X.X (XX-XX-20XX) + +Version 2.2.0 (23-10-2022) - Added the TravellingSalesmanProblem tool for identifying short routes connecting multiple locations. - Added the HeatMap tool for performing kernel density estimation (KDE) from vector points. 
- Added the MultiplyOverlay tool. - Added the MaxUpslopeValue tool. +- Added the ConditionedLatinHypercube tool for stratified random sampling (credit Dr. Dan Newman). +- Added the HighPassBilateralFilter tool, useful for emphasizing image texture. +- Fixed a bug with the DirectDecorrelationStretch tool. - Fixed a bug in the automatic install of the Whitebox extensions that affected Windows users. - Fixed a bug with the persistence of the compress_rasters parameter. Python users were unable to turn off the compress flag previously. @@ -79,7 +83,14 @@ Version 2.X.X (XX-XX-20XX) - Fixed a bug with the ConstructVectorTIN tool that resulted in an error when no field data are used. - Modified the code for writing to the settings.json file so that rather than issuing an error when the app doesn't have write permission, it simply prints a warning and carries on. -- Fixed bugs in the Geomorphons tool (submitted by Dan Newman). +- Fixed bugs in the Geomorphons tool (submitted by Dr. Dan Newman). +- Fixed a bug with the writing of PolyLineZ vectors. +- Updated the Hillshade, MultidirectionalHillshade, and RelativeAspect tools to use the more robust + 5x5 3rd order bivariate polynomial method of Florinsky (2016) for rectangular grid DEMs, and the + 3x3 method, also described by Florinsky (2016), for DEMs in geographic coordinates. This is a large + improvement in accuracy for calculating these surface morphology parameters on geographic coordinates + compared with the 'z-conversion fudge factor' method used previously. +- Added support for Apple Silicon; you can now download WhiteboxTools binaries compiled on an M1 Mac. Version 2.1.0 (30-01-2022) - The Geomorphons tool for landform classification is now available. diff --git a/settings.json b/settings.json new file mode 100644 index 000000000..0ae41f83b --- /dev/null +++ b/settings.json @@ -0,0 +1,6 @@ +{ + "verbose_mode": true, + "working_directory": "/Users/johnlindsay/Downloads/", + "compress_rasters": true, + "max_procs": -1 +} \ No newline at end of file diff --git a/whitebox-common/src/structures/array2d.rs b/whitebox-common/src/structures/array2d.rs index eafd6ce86..d3e350b7f 100755 --- a/whitebox-common/src/structures/array2d.rs +++ b/whitebox-common/src/structures/array2d.rs @@ -1,10 +1,12 @@ ///////////////////////////////////////////// // A generic 2-dimensional array structure // ///////////////////////////////////////////// + use std::io::Error; use std::io::ErrorKind; use std::ops::{AddAssign, Index, IndexMut, SubAssign}; + /// A simple in-memory 2-D raster data structure that is not connected to a file. /// Pixel values can contain any data type or structure that implements the Copy, /// AddAssign, and SubAssign traits. 
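A note on the Array2D structure described above and the hunk that follows (which adds a PartialEq bound to its IndexMut implementation): cells are addressed with (row, column) tuples of isize. The sketch below is illustrative only; the module path in the use statement is an assumption based on this file's location, and no constructor is shown because its signature is not part of this patch.

    use whitebox_common::structures::Array2D; // module path assumed from the file's location

    // Sketch only: the element type must satisfy Copy + AddAssign + SubAssign
    // (plus PartialEq for IndexMut, per the hunk below).
    fn poke(grid: &mut Array2D<f64>) -> f64 {
        grid[(10isize, 20isize)] = 3.5;   // write via IndexMut<(isize, isize)>
        grid[(10isize, 20isize)]          // read via Index<(isize, isize)>
    }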
@@ -211,7 +213,7 @@ where impl IndexMut<(isize, isize)> for Array2D where - T: Copy + AddAssign + SubAssign, + T: Copy + AddAssign + SubAssign + PartialEq, { fn index_mut<'a>(&'a mut self, index: (isize, isize)) -> &'a mut T { let row = index.0; diff --git a/whitebox-plugins/.DS_Store b/whitebox-plugins/.DS_Store index fa7a892569dbc2fa02fdf4d5fde5737635904294..5320be32191bad85bdf0b4f8f8b24c8e2cf3d245 100755 GIT binary patch delta 26 hcmZoMXffC@jfvgRTt~s!!gTUnCO<}o&2N|_L;-0^2U`FD delta 26 hcmZoMXffC@jfvgFOh>`k&}i~pCO<~T&2N|_L;+~_2UGw6 diff --git a/whitebox-plugins/Cargo.toml b/whitebox-plugins/Cargo.toml index 4e6f87ee1..0650597da 100755 --- a/whitebox-plugins/Cargo.toml +++ b/whitebox-plugins/Cargo.toml @@ -8,6 +8,10 @@ edition = "2021" name = "conditional_evaluation" path = "src/conditional_evaluation/main.rs" +[[bin]] +name = "conditioned_latin_hypercube" +path = "src/conditioned_latin_hypercube/main.rs" + [[bin]] name = "edge_contamination" path = "src/edge_contamination/main.rs" diff --git a/whitebox-plugins/src/.DS_Store b/whitebox-plugins/src/.DS_Store index 1a401c9cf40516aa83583f95ec217f858ef1af26..78a6751085a912cd7c214f0465323eb68252ad11 100755 GIT binary patch delta 564 zcmZoEXepTB<8bV7EM@)?`6NgUL1u+>F~MD{2>Vid9z|=qQ*Nm`$FmEzQ_5xn5YF zv2(MaVgsYOG($2&K0_Wu3PUDC2@r$WsX$&lLk>eCSR@a~&S0oyC;*BV0aceWBr&8; z{w>kSX0D@PY+(vAbMjnCm3lIb%1Jj2PR`FQU_bz}OsZ!{&CPdlNy^Dj0-D58WV2HF zV#yIlAe$`XK&GXTVp=_cAS=kAjz1Yv%YuvYa`N-ifnH+-Vj*yZrBAj}k(*pD)<1c^ ks^{iDWgm7%hRwIN)-i6pB*(a!-QXL`WP24!Bn4tj03ch2SO5S3 delta 82 zcmV-Y0ImOoaD;G>PXSP|P`e)iB$NLkFq1qW3zK^%&65HmEwe5mPyv&ZCMA=17LSwv oCrPvJB}@ka0kb = env::args().collect(); + + if args[1].trim() == "run" { + match run(&args) { + Ok(_) => {} + Err(e) => panic!("{:?}", e), + } + } + + if args.len() <= 1 || args[1].trim() == "help" { + // print help + help(); + } + + if args[1].trim() == "version" { + // print version information + version(); + } +} + +fn help() { + let mut ext = ""; + if cfg!(target_os = "windows") { + ext = ".exe"; + } + + let exe_name = &format!("clhs{}", ext); + let sep: String = path::MAIN_SEPARATOR.to_string(); + let s = r#" + ConditionedLatinHypercube Help + + The ConditionedLatinHypercube tool is used to identify sample sites base on stratified random sampling of + ancillary information. + + The following commands are recognized: + help Prints help information. + run Runs the tool. + version Prints the tool version information. + + The following flags can be used with the 'run' command: + -i, --input Name of the input raster file. + -o, --output Name of the output shapefile. + --samples Number of sites to select. + --iterations Maximum number of resampling iterations. + --seed Seed the random number generator for consisent results. + --prob Probability of resampling randomly vs. the worst strata [0,1]. + --threshhold Objective function threshold below which reamping is stopped. + --temp Initial annealing temperature between [0,1]. + --temp_decay Annealing temperature decay rate between [0,1]. + --cycle Annealing cycle length in iterations. + --average Weight the continuous objective funtion by the 1/N contributing strata. + + Input/output file names can be fully qualified, or can rely on the working directory contained in + the WhiteboxTools settings.json file. 
+ + Example Usage: + >> .*EXE_NAME run -i=Raster1.tif;Raster2.tif --output=sites.shp --samples=500 + "# + .replace("*", &sep) + .replace("EXE_NAME", exe_name); + println!("{}", s); +} + +fn version() { + const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); + println!( + "cLHS v{} by Dan Newman (c) 2022.", + VERSION.unwrap_or("Unknown version") + ); +} + +fn get_tool_name() -> String { + String::from("ConditionedLatinHypercube") // This should be camel case and is a reference to the tool name. +} + +fn run(args: &Vec) -> Result<(), std::io::Error> { + let tool_name = get_tool_name(); + + let sep: String = path::MAIN_SEPARATOR.to_string(); + + // Read in the environment variables and get the necessary values + let configurations = whitebox_common::configs::get_configs()?; + let mut working_directory = configurations.working_directory.clone(); + if !working_directory.is_empty() && !working_directory.ends_with(&sep) { + working_directory += &sep; + } + let verbose = configurations.verbose_mode; + + let mut input_files_str = String::new(); + let mut output_file = String::new(); + let mut num_samples = 500isize; + let mut max_iter = 25000isize; + let mut temp = 1f64; + let mut temp_decay = 0.05f64; + let mut anneal_cycle = 10isize; + let mut rs_prob = 0.5f64; + let mut o_thresh = f64::MIN; + let mut rng_seed = -1isize; + let mut norm_o1 = false; + let mut weights = [1f64, 1f64, 1f64]; // add weights agruments later + + if args.len() == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "Tool run with no parameters.", + )); + } + for i in 0..args.len() { + let mut arg = args[i].replace("\"", ""); + arg = arg.replace("\'", ""); + let cmd = arg.split("="); // in case an equals sign was used + let vec = cmd.collect::>(); + let mut keyval = false; + if vec.len() > 1 { + keyval = true; + } + + let flag_val = vec[0].to_lowercase().replace("--", "-"); + if flag_val == "-i" || flag_val == "-inputs" { + input_files_str = if keyval { + vec[1].to_string() + } else { + args[i + 1].to_string() + }; + } else if flag_val == "-o" || flag_val == "-output" { + if keyval { + output_file = vec[1].to_string(); + } else { + output_file = args[i + 1].to_string(); + } + } else if flag_val == "-samples" { + if keyval { + num_samples = vec[1].to_string().parse::().unwrap(); + } else { + num_samples = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-iterations" { + if keyval { + max_iter = vec[1].to_string().parse::().unwrap(); + } else { + max_iter = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-threshold" { + if keyval { + o_thresh = vec[1].to_string().parse::().unwrap(); + } else { + o_thresh = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-temp" { + if keyval { + temp = vec[1].to_string().parse::().unwrap(); + } else { + temp = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-temp_decay" { + if keyval { + temp_decay = vec[1].to_string().parse::().unwrap(); + } else { + temp_decay = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-cycle" { + if keyval { + anneal_cycle = vec[1].to_string().parse::().unwrap(); + } else { + anneal_cycle = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-prob" { + if keyval { + rs_prob = vec[1].to_string().parse::().unwrap(); + } else { + rs_prob = args[i + 1].to_string().parse::().unwrap(); + } + } else if flag_val == "-seed" { + if keyval { + rng_seed = vec[1].to_string().parse::().unwrap().abs(); + } else { + 
rng_seed = args[i + 1].to_string().parse::().unwrap().abs(); + } + } else if flag_val == "-average" { + if vec.len() == 1 || !vec[1].to_string().to_lowercase().contains("false") { + norm_o1 = true; + } + } + } + + if configurations.verbose_mode { + let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28); + // 28 = length of the 'Powered by' by statement. + println!("{}", "*".repeat(welcome_len)); + println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len())); + println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28)); + println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23)); + println!("{}", "*".repeat(welcome_len)); + } + + let mut progress: usize; + let mut old_progress: usize = 1; + + let num_samples = num_samples.abs() as usize; + if num_samples == 0 { + return Err(Error::new(ErrorKind::InvalidInput, + "Number of samples must be > 0.", )); + } + let anneal_cycle = anneal_cycle.abs() as usize; + if anneal_cycle == 0 { + return Err(Error::new(ErrorKind::InvalidInput, + "Anneal cycle length must be > 0.", )); + } + let max_iter = max_iter.abs() as usize; + if max_iter == 0 { + return Err(Error::new(ErrorKind::InvalidInput, + "Max iterations must be > 0.", )); + } + if temp < 0f64 || temp > 1f64 { + return Err(Error::new(ErrorKind::InvalidInput, + "Initial temperature must be between [0,1].", )); + } + if temp_decay < 0f64 || temp_decay > 1f64 { + return Err(Error::new(ErrorKind::InvalidInput, + "Temperature decay rate must be between [0,1].", )); + } + let temp_decay = 1f64 - temp_decay; + if rs_prob < 0f64 || rs_prob > 1f64 { + return Err(Error::new(ErrorKind::InvalidInput, + "Resample probability must be between [0,1].", )); + } + + let mut cmd = input_files_str.split(";"); + let mut input_files = cmd.collect::>(); + if input_files.len() == 1 { + cmd = input_files_str.split(","); + input_files = cmd.collect::>(); + } + + let wd = if working_directory.is_empty() { + // set the working directory to that of the first input file. 
+ let p = path::Path::new(input_files[0].trim()); + // let wd = p.parent().unwrap().to_str().unwrap().to_owned(); + format!( + "{}{}", + p.parent().unwrap().to_str().unwrap().to_owned(), + sep + ) + } else { + working_directory.clone().to_owned() + }; + + if !output_file.contains(&sep) && !output_file.contains("/") { + output_file = format!("{}{}", wd, output_file); + } + + if !output_file.ends_with(".shp") { + output_file.push_str(".shp"); + } + + let num_files = input_files.len(); + + let seed: u64 = if rng_seed < 0 { // no seed specified, random seed + thread_rng().gen_range(u64::MIN, u64::MAX) + } else { rng_seed as u64 }; + let mut rng : StdRng = StdRng::seed_from_u64(seed); + + //let num_samp_input = num_samples; + if num_samples < (25*num_files) { + println!("Warning: Too few samples to calculate sample correlation matrices."); + println!("Increase number of samples to at least 25 per input for better results"); + //println!("Setting the sample size to {:?} (i.e., 25 * the number of inputs).", 25*num_files); + //println!("{:?} random samples will be drawn from the larger sample pool.", num_samp_input); + //num_samples = 25*num_files; + } + + let mut num_procs = num_cpus::get() as isize; + let configs = whitebox_common::configs::get_configs()?; + let max_procs = configs.max_procs; + if max_procs > 0 && max_procs < num_procs { + num_procs = max_procs; + } + + let start = Instant::now(); + + let mut strata = vec![0f64; num_samples]; + for i in 0..num_samples { + strata[i] = (i+1) as f64 / num_samples as f64; + } + + let mut totals = vec![0f64; num_files]; + let mut averages = vec![0f64; num_files]; + let mut num_cells = vec![0f64; num_files]; + let mut k_is_cont = vec![true; num_files]; // true for continuous, false for catagorical + + let num_bins = 25000isize; + let mut quantiles = vec![vec![]; num_files]; + + if verbose { + println!("Estimating strata and collecting initial samples..."); + } + // Precalculate stats for later and collect samples + let k_pool: Vec = (0..num_files).map(|_| rng.gen()).collect(); + let k_pool_sum = k_pool.iter().fold(0f64, |a,v| a+v); + let k_pool: Vec = k_pool.into_iter() + .map(|k| ((k / k_pool_sum) * max_iter as f64) + .ceil() as usize).collect(); + let reservoir_size = (num_files * num_samples) + + k_pool.iter().fold(0, |a,v| a+v); + let mut reservoir: Vec = Vec::with_capacity(reservoir_size); + let mut projection: String = String::new(); + // currently, each input is treated independently + // if all inputs are to have the same structure (i.e., row, col, x, y) + // then the HashSet can be used to store all unique Nodata indices for all inputs + //let mut nodata_indices = HashSet::new(); + let (mut rows, mut columns): (isize, isize); + + for k in 0..num_files { + let input = Raster::new(&input_files[k].trim(), "r")?; + let nodata = input.configs.nodata; + let minval = input.configs.minimum; + let maxval = input.configs.maximum; + //if k == 0 { + rows = input.configs.rows as isize; + columns = input.configs.columns as isize; + //} else if input.configs.rows as isize != rows + // || input.configs.columns != columns { + // return Err(Error::new(ErrorKind::InvalidInput, + // "All input images must share the same dimensions (rows and columns) and spatial extent.")); + //} + let valrange = (maxval - minval).ceil(); + let binsize = valrange / num_bins as f64; + let is_cont = input.configs.data_type.is_float(); + if input.configs.coordinate_ref_system_wkt.len() > projection.len(){ + projection = input.configs.coordinate_ref_system_wkt.clone(); + } + 
k_is_cont[k] = is_cont; + + // NOTE: parallelizing this step tends to be slower overall, and requires more memory + // serial version commented out below for now + let input = Arc::new(input); + let (tx, rx) = mpsc::channel(); + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z: f64; + let mut bin: isize; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut histo = vec![0f64; num_bins as usize]; + let mut data = vec![nodata; columns as usize]; + let (mut tot, mut count) = (0f64, 0f64); + for col in 0..columns { + z = input.get_value(row, col); + if z != nodata { + tot += z; + count += 1f64; + data[col as usize] = z; + if is_cont { + // is continuous data + bin = ((z - minval) / binsize).floor() as isize; + if bin >= num_bins { + bin = num_bins - 1; + } + histo[bin as usize] += 1f64; + } + } + } + tx.send((row, tot, count, histo, data)).unwrap(); + } + }); + } + + let mut histogram = vec![0f64; num_bins as usize]; + let mut class_histo = HashMap::new(); + // use valid_indices to hold index if not nodata, else nodata + let mut valid_indices = vec![nodata; (rows*columns) as usize]; + let (mut idx, mut val): (usize, f64); + for _ in 0..rows { + let (row, tot, count, histo, data) = rx.recv() + .expect("Error receiving data from thread."); + totals[k] += tot; + num_cells[k] += count; + for c in 0..columns { + val = data[c as usize]; + if val != nodata { + idx = rc2i(row, c, columns); + valid_indices[idx] = idx as f64; + // because data holds cell values, it can also be used to hash classes + if !is_cont { + // is catagorical data, where val is the class key + let counter = class_histo.entry(val as isize) + .or_insert(0); + *counter += 1; // increment hashmap value + } + } + } + if is_cont { + for b in 0..num_bins as usize { + histogram[b] += histo[b] + } + } + } + averages[k] = totals[k] / num_cells[k]; + // remove all the nodata "indices" so that only valid indices remain + let mut valid_indices: Vec = + valid_indices.into_iter() + .filter(|i| *i != nodata) + .map(|i| i as usize).collect(); + +/* + let mut z: f64; + let mut bin: isize; + let (mut total, mut count) = (0f64, 0f64); + let mut histogram = vec![0f64; num_bins as usize]; + let mut class_histo = HashMap::new(); + let mut valid_indices: Vec = Vec::with_capacity((rows * columns) as usize); + for row in 0..rows { // don't thread, it randomizes the order when pushing into valid_indices + for col in 0..columns { + z = input.get_value(row, col); + if z != nodata { + total += z; + count += 1f64; + valid_indices.push(rc2i(row, col, columns)); + if is_cont { + // is continuous data + bin = ((z - minval) / binsize).floor() as isize; + if bin >= num_bins { + bin = num_bins - 1; + } + histogram[bin as usize] += 1f64; + } else { + // is catagorical data, where z is the class key + let counter = class_histo.entry(z as isize) + .or_insert(0); + *counter += 1; // increment hashmap value + } + } //else { + //nodata_indices.insert(rc2i(row, col, columns)); + //} + } + } + totals[k] += total; + num_cells[k] += count; + averages[k] = total / count; +*/ + // if continuous, use histogram to estimate quantiles + if is_cont { + let mut cdf = vec![0f64; num_bins as usize]; + cdf[0] = histogram[0]; + for i in 1..num_bins as usize { + cdf[i] = cdf[i - 1] + histogram[i]; + } + for i in 0..num_bins as usize { + cdf[i] = cdf[i] / num_cells[k] as f64; + } + + // estimate quantile from cdf. faster than sorting? 
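+            // For each of the num_samples strata, walk the CDF and keep the last histogram bin whose
+            // cumulative proportion does not exceed the stratum's upper bound (s + 1) / num_samples;
+            // the lower edge of that bin (minval + bin * binsize) is used as the estimated quantile,
+            // i.e. the value separating stratum s from stratum s + 1.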
+ quantiles[k] = vec![0f64; num_samples]; + let mut bin: usize; + for s in 0..num_samples { + bin = 0usize; + for b in 0..num_bins as usize { + if cdf[b] <= strata[s] { + bin = b; // exceeded stata bound. best estimate is previous bin. + } else { + break; + } + } + quantiles[k][s] = minval + (bin as f64 * binsize) + } + } else { + // catagorical data + // use quantiles[k] to hold unique classes for now + quantiles[k] = class_histo.clone().into_keys() + .map(|c| c as f64).collect::>(); + } + + // collect samples data from input k + let (mut zs, mut qs): (f64, usize); + let (mut rs, mut cs): (isize, isize); + valid_indices.shuffle(&mut rng); + let mut repeater = 0usize; + for _ in 0..(num_samples + k_pool[k]) { + // dont need to sample all, only enough for initial samples and all iterations + (rs, cs) = i2rc(valid_indices[repeater], columns); + zs = input.get_value(rs,cs); + if is_cont { // get strata index + qs = quantiles[k].iter().position(|q| zs <= *q).unwrap(); + //assert_eq!(zs <= quantiles[k][qs], true); + } else { // get class index + qs = quantiles[k].iter().position(|c| zs == *c).unwrap(); + //assert_eq!(zs, quantiles[k][qs]); + } + + reservoir.push( + Sample { + k:k, + x:input.get_x_from_column(cs), + y:input.get_y_from_row(rs), + q:qs, v:zs + } + ); + + repeater += 1; + if repeater == valid_indices.len() { // next iteration will panic + // not enough samples, draw from a new shuffled deck + valid_indices = valid_indices.clone(); + valid_indices.shuffle(&mut rng); + repeater = 0; + } + } + + if !is_cont { + // convert class values into histogram counts + // divide each count to get proportion Kappa of class j for input k + quantiles[k] = quantiles[k].iter() + .map(|j| *class_histo.get(&(*j as isize)).unwrap() as f64) + .map(|c| c / num_cells[k]).collect::>(); + //assert_eq!(quantiles[k].iter().fold(0f64, |a,v| a+v), 1f64); + } + + if verbose { + progress = (100.0_f64 * k as f64 / (num_files - 1) as f64) as usize; + if progress != old_progress { + println!( + "Extracting data on file {} of {}: {}%", + (k + 1), + num_files, + progress + ); + old_progress = progress; + } + } + } + // ramdomize reservoir sample order + reservoir.shuffle(&mut rng); + + if verbose { println!("Calculating the correlation matrix..."); } + let averages = Arc::new(averages); + let mut cormat = vec![vec![1f64; num_files]; num_files]; + + // Calculate corrlation matrix + let (tx, rx) = mpsc::channel(); + for a in 0..num_files { + let mut data1 = get_flattened_valid_data(Raster::new(&input_files[a], "r")?); + data1.shuffle(&mut rng); + let len1 = data1.len(); + let data1 = Arc::new(data1); + for b in (a + 1)..num_files { + let mut data2 = get_flattened_valid_data(Raster::new(&input_files[b], "r")?); + data2.shuffle(&mut rng); + let len2 = data2.len(); + let data2 = Arc::new(data2); + for tid in 0..num_procs { + let data1 = data1.clone(); + let data2 = data2.clone(); + let averages = averages.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z1: f64; + let mut z2: f64; + let mut d1_totaldev = 0f64; + let mut d2_totaldev = 0f64; + let mut sum_productdev = 0f64; + for i in (0..len1.min(len2) as isize).filter(|i| i % num_procs == tid) { + z1 = data1[i as usize]; + z2 = data2[i as usize]; + d1_totaldev += + (z1 - averages[a]) * (z1 - averages[a]); + d2_totaldev += + (z2 - averages[b]) * (z2 - averages[b]); + sum_productdev += + (z1 - averages[a]) * (z2 - averages[b]); + } + tx.send((d1_totaldev, d2_totaldev, sum_productdev)).unwrap(); + }); + } + let mut d1_total_deviation = 0f64; + let mut 
d2_total_deviation = 0f64; + let mut t_product_deviations = 0f64; + for _ in 0..num_procs { + let (val1, val2, val3) = + rx.recv().expect("Error receiving data from thread."); + d1_total_deviation += val1; + d2_total_deviation += val2; + t_product_deviations += val3; + } + cormat[a][b] = t_product_deviations + / (d1_total_deviation * d2_total_deviation).sqrt(); + cormat[b][a] = cormat[a][b]; + } + if verbose { + progress = (100.0_f64 * a as f64 / (num_files - 1) as f64) as usize; + if progress != old_progress { + println!( + "Calculating the correlation matrix ({} of {}): {}%", + (a + 1), + num_files, + progress + ); + old_progress = progress; + } + } + } + + // perform analysis + if verbose { println!("Staring resampling operation..."); } + + if norm_o1 { + // use the weighted average for obj_fn 1 so that it doesn't dominate the others. + weights[0] = 1f64 / (0..quantiles.len()) + .filter(|k| k_is_cont[*k]) + .fold(0, |a, v| a + quantiles[v].len()) as f64; + } + + let mut samples = reservoir.split_off(reservoir.len() - num_samples); + let mut old_samples = samples.clone(); + let mut worst_indices: Vec; + let (mut rand1, mut rand2, mut ridx): (f64, f64, usize); + let (mut o1, mut o2, mut o3): (f64, f64, f64); + let (mut obj, mut obj_old) = (f64::MAX, f64::MAX); + + for it in 0..max_iter { + // populate counts matrix from samples + let counts_matrix = get_eta(&samples, &quantiles); + // corrlation matrix of samples + let sample_cormat = sample_correlation_matrix(&samples, num_files); + // objective functions + o1 = obj_fn_continuous(&counts_matrix, &k_is_cont); + o2 = obj_fn_catagorical(&counts_matrix, &quantiles, &k_is_cont, samples.len()); + o3 = obj_fn_correlation(&cormat, &sample_cormat); + // calculate weighted objective function + obj = (weights[0]*o1) + (weights[1]*o2) + (weights[2]*o3); + if obj <= o_thresh { // exit with current samples if obj <= o_thresh + if verbose { + println!("Objective function has fallen below the threshold."); + } + break + } + + // determine worst strata + let (mut worst_k, mut worst_q, mut max) = (0usize, 0usize, 0usize); + for k in 0..num_files { + for q in 0..counts_matrix[k].len() { + if counts_matrix[k][q] >= max { + worst_k = k; + worst_q = q; + max = counts_matrix[k][q]; + } + } + } + + // generate uniform random numbers + rand1 = rng.gen(); + rand2 = rng.gen(); + + // anneal + let o_delta = obj - obj_old; + let metro: f64 = (-o_delta / temp).exp(); + if it % anneal_cycle == anneal_cycle-1 { temp *= temp_decay; } + + if o_delta < 0f64 || rand1 < metro { + // keep changes + old_samples = samples.clone(); + } else { + // discard changes, return samples to previous iteration + samples = old_samples.clone(); + } + + obj_old = obj; + + // draw new samples + if rand2 < rs_prob || max <= 1 { // random sample + // if max <= 1, there is no worst strata, do a random sample instead + ridx = rng.gen_range(0, samples.len()); + samples[ridx] = reservoir.pop().unwrap(); + } else { //swap random worst strata sample with fresh random sample + //pick random worst sample by index + worst_indices = (0..samples.len()) + .filter(|s| samples[*s].k == worst_k + && samples[*s].q == worst_q).collect(); + ridx = rng.gen_range(0, worst_indices.len()); + samples[ridx] = reservoir.pop().unwrap(); + } + + if verbose { + progress = (100.0_f64 * it as f64 / (max_iter - 1) as f64) as usize; + if progress != old_progress { + println!("Resampling progress: {}%.", progress); + old_progress = progress; + } + } + } + + // collect site information on final samples + if verbose { + 
println!("Final objective function value: {:.5}", obj); + } + + //if num_samp_input < num_samples { + // println!("Returning samples according specified sample number."); + // samples.shuffle(&mut rng); + // samples = samples.split_off(samples.len() - num_samp); + //} + + let elapsed_time = get_formatted_elapsed_time(start); + + if verbose { + println!("Saving data...") + }; + + let filenames: Vec<&str> = input_files.iter().map(|p| path::Path::new(p.trim()) + .file_name().unwrap().to_str().unwrap()).collect(); + let mut max_len = 0usize; + for f in 0..filenames.len() { + let l = filenames[f].len(); + max_len = max_len.max(l); + } + if max_len > 255 { + max_len = 255 + } + + // Output shapefile file + let mut output = Shapefile::new(&output_file, ShapeType::Point)?; + + // set the projection information + output.projection = projection; + + // add the attributes + output + .attributes + .add_field(&AttributeField::new("FID", FieldDataType::Int, 12u8, 0u8)); + output.attributes.add_field(&AttributeField::new( + "VALUE", + FieldDataType::Real, + 12u8, + 4u8, + )); + output.attributes.add_field(&AttributeField::new( + "SOURCE", + FieldDataType::Text, + max_len as u8, + 4u8, + )); + + for s in 0..samples.len() { + output.add_point_record(samples[s].x, samples[s].y); + output + .attributes + .add_record(vec![FieldData::Int((s+1) as i32), + FieldData::Real(samples[s].v), + FieldData::Text(filenames[samples[s].k].to_string())], false); + } + + let _ = match output.write() { + Ok(_) => { + if verbose { + println!("Output file written") + } + } + Err(e) => return Err(e), + }; + + if verbose { + println!( + "{}", + &format!("Elapsed Time (excluding I/O): {}", elapsed_time) + ); + } + + Ok(()) +} + +#[derive(Clone, Copy)] +struct Sample { + pub k: usize, + pub q: usize, + pub x: f64, + pub y:f64, + pub v: f64, +} + +fn sample_correlation_matrix(samples: &Vec, size: usize) -> Vec> { + let mut output = vec![vec![1f64; size]; size]; + for a in 0..size { + let a_samples: Vec<&Sample> = samples.iter().filter(|s| s.k == a).collect(); + if a_samples.len() == 0 { + for b in (a+1)..size { + output[a][b] = 0f64; + output[b][a] = 0f64; + } + continue + } + let a_mean = a_samples.iter().fold(0f64, |acc, s| acc + s.v) + / a_samples.len() as f64; + let a_tdev = a_samples.iter() + .fold(0f64, |acc, s| acc + ((s.v - a_mean) * (s.v - a_mean))); + for b in (a+1)..size { + let b_samples: Vec<&Sample> = samples.iter().filter(|s| s.k == b).collect(); + if b_samples.len() == 0 { + output[a][b] = 0f64; + output[b][a] = 0f64; + continue; + } + let b_mean = b_samples.iter().fold(0f64, |acc, s| acc + s.v) + / b_samples.len() as f64; + let b_tdev = b_samples.iter() + .fold(0f64, |acc, s| acc + ((s.v - b_mean) * (s.v - b_mean))); + let mut prod_tdev = 0f64; + for i in 0..min(a_samples.len(), b_samples.len()) { + prod_tdev += (a_samples[i].v - a_mean) * (b_samples[i].v - b_mean) + } + output[a][b] = prod_tdev / (a_tdev * b_tdev).sqrt(); + output[b][a] = output[a][b]; + } + } + output +} + +fn get_eta(samples: &Vec, quantiles: &Vec>) -> Vec> { + let mut counts_matrix: Vec> = quantiles.iter() + .map(|q| vec![0usize; q.len()]).collect(); // set a copy of quantiles to 0; + for s in 0..samples.len() { + counts_matrix[samples[s].k][samples[s].q] += 1; + } + counts_matrix +} + +fn obj_fn_continuous(counts: &Vec>, is_cont: &Vec) -> f64 { + let mut o1 = 0f64; + for k in (0..counts.len()).filter(|k| is_cont[*k]) { + for j in 0..counts[k].len() { + o1 += (counts[k][j] as isize - 1).abs() as f64 + } + } + o1 +} + +fn 
obj_fn_catagorical(counts: &Vec>, + kappa: &Vec>, + is_cont: &Vec, + n: usize) -> f64 +{ + let mut o2 = 0f64; + for k in (0..counts.len()).filter(|k| !is_cont[*k]) { + for j in 0..counts[k].len() { + o2 += ( (counts[k][j] as f64 / n as f64) - kappa[k][j] ).abs() + } + } + o2 +} + +fn obj_fn_correlation(c: &Vec>, t: &Vec>) -> f64 { + let mut o3 = 0f64; + for i in 0..c.len() { + for j in 0..c.len() { + o3 += (c[i][j] - t[i][j]).abs() + } + } + o3 +} + +fn rc2i(r:isize, c:isize, nc:isize) -> usize { + (r * nc + c) as usize +} + +fn i2rc(i:usize, nc:isize) -> (isize, isize) { + let r = i as isize / nc; + (r, i as isize - (r * nc)) +} + +fn get_flattened_valid_data(data:Raster) -> Vec { + // consumes Raster + //self.data.clone().into_iter() + // .filter(|x| *x != self.configs.nodata).collect::>() + let mut output: Vec = Vec::with_capacity( + data.configs.rows * data.configs.columns); + let mut z: f64; + for row in 0..data.configs.rows as isize { + for col in 0..data.configs.columns as isize { + z = data.get_value(row, col); + if z != data.configs.nodata { + output.push(z); + } + } + } + output.shrink_to(output.len()); + output +} diff --git a/whitebox-tools-app/Cargo.toml b/whitebox-tools-app/Cargo.toml index be2bf8828..7794fd334 100755 --- a/whitebox-tools-app/Cargo.toml +++ b/whitebox-tools-app/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "whitebox_tools" -version = "2.1.0" +version = "2.2.0" authors = ["John Lindsay "] edition = "2021" diff --git a/whitebox-tools-app/src/main.rs b/whitebox-tools-app/src/main.rs index 901dbff22..c20460acb 100755 --- a/whitebox-tools-app/src/main.rs +++ b/whitebox-tools-app/src/main.rs @@ -431,7 +431,7 @@ Example Usage: fn license() { let license_text = "WhiteboxTools License -Copyright 2017-2021 John Lindsay +Copyright 2017-2022 John Lindsay Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, @@ -453,7 +453,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."; fn version() { const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); println!( - "WhiteboxTools v{} by Dr. John B. Lindsay (c) 2017-2022 + "WhiteboxTools v{} by Dr. John B. 
Lindsay (c) 2017-2023 WhiteboxTools is an advanced geospatial data analysis platform developed at the University of Guelph's Geomorphometry and Hydrogeomatics Research diff --git a/whitebox-tools-app/src/tools/image_analysis/direct_decorrelation_stretch.rs b/whitebox-tools-app/src/tools/image_analysis/direct_decorrelation_stretch.rs index c0aefa4de..2c9fc4920 100755 --- a/whitebox-tools-app/src/tools/image_analysis/direct_decorrelation_stretch.rs +++ b/whitebox-tools-app/src/tools/image_analysis/direct_decorrelation_stretch.rs @@ -264,7 +264,7 @@ impl WhiteboxTool for DirectDecorrelationStretch { let rows = input.configs.rows as isize; let columns = input.configs.columns as isize; let nodata = input.configs.nodata; - let rgb_nodata = 0f64; + // let rgb_nodata = 0f64; let mut num_procs = num_cpus::get() as isize; let configs = whitebox_common::configs::get_configs()?; @@ -296,13 +296,7 @@ impl WhiteboxTool for DirectDecorrelationStretch { green = (z as u32 >> 8) & 0xFF; blue = (z as u32 >> 16) & 0xFF; - min_val = red; - if green < min_val { - min_val = green; - } - if blue < min_val { - min_val = blue; - } + min_val = red.min(green).min(blue); r_out = red as f64 - achromatic_factor * min_val as f64; g_out = green as f64 - achromatic_factor * min_val as f64; @@ -473,39 +467,24 @@ impl WhiteboxTool for DirectDecorrelationStretch { let mut z: f64; let (mut red, mut green, mut blue, mut a): (u32, u32, u32, u32); for row in (0..rows).filter(|row_val| row_val % num_procs == tid) { - let mut data = vec![rgb_nodata; columns as usize]; + let mut data = vec![nodata; columns as usize]; for col in 0..columns { z = input[(row, col)]; if z != nodata { red = red_band[(row, col)] as u32; - if red < stretch_min as u32 { - red = stretch_min as u32; - } - if red > stretch_max as u32 { - red = stretch_max as u32; - } + red = red.clamp(stretch_min as u32, stretch_max as u32); green = green_band[(row, col)] as u32; - if green < stretch_min as u32 { - green = stretch_min as u32; - } - if green > stretch_max as u32 { - green = stretch_max as u32; - } + green = green.clamp(stretch_min as u32, stretch_max as u32); blue = blue_band[(row, col)] as u32; - if blue < stretch_min as u32 { - blue = stretch_min as u32; - } - if blue > stretch_max as u32 { - blue = stretch_max as u32; - } + blue = blue.clamp(stretch_min as u32, stretch_max as u32); red = (((red as f64 - stretch_min) / stretch_range) * 255f64) as u32; green = (((green as f64 - stretch_min) / stretch_range) * 255f64) as u32; blue = (((blue as f64 - stretch_min) / stretch_range) * 255f64) as u32; - a = (z as u32 >> 24) & 0xFF; + a = 255; // (z as u32 >> 24) & 0xFF; data[col as usize] = ((a << 24) | (blue << 16) | (green << 8) | red) as f64; @@ -517,7 +496,7 @@ impl WhiteboxTool for DirectDecorrelationStretch { } let mut output = Raster::initialize_using_file(&output_file, &input); - output.configs.nodata = rgb_nodata; + output.configs.nodata = nodata; output.configs.photometric_interp = PhotometricInterpretation::RGB; output.configs.data_type = DataType::RGBA32; for row in 0..rows { diff --git a/whitebox-tools-app/src/tools/image_analysis/highpass_bilateral_filter.rs b/whitebox-tools-app/src/tools/image_analysis/highpass_bilateral_filter.rs new file mode 100644 index 000000000..f0a40b71e --- /dev/null +++ b/whitebox-tools-app/src/tools/image_analysis/highpass_bilateral_filter.rs @@ -0,0 +1,446 @@ +/* +This tool is part of the WhiteboxTools geospatial analysis library. +Authors: Dr. 
John Lindsay
+Created: 27/06/2017
+Last Modified: 30/01/2020
+License: MIT
+*/
+
+use whitebox_raster::*;
+use crate::tools::*;
+use num_cpus;
+use std::env;
+use std::f64;
+use std::f64::consts::PI;
+use std::io::{Error, ErrorKind};
+use std::path;
+use std::sync::mpsc;
+use std::sync::Arc;
+use std::thread;
+
+/// This tool can be used to perform a high-pass bilateral filter. A high-pass filter is one which emphasizes short-scale
+/// variation within an image, usually by differencing the input image value from the result of a low-pass, smoothing filter.
+/// In this case, the low-pass filter is an edge-preserving bilateral filter (`BilateralFilter`). High-pass filters are
+/// often dominated by edges (transitions from high values to low values or vice versa) within an image. Because the
+/// bilateral filter is an edge-preserving filter, the high-pass bilateral filter output is not dominated by edges, instead
+/// emphasizing local-scale image texture patterns. This filter is excellent for mapping image textures.
+///
+/// The size of the filter is determined by setting the standard deviation distance parameter (`--sigma_dist`); the
+/// larger the standard deviation, the larger the resulting filter kernel. The standard deviation can be any number in
+/// the range 0.5-20 and is specified in units of pixels. The standard deviation intensity parameter (`--sigma_int`),
+/// specified in the same units as the image values, determines the intensity domain contribution to kernel weightings.
+/// If the input image is an RGB composite, the intensity value is filtered, and the intensity parameter should
+/// lie in the range 0 < parameter < 1, with typical values ranging from 0.05 to 0.25. If the input image is not an RGB colour
+/// composite, appropriate values of this parameter will depend on the range of input values and will likely be
+/// considerably higher.
+///
+/// # References
+/// Tomasi, C., & Manduchi, R. (1998, January). Bilateral filtering for gray and color images. In Sixth International Conference on Computer Vision (pp. 839-846). IEEE.
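+///
+/// In outline (an informal sketch of what the code below computes; the constant Gaussian normalization
+/// factors are omitted because they cancel in the weighted average), for a pixel p with neighbours q
+/// within the kernel:
+///
+/// w(p, q)   = exp(-d(p, q)^2 / (2 * sigma_dist^2)) * exp(-(I(q) - I(p))^2 / (2 * sigma_int^2))
+/// BF(p)     = sum_q[ w(p, q) * I(q) ] / sum_q[ w(p, q) ]
+/// output(p) = I(p) - BF(p)
+///
+/// where d(p, q) is the distance between the pixels (in grid cells) and I is the input value (or the
+/// RGB-derived intensity for colour composites).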
+/// +/// # See Also +/// `BilateralFilter` +pub struct HighPassBilateralFilter { + name: String, + description: String, + toolbox: String, + parameters: Vec, + example_usage: String, +} + +impl HighPassBilateralFilter { + pub fn new() -> HighPassBilateralFilter { + // public constructor + let name = "HighPassBilateralFilter".to_string(); + let toolbox = "Image Processing Tools/Filters".to_string(); + let description = "Performs a high-pass bilateral filter, by differencing an input image by the bilateral filter by Tomasi and Manduchi (1998).".to_string(); + + let mut parameters = vec![]; + parameters.push(ToolParameter { + name: "Input File".to_owned(), + flags: vec!["-i".to_owned(), "--input".to_owned()], + description: "Input raster file.".to_owned(), + parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster), + default_value: None, + optional: false, + }); + + parameters.push(ToolParameter { + name: "Output File".to_owned(), + flags: vec!["-o".to_owned(), "--output".to_owned()], + description: "Output raster file.".to_owned(), + parameter_type: ParameterType::NewFile(ParameterFileType::Raster), + default_value: None, + optional: false, + }); + + parameters.push(ToolParameter { + name: "Distance Standard Deviation (pixels)".to_owned(), + flags: vec!["--sigma_dist".to_owned()], + description: "Standard deviation in distance in pixels.".to_owned(), + parameter_type: ParameterType::Float, + default_value: Some("0.75".to_owned()), + optional: true, + }); + + parameters.push(ToolParameter { + name: "Intensity Standard Deviation (intensity units)".to_owned(), + flags: vec!["--sigma_int".to_owned()], + description: "Standard deviation in intensity in pixels.".to_owned(), + parameter_type: ParameterType::Float, + default_value: Some("1.0".to_owned()), + optional: true, + }); + + let sep: String = path::MAIN_SEPARATOR.to_string(); + let e = format!("{}", env::current_exe().unwrap().display()); + let mut parent = env::current_exe().unwrap(); + parent.pop(); + let p = format!("{}", parent.display()); + let mut short_exe = e + .replace(&p, "") + .replace(".exe", "") + .replace(".", "") + .replace(&sep, ""); + if e.contains(".exe") { + short_exe += ".exe"; + } + let usage = format!(">>.*{} -r={} -v --wd=\"*path*to*data*\" -i=image.tif -o=output.tif --sigma_dist=2.5 --sigma_int=4.0", short_exe, name).replace("*", &sep); + + HighPassBilateralFilter { + name: name, + description: description, + toolbox: toolbox, + parameters: parameters, + example_usage: usage, + } + } +} + +impl WhiteboxTool for HighPassBilateralFilter { + fn get_source_file(&self) -> String { + String::from(file!()) + } + + fn get_tool_name(&self) -> String { + self.name.clone() + } + + fn get_tool_description(&self) -> String { + self.description.clone() + } + + fn get_tool_parameters(&self) -> String { + match serde_json::to_string(&self.parameters) { + Ok(json_str) => return format!("{{\"parameters\":{}}}", json_str), + Err(err) => return format!("{:?}", err), + } + } + + fn get_example_usage(&self) -> String { + self.example_usage.clone() + } + + fn get_toolbox(&self) -> String { + self.toolbox.clone() + } + + fn run<'a>( + &self, + args: Vec, + working_directory: &'a str, + verbose: bool, + ) -> Result<(), Error> { + let mut input_file = String::new(); + let mut output_file = String::new(); + let mut filter_size = 0usize; + let mut sigma_dist = 0.75; + let mut sigma_int = 1.0; + if args.len() == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "Tool run with no parameters.", + )); + } + for i in 
0..args.len() { + let mut arg = args[i].replace("\"", ""); + arg = arg.replace("\'", ""); + let cmd = arg.split("="); // in case an equals sign was used + let vec = cmd.collect::>(); + let mut keyval = false; + if vec.len() > 1 { + keyval = true; + } + let flag_val = vec[0].to_lowercase().replace("--", "-"); + if flag_val == "-i" || flag_val == "-input" { + if keyval { + input_file = vec[1].to_string(); + } else { + input_file = args[i + 1].to_string(); + } + } else if flag_val == "-o" || flag_val == "-output" { + if keyval { + output_file = vec[1].to_string(); + } else { + output_file = args[i + 1].to_string(); + } + } else if flag_val == "-sigma_dist" { + if keyval { + sigma_dist = vec[1] + .to_string() + .parse::() + .expect(&format!("Error parsing {}", flag_val)); + } else { + sigma_dist = args[i + 1] + .to_string() + .parse::() + .expect(&format!("Error parsing {}", flag_val)); + } + } else if flag_val == "-sigma_int" { + if keyval { + sigma_int = vec[1] + .to_string() + .parse::() + .expect(&format!("Error parsing {}", flag_val)); + } else { + sigma_int = args[i + 1] + .to_string() + .parse::() + .expect(&format!("Error parsing {}", flag_val)); + } + } + } + + if verbose { + let tool_name = self.get_tool_name(); + let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28); + // 28 = length of the 'Powered by' by statement. + println!("{}", "*".repeat(welcome_len)); + println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len())); + println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28)); + println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23)); + println!("{}", "*".repeat(welcome_len)); + } + + let sep: String = path::MAIN_SEPARATOR.to_string(); + + if !input_file.contains(&sep) && !input_file.contains("/") { + input_file = format!("{}{}", working_directory, input_file); + } + if !output_file.contains(&sep) && !output_file.contains("/") { + output_file = format!("{}{}", working_directory, output_file); + } + + if sigma_dist < 0.5 { + sigma_dist = 0.5; + } else if sigma_dist > 20.0 { + sigma_dist = 20.0; + } + + if sigma_int < 0.001 { + sigma_int = 0.001; + } + + let recip_root_2_pi_times_sigma_d = 1.0 / ((2.0 * PI).sqrt() * sigma_dist); + let two_sigma_sqr_d = 2.0 * sigma_dist * sigma_dist; + + let recip_root_2_pi_times_sigma_i = 1.0 / ((2.0 * PI).sqrt() * sigma_int); + let two_sigma_sqr_i = 2.0 * sigma_int * sigma_int; + + // figure out the size of the filter + let mut weight: f64; + for i in 0..250 { + weight = + recip_root_2_pi_times_sigma_d * (-1.0 * ((i * i) as f64) / two_sigma_sqr_d).exp(); + if weight <= 0.001 { + filter_size = i * 2 + 1; + break; + } + } + + // the filter dimensions must be odd numbers such that there is a middle pixel + if filter_size % 2 == 0 { + filter_size += 1; + } + + if filter_size < 3 { + filter_size = 3; + } + + let num_pixels_in_filter = filter_size * filter_size; + let mut dx = vec![0isize; num_pixels_in_filter]; + let mut dy = vec![0isize; num_pixels_in_filter]; + let mut weights_d = vec![0.0; num_pixels_in_filter]; + + // fill the filter d_x and d_y values and the distance-weights + let midpoint: isize = (filter_size as f64 / 2f64).floor() as isize; // + 1; + let mut a = 0; + let (mut x, mut y): (isize, isize); + for row in 0..filter_size { + for col in 0..filter_size { + x = col as isize - midpoint; + y = row as isize - midpoint; + dx[a] = x; + dy[a] = y; + weight = recip_root_2_pi_times_sigma_d + * (-1.0 * ((x * x + y * y) as f64) / two_sigma_sqr_d).exp(); + weights_d[a] = 
weight; + a += 1; + } + } + + let mut progress: usize; + let mut old_progress: usize = 1; + + if verbose { + println!("Reading data...") + }; + + let input = Arc::new(Raster::new(&input_file, "r")?); + let dx = Arc::new(dx); + let dy = Arc::new(dy); + let weights_d = Arc::new(weights_d); + + let start = Instant::now(); + + let rows = input.configs.rows as isize; + let columns = input.configs.columns as isize; + let nodata = input.configs.nodata; + + let is_rgb_image = if input.configs.data_type == DataType::RGB24 + || input.configs.data_type == DataType::RGBA32 + || input.configs.photometric_interp == PhotometricInterpretation::RGB + { + true + } else { + false + }; + + let mut output = Raster::initialize_using_file(&output_file, &input); + output.configs.data_type = DataType::F32; + output.configs.photometric_interp = PhotometricInterpretation::Continuous; + + let mut num_procs = num_cpus::get() as isize; + let configs = whitebox_common::configs::get_configs()?; + let max_procs = configs.max_procs; + if max_procs > 0 && max_procs < num_procs { + num_procs = max_procs; + } + let (tx, rx) = mpsc::channel(); + for tid in 0..num_procs { + let input = input.clone(); + let dx = dx.clone(); + let dy = dy.clone(); + let weights_d = weights_d.clone(); + let tx1 = tx.clone(); + thread::spawn(move || { + let input_fn: Box f64> = if !is_rgb_image { + Box::new(|row: isize, col: isize| -> f64 { input.get_value(row, col) }) + } else { + Box::new(|row: isize, col: isize| -> f64 { + let value = input.get_value(row, col); + if value != nodata { + return value2i(value); + } + nodata + }) + }; + + let (mut sum, mut z_final): (f64, f64); + let mut z: f64; + let mut zn: f64; + let (mut x, mut y): (isize, isize); + let mut weight: f64; + let mut weights_i = vec![0.0; num_pixels_in_filter]; + + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 0..columns { + z = input_fn(row, col); + if z != nodata { + //fill weights_i with the appropriate intensity weights + sum = 0.0; + for a in 0..num_pixels_in_filter { + x = col + dx[a]; + y = row + dy[a]; + zn = input_fn(y, x); + if zn != nodata { + weight = recip_root_2_pi_times_sigma_i + * (-1.0 * ((zn - z) * (zn - z)) / two_sigma_sqr_i).exp(); + weight *= weights_d[a]; + weights_i[a] = weight; + sum += weight; + } + } + + z_final = 0.0; + for a in 0..num_pixels_in_filter { + x = col + dx[a]; + y = row + dy[a]; + zn = input_fn(y, x); + if zn != nodata { + z_final += weights_i[a] * zn / sum; + } + } + + data[col as usize] = z - z_final; + } + } + + tx1.send((row, data)).unwrap(); + } + }); + } + + for row in 0..rows { + let data = rx.recv().expect("Error receiving data from thread."); + output.set_row_data(data.0, data.1); + if verbose { + progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize; + if progress != old_progress { + println!("Progress: {}%", progress); + old_progress = progress; + } + } + } + + let elapsed_time = get_formatted_elapsed_time(start); + output.configs.palette = "grey.plt".to_string(); + output.add_metadata_entry(format!( + "Created by whitebox_tools\' {} tool", + self.get_tool_name() + )); + output.add_metadata_entry(format!("Input file: {}", input_file)); + output.add_metadata_entry(format!("Sigma distance: {}", sigma_dist)); + output.add_metadata_entry(format!("Sigma intensity: {}", sigma_int)); + output.add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time)); + + if verbose { + println!("Saving data...") + }; + let _ = match output.write() { + 
Ok(_) => { + if verbose { + println!("Output file written") + } + } + Err(e) => return Err(e), + }; + + if verbose { + println!( + "{}", + &format!("Elapsed Time (excluding I/O): {}", elapsed_time) + ); + } + + Ok(()) + } +} + +fn value2i(value: f64) -> f64 { + let r = (value as u32 & 0xFF) as f64 / 255f64; + let g = ((value as u32 >> 8) & 0xFF) as f64 / 255f64; + let b = ((value as u32 >> 16) & 0xFF) as f64 / 255f64; + + (r + g + b) / 3f64 +} diff --git a/whitebox-tools-app/src/tools/image_analysis/line_thin.rs b/whitebox-tools-app/src/tools/image_analysis/line_thin.rs index 9e7583922..34a2e16e2 100755 --- a/whitebox-tools-app/src/tools/image_analysis/line_thin.rs +++ b/whitebox-tools-app/src/tools/image_analysis/line_thin.rs @@ -266,11 +266,11 @@ impl WhiteboxTool for LineThinning { for a in 0..4 { for row in 0..rows { for col in 0..columns { - z = output[(row, col)]; + z = output.get_value(row, col); if z > 0.0 && z != nodata { // fill the neighbours array for i in 0..8 { - neighbours[i] = output[(row + dy[i], col + dx[i])]; + neighbours[i] = output.get_value(row + dy[i], col + dx[i]); } // scan through element @@ -282,7 +282,7 @@ impl WhiteboxTool for LineThinning { } if pattern_match { - output[(row, col)] = 0.0; + output.set_value(row, col, 0.0); did_something = true; } else { pattern_match = true; diff --git a/whitebox-tools-app/src/tools/image_analysis/mod.rs b/whitebox-tools-app/src/tools/image_analysis/mod.rs index e70d92150..278a59267 100755 --- a/whitebox-tools-app/src/tools/image_analysis/mod.rs +++ b/whitebox-tools-app/src/tools/image_analysis/mod.rs @@ -18,6 +18,7 @@ mod flip_image; mod gamma_correction; mod gaussian_contrast_stretch; mod gaussian_filter; +mod highpass_bilateral_filter; mod highpass_filter; mod highpass_median_filter; mod histogram_equalization; @@ -87,6 +88,7 @@ pub use self::flip_image::FlipImage; pub use self::gamma_correction::GammaCorrection; pub use self::gaussian_contrast_stretch::GaussianContrastStretch; pub use self::gaussian_filter::GaussianFilter; +pub use self::highpass_bilateral_filter::HighPassBilateralFilter; pub use self::highpass_filter::HighPassFilter; pub use self::highpass_median_filter::HighPassMedianFilter; pub use self::histogram_equalization::HistogramEqualization; diff --git a/whitebox-tools-app/src/tools/mod.rs b/whitebox-tools-app/src/tools/mod.rs index 83a89fbe6..e287fbd65 100755 --- a/whitebox-tools-app/src/tools/mod.rs +++ b/whitebox-tools-app/src/tools/mod.rs @@ -225,6 +225,7 @@ impl ToolManager { tool_names.push("GammaCorrection".to_string()); tool_names.push("GaussianContrastStretch".to_string()); tool_names.push("GaussianFilter".to_string()); + tool_names.push("HighPassBilateralFilter".to_string()); tool_names.push("HighPassFilter".to_string()); tool_names.push("HighPassMedianFilter".to_string()); tool_names.push("HistogramEqualization".to_string()); @@ -781,6 +782,7 @@ impl ToolManager { Some(Box::new(image_analysis::GaussianContrastStretch::new())) } "gaussianfilter" => Some(Box::new(image_analysis::GaussianFilter::new())), + "highpassbilateralfilter" => Some(Box::new(image_analysis::HighPassBilateralFilter::new())), "highpassfilter" => Some(Box::new(image_analysis::HighPassFilter::new())), "highpassmedianfilter" => Some(Box::new(image_analysis::HighPassMedianFilter::new())), "histogramequalization" => Some(Box::new(image_analysis::HistogramEqualization::new())), diff --git a/whitebox-tools-app/src/tools/terrain_analysis/aspect.rs b/whitebox-tools-app/src/tools/terrain_analysis/aspect.rs index a3b4ebd4f..ebfcd7e76 100755 
--- a/whitebox-tools-app/src/tools/terrain_analysis/aspect.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/aspect.rs @@ -264,7 +264,7 @@ impl WhiteboxTool for Aspect { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -274,18 +274,14 @@ impl WhiteboxTool for Aspect { /* The following equations have been taken from Florinsky (2016) Principles and Methods - of Digital Terrain Modelling, Chapter 4, pg. 117. - - I don't fully understand why this is the case, but in order to make this work such that - hillslopes have aspects that face the appropriate direction, you need to swap p and q and - reverse their signs. I've spoken to Florinsky about this, and he maintains that his - equations are correct. + of Digital Terrain Modelling, Chapter 4, pg. 117. I can't figure out why, but this + only works if you change the sign of q below. */ - q = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -413,16 +409,16 @@ impl WhiteboxTool for Aspect { of Digital Terrain Modelling, Chapter 4, pg. 117. */ - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. 
* (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); if p != 0f64 { // slope is greater than zero data[col as usize] = 180f64 - (q / p).atan().to_degrees() + 90f64 * (p / p.abs()); diff --git a/whitebox-tools-app/src/tools/terrain_analysis/gaussian_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/gaussian_curvature.rs index 0433fc88e..bf16574da 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/gaussian_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/gaussian_curvature.rs @@ -315,7 +315,7 @@ impl WhiteboxTool for GaussianCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -339,11 +339,11 @@ impl WhiteboxTool for GaussianCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -505,16 +505,16 @@ impl WhiteboxTool for GaussianCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. 
* (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); /* The following equation has been taken from Florinsky (2016) Principles and Methods diff --git a/whitebox-tools-app/src/tools/terrain_analysis/hillshade.rs b/whitebox-tools-app/src/tools/terrain_analysis/hillshade.rs index 37393165d..14206e55b 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/hillshade.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/hillshade.rs @@ -17,6 +17,10 @@ use std::path; use std::sync::mpsc; use std::sync::Arc; use std::thread; +use whitebox_common::utils::{ + haversine_distance, + vincenty_distance +}; /// This tool performs a hillshade operation (also called shaded relief) on an input digital elevation model (DEM). /// The user must specify the name of the input DEM and the output hillshade image name. Other parameters that must @@ -279,111 +283,261 @@ impl WhiteboxTool for Hillshade { let start = Instant::now(); + let nodata = input.configs.nodata; + let rows = input.configs.rows as isize; + let columns = input.configs.columns as isize; + let resx = input.configs.resolution_x; + let resy = input.configs.resolution_y; + let res = (resx + resy) / 2.; + azimuth = (azimuth - 90f64).to_radians(); altitude = altitude.to_radians(); let sin_theta = altitude.sin(); let cos_theta = altitude.cos(); - let eight_grid_res = input.configs.resolution_x * 8.0; + // let eight_grid_res = input.configs.resolution_x * 8.0; let mut configs = input.configs.clone(); configs.data_type = DataType::I16; configs.nodata = -32768f64; let mut output = Raster::initialize_using_config(&output_file, &configs); - let out_nodata = output.configs.nodata; - let rows = input.configs.rows as isize; - + let mut num_procs = num_cpus::get() as isize; let configs = whitebox_common::configs::get_configs()?; let max_procs = configs.max_procs; if max_procs > 0 && max_procs < num_procs { num_procs = max_procs; } + let (tx, rx) = mpsc::channel(); - for tid in 0..num_procs { - let input = input.clone(); - let tx1 = tx.clone(); - thread::spawn(move || { - let nodata = input.configs.nodata; - let columns = input.configs.columns as isize; - let d_x = [1, 1, 1, 0, -1, -1, -1, 0]; - let d_y = [-1, 0, 1, 1, 1, 0, -1, -1]; - let mut n: [f64; 8] = [0.0; 8]; - let mut z: f64; - let (mut term1, mut term2, mut term3): (f64, f64, f64); - let (mut fx, mut fy): (f64, f64); - let mut tan_slope: f64; - let mut aspect: f64; - let half_pi = PI / 2f64; - let mut z_factor_array = Vec::with_capacity(rows as usize); - if input.is_in_geographic_coordinates() && z_factor < 0.0 { - // calculate a new z-conversion factor - for row in 0..rows { - let lat = input.get_y_from_row(row); - z_factor_array.push(1.0 / (111320.0 * lat.cos())); - } - } else { - if z_factor < 0.0 { - z_factor = 1.0; - } - z_factor_array = vec![z_factor; rows as usize]; - } - for row in (0..rows).filter(|r| r % num_procs == tid) { - let mut data = vec![out_nodata; columns as usize]; - for col in 0..columns { - z = input.get_value(row, col); - if z != nodata { - z = z * z_factor_array[row as usize]; - for c in 0..8 { - n[c] = input.get_value(row + d_y[c], col + d_x[c]); - if n[c] != nodata { - n[c] = n[c] * z_factor_array[row as usize]; + if !input.is_in_geographic_coordinates() { + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z12: f64; + let mut p: f64; + let mut q: f64; + let offsets = [ + [-2, -2], [-1, -2], [0, -2], [1, -2], [2, -2], + [-2, -1], [-1, -1], [0, -1], [1, -1], [2, -1], + [-2, 0], 
[-1, 0], [0, 0], [1, 0], [2, 0], + [-2, 1], [-1, 1], [0, 1], [1, 1], [2, 1], + [-2, 2], [-1, 2], [0, 2], [1, 2], [2, 2] + ]; + let mut z = [0f64; 25]; + let mut val: f64; + let (mut term1, mut term2, mut term3): (f64, f64, f64); + let mut tan_slope: f64; + let mut aspect: f64; + let half_pi = PI / 2f64; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 0..columns { + z12 = input.get_value(row, col); + if z12 != nodata { + for n in 0..25 { + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); + if z[n] != nodata { + z[n] *= z_factor; + } else { + z[n] = z12 * z_factor; + } + } + + /* + The following equations have been taken from Florinsky (2016) Principles and Methods + of Digital Terrain Modelling, Chapter 4, pg. 117. + */ + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + + 5. * (z[9] + z[19] - z[5] - z[15])); + + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + + 5. * (z[1] + z[3] - z[21] - z[23])); + + tan_slope = (p * p + q * q).sqrt(); + if tan_slope < 0.00017 { + tan_slope = 0.00017; + } + aspect = if p != 0f64 { + PI - ((q / p).atan()) + half_pi * (p / (p).abs()) } else { - n[c] = z; + PI + }; + // let sign_p = if p != 0.0 { p.signum() } else { 0.0 }; + // let sign_q = if q != 0.0 { q.signum() } else { 0.0 }; + // aspect = -half_pi*(1.0 - sign_q)*(1.0-sign_p.abs())+PI*(1.0 + sign_p) - sign_p * (-q / (p*p + q*q).sqrt()).acos(); + term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); + term2 = sin_theta / tan_slope; + term3 = cos_theta * (azimuth - aspect).sin(); + val = term1 * (term2 - term3); + val = val * 32767.0; + if val < 0.0 { + val = 0.0; } + data[col as usize] = val.round(); } - // calculate slope and aspect - fy = (n[6] - n[4] + 2.0 * (n[7] - n[3]) + n[0] - n[2]) / eight_grid_res; - fx = (n[2] - n[4] + 2.0 * (n[1] - n[5]) + n[0] - n[6]) / eight_grid_res; - tan_slope = (fx * fx + fy * fy).sqrt(); - if tan_slope < 0.00017 { - tan_slope = 0.00017; - } - aspect = if fx != 0f64 { - PI - ((fy / fx).atan()) + half_pi * (fx / (fx).abs()) - } else { - PI - }; - term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); - term2 = sin_theta / tan_slope; - term3 = cos_theta * (azimuth - aspect).sin(); - z = term1 * (term2 - term3); - // } else { - // z = 0.5; - // } - z = z * 32767.0; - if z < 0.0 { - z = 0.0; + } + + tx.send((row, data)).expect("Error sending data to thread."); + } + }); + } + } else { // geographic coordinates + + let phi1 = input.get_y_from_row(0); + let lambda1 = input.get_x_from_column(0); + + let phi2 = phi1; + let lambda2 = input.get_x_from_column(-1); + + let linear_res = vincenty_distance((phi1, lambda1), (phi2, lambda2)); + let lr2 = haversine_distance((phi1, lambda1), (phi2, lambda2)); + let diff = 100. * (linear_res - lr2).abs() / linear_res; + let use_haversine = diff < 0.5; // if the difference is less than 0.5%, use the faster haversine method to calculate distances. 
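+            // Note on the use_haversine test above: both distances span a single cell
+            // width along the raster's top row. Vincenty's ellipsoidal formula is the
+            // more accurate of the two but is iterative and slower; the haversine
+            // formula is a cheap closed-form spherical approximation. When the two
+            // agree to within 0.5% over one cell, the haversine distance is used for
+            // every per-cell a-e spacing computed in the loop below.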
+ + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z4: f64; + let mut p: f64; + let mut q: f64; + let mut a: f64; + let mut b: f64; + let mut c: f64; + let mut d: f64; + let mut e: f64; + let mut phi1: f64; + let mut lambda1: f64; + let mut phi2: f64; + let mut lambda2: f64; + let offsets = [ + [-1, -1], [0, -1], [1, -1], + [-1, 0], [0, 0], [1, 0], + [-1, 1], [0, 1], [1, 1] + ]; + let mut z = [0f64; 25]; + let mut val: f64; + let (mut term1, mut term2, mut term3): (f64, f64, f64); + let mut tan_slope: f64; + let mut aspect: f64; + let half_pi = PI / 2f64; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 0..columns { + z4 = input.get_value(row, col); + if z4 != nodata { + for n in 0..9 { + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); + if z[n] != nodata { + z[n] *= z_factor; + } else { + z[n] = z4 * z_factor; + } + } + + // Calculate a, b, c, d, and e. + phi1 = input.get_y_from_row(row); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + b = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi2 = input.get_y_from_row(row+1); + lambda2 = lambda1; + + d = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi2 = input.get_y_from_row(row-1); + lambda2 = lambda1; + + e = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi1 = input.get_y_from_row(row+1); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + a = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi1 = input.get_y_from_row(row-1); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + c = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + /* + The following equations have been taken from Florinsky (2016) Principles and Methods + of Digital Terrain Modelling, Chapter 4, pg. 117. + */ + + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); + + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) + - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) + - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) + - 2. 
* (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); + + tan_slope = (p * p + q * q).sqrt(); + if tan_slope < 0.00017 { + tan_slope = 0.00017; + } + aspect = if p != 0f64 { + PI - ((q / p).atan()) + half_pi * (p / (p).abs()) + } else { + PI + }; + term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); + term2 = sin_theta / tan_slope; + term3 = cos_theta * (azimuth - aspect).sin(); + val = term1 * (term2 - term3); + val = val * 32767.0; + if val < 0.0 { + val = 0.0; + } + data[col as usize] = val.round(); } - data[col as usize] = z.round(); } + + tx.send((row, data)).expect("Error sending data to thread."); } - tx1.send((row, data)).unwrap(); - } - }); + }); + } } - let mut histo: [f64; 32768] = [0.0; 32768]; - let mut num_cells = 0.0; for row in 0..rows { let data = rx.recv().expect("Error receiving data from thread."); - let mut bin: usize; - for col in 0..data.1.len() { - if data.1[col] != out_nodata { - bin = data.1[col] as usize; - histo[bin] += 1.0; - num_cells += 1.0; - } - } output.set_row_data(data.0, data.1); if verbose { @@ -395,33 +549,6 @@ impl WhiteboxTool for Hillshade { } } - let mut new_min = 0; - let mut new_max = 0; - let clip_percent = 0.01; - let target_cell_num = num_cells * clip_percent; - let mut sum = 0.0; - for c in 0..32768 { - sum += histo[c]; - if sum >= target_cell_num { - new_min = c; - break; - } - } - - sum = 0.0; - for c in (0..32768).rev() { - sum += histo[c]; - if sum >= target_cell_num { - new_max = c; - break; - } - } - - if new_max > new_min { - output.configs.display_min = new_min as f64; - output.configs.display_max = new_max as f64; - } - let elapsed_time = get_formatted_elapsed_time(start); output.configs.palette = "grey.plt".to_string(); output.add_metadata_entry(format!( diff --git a/whitebox-tools-app/src/tools/terrain_analysis/maximal_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/maximal_curvature.rs index 78a568f6a..a64bb9906 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/maximal_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/maximal_curvature.rs @@ -318,7 +318,7 @@ impl WhiteboxTool for MaximalCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -328,8 +328,7 @@ impl WhiteboxTool for MaximalCurvature { /* The following equations have been taken from Florinsky (2016) Principles and Methods - of Digital Terrain Modelling, Chapter 4, pg. 117. Note that I believe Florinsky reversed - the equations for q and p. + of Digital Terrain Modelling, Chapter 4, pg. 117. */ r = 1f64 / (35f64 * res * res) * (2. * (z[0] + z[4] + z[5] + z[9] + z[10] + z[14] + z[15] + z[19] + z[20] + z[24]) - 2. * (z[2] + z[7] + z[12] + z[17] + z[22]) - z[1] - z[3] - z[6] - z[8] @@ -342,11 +341,11 @@ impl WhiteboxTool for MaximalCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. 
* (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -512,16 +511,16 @@ impl WhiteboxTool for MaximalCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); /* The following equation has been taken from Florinsky (2016) Principles and Methods diff --git a/whitebox-tools-app/src/tools/terrain_analysis/mean_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/mean_curvature.rs index 9eab9e969..b0aab7dd3 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/mean_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/mean_curvature.rs @@ -319,7 +319,7 @@ impl WhiteboxTool for MeanCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -329,8 +329,7 @@ impl WhiteboxTool for MeanCurvature { /* The following equations have been taken from Florinsky (2016) Principles and Methods - of Digital Terrain Modelling, Chapter 4, pg. 117. Note that I believe Florinsky reversed - the equations for q and p. + of Digital Terrain Modelling, Chapter 4, pg. 117. */ r = 1f64 / (35f64 * res * res) * (2. * (z[0] + z[4] + z[5] + z[9] + z[10] + z[14] + z[15] + z[19] + z[20] + z[24]) - 2. * (z[2] + z[7] + z[12] + z[17] + z[22]) - z[1] - z[3] - z[6] - z[8] @@ -343,11 +342,11 @@ impl WhiteboxTool for MeanCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] - + 2. 
* (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) - + 5. * (z[9] + z[19] - z[5] - z[15])); + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -508,16 +507,16 @@ impl WhiteboxTool for MeanCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); /* The following equation has been taken from Florinsky (2016) Principles and Methods diff --git a/whitebox-tools-app/src/tools/terrain_analysis/minimal_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/minimal_curvature.rs index 0734152ee..c11070587 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/minimal_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/minimal_curvature.rs @@ -318,7 +318,7 @@ impl WhiteboxTool for MinimalCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -342,11 +342,11 @@ impl WhiteboxTool for MinimalCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. 
* (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -512,16 +512,16 @@ impl WhiteboxTool for MinimalCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); /* The following equation has been taken from Florinsky (2016) Principles and Methods diff --git a/whitebox-tools-app/src/tools/terrain_analysis/multidirectional_hillshade.rs b/whitebox-tools-app/src/tools/terrain_analysis/multidirectional_hillshade.rs index 2583c6801..fb02ceea4 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/multidirectional_hillshade.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/multidirectional_hillshade.rs @@ -17,6 +17,10 @@ use std::path; use std::sync::mpsc; use std::sync::Arc; use std::thread; +use whitebox_common::utils::{ + haversine_distance, + vincenty_distance +}; /// This tool performs a hillshade operation (also called shaded relief) on an input digital elevation model (DEM) /// with multiple sources of illumination. 
The user must specify the name of the input DEM (`--dem`) and the output @@ -290,28 +294,22 @@ impl WhiteboxTool for MultidirectionalHillshade { let start = Instant::now(); + let rows = input.configs.rows as isize; + let columns = input.configs.columns as isize; + let nodata = input.configs.nodata; + let resx = input.configs.resolution_x; + let resy = input.configs.resolution_y; + let res = (resx + resy) / 2.; + altitude = altitude.to_radians(); let sin_theta = altitude.sin(); let cos_theta = altitude.cos(); - let eight_grid_res = input.configs.resolution_x * 8.0; - - if input.is_in_geographic_coordinates() && z_factor < 0.0 { - // calculate a new z-conversion factor - let mut mid_lat = (input.configs.north - input.configs.south) / 2.0; - if mid_lat <= 90.0 && mid_lat >= -90.0 { - mid_lat = mid_lat.to_radians(); - z_factor = 1.0 / (111320.0 * mid_lat.cos()); - } - } else if z_factor < 0.0 { - z_factor = 1.0; - } + // let eight_grid_res = input.configs.resolution_x * 8.0; let mut configs = input.configs.clone(); configs.data_type = DataType::I16; configs.nodata = -32768f64; let mut output = Raster::initialize_using_config(&output_file, &configs); - let out_nodata = output.configs.nodata; - let rows = input.configs.rows as isize; let mut num_procs = num_cpus::get() as isize; let configs = whitebox_common::configs::get_configs()?; @@ -319,109 +317,389 @@ impl WhiteboxTool for MultidirectionalHillshade { if max_procs > 0 && max_procs < num_procs { num_procs = max_procs; } + // let (tx, rx) = mpsc::channel(); + // for tid in 0..num_procs { + // let input = input.clone(); + // let tx1 = tx.clone(); + // thread::spawn(move || { + // let nodata = input.configs.nodata; + // let columns = input.configs.columns as isize; + // let d_x = [1, 1, 1, 0, -1, -1, -1, 0]; + // let d_y = [-1, 0, 1, 1, 1, 0, -1, -1]; + // let azimuths = if multidirection360mode { + // vec![ + // (0f64 - 90f64).to_radians(), + // (45f64 - 90f64).to_radians(), + // (90f64 - 90f64).to_radians(), + // (135f64 - 90f64).to_radians(), + // (180f64 - 90f64).to_radians(), + // (225f64 - 90f64).to_radians(), + // (270f64 - 90f64).to_radians(), + // (315f64 - 90f64).to_radians(), + // ] + // } else { + // vec![ + // (225f64 - 90f64).to_radians(), + // (270f64 - 90f64).to_radians(), + // (315f64 - 90f64).to_radians(), + // (360f64 - 90f64).to_radians(), + // ] + // }; + + // let weights = if multidirection360mode { + // vec![ + // 0.15f64, 0.125f64, 0.1f64, 0.05f64, 0.1f64, 0.125f64, 0.15f64, 0.20f64, + // ] + // } else { + // vec![0.1f64, 0.4f64, 0.4f64, 0.1f64] + // }; + // let mut n: [f64; 8] = [0.0; 8]; + // let mut z: f64; + // let mut azimuth: f64; + // let (mut term1, mut term2, mut term3): (f64, f64, f64); + // let (mut fx, mut fy): (f64, f64); + // let mut tan_slope: f64; + // let mut aspect: f64; + // let half_pi = PI / 2f64; + // for row in (0..rows).filter(|r| r % num_procs == tid) { + // let mut data = vec![out_nodata; columns as usize]; + // for col in 0..columns { + // z = input.get_value(row, col); + // if z != nodata { + // z = z * z_factor; + // for c in 0..8 { + // n[c] = input.get_value(row + d_y[c], col + d_x[c]); + // if n[c] != nodata { + // n[c] = n[c] * z_factor; + // } else { + // n[c] = z; + // } + // } + // // calculate slope and aspect + // fy = (n[6] - n[4] + 2.0 * (n[7] - n[3]) + n[0] - n[2]) / eight_grid_res; + // fx = (n[2] - n[4] + 2.0 * (n[1] - n[5]) + n[0] - n[6]) / eight_grid_res; + // tan_slope = (fx * fx + fy * fy).sqrt(); + // if tan_slope < 0.00017 { + // tan_slope = 0.00017; + // } + // aspect = 
if fx != 0f64 { + // PI - ((fy / fx).atan()) + half_pi * (fx / (fx).abs()) + // } else { + // PI + // }; + // term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); + // term2 = sin_theta / tan_slope; + + // z = 0f64; + // for a in 0..azimuths.len() { + // azimuth = azimuths[a]; + // term3 = cos_theta * (azimuth - aspect).sin(); + // z += term1 * (term2 - term3) * weights[a]; + // } + // z = z * 32767.0; + // if z < 0.0 { + // z = 0.0; + // } + // data[col as usize] = z.round(); + // } + // } + // tx1.send((row, data)).unwrap(); + // } + // }); + // } + let (tx, rx) = mpsc::channel(); - for tid in 0..num_procs { - let input = input.clone(); - let tx1 = tx.clone(); - thread::spawn(move || { - let nodata = input.configs.nodata; - let columns = input.configs.columns as isize; - let d_x = [1, 1, 1, 0, -1, -1, -1, 0]; - let d_y = [-1, 0, 1, 1, 1, 0, -1, -1]; - let azimuths = if multidirection360mode { - vec![ - (0f64 - 90f64).to_radians(), - (45f64 - 90f64).to_radians(), - (90f64 - 90f64).to_radians(), - (135f64 - 90f64).to_radians(), - (180f64 - 90f64).to_radians(), - (225f64 - 90f64).to_radians(), - (270f64 - 90f64).to_radians(), - (315f64 - 90f64).to_radians(), - ] - } else { - vec![ - (225f64 - 90f64).to_radians(), - (270f64 - 90f64).to_radians(), - (315f64 - 90f64).to_radians(), - (360f64 - 90f64).to_radians(), - ] - }; - - let weights = if multidirection360mode { - vec![ - 0.15f64, 0.125f64, 0.1f64, 0.05f64, 0.1f64, 0.125f64, 0.15f64, 0.20f64, - ] - } else { - vec![0.1f64, 0.4f64, 0.4f64, 0.1f64] - }; - let mut n: [f64; 8] = [0.0; 8]; - let mut z: f64; - let mut azimuth: f64; - let (mut term1, mut term2, mut term3): (f64, f64, f64); - let (mut fx, mut fy): (f64, f64); - let mut tan_slope: f64; - let mut aspect: f64; - let half_pi = PI / 2f64; - for row in (0..rows).filter(|r| r % num_procs == tid) { - let mut data = vec![out_nodata; columns as usize]; - for col in 0..columns { - z = input.get_value(row, col); - if z != nodata { - z = z * z_factor; - for c in 0..8 { - n[c] = input.get_value(row + d_y[c], col + d_x[c]); - if n[c] != nodata { - n[c] = n[c] * z_factor; + if !input.is_in_geographic_coordinates() { + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z12: f64; + let mut p: f64; + let mut q: f64; + let offsets = [ + [-2, -2], [-1, -2], [0, -2], [1, -2], [2, -2], + [-2, -1], [-1, -1], [0, -1], [1, -1], [2, -1], + [-2, 0], [-1, 0], [0, 0], [1, 0], [2, 0], + [-2, 1], [-1, 1], [0, 1], [1, 1], [2, 1], + [-2, 2], [-1, 2], [0, 2], [1, 2], [2, 2] + ]; + let mut z = [0f64; 25]; + let mut val: f64; + let (mut term1, mut term2, mut term3): (f64, f64, f64); + let mut tan_slope: f64; + let mut aspect: f64; + let half_pi = PI / 2f64; + let mut azimuth: f64; + let azimuths = if multidirection360mode { + vec![ + (0f64 - 90f64).to_radians(), + (45f64 - 90f64).to_radians(), + (90f64 - 90f64).to_radians(), + (135f64 - 90f64).to_radians(), + (180f64 - 90f64).to_radians(), + (225f64 - 90f64).to_radians(), + (270f64 - 90f64).to_radians(), + (315f64 - 90f64).to_radians(), + ] + } else { + vec![ + (225f64 - 90f64).to_radians(), + (270f64 - 90f64).to_radians(), + (315f64 - 90f64).to_radians(), + (360f64 - 90f64).to_radians(), + ] + }; + + let weights = if multidirection360mode { + vec![ + 0.15f64, 0.125f64, 0.1f64, 0.05f64, 0.1f64, 0.125f64, 0.15f64, 0.20f64, + ] + } else { + vec![0.1f64, 0.4f64, 0.4f64, 0.1f64] + }; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 
0..columns { + z12 = input.get_value(row, col); + if z12 != nodata { + for n in 0..25 { + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); + if z[n] != nodata { + z[n] *= z_factor; + } else { + z[n] = z12 * z_factor; + } + } + + /* + The following equations have been taken from Florinsky (2016) Principles and Methods + of Digital Terrain Modelling, Chapter 4, pg. 117. + */ + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + + 5. * (z[9] + z[19] - z[5] - z[15])); + + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + + 5. * (z[1] + z[3] - z[21] - z[23])); + + tan_slope = (p * p + q * q).sqrt(); + if tan_slope < 0.00017 { + tan_slope = 0.00017; + } + aspect = if p != 0f64 { + PI - ((q / p).atan()) + half_pi * (p / (p).abs()) } else { - n[c] = z; + PI + }; + term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); + term2 = sin_theta / tan_slope; + + val = 0f64; + for a in 0..azimuths.len() { + azimuth = azimuths[a]; + term3 = cos_theta * (azimuth - aspect).sin(); + val += term1 * (term2 - term3) * weights[a]; } + val = val * 32767.0; + if val < 0.0 { + val = 0.0; + } + data[col as usize] = val.round(); } - // calculate slope and aspect - fy = (n[6] - n[4] + 2.0 * (n[7] - n[3]) + n[0] - n[2]) / eight_grid_res; - fx = (n[2] - n[4] + 2.0 * (n[1] - n[5]) + n[0] - n[6]) / eight_grid_res; - tan_slope = (fx * fx + fy * fy).sqrt(); - if tan_slope < 0.00017 { - tan_slope = 0.00017; - } - aspect = if fx != 0f64 { - PI - ((fy / fx).atan()) + half_pi * (fx / (fx).abs()) - } else { - PI - }; - term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); - term2 = sin_theta / tan_slope; - - z = 0f64; - for a in 0..azimuths.len() { - azimuth = azimuths[a]; - term3 = cos_theta * (azimuth - aspect).sin(); - z += term1 * (term2 - term3) * weights[a]; - } - z = z * 32767.0; - if z < 0.0 { - z = 0.0; + } + + tx.send((row, data)).expect("Error sending data to thread."); + } + }); + } + } else { // geographic coordinates + + let phi1 = input.get_y_from_row(0); + let lambda1 = input.get_x_from_column(0); + + let phi2 = phi1; + let lambda2 = input.get_x_from_column(-1); + + let linear_res = vincenty_distance((phi1, lambda1), (phi2, lambda2)); + let lr2 = haversine_distance((phi1, lambda1), (phi2, lambda2)); + let diff = 100. * (linear_res - lr2).abs() / linear_res; + let use_haversine = diff < 0.5; // if the difference is less than 0.5%, use the faster haversine method to calculate distances. 
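+            // As in Hillshade, this geographic branch measures each cell's east-west
+            // widths (a, b, c) and north-south spacings (d, e) on the spheroid and
+            // feeds them to Florinsky's unequal-grid derivatives. The resulting
+            // slope/aspect terms are then combined across the four (or, in
+            // 360-degree mode, eight) illumination azimuths using the fixed weights
+            // defined in the worker threads below.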
+ + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z4: f64; + let mut p: f64; + let mut q: f64; + let mut a: f64; + let mut b: f64; + let mut c: f64; + let mut d: f64; + let mut e: f64; + let mut phi1: f64; + let mut lambda1: f64; + let mut phi2: f64; + let mut lambda2: f64; + let offsets = [ + [-1, -1], [0, -1], [1, -1], + [-1, 0], [0, 0], [1, 0], + [-1, 1], [0, 1], [1, 1] + ]; + let mut z = [0f64; 25]; + let mut val: f64; + let (mut term1, mut term2, mut term3): (f64, f64, f64); + let mut tan_slope: f64; + let mut aspect: f64; + let mut azimuth: f64; + let half_pi = PI / 2f64; + let azimuths = if multidirection360mode { + vec![ + (0f64 - 90f64).to_radians(), + (45f64 - 90f64).to_radians(), + (90f64 - 90f64).to_radians(), + (135f64 - 90f64).to_radians(), + (180f64 - 90f64).to_radians(), + (225f64 - 90f64).to_radians(), + (270f64 - 90f64).to_radians(), + (315f64 - 90f64).to_radians(), + ] + } else { + vec![ + (225f64 - 90f64).to_radians(), + (270f64 - 90f64).to_radians(), + (315f64 - 90f64).to_radians(), + (360f64 - 90f64).to_radians(), + ] + }; + + let weights = if multidirection360mode { + vec![ + 0.15f64, 0.125f64, 0.1f64, 0.05f64, 0.1f64, 0.125f64, 0.15f64, 0.20f64, + ] + } else { + vec![0.1f64, 0.4f64, 0.4f64, 0.1f64] + }; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 0..columns { + z4 = input.get_value(row, col); + if z4 != nodata { + for n in 0..9 { + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); + if z[n] != nodata { + z[n] *= z_factor; + } else { + z[n] = z4 * z_factor; + } + } + + // Calculate a, b, c, d, and e. + phi1 = input.get_y_from_row(row); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + b = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi2 = input.get_y_from_row(row+1); + lambda2 = lambda1; + + d = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi2 = input.get_y_from_row(row-1); + lambda2 = lambda1; + + e = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi1 = input.get_y_from_row(row+1); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + a = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi1 = input.get_y_from_row(row-1); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + c = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + /* + The following equations have been taken from Florinsky (2016) Principles and Methods + of Digital Terrain Modelling, Chapter 4, pg. 117. + */ + + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); + + q = -(1. / (3. 
* d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) + - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) + - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); + + tan_slope = (p * p + q * q).sqrt(); + if tan_slope < 0.00017 { + tan_slope = 0.00017; + } + aspect = if p != 0f64 { + PI - ((q / p).atan()) + half_pi * (p / (p).abs()) + } else { + PI + }; + term1 = tan_slope / (1f64 + tan_slope * tan_slope).sqrt(); + term2 = sin_theta / tan_slope; + val = 0f64; + for a in 0..azimuths.len() { + azimuth = azimuths[a]; + term3 = cos_theta * (azimuth - aspect).sin(); + val += term1 * (term2 - term3) * weights[a]; + } + val = val * 32767.0; + if val < 0.0 { + val = 0.0; + } + data[col as usize] = val.round(); } - data[col as usize] = z.round(); } + + tx.send((row, data)).expect("Error sending data to thread."); } - tx1.send((row, data)).unwrap(); - } - }); + }); + } } - let mut histo: [f64; 32768] = [0.0; 32768]; - let mut num_cells = 0.0; for row in 0..rows { let data = rx.recv().expect("Error receiving data from thread."); - let mut bin: usize; - for col in 0..data.1.len() { - if data.1[col] != out_nodata { - bin = data.1[col] as usize; - histo[bin] += 1.0; - num_cells += 1.0; - } - } output.set_row_data(data.0, data.1); if verbose { @@ -433,33 +711,6 @@ impl WhiteboxTool for MultidirectionalHillshade { } } - let mut new_min = 0; - let mut new_max = 0; - let clip_percent = 0.01; - let target_cell_num = num_cells * clip_percent; - let mut sum = 0.0; - for c in 0..32768 { - sum += histo[c]; - if sum >= target_cell_num { - new_min = c; - break; - } - } - - sum = 0.0; - for c in (0..32768).rev() { - sum += histo[c]; - if sum >= target_cell_num { - new_max = c; - break; - } - } - - if new_max > new_min { - output.configs.display_min = new_min as f64; - output.configs.display_max = new_max as f64; - } - let elapsed_time = get_formatted_elapsed_time(start); output.configs.palette = "grey.plt".to_string(); output.add_metadata_entry(format!( diff --git a/whitebox-tools-app/src/tools/terrain_analysis/plan_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/plan_curvature.rs index 66e3fb22c..2ca7f07fb 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/plan_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/plan_curvature.rs @@ -320,7 +320,7 @@ impl WhiteboxTool for PlanCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -330,8 +330,7 @@ impl WhiteboxTool for PlanCurvature { /* The following equations have been taken from Florinsky (2016) Principles and Methods - of Digital Terrain Modelling, Chapter 4, pg. 117. Note that I believe Florinsky reversed - the equations for q and p. + of Digital Terrain Modelling, Chapter 4, pg. 117. */ r = 1f64 / (35f64 * res * res) * (2. 
* (z[0] + z[4] + z[5] + z[9] + z[10] + z[14] + z[15] + z[19] + z[20] + z[24]) - 2. * (z[2] + z[7] + z[12] + z[17] + z[22]) - z[1] - z[3] - z[6] - z[8] @@ -344,11 +343,11 @@ impl WhiteboxTool for PlanCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -512,16 +511,16 @@ impl WhiteboxTool for PlanCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); if (p + q).abs() > 0. { /* diff --git a/whitebox-tools-app/src/tools/terrain_analysis/prof_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/prof_curvature.rs index a1ceb3874..759ca6015 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/prof_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/prof_curvature.rs @@ -317,7 +317,7 @@ impl WhiteboxTool for ProfileCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -327,8 +327,7 @@ impl WhiteboxTool for ProfileCurvature { /* The following equations have been taken from Florinsky (2016) Principles and Methods - of Digital Terrain Modelling, Chapter 4, pg. 
117. Note that I believe Florinsky reversed - the equations for q and p. + of Digital Terrain Modelling, Chapter 4, pg. 117. */ r = 1f64 / (35f64 * res * res) * (2. * (z[0] + z[4] + z[5] + z[9] + z[10] + z[14] + z[15] + z[19] + z[20] + z[24]) - 2. * (z[2] + z[7] + z[12] + z[17] + z[22]) - z[1] - z[3] - z[6] - z[8] @@ -341,11 +340,11 @@ impl WhiteboxTool for ProfileCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -509,16 +508,16 @@ impl WhiteboxTool for ProfileCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); if (p + q).abs() > 0. { /* diff --git a/whitebox-tools-app/src/tools/terrain_analysis/relative_aspect.rs b/whitebox-tools-app/src/tools/terrain_analysis/relative_aspect.rs index f3f70fd15..31c848b38 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/relative_aspect.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/relative_aspect.rs @@ -16,6 +16,10 @@ use std::path; use std::sync::mpsc; use std::sync::Arc; use std::thread; +use whitebox_common::utils::{ + haversine_distance, + vincenty_distance +}; /// This tool creates a new raster in which each grid cell is assigned the terrain aspect relative to a user-specified /// direction (`--azimuth`). 
Relative terrain aspect is the angular distance (measured in degrees) between the land-surface @@ -249,21 +253,14 @@ impl WhiteboxTool for RelativeAspect { let start = Instant::now(); - let eight_grid_res = input.configs.resolution_x * 8.0; - - if input.is_in_geographic_coordinates() && z_factor < 0.0 { - // calculate a new z-conversion factor - let mut mid_lat = (input.configs.north - input.configs.south) / 2.0; - if mid_lat <= 90.0 && mid_lat >= -90.0 { - mid_lat = mid_lat.to_radians(); - z_factor = 1.0 / (111320.0 * mid_lat.cos()); - } - } else if z_factor < 0.0 { - z_factor = 1.0; - } + let rows = input.configs.rows as isize; + let columns = input.configs.columns as isize; + let nodata = input.configs.nodata; + let resx = input.configs.resolution_x; + let resy = input.configs.resolution_y; + let res = (resx + resy) / 2.; let mut output = Raster::initialize_using_file(&output_file, &input); - let rows = input.configs.rows as isize; if output.configs.data_type != DataType::F32 && output.configs.data_type != DataType::F64 { output.configs.data_type = DataType::F32; } @@ -274,51 +271,211 @@ impl WhiteboxTool for RelativeAspect { if max_procs > 0 && max_procs < num_procs { num_procs = max_procs; } + let (tx, rx) = mpsc::channel(); - for tid in 0..num_procs { - let input = input.clone(); - let tx1 = tx.clone(); - thread::spawn(move || { - let nodata = input.configs.nodata; - let columns = input.configs.columns as isize; - let d_x = [1, 1, 1, 0, -1, -1, -1, 0]; - let d_y = [-1, 0, 1, 1, 1, 0, -1, -1]; - let mut n: [f64; 8] = [0.0; 8]; - let mut z: f64; - let (mut fx, mut fy): (f64, f64); - for row in (0..rows).filter(|r| r % num_procs == tid) { - let mut data = vec![nodata; columns as usize]; - for col in 0..columns { - z = input[(row, col)]; - if z != nodata { - for c in 0..8 { - n[c] = input[(row + d_y[c], col + d_x[c])]; - if n[c] != nodata { - n[c] = n[c] * z_factor; + if !input.is_in_geographic_coordinates() { + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z12: f64; + let mut p: f64; + let mut q: f64; + // let mut sign_p: f64; + // let mut sign_q: f64; + // const PI: f64 = std::f64::consts::PI; + let offsets = [ + [-2, -2], [-1, -2], [0, -2], [1, -2], [2, -2], + [-2, -1], [-1, -1], [0, -1], [1, -1], [2, -1], + [-2, 0], [-1, 0], [0, 0], [1, 0], [2, 0], + [-2, 1], [-1, 1], [0, 1], [1, 1], [2, 1], + [-2, 2], [-1, 2], [0, 2], [1, 2], [2, 2] + ]; + let mut z = [0f64; 25]; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 0..columns { + z12 = input.get_value(row, col); + if z12 != nodata { + for n in 0..25 { + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); + if z[n] != nodata { + z[n] *= z_factor; + } else { + z[n] = z12 * z_factor; + } + } + + /* + The following equations have been taken from Florinsky (2016) Principles and Methods + of Digital Terrain Modelling, Chapter 4, pg. 117. + + I don't fully understand why this is the case, but in order to make this work such that + hillslopes have aspects that face the appropriate direction, you need to reverse their + signs of p and q. + */ + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + + 5. * (z[9] + z[19] - z[5] - z[15])); + + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + + 2. 
* (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + + 5. * (z[1] + z[3] - z[21] - z[23])); + + // sign_p = if p != 0. { p.signum() } else { 0. }; + // sign_q = if q != 0. { q.signum() } else { 0. }; + // data[col as usize] = ((-90.*(1. - sign_q)*(1. - sign_p.abs()) + 180.*(1. + sign_p) - 180. / PI * sign_p * (-q / (p*p + q*q).sqrt()).acos()) - azimuth).abs(); + + if p != 0f64 { // slope is greater than zero + data[col as usize] = (180f64 - (q / p).atan().to_degrees() + 90f64 * (p / p.abs()) - azimuth).abs(); + if data[col as usize] > 180.0 { + data[col as usize] = 360.0 - data[col as usize]; + } } else { - n[c] = z * z_factor; + data[col as usize] = -1f64; // undefined for flat surfaces } } - // calculate slope - fy = (n[6] - n[4] + 2.0 * (n[7] - n[3]) + n[0] - n[2]) / eight_grid_res; - fx = (n[2] - n[4] + 2.0 * (n[1] - n[5]) + n[0] - n[6]) / eight_grid_res; - if fx != 0f64 { - z = ((180f64 - ((fy / fx).atan()).to_degrees() - + 90f64 * (fx / (fx).abs())) - - azimuth) - .abs(); - if z > 180.0 { - z = 360.0 - z; + } + + tx.send((row, data)).expect("Error sending data to thread."); + } + }); + } + } else { // geographic coordinates + + let phi1 = input.get_y_from_row(0); + let lambda1 = input.get_x_from_column(0); + + let phi2 = phi1; + let lambda2 = input.get_x_from_column(-1); + + let linear_res = vincenty_distance((phi1, lambda1), (phi2, lambda2)); + let lr2 = haversine_distance((phi1, lambda1), (phi2, lambda2)); + let diff = 100. * (linear_res - lr2).abs() / linear_res; + let use_haversine = diff < 0.5; // if the difference is less than 0.5%, use the faster haversine method to calculate distances. + + for tid in 0..num_procs { + let input = input.clone(); + let tx = tx.clone(); + thread::spawn(move || { + let mut z4: f64; + let mut p: f64; + let mut q: f64; + let mut a: f64; + let mut b: f64; + let mut c: f64; + let mut d: f64; + let mut e: f64; + let mut phi1: f64; + let mut lambda1: f64; + let mut phi2: f64; + let mut lambda2: f64; + let offsets = [ + [-1, -1], [0, -1], [1, -1], + [-1, 0], [0, 0], [1, 0], + [-1, 1], [0, 1], [1, 1] + ]; + let mut z = [0f64; 25]; + for row in (0..rows).filter(|r| r % num_procs == tid) { + let mut data = vec![nodata; columns as usize]; + for col in 0..columns { + z4 = input.get_value(row, col); + if z4 != nodata { + for n in 0..9 { + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); + if z[n] != nodata { + z[n] *= z_factor; + } else { + z[n] = z4 * z_factor; + } + } + + // Calculate a, b, c, d, and e. 
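+ // In geographic coordinates the linear cell spacing changes with latitude, so the 3 x 3 window dimensions are measured for every cell: b is the distance between adjacent columns along the centre row; a and c are the corresponding column spacings along the rows below (row+1) and above (row-1); and d and e are the row spacings from the centre cell down to row+1 and up to row-1. Each distance is computed with either the haversine or Vincenty formula, as selected above.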
+ phi1 = input.get_y_from_row(row); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + b = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi2 = input.get_y_from_row(row+1); + lambda2 = lambda1; + + d = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi2 = input.get_y_from_row(row-1); + lambda2 = lambda1; + + e = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi1 = input.get_y_from_row(row+1); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + a = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + phi1 = input.get_y_from_row(row-1); + lambda1 = input.get_x_from_column(col); + + phi2 = phi1; + lambda2 = input.get_x_from_column(col-1); + + c = if use_haversine { + haversine_distance((phi1, lambda1), (phi2, lambda2)) + } else { + vincenty_distance((phi1, lambda1), (phi2, lambda2)) + }; + + /* + The following equations have been taken from Florinsky (2016) Principles and Methods + of Digital Terrain Modelling, Chapter 4, pg. 117. + */ + + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); + + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) + - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) + - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) + - 2. 
* (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); + + if p != 0f64 { // slope is greater than zero + data[col as usize] = (180f64 - (q / p).atan().to_degrees() + 90f64 * (p / p.abs()) - azimuth).abs(); + if data[col as usize] > 180.0 { + data[col as usize] = 360.0 - data[col as usize]; + } + } else { + data[col as usize] = -1f64; // undefined for flat surfaces } - data[col as usize] = z; - } else { - data[col as usize] = -1f64; } } + + tx.send((row, data)).expect("Error sending data to thread."); } - tx1.send((row, data)).unwrap(); - } - }); + }); + } } for row in 0..rows { diff --git a/whitebox-tools-app/src/tools/terrain_analysis/slope.rs b/whitebox-tools-app/src/tools/terrain_analysis/slope.rs index be355b4cf..c0aaf067d 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/slope.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/slope.rs @@ -296,7 +296,7 @@ impl WhiteboxTool for Slope { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -306,14 +306,13 @@ impl WhiteboxTool for Slope { /* The following equations have been taken from Florinsky (2016) Principles and Methods - of Digital Terrain Modelling, Chapter 4, pg. 117. Note that I believe Florinsky reversed - the equations for q and p. + of Digital Terrain Modelling, Chapter 4, pg. 117. */ - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -443,16 +442,16 @@ impl WhiteboxTool for Slope { of Digital Terrain Modelling, Chapter 4, pg. 117. */ - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. 
* (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); /* The following equation has been taken from Florinsky (2016) Principles and Methods diff --git a/whitebox-tools-app/src/tools/terrain_analysis/tan_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/tan_curvature.rs index 8e24ef9c4..7bbc97dfb 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/tan_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/tan_curvature.rs @@ -320,7 +320,7 @@ impl WhiteboxTool for TangentialCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { @@ -351,11 +351,11 @@ impl WhiteboxTool for TangentialCurvature { s = 1. / (100. * res * res) * (z[8] + z[16] - z[6] - z[18] + 4. * (z[4] + z[20] - z[0] - z[24]) + 2. * (z[3] + z[9] + z[15] + z[21] - z[1] - z[5] - z[19] - z[23])); - q = 1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + p = -1. / (420. * res) * (44. * (z[3] + z[23] - z[1] - z[21]) + 31. * (z[0] + z[20] - z[4] - z[24] + 2. * (z[8] + z[18] - z[6] - z[16])) + 17. * (z[14] - z[10] + 4. * (z[13] - z[11])) + 5. * (z[9] + z[19] - z[5] - z[15])); - p = 1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + q = -1. / (420. * res) * (44. * (z[5] + z[9] - z[15] - z[19]) + 31. * (z[20] + z[24] - z[0] - z[4] + 2. * (z[6] + z[8] - z[16] - z[18])) + 17. * (z[2] - z[22] + 4. * (z[7] - z[17])) + 5. * (z[1] + z[3] - z[21] - z[23])); @@ -519,16 +519,16 @@ impl WhiteboxTool for TangentialCurvature { s = (c * (a * a * (d + e) + b * b * e) * (z[2] - z[0]) - b * (a * a * d - c * c * e) * (z[3] - z[5]) + a * (c * c * (d + e) + b * b * d) * (z[6] - z[8])) / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); - p = (a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) - / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e))); + p = -((a * a * c * d * (d + e) * (z[2] - z[0]) + b * (a * a * d * d + c * c * e * e) * (z[5] - z[3]) + a * c * c * e * (d + e) * (z[8] - z[6])) + / (2. * (a * a * c * c * (d + e).powi(2) + b * b * (a * a * d * d + c * c * e * e)))); - q = 1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) + q = -(1. / (3. * d * e * (d + e) * (a.powi(4) + b.powi(4) + c.powi(4))) * ((d * d * (a.powi(4) + b.powi(4) + b * b * c * c) + c * c * e * e * (a * a - b * b)) * (z[0] + z[2]) - (d * d * (a.powi(4) + c.powi(4) + b * b * c * c) - e * e * (a.powi(4) + c.powi(4) + a * a * b * b)) * (z[3] + z[5]) - (e * e * (b.powi(4) + c.powi(4) + a * a * b * b) - a * a * d * d * (b * b - c * c)) * (z[6] + z[8]) + d * d * (b.powi(4) * (z[1] - 3. * z[4]) + c.powi(4) * (3. * z[1] - z[4]) + (a.powi(4) - 2. * b * b * c * c) * (z[1] - z[4])) + e * e * (a.powi(4) * (z[4] - 3. * z[7]) + b.powi(4) * (3. * z[4] - z[7]) + (c.powi(4) - 2. * a * a * b * b) * (z[4] - z[7])) - - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1])); + - 2. * (a * a * d * d * (b * b - c * c) * z[7] + c * c * e * e * (a * a - b * b) * z[1]))); if (p + q).abs() > 0. 
{ /* diff --git a/whitebox-tools-app/src/tools/terrain_analysis/total_curvature.rs b/whitebox-tools-app/src/tools/terrain_analysis/total_curvature.rs index cccc15713..bba844a02 100755 --- a/whitebox-tools-app/src/tools/terrain_analysis/total_curvature.rs +++ b/whitebox-tools-app/src/tools/terrain_analysis/total_curvature.rs @@ -314,7 +314,7 @@ impl WhiteboxTool for TotalCurvature { z12 = input.get_value(row, col); if z12 != nodata { for n in 0..25 { - z[n] = input.get_value(row + offsets[n][0], col + offsets[n][1]); + z[n] = input.get_value(row + offsets[n][1], col + offsets[n][0]); if z[n] != nodata { z[n] *= z_factor; } else { diff --git a/whitebox-vector/src/shapefile/geometry.rs b/whitebox-vector/src/shapefile/geometry.rs index e1bc5c259..f1871bf6b 100755 --- a/whitebox-vector/src/shapefile/geometry.rs +++ b/whitebox-vector/src/shapefile/geometry.rs @@ -174,9 +174,6 @@ impl ShapefileGeometry { /// Adds a part of Point2Ds, measures, and z-values to the ShapefileGeometry. pub fn add_partz(&mut self, points: &[Point2D], measures: &[f64], z_values: &[f64]) { - if points.len() != measures.len() { - panic!("Error adding part to ShapefileGeometry. Points and measures array must be equal length."); - } if points.len() != z_values.len() { panic!( "Error adding part to ShapefileGeometry. Points and z array must be equal length." @@ -184,11 +181,9 @@ impl ShapefileGeometry { } self.parts.push(self.points.len() as i32); let mut p: Point2D; - let mut m: f64; let mut z: f64; for i in 0..points.len() { p = points[i]; - m = measures[i]; z = z_values[i]; self.points.push(p); if p.x < self.x_min { @@ -203,13 +198,7 @@ impl ShapefileGeometry { if p.y > self.y_max { self.y_max = p.y; } - if m < self.m_min { - self.m_min = m; - } - if m > self.m_max { - self.m_max = m; - } - self.m_array.push(m); + if z < self.z_min { self.z_min = z; } @@ -218,6 +207,22 @@ impl ShapefileGeometry { } self.z_array.push(z); } + + if points.len() != measures.len() && measures.len() != 0 { + panic!("Error adding part to ShapefileGeometry. 
Points and measures array must be equal length or measures must be zero-length."); + } + let mut m: f64; + for i in 0..measures.len() { + m = measures[i]; + if m < self.m_min { + self.m_min = m; + } + if m > self.m_max { + self.m_max = m; + } + self.m_array.push(m); + } + self.num_points += points.len() as i32; self.num_parts += 1i32; } @@ -258,22 +263,13 @@ impl ShapefileGeometry { } else { 40 + 16 * self.num_points + 16 + 8 * self.num_points } - // 68i32 + self.num_points * 32i32 } ShapeType::PolyLineZ | ShapeType::PolygonZ => { - // 44 + 4*NumParts + 16*NumPoints + 16 + 8*NumPoints if self.has_m_data() { - 44i32 - + 4 * self.num_parts - + 16 * self.num_points - + 16 - + 8 * self.num_points - + 16 - + 8 * self.num_points + 32i32 + 4 + 4 + 4 * self.num_parts + 16 * self.num_points + 16 + 8 * self.num_points + 16 + 8 * self.num_points } else { - 44i32 + 4 * self.num_parts + 16 * self.num_points + 16 + 8 * self.num_points + 40i32 + 4 * self.num_parts + 16 * self.num_points + 16 + 8 * self.num_points } - // 72i32 + self.num_parts * 4i32 + self.num_points * 32i32 } }; diff --git a/whitebox_tools.py b/whitebox_tools.py index 4ef75f84d..444444db8 100755 --- a/whitebox_tools.py +++ b/whitebox_tools.py @@ -723,6 +723,7 @@ def activate_license(self): + ############## @@ -835,7 +836,7 @@ def fix_dangling_arcs(self, i, output, dist="", callback=None): args.append("--dist={}".format(dist)) return self.run_tool('fix_dangling_arcs', args, callback) # returns 1 if error - def join_tables(self, input1, pkey, input2, fkey, import_field, callback=None): + def join_tables(self, input1, pkey, input2, fkey, import_field=None, callback=None): """Merge a vector's attribute table with another table based on a common field. Keyword arguments: @@ -852,7 +853,7 @@ def join_tables(self, input1, pkey, input2, fkey, import_field, callback=None): args.append("--pkey='{}'".format(pkey)) args.append("--input2='{}'".format(input2)) args.append("--fkey='{}'".format(fkey)) - args.append("--import_field='{}'".format(import_field)) + if import_field is not None: args.append("--import_field='{}'".format(import_field)) return self.run_tool('join_tables', args, callback) # returns 1 if error def lines_to_polygons(self, i, output, callback=None): @@ -1365,7 +1366,7 @@ def eliminate_coincident_points(self, i, output, tolerance, callback=None): Keyword arguments: i -- Input vector file. - output -- Output vector polygon file. + output -- Output vector points file. tolerance -- The distance tolerance for points. callback -- Custom function for handling tool text outputs. """ @@ -2482,7 +2483,7 @@ def weighted_overlay(self, factors, weights, output, cost=None, constraints=None factors -- Input factor raster files. weights -- Weight values, contained in quotes and separated by commas or semicolons. Must have the same number as factors. - cost -- Weight values, contained in quotes and separated by commas or semicolons. Must have the same number as factors. + cost -- Boolean array indicating which factors are cost factors, contained in quotes and separated by commas or semicolons. Must have the same number as factors. constraints -- Input constraints raster files. output -- Output raster file. scale_max -- Suitability scale maximum value (common values are 1.0, 100.0, and 255.0). 
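The join_tables change above makes the import_field argument optional; when it is omitted, the wrapper simply leaves --import_field off the command line. A minimal usage sketch of the updated signature (the file and field names here are hypothetical):

from whitebox_tools import WhiteboxTools

wbt = WhiteboxTools()
wbt.set_working_dir("/path/to/data")  # hypothetical working directory

# Join the attribute table of a second vector onto the primary layer, matching the
# hypothetical 'FID' field in input1 to 'ID' in input2. Because import_field is
# omitted, no --import_field argument is appended to the command line.
wbt.join_tables(input1="parcels.shp", pkey="FID", input2="owners.shp", fkey="ID")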
@@ -2775,6 +2776,24 @@ def average_normal_vector_angular_deviation(self, dem, output, filter=11, callba args.append("--filter={}".format(filter)) return self.run_tool('average_normal_vector_angular_deviation', args, callback) # returns 1 if error + def breakline_mapping(self, dem, output, threshold=2.0, min_length=3, callback=None): + """This tool maps breaklines from an input DEM. + + Keyword arguments: + + dem -- Name of the input raster image file. + output -- Name of the output vector lines file. + threshold -- Threshold value (0 - infinity but typically 1 to 5 works well). + min_length -- Minimum line length, in grid cells. + callback -- Custom function for handling tool text outputs. + """ + args = [] + args.append("--dem='{}'".format(dem)) + args.append("--output='{}'".format(output)) + args.append("--threshold={}".format(threshold)) + args.append("--min_length={}".format(min_length)) + return self.run_tool('breakline_mapping', args, callback) # returns 1 if error + def circular_variance_of_aspect(self, dem, output, filter=11, callback=None): """Calculates the circular variance of aspect at a scale for a DEM. @@ -3227,17 +3246,19 @@ def generating_function(self, dem, output, log=False, zfactor=1.0, callback=None args.append("--zfactor={}".format(zfactor)) return self.run_tool('generating_function', args, callback) # returns 1 if error - def geomorphons(self, dem, output, search=50, threshold=0.0, tdist=0, forms=True, callback=None): + def geomorphons(self, dem, output, search=50, threshold=0.0, fdist=0, skip=0, forms=True, residuals=False, callback=None): """Computes geomorphon patterns. Keyword arguments: dem -- Input raster DEM file. output -- Output raster file. - search -- Look up distance. + search -- Look up distance (in cells). threshold -- Flatness threshold for the classification function (in degrees). - tdist -- Distance (in cells) to begin reducing the flatness threshold to avoid problems with pseudo-flat lines-of-sight. - forms -- Classify geomorphons into 10 common land morphologies, else, output ternary code. + fdist -- Distance (in cells) to begin reducing the flatness threshold to avoid problems with pseudo-flat lines-of-sight. + skip -- Distance (in cells) to begin calculating lines-of-sight. + forms -- Classify geomorphons into 10 common land morphologies, else output ternary pattern. + residuals -- Convert elevation to residuals of a linear model. callback -- Custom function for handling tool text outputs. """ args = [] @@ -3245,8 +3266,10 @@ def geomorphons(self, dem, output, search=50, threshold=0.0, tdist=0, forms=True args.append("--output='{}'".format(output)) args.append("--search={}".format(search)) args.append("--threshold={}".format(threshold)) - args.append("--tdist={}".format(tdist)) + args.append("--fdist={}".format(fdist)) + args.append("--skip={}".format(skip)) if forms: args.append("--forms") + if residuals: args.append("--residuals") return self.run_tool('geomorphons', args, callback) # returns 1 if error def hillshade(self, dem, output, azimuth=315.0, altitude=30.0, zfactor=None, callback=None): @@ -3332,7 +3355,7 @@ def hypsometrically_tinted_hillshade(self, dem, output, altitude=45.0, hs_weight hs_weight -- Weight given to hillshade relative to relief (0.0-1.0). brightness -- Brightness factor (0.0-1.0). atmospheric -- Atmospheric effects weight (0.0-1.0). - palette -- Options include 'atlas', 'high_relief', 'arid', 'soft', 'muted', 'purple', 'viridi', 'gn_yl', 'pi_y_g', 'bl_yl_rd', and 'deep'. 
+ palette -- Options include 'atlas', 'high_relief', 'arid', 'soft', 'muted', 'purple', 'viridis', 'gn_yl', 'pi_y_g', 'bl_yl_rd', and 'deep'. reverse -- Optional flag indicating whether to use reverse the palette. zfactor -- Optional multiplier for when the vertical and horizontal units are not the same. full_mode -- Optional flag indicating whether to use full 360-degrees of illumination sources. @@ -4017,20 +4040,18 @@ def rotor(self, dem, output, log=False, zfactor=1.0, callback=None): args.append("--zfactor={}".format(zfactor)) return self.run_tool('rotor', args, callback) # returns 1 if error - def ruggedness_index(self, dem, output, zfactor=None, callback=None): + def ruggedness_index(self, dem, output, callback=None): """Calculates the Riley et al.'s (1999) terrain ruggedness index from an input DEM. Keyword arguments: dem -- Input raster DEM file. output -- Output raster file. - zfactor -- Optional multiplier for when the vertical and horizontal units are not the same. callback -- Custom function for handling tool text outputs. """ args = [] args.append("--dem='{}'".format(dem)) args.append("--output='{}'".format(output)) - if zfactor is not None: args.append("--zfactor='{}'".format(zfactor)) return self.run_tool('ruggedness_index', args, callback) # returns 1 if error def sediment_transport_index(self, sca, slope, output, sca_exponent=0.4, slope_exponent=1.3, callback=None): @@ -5261,22 +5282,6 @@ def num_inflowing_neighbours(self, dem, output, callback=None): args.append("--output='{}'".format(output)) return self.run_tool('num_inflowing_neighbours', args, callback) # returns 1 if error - def pilesjo_hasan(self, dem, output, exponent=1.0, callback=None): - """This tool calculates Pilesjo and Hasan (2014) flow accumulation. - - Keyword arguments: - - dem -- Name of the input DEM raster file; must be depressionless. - output -- Name of the output raster file. - exponent -- Optional exponent parameter; default is 1.0. - callback -- Custom function for handling tool text outputs. - """ - args = [] - args.append("--dem='{}'".format(dem)) - args.append("--output='{}'".format(output)) - args.append("--exponent={}".format(exponent)) - return self.run_tool('pilesjo_hasan', args, callback) # returns 1 if error - def qin_flow_accumulation(self, dem, output, out_type="specific contributing area", exponent=10.0, max_slope=45.0, threshold=None, log=False, clip=False, callback=None): """This tool calculates Qin et al. (2007) flow accumulation. @@ -6245,6 +6250,24 @@ def gaussian_filter(self, i, output, sigma=0.75, callback=None): args.append("--sigma={}".format(sigma)) return self.run_tool('gaussian_filter', args, callback) # returns 1 if error + def high_pass_bilateral_filter(self, i, output, sigma_dist=0.75, sigma_int=1.0, callback=None): + """Performs a high-pass bilateral filter, calculated by differencing the bilateral-filtered image (Tomasi and Manduchi, 1998) from the input image. + + Keyword arguments: + + i -- Input raster file. + output -- Output raster file. + sigma_dist -- Standard deviation in distance in pixels. + sigma_int -- Standard deviation in intensity in pixels. + callback -- Custom function for handling tool text outputs. 
+ """ + args = [] + args.append("--input='{}'".format(i)) + args.append("--output='{}'".format(output)) + args.append("--sigma_dist={}".format(sigma_dist)) + args.append("--sigma_int={}".format(sigma_int)) + return self.run_tool('high_pass_bilateral_filter', args, callback) # returns 1 if error + def high_pass_filter(self, i, output, filterx=11, filtery=11, callback=None): """Performs a high-pass filter on an input image. @@ -8743,6 +8766,38 @@ def conditional_evaluation(self, i, output, statement="", true=None, false=None, args.append("--output='{}'".format(output)) return self.run_tool('conditional_evaluation', args, callback) # returns 1 if error + def conditioned_latin_hypercube(self, inputs, output, samples=500, iterations=25000, seed=None, prob=0.5, threshold=None, temp=1.0, temp_decay=0.05, cycle=10, average=False, callback=None): + """Implements conditioned Latin Hypercube sampling. + + Keyword arguments: + + inputs -- Name of the input raster file. + output -- Output shapefile. + samples -- Number of sample sites returned. + iterations -- Maximum iterations (if stopping criteria not reached). + seed -- Seed for RNG consistency. + prob -- Probability of random resample or resampling worst strata between [0,1]. + threshold -- Objective function values below the theshold stop the resampling iterations. + temp -- Initial annealing temperature between [0,1]. + temp_decay -- Annealing temperature decay proportion between [0,1]. Reduce temperature by this proportion each annealing cycle. + cycle -- Number of iterations before decaying annealing temperature. + average -- Weight the continuous objective funtion by the 1/N contributing strata. + callback -- Custom function for handling tool text outputs. + """ + args = [] + args.append("--inputs='{}'".format(inputs)) + args.append("--output='{}'".format(output)) + args.append("--samples={}".format(samples)) + args.append("--iterations={}".format(iterations)) + if seed is not None: args.append("--seed='{}'".format(seed)) + args.append("--prob={}".format(prob)) + if threshold is not None: args.append("--threshold='{}'".format(threshold)) + args.append("--temp={}".format(temp)) + args.append("--temp_decay={}".format(temp_decay)) + args.append("--cycle={}".format(cycle)) + if average: args.append("--average") + return self.run_tool('conditioned_latin_hypercube', args, callback) # returns 1 if error + def cos(self, i, output, callback=None): """Returns the cosine (cos) of each values in a raster.